776020.c
/*
 * Author: Tatu Ylonen <[email protected]>
 * Copyright (c) 1995 Tatu Ylonen <[email protected]>, Espoo, Finland
 *                    All rights reserved
 *
 * As far as I am concerned, the code I have written for this software
 * can be used freely for any purpose.  Any derived versions of this
 * software must be clearly marked as such, and if the derived work is
 * incompatible with the protocol description in the RFC file, it must be
 * called by a name other than "ssh" or "Secure Shell".
 *
 *
 * Copyright (c) 1999 Niels Provos.  All rights reserved.
 * Copyright (c) 1999, 2000 Markus Friedl.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "includes.h"
RCSID("$OpenBSD: cipher.c,v 1.77 2005/07/16 01:35:24 djm Exp $");

#include "xmalloc.h"
#include "log.h"
#include "cipher.h"

#include <openssl/md5.h>

/* compatibility with old or broken OpenSSL versions */
#include "openbsd-compat/openssl-compat.h"

extern const EVP_CIPHER *evp_ssh1_bf(void);
extern const EVP_CIPHER *evp_ssh1_3des(void);
extern void ssh1_3des_iv(EVP_CIPHER_CTX *, int, u_char *, int);
extern const EVP_CIPHER *evp_aes_128_ctr(void);
extern void ssh_aes_ctr_iv(EVP_CIPHER_CTX *, int, u_char *, u_int);

struct Cipher {
    char *name;
    int number;                 /* for ssh1 only */
    u_int block_size;
    u_int key_len;
    u_int discard_len;
    const EVP_CIPHER *(*evptype)(void);
} ciphers[] = {
    { "none",           SSH_CIPHER_NONE, 8, 0, 0, EVP_enc_null },
    { "des",            SSH_CIPHER_DES, 8, 8, 0, EVP_des_cbc },
    { "3des",           SSH_CIPHER_3DES, 8, 16, 0, evp_ssh1_3des },
    { "blowfish",       SSH_CIPHER_BLOWFISH, 8, 32, 0, evp_ssh1_bf },
    { "3des-cbc",       SSH_CIPHER_SSH2, 8, 24, 0, EVP_des_ede3_cbc },
    { "blowfish-cbc",   SSH_CIPHER_SSH2, 8, 16, 0, EVP_bf_cbc },
    { "cast128-cbc",    SSH_CIPHER_SSH2, 8, 16, 0, EVP_cast5_cbc },
    { "arcfour",        SSH_CIPHER_SSH2, 8, 16, 0, EVP_rc4 },
    { "arcfour128",     SSH_CIPHER_SSH2, 8, 16, 1536, EVP_rc4 },
    { "arcfour256",     SSH_CIPHER_SSH2, 8, 32, 1536, EVP_rc4 },
    { "aes128-cbc",     SSH_CIPHER_SSH2, 16, 16, 0, EVP_aes_128_cbc },
    { "aes192-cbc",     SSH_CIPHER_SSH2, 16, 24, 0, EVP_aes_192_cbc },
    { "aes256-cbc",     SSH_CIPHER_SSH2, 16, 32, 0, EVP_aes_256_cbc },
    { "[email protected]",
                        SSH_CIPHER_SSH2, 16, 32, 0, EVP_aes_256_cbc },
    { "aes128-ctr",     SSH_CIPHER_SSH2, 16, 16, 0, evp_aes_128_ctr },
    { "aes192-ctr",     SSH_CIPHER_SSH2, 16, 24, 0, evp_aes_128_ctr },
    { "aes256-ctr",     SSH_CIPHER_SSH2, 16, 32, 0, evp_aes_128_ctr },
#ifdef USE_CIPHER_ACSS
    { "[email protected]",
                        SSH_CIPHER_SSH2, 16, 5, 0, EVP_acss },
#endif
    { NULL,             SSH_CIPHER_INVALID, 0, 0, 0, NULL }
};

/*--*/

u_int
cipher_blocksize(const Cipher *c)
{
    return (c->block_size);
}

u_int
cipher_keylen(const Cipher *c)
{
    return (c->key_len);
}

u_int
cipher_get_number(const Cipher *c)
{
    return (c->number);
}

u_int
cipher_mask_ssh1(int client)
{
    u_int mask = 0;
    mask |= 1 << SSH_CIPHER_3DES;       /* Mandatory */
    mask |= 1 << SSH_CIPHER_BLOWFISH;
    if (client) {
        mask |= 1 << SSH_CIPHER_DES;
    }
    return mask;
}

Cipher *
cipher_by_name(const char *name)
{
    Cipher *c;
    for (c = ciphers; c->name != NULL; c++)
        if (strcmp(c->name, name) == 0)
            return c;
    return NULL;
}

Cipher *
cipher_by_number(int id)
{
    Cipher *c;
    for (c = ciphers; c->name != NULL; c++)
        if (c->number == id)
            return c;
    return NULL;
}

#define CIPHER_SEP ","
int
ciphers_valid(const char *names)
{
    Cipher *c;
    char *cipher_list, *cp;
    char *p;

    if (names == NULL || strcmp(names, "") == 0)
        return 0;
    cipher_list = cp = xstrdup(names);
    for ((p = strsep(&cp, CIPHER_SEP)); p && *p != '\0';
        (p = strsep(&cp, CIPHER_SEP))) {
        c = cipher_by_name(p);
        if (c == NULL || c->number != SSH_CIPHER_SSH2) {
            debug("bad cipher %s [%s]", p, names);
            xfree(cipher_list);
            return 0;
        } else {
            debug3("cipher ok: %s [%s]", p, names);
        }
    }
    debug3("ciphers ok: [%s]", names);
    xfree(cipher_list);
    return 1;
}

/*
 * Parses the name of the cipher.  Returns the number of the corresponding
 * cipher, or -1 on error.
 */
int
cipher_number(const char *name)
{
    Cipher *c;
    if (name == NULL)
        return -1;
    for (c = ciphers; c->name != NULL; c++)
        if (strcasecmp(c->name, name) == 0)
            return c->number;
    return -1;
}

char *
cipher_name(int id)
{
    Cipher *c = cipher_by_number(id);
    return (c == NULL) ? "<unknown>" : c->name;
}

void
cipher_init(CipherContext *cc, Cipher *cipher,
    const u_char *key, u_int keylen, const u_char *iv, u_int ivlen,
    int do_encrypt)
{
    static int dowarn = 1;
#ifdef SSH_OLD_EVP
    EVP_CIPHER *type;
#else
    const EVP_CIPHER *type;
    int klen;
#endif
    u_char *junk, *discard;

    if (cipher->number == SSH_CIPHER_DES) {
        if (dowarn) {
            error("Warning: use of DES is strongly discouraged "
                "due to cryptographic weaknesses");
            dowarn = 0;
        }
        if (keylen > 8)
            keylen = 8;
    }
    cc->plaintext = (cipher->number == SSH_CIPHER_NONE);

    if (keylen < cipher->key_len)
        fatal("cipher_init: key length %d is insufficient for %s.",
            keylen, cipher->name);
    if (iv != NULL && ivlen < cipher->block_size)
        fatal("cipher_init: iv length %d is insufficient for %s.",
            ivlen, cipher->name);
    cc->cipher = cipher;

    type = (*cipher->evptype)();

    EVP_CIPHER_CTX_init(&cc->evp);
#ifdef SSH_OLD_EVP
    if (type->key_len > 0 && type->key_len != keylen) {
        debug("cipher_init: set keylen (%d -> %d)",
            type->key_len, keylen);
        type->key_len = keylen;
    }
    EVP_CipherInit(&cc->evp, type, (u_char *)key, (u_char *)iv,
        (do_encrypt == CIPHER_ENCRYPT));
#else
    if (EVP_CipherInit(&cc->evp, type, NULL, (u_char *)iv,
        (do_encrypt == CIPHER_ENCRYPT)) == 0)
        fatal("cipher_init: EVP_CipherInit failed for %s",
            cipher->name);
    klen = EVP_CIPHER_CTX_key_length(&cc->evp);
    if (klen > 0 && keylen != (u_int)klen) {
        debug2("cipher_init: set keylen (%d -> %d)", klen, keylen);
        if (EVP_CIPHER_CTX_set_key_length(&cc->evp, keylen) == 0)
            fatal("cipher_init: set keylen failed (%d -> %d)",
                klen, keylen);
    }
    if (EVP_CipherInit(&cc->evp, NULL, (u_char *)key, NULL, -1) == 0)
        fatal("cipher_init: EVP_CipherInit: set key failed for %s",
            cipher->name);
#endif

    if (cipher->discard_len > 0) {
        junk = xmalloc(cipher->discard_len);
        discard = xmalloc(cipher->discard_len);
        if (EVP_Cipher(&cc->evp, discard, junk,
            cipher->discard_len) == 0)
            fatal("evp_crypt: EVP_Cipher failed during discard");
        memset(discard, 0, cipher->discard_len);
        xfree(junk);
        xfree(discard);
    }
}

void
cipher_crypt(CipherContext *cc, u_char *dest, const u_char *src, u_int len)
{
    if (len % cc->cipher->block_size)
        fatal("cipher_encrypt: bad plaintext length %d", len);
    if (EVP_Cipher(&cc->evp, dest, (u_char *)src, len) == 0)
        fatal("evp_crypt: EVP_Cipher failed");
}

void
cipher_cleanup(CipherContext *cc)
{
    if (EVP_CIPHER_CTX_cleanup(&cc->evp) == 0)
        error("cipher_cleanup: EVP_CIPHER_CTX_cleanup failed");
}

/*
 * Selects the cipher, and keys it by computing the MD5 checksum of the
 * passphrase and using the resulting 16 bytes as the key.
 */
void
cipher_set_key_string(CipherContext *cc, Cipher *cipher,
    const char *passphrase, int do_encrypt)
{
    MD5_CTX md;
    u_char digest[16];

    MD5_Init(&md);
    MD5_Update(&md, (const u_char *)passphrase, strlen(passphrase));
    MD5_Final(digest, &md);

    cipher_init(cc, cipher, digest, 16, NULL, 0, do_encrypt);

    memset(digest, 0, sizeof(digest));
    memset(&md, 0, sizeof(md));
}

/*
 * Exports an IV from the CipherContext required to export the key
 * state back from the unprivileged child to the privileged parent
 * process.
 */
int
cipher_get_keyiv_len(const CipherContext *cc)
{
    Cipher *c = cc->cipher;
    int ivlen;

    if (c->number == SSH_CIPHER_3DES)
        ivlen = 24;
    else
        ivlen = EVP_CIPHER_CTX_iv_length(&cc->evp);
    return (ivlen);
}

void
cipher_get_keyiv(CipherContext *cc, u_char *iv, u_int len)
{
    Cipher *c = cc->cipher;
    int evplen;

    switch (c->number) {
    case SSH_CIPHER_SSH2:
    case SSH_CIPHER_DES:
    case SSH_CIPHER_BLOWFISH:
        evplen = EVP_CIPHER_CTX_iv_length(&cc->evp);
        if (evplen <= 0)
            return;
        if ((u_int)evplen != len)
            fatal("%s: wrong iv length %d != %d", __func__,
                evplen, len);
#if OPENSSL_VERSION_NUMBER < 0x00907000L
        if (c->evptype == evp_rijndael)
            ssh_rijndael_iv(&cc->evp, 0, iv, len);
        else
#endif
        if (c->evptype == evp_aes_128_ctr)
            ssh_aes_ctr_iv(&cc->evp, 0, iv, len);
        else
            memcpy(iv, cc->evp.iv, len);
        break;
    case SSH_CIPHER_3DES:
        ssh1_3des_iv(&cc->evp, 0, iv, 24);
        break;
    default:
        fatal("%s: bad cipher %d", __func__, c->number);
    }
}

void
cipher_set_keyiv(CipherContext *cc, u_char *iv)
{
    Cipher *c = cc->cipher;
    int evplen = 0;

    switch (c->number) {
    case SSH_CIPHER_SSH2:
    case SSH_CIPHER_DES:
    case SSH_CIPHER_BLOWFISH:
        evplen = EVP_CIPHER_CTX_iv_length(&cc->evp);
        if (evplen == 0)
            return;
#if OPENSSL_VERSION_NUMBER < 0x00907000L
        if (c->evptype == evp_rijndael)
            ssh_rijndael_iv(&cc->evp, 1, iv, evplen);
        else
#endif
        if (c->evptype == evp_aes_128_ctr)
            ssh_aes_ctr_iv(&cc->evp, 1, iv, evplen);
        else
            memcpy(cc->evp.iv, iv, evplen);
        break;
    case SSH_CIPHER_3DES:
        ssh1_3des_iv(&cc->evp, 1, iv, 24);
        break;
    default:
        fatal("%s: bad cipher %d", __func__, c->number);
    }
}

#if OPENSSL_VERSION_NUMBER < 0x00907000L
#define EVP_X_STATE(evp)        &(evp).c
#define EVP_X_STATE_LEN(evp)    sizeof((evp).c)
#else
#define EVP_X_STATE(evp)        (evp).cipher_data
#define EVP_X_STATE_LEN(evp)    (evp).cipher->ctx_size
#endif

int
cipher_get_keycontext(const CipherContext *cc, u_char *dat)
{
    Cipher *c = cc->cipher;
    int plen = 0;

    if (c->evptype == EVP_rc4 || c->evptype == EVP_acss) {
        plen = EVP_X_STATE_LEN(cc->evp);
        if (dat == NULL)
            return (plen);
        memcpy(dat, EVP_X_STATE(cc->evp), plen);
    }
    return (plen);
}

void
cipher_set_keycontext(CipherContext *cc, u_char *dat)
{
    Cipher *c = cc->cipher;
    int plen;

    if (c->evptype == EVP_rc4 || c->evptype == EVP_acss) {
        plen = EVP_X_STATE_LEN(cc->evp);
        memcpy(EVP_X_STATE(cc->evp), dat, plen);
    }
}
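A brief illustrative sketch (not part of the original cipher.c) of how a caller might drive the API above: look the cipher up by its SSH2 name, key the context, encrypt, and clean up. The helper name example_encrypt_block is an assumption, as is compilation inside the OpenSSH tree where "cipher.h" and its types are available.

/*
 * Hypothetical usage sketch: encrypt a single 16-byte block with
 * "aes128-cbc".  Error handling is elided for brevity.
 */
static void
example_encrypt_block(const u_char key[16], const u_char iv[16],
    const u_char plain[16], u_char out[16])
{
    CipherContext cc;
    Cipher *c;

    c = cipher_by_name("aes128-cbc");   /* SSH2 name lookup */
    cipher_init(&cc, c, key, 16, iv, 16, CIPHER_ENCRYPT);
    cipher_crypt(&cc, out, plain, 16);  /* len must be a multiple of the
                                         * cipher's block size */
    cipher_cleanup(&cc);
}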
794835.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#include <assert.h>
#include <string.h>
#include <errno.h>
#include "nimble/nimble_opt.h"
#include "host/ble_hs_adv.h"
#include "host/ble_hs_hci.h"
#include "ble_hs_priv.h"

#if MYNEWT
#include "bsp/bsp.h"
#else
#define bssnz_t
#endif

/**
 * GAP - Generic Access Profile.
 *
 * Design overview:
 *
 * GAP procedures are initiated by the application via function calls.  Such
 * functions return when either of the following happens:
 *
 * (1) The procedure completes (success or failure).
 * (2) The procedure cannot proceed until a BLE peer responds.
 *
 * For (1), the result of the procedure is fully indicated by the function
 * return code.
 * For (2), the procedure result is indicated by an application-configured
 * callback.  The callback is executed when the procedure completes.
 *
 * The GAP is always in one of two states:
 * 1. Free
 * 2. Preempted
 *
 * While GAP is in the free state, new procedures can be started at will.
 * While GAP is in the preempted state, no new procedures are allowed.  The
 * host sets GAP to the preempted state when it needs to ensure no ongoing
 * procedures, a condition required for some HCI commands to succeed.  The
 * host must take care to take GAP out of the preempted state as soon as
 * possible.
 *
 * Notes on thread-safety:
 * 1. The ble_hs mutex must always be unlocked when an application callback is
 *    executed.  The purpose of this requirement is to allow callbacks to
 *    initiate additional host procedures, which may require locking of the
 *    mutex.
 * 2. Functions called directly by the application never call callbacks.
 *    Generally, these functions lock the ble_hs mutex at the start, and only
 *    unlock it at return.
 * 3. Functions which do call callbacks (receive handlers and timer
 *    expirations) generally only lock the mutex long enough to modify
 *    affected state and make copies of data needed for the callback.  A copy
 *    of various pieces of data is called a "snapshot" (struct
 *    ble_gap_snapshot).  The sole purpose of snapshots is to allow callbacks
 *    to be executed after unlocking the mutex.
 */

/** GAP procedure op codes. */
#define BLE_GAP_OP_NULL             0
#define BLE_GAP_OP_M_DISC           1
#define BLE_GAP_OP_M_CONN           2
#define BLE_GAP_OP_S_ADV            1
#define BLE_GAP_OP_S_PERIODIC_ADV   2
#define BLE_GAP_OP_SYNC             1

/**
 * If an attempt to cancel an active procedure fails, the attempt is retried
 * at this rate (ms).
 */
#define BLE_GAP_CANCEL_RETRY_TIMEOUT_MS     100     /* ms */

#define BLE_GAP_UPDATE_TIMEOUT_MS           40000   /* ms */

#if MYNEWT_VAL(BLE_ROLE_CENTRAL)
static const struct ble_gap_conn_params ble_gap_conn_params_dflt = {
    .scan_itvl = 0x0010,
    .scan_window = 0x0010,
    .itvl_min = BLE_GAP_INITIAL_CONN_ITVL_MIN,
    .itvl_max = BLE_GAP_INITIAL_CONN_ITVL_MAX,
    .latency = BLE_GAP_INITIAL_CONN_LATENCY,
    .supervision_timeout = BLE_GAP_INITIAL_SUPERVISION_TIMEOUT,
    .min_ce_len = BLE_GAP_INITIAL_CONN_MIN_CE_LEN,
    .max_ce_len = BLE_GAP_INITIAL_CONN_MAX_CE_LEN,
};
#endif

/**
 * The state of the in-progress master connection.  If no master connection is
 * currently in progress, then the op field is set to BLE_GAP_OP_NULL.
 */
struct ble_gap_master_state {
    uint8_t op;

    uint8_t exp_set:1;
    ble_npl_time_t exp_os_ticks;

    ble_gap_event_fn *cb;
    void *cb_arg;

    /**
     * Indicates the type of master procedure that was preempted, or
     * BLE_GAP_OP_NULL if no procedure was preempted.
     */
    uint8_t preempted_op;

    union {
        struct {
            uint8_t using_wl:1;
            uint8_t our_addr_type:2;
            uint8_t cancel:1;
        } conn;

        struct {
            uint8_t limited:1;
        } disc;
    };
};
static bssnz_t struct ble_gap_master_state ble_gap_master;

#if MYNEWT_VAL(BLE_PERIODIC_ADV)
/**
 * The state of the in-progress sync creation.  If no sync creation is
 * currently in progress, then the op field is set to BLE_GAP_OP_NULL.
 */
struct ble_gap_sync_state {
    uint8_t op;
    struct ble_hs_periodic_sync *psync;

    ble_gap_event_fn *cb;
    void *cb_arg;
};
static bssnz_t struct ble_gap_sync_state ble_gap_sync;
#endif

/**
 * The state of the in-progress slave connection.  If no slave connection is
 * currently in progress, then the op field is set to BLE_GAP_OP_NULL.
 */
struct ble_gap_slave_state {
    uint8_t op;

    unsigned int our_addr_type:2;
    unsigned int preempted:1;   /** Set to 1 if advertising was preempted. */
    unsigned int connectable:1;

#if MYNEWT_VAL(BLE_EXT_ADV)
    unsigned int configured:1;  /** If instance is configured */
    unsigned int scannable:1;
    unsigned int directed:1;
    unsigned int high_duty_directed:1;
    unsigned int legacy_pdu:1;
    unsigned int rnd_addr_set:1;
#if MYNEWT_VAL(BLE_PERIODIC_ADV)
    unsigned int periodic_configured:1;
    uint8_t periodic_op;
#endif
    uint8_t rnd_addr[6];
#else
    /* Timer is used only with legacy advertising. */
    unsigned int exp_set:1;
    ble_npl_time_t exp_os_ticks;
#endif

    ble_gap_event_fn *cb;
    void *cb_arg;
};
static bssnz_t struct ble_gap_slave_state ble_gap_slave[BLE_ADV_INSTANCES];

struct ble_gap_update_entry {
    SLIST_ENTRY(ble_gap_update_entry) next;
    struct ble_gap_upd_params params;
    ble_npl_time_t exp_os_ticks;
    uint16_t conn_handle;
};
SLIST_HEAD(ble_gap_update_entry_list, ble_gap_update_entry);

struct ble_gap_snapshot {
    struct ble_gap_conn_desc *desc;
    ble_gap_event_fn *cb;
    void *cb_arg;
};

static SLIST_HEAD(ble_gap_hook_list, ble_gap_event_listener)
    ble_gap_event_listener_list;

static os_membuf_t ble_gap_update_entry_mem[
    OS_MEMPOOL_SIZE(MYNEWT_VAL(BLE_GAP_MAX_PENDING_CONN_PARAM_UPDATE),
                    sizeof (struct ble_gap_update_entry))];
static struct os_mempool ble_gap_update_entry_pool;
static struct ble_gap_update_entry_list ble_gap_update_entries;

static void ble_gap_update_entry_free(struct ble_gap_update_entry *entry);

#if NIMBLE_BLE_CONNECT
static struct ble_gap_update_entry *
ble_gap_update_entry_find(uint16_t conn_handle,
                          struct ble_gap_update_entry **out_prev);

static void
ble_gap_update_l2cap_cb(uint16_t conn_handle, int status, void *arg);
#endif

static struct ble_gap_update_entry *
ble_gap_update_entry_remove(uint16_t conn_handle);

#if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV)
static int ble_gap_adv_enable_tx(int enable);
#endif
static int ble_gap_conn_cancel_tx(void);
#if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV)
static int ble_gap_disc_enable_tx(int enable, int filter_duplicates);
#endif

STATS_SECT_DECL(ble_gap_stats) ble_gap_stats;
STATS_NAME_START(ble_gap_stats)
    STATS_NAME(ble_gap_stats, wl_set)
    STATS_NAME(ble_gap_stats, wl_set_fail)
    STATS_NAME(ble_gap_stats, adv_stop)
    STATS_NAME(ble_gap_stats, adv_stop_fail)
    STATS_NAME(ble_gap_stats, adv_start)
    STATS_NAME(ble_gap_stats, adv_start_fail)
    STATS_NAME(ble_gap_stats, adv_set_data)
    STATS_NAME(ble_gap_stats, adv_set_data_fail)
    STATS_NAME(ble_gap_stats, adv_rsp_set_data)
    STATS_NAME(ble_gap_stats, adv_rsp_set_data_fail)
    STATS_NAME(ble_gap_stats, discover)
    STATS_NAME(ble_gap_stats, discover_fail)
    STATS_NAME(ble_gap_stats, initiate)
    STATS_NAME(ble_gap_stats, initiate_fail)
    STATS_NAME(ble_gap_stats, terminate)
    STATS_NAME(ble_gap_stats, terminate_fail)
    STATS_NAME(ble_gap_stats, cancel)
    STATS_NAME(ble_gap_stats, cancel_fail)
    STATS_NAME(ble_gap_stats, update)
    STATS_NAME(ble_gap_stats, update_fail)
    STATS_NAME(ble_gap_stats, connect_mst)
    STATS_NAME(ble_gap_stats, connect_slv)
    STATS_NAME(ble_gap_stats, disconnect)
    STATS_NAME(ble_gap_stats, rx_disconnect)
    STATS_NAME(ble_gap_stats, rx_update_complete)
    STATS_NAME(ble_gap_stats, rx_adv_report)
    STATS_NAME(ble_gap_stats, rx_conn_complete)
    STATS_NAME(ble_gap_stats, discover_cancel)
    STATS_NAME(ble_gap_stats, discover_cancel_fail)
    STATS_NAME(ble_gap_stats, security_initiate)
    STATS_NAME(ble_gap_stats, security_initiate_fail)
STATS_NAME_END(ble_gap_stats)

/*****************************************************************************
 * $debug                                                                    *
 *****************************************************************************/

#if MYNEWT_VAL(BLE_HS_DEBUG)
int
ble_gap_dbg_update_active(uint16_t conn_handle)
{
    const struct ble_gap_update_entry *entry;

    ble_hs_lock();
    entry = ble_gap_update_entry_find(conn_handle, NULL);
    ble_hs_unlock();

    return entry != NULL;
}
#endif

/*****************************************************************************
 * $log                                                                      *
 *****************************************************************************/

#if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_log_duration(int32_t duration_ms)
{
    if (duration_ms == BLE_HS_FOREVER) {
        BLE_HS_LOG(INFO, "duration=forever");
    } else {
        BLE_HS_LOG(INFO, "duration=%dms", duration_ms);
    }
}
#endif

#if MYNEWT_VAL(BLE_ROLE_CENTRAL) && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_log_conn(uint8_t own_addr_type, const ble_addr_t *peer_addr,
                 const struct ble_gap_conn_params *params)
{
    if (peer_addr != NULL) {
        BLE_HS_LOG(INFO, "peer_addr_type=%d peer_addr=", peer_addr->type);
        BLE_HS_LOG_ADDR(INFO, peer_addr->val);
    }

    BLE_HS_LOG(INFO, " scan_itvl=%d scan_window=%d itvl_min=%d itvl_max=%d "
                     "latency=%d supervision_timeout=%d min_ce_len=%d "
                     "max_ce_len=%d own_addr_type=%d",
               params->scan_itvl, params->scan_window, params->itvl_min,
               params->itvl_max, params->latency,
               params->supervision_timeout, params->min_ce_len,
               params->max_ce_len, own_addr_type);
}
#endif

#if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_log_disc(uint8_t own_addr_type, int32_t duration_ms,
                 const struct ble_gap_disc_params *disc_params)
{
    BLE_HS_LOG(INFO, "own_addr_type=%d filter_policy=%d passive=%d "
                     "limited=%d filter_duplicates=%d ",
               own_addr_type, disc_params->filter_policy,
               disc_params->passive, disc_params->limited,
               disc_params->filter_duplicates);
    ble_gap_log_duration(duration_ms);
}
#endif

#if NIMBLE_BLE_CONNECT
static void
ble_gap_log_update(uint16_t conn_handle,
                   const struct ble_gap_upd_params *params)
{
    BLE_HS_LOG(INFO, "connection parameter update; "
                     "conn_handle=%d itvl_min=%d itvl_max=%d latency=%d "
                     "supervision_timeout=%d min_ce_len=%d max_ce_len=%d",
               conn_handle, params->itvl_min, params->itvl_max,
               params->latency, params->supervision_timeout,
               params->min_ce_len, params->max_ce_len);
}
#endif

#if MYNEWT_VAL(BLE_WHITELIST)
static void
ble_gap_log_wl(const ble_addr_t *addr, uint8_t white_list_count)
{
    int i;

    BLE_HS_LOG(INFO, "count=%d ", white_list_count);

    for (i = 0; i < white_list_count; i++, addr++) {
        BLE_HS_LOG(INFO, "entry-%d={addr_type=%d addr=", i, addr->type);
        BLE_HS_LOG_ADDR(INFO, addr->val);
        BLE_HS_LOG(INFO, "} ");
    }
}
#endif

#if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_log_adv(uint8_t own_addr_type, const ble_addr_t *direct_addr,
                const struct ble_gap_adv_params *adv_params)
{
    BLE_HS_LOG(INFO, "disc_mode=%d", adv_params->disc_mode);
    if (direct_addr) {
        BLE_HS_LOG(INFO, " direct_addr_type=%d direct_addr=",
                   direct_addr->type);
        BLE_HS_LOG_ADDR(INFO, direct_addr->val);
    }
    BLE_HS_LOG(INFO, " adv_channel_map=%d own_addr_type=%d "
                     "adv_filter_policy=%d adv_itvl_min=%d adv_itvl_max=%d",
               adv_params->channel_map, own_addr_type,
               adv_params->filter_policy, adv_params->itvl_min,
               adv_params->itvl_max);
}
#endif

/*****************************************************************************
 * $snapshot                                                                 *
 *****************************************************************************/

static void
ble_gap_fill_conn_desc(struct ble_hs_conn *conn,
                       struct ble_gap_conn_desc *desc)
{
    struct ble_hs_conn_addrs addrs;

    ble_hs_conn_addrs(conn, &addrs);

    desc->our_id_addr = addrs.our_id_addr;
    desc->peer_id_addr = addrs.peer_id_addr;
    desc->our_ota_addr = addrs.our_ota_addr;
    desc->peer_ota_addr = addrs.peer_ota_addr;

    desc->conn_handle = conn->bhc_handle;
    desc->conn_itvl = conn->bhc_itvl;
    desc->conn_latency = conn->bhc_latency;
    desc->supervision_timeout = conn->bhc_supervision_timeout;
    desc->master_clock_accuracy = conn->bhc_master_clock_accuracy;
    desc->sec_state = conn->bhc_sec_state;

    if (conn->bhc_flags & BLE_HS_CONN_F_MASTER) {
        desc->role = BLE_GAP_ROLE_MASTER;
    } else {
        desc->role = BLE_GAP_ROLE_SLAVE;
    }
}

static void
ble_gap_conn_to_snapshot(struct ble_hs_conn *conn,
                         struct ble_gap_snapshot *snap)
{
    ble_gap_fill_conn_desc(conn, snap->desc);
    snap->cb = conn->bhc_cb;
    snap->cb_arg = conn->bhc_cb_arg;
}

static int
ble_gap_find_snapshot(uint16_t handle, struct ble_gap_snapshot *snap)
{
    struct ble_hs_conn *conn;

    ble_hs_lock();
    conn = ble_hs_conn_find(handle);
    if (conn != NULL) {
        ble_gap_conn_to_snapshot(conn, snap);
    }
    ble_hs_unlock();

    if (conn == NULL) {
        return BLE_HS_ENOTCONN;
    } else {
        return 0;
    }
}

int
ble_gap_conn_find(uint16_t handle, struct ble_gap_conn_desc *out_desc)
{
    struct ble_hs_conn *conn;

    ble_hs_lock();
    conn = ble_hs_conn_find(handle);
    if (conn != NULL && out_desc != NULL) {
        ble_gap_fill_conn_desc(conn, out_desc);
    }
    ble_hs_unlock();

    if (conn == NULL) {
        return BLE_HS_ENOTCONN;
    } else {
        return 0;
    }
}

int
ble_gap_conn_find_by_addr(const ble_addr_t *addr,
                          struct ble_gap_conn_desc *out_desc)
{
    struct ble_hs_conn *conn;

    ble_hs_lock();
    conn = ble_hs_conn_find_by_addr(addr);
    if (conn != NULL && out_desc != NULL) {
        ble_gap_fill_conn_desc(conn, out_desc);
    }
    ble_hs_unlock();

    if (conn == NULL) {
        return BLE_HS_ENOTCONN;
    }

    return 0;
}

static int
ble_gap_extract_conn_cb(uint16_t conn_handle,
                        ble_gap_event_fn **out_cb, void **out_cb_arg)
{
    const struct ble_hs_conn *conn;

    BLE_HS_DBG_ASSERT(conn_handle <= BLE_HCI_LE_CONN_HANDLE_MAX);

    ble_hs_lock();
    conn = ble_hs_conn_find(conn_handle);
    if (conn != NULL) {
        *out_cb = conn->bhc_cb;
        *out_cb_arg = conn->bhc_cb_arg;
    } else {
        *out_cb = NULL;
        *out_cb_arg = NULL;
    }
    ble_hs_unlock();

    if (conn == NULL) {
        return BLE_HS_ENOTCONN;
    } else {
        return 0;
    }
}

int
ble_gap_set_priv_mode(const ble_addr_t *peer_addr, uint8_t priv_mode)
{
    return ble_hs_pvcy_set_mode(peer_addr, priv_mode);
}

int
ble_gap_read_le_phy(uint16_t conn_handle, uint8_t *tx_phy, uint8_t *rx_phy)
{
    struct ble_hci_le_rd_phy_cp cmd;
    struct ble_hci_le_rd_phy_rp rsp;
    struct ble_hs_conn *conn;
    int rc;

    ble_hs_lock();
    conn = ble_hs_conn_find(conn_handle);
    ble_hs_unlock();

    if (conn == NULL) {
        return BLE_HS_ENOTCONN;
    }

    cmd.conn_handle = htole16(conn_handle);

    rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_RD_PHY),
                           &cmd, sizeof(cmd), &rsp, sizeof(rsp));
    if (rc != 0) {
        return rc;
    }

    /* sanity check for response */
    if (le16toh(rsp.conn_handle) != conn_handle) {
        return BLE_HS_ECONTROLLER;
    }

    *tx_phy = rsp.tx_phy;
    *rx_phy = rsp.rx_phy;

    return 0;
}

int
ble_gap_set_prefered_default_le_phy(uint8_t tx_phys_mask,
                                    uint8_t rx_phys_mask)
{
    struct ble_hci_le_set_default_phy_cp cmd;

    if (tx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK |
                        BLE_HCI_LE_PHY_2M_PREF_MASK |
                        BLE_HCI_LE_PHY_CODED_PREF_MASK)) {
        return BLE_ERR_INV_HCI_CMD_PARMS;
    }

    if (rx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK |
                        BLE_HCI_LE_PHY_2M_PREF_MASK |
                        BLE_HCI_LE_PHY_CODED_PREF_MASK)) {
        return BLE_ERR_INV_HCI_CMD_PARMS;
    }

    memset(&cmd, 0, sizeof(cmd));

    if (tx_phys_mask == 0) {
        cmd.all_phys |= BLE_HCI_LE_PHY_NO_TX_PREF_MASK;
    } else {
        cmd.tx_phys = tx_phys_mask;
    }

    if (rx_phys_mask == 0) {
        cmd.all_phys |= BLE_HCI_LE_PHY_NO_RX_PREF_MASK;
    } else {
        cmd.rx_phys = rx_phys_mask;
    }

    return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                        BLE_HCI_OCF_LE_SET_DEFAULT_PHY),
                             &cmd, sizeof(cmd), NULL, 0);
}

int
ble_gap_set_prefered_le_phy(uint16_t conn_handle, uint8_t tx_phys_mask,
                            uint8_t rx_phys_mask, uint16_t phy_opts)
{
    struct ble_hci_le_set_phy_cp cmd;
    struct ble_hs_conn *conn;

    ble_hs_lock();
    conn = ble_hs_conn_find(conn_handle);
    ble_hs_unlock();

    if (conn == NULL) {
        return BLE_HS_ENOTCONN;
    }

    if (tx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK |
                        BLE_HCI_LE_PHY_2M_PREF_MASK |
                        BLE_HCI_LE_PHY_CODED_PREF_MASK)) {
        return BLE_ERR_INV_HCI_CMD_PARMS;
    }

    if (rx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK |
                        BLE_HCI_LE_PHY_2M_PREF_MASK |
                        BLE_HCI_LE_PHY_CODED_PREF_MASK)) {
        return BLE_ERR_INV_HCI_CMD_PARMS;
    }

    if (phy_opts > BLE_HCI_LE_PHY_CODED_S8_PREF) {
        return BLE_ERR_INV_HCI_CMD_PARMS;
    }

    memset(&cmd, 0, sizeof(cmd));
    cmd.conn_handle = htole16(conn_handle);

    if (tx_phys_mask == 0) {
        cmd.all_phys |= BLE_HCI_LE_PHY_NO_TX_PREF_MASK;
    } else {
        cmd.tx_phys = tx_phys_mask;
    }

    if (rx_phys_mask == 0) {
        cmd.all_phys |= BLE_HCI_LE_PHY_NO_RX_PREF_MASK;
    } else {
        cmd.rx_phys = rx_phys_mask;
    }

    cmd.phy_options = htole16(phy_opts);

    return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                        BLE_HCI_OCF_LE_SET_PHY),
                             &cmd, sizeof(cmd), NULL, 0);
}

/*****************************************************************************
 * $misc                                                                     *
 *****************************************************************************/

static int ble_gap_event_listener_call(struct ble_gap_event *event);

static int
ble_gap_call_event_cb(struct ble_gap_event *event,
                      ble_gap_event_fn *cb, void *cb_arg)
{
    int rc;

    BLE_HS_DBG_ASSERT(!ble_hs_locked_by_cur_task());

    if (cb != NULL) {
        rc = cb(event, cb_arg);
    } else {
        if (event->type == BLE_GAP_EVENT_CONN_UPDATE_REQ) {
            /* Just copy peer parameters back into the reply. */
            *event->conn_update_req.self_params =
                *event->conn_update_req.peer_params;
        }
        rc = 0;
    }

    return rc;
}

static int
ble_gap_call_conn_event_cb(struct ble_gap_event *event, uint16_t conn_handle)
{
    ble_gap_event_fn *cb;
    void *cb_arg;
    int rc;

    rc = ble_gap_extract_conn_cb(conn_handle, &cb, &cb_arg);
    if (rc != 0) {
        return rc;
    }

    rc = ble_gap_call_event_cb(event, cb, cb_arg);
    if (rc != 0) {
        return rc;
    }

    return 0;
}

static bool
ble_gap_is_preempted(void)
{
    int i;

    BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task());

    if (ble_gap_master.preempted_op != BLE_GAP_OP_NULL) {
        return true;
    }

    for (i = 0; i < BLE_ADV_INSTANCES; i++) {
        if (ble_gap_slave[i].preempted) {
            return true;
        }
    }

    return false;
}

#if NIMBLE_BLE_CONNECT
static void
ble_gap_master_reset_state(void)
{
    ble_gap_master.op = BLE_GAP_OP_NULL;
    ble_gap_master.exp_set = 0;
    ble_gap_master.conn.cancel = 0;

    ble_hs_timer_resched();
}
#endif

static void
ble_gap_slave_reset_state(uint8_t instance)
{
    ble_gap_slave[instance].op = BLE_GAP_OP_NULL;

#if !MYNEWT_VAL(BLE_EXT_ADV)
    ble_gap_slave[instance].exp_set = 0;
    ble_hs_timer_resched();
#endif
}

#if NIMBLE_BLE_CONNECT
static bool
ble_gap_has_client(struct ble_gap_master_state *out_state)
{
    if (!out_state) {
        return 0;
    }

    return out_state->cb != NULL;
}

static void
ble_gap_master_extract_state(struct ble_gap_master_state *out_state,
                             int reset_state)
{
    ble_hs_lock();

    *out_state = ble_gap_master;

    if (reset_state) {
        ble_gap_master_reset_state();
        ble_gap_master.preempted_op = BLE_GAP_OP_NULL;
    }

    ble_hs_unlock();
}
#endif

static void
ble_gap_slave_extract_cb(uint8_t instance,
                         ble_gap_event_fn **out_cb, void **out_cb_arg)
{
    ble_hs_lock();

    *out_cb = ble_gap_slave[instance].cb;
    *out_cb_arg = ble_gap_slave[instance].cb_arg;
    ble_gap_slave_reset_state(instance);

    ble_hs_unlock();
}

static void
ble_gap_adv_finished(uint8_t instance, int reason, uint16_t conn_handle,
                     uint8_t num_events)
{
    struct ble_gap_event event;
    ble_gap_event_fn *cb;
    void *cb_arg;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_ADV_COMPLETE;
    event.adv_complete.reason = reason;
#if MYNEWT_VAL(BLE_EXT_ADV)
    event.adv_complete.instance = instance;
    event.adv_complete.conn_handle = conn_handle;
    event.adv_complete.num_ext_adv_events = num_events;
#endif

    ble_gap_event_listener_call(&event);

    ble_gap_slave_extract_cb(instance, &cb, &cb_arg);
    if (cb != NULL) {
        cb(&event, cb_arg);
    }
}

#if NIMBLE_BLE_CONNECT
static int
ble_gap_master_connect_failure(int status)
{
    struct ble_gap_master_state state;
    struct ble_gap_event event;
    int rc;

    ble_gap_master_extract_state(&state, 1);
    if (ble_gap_has_client(&state)) {
        memset(&event, 0, sizeof event);
        event.type = BLE_GAP_EVENT_CONNECT;
        event.connect.status = status;

        rc = state.cb(&event, state.cb_arg);
    } else {
        rc = 0;
    }

    return rc;
}

static void
ble_gap_master_connect_cancelled(void)
{
    struct ble_gap_master_state state;
    struct ble_gap_event event;

    ble_gap_master_extract_state(&state, 1);
    if (state.cb != NULL) {
        memset(&event, 0, sizeof event);
        event.type = BLE_GAP_EVENT_CONNECT;
        event.connect.conn_handle = BLE_HS_CONN_HANDLE_NONE;
        if (state.conn.cancel) {
            /* Connect procedure successfully cancelled. */
            event.connect.status = BLE_HS_EAPP;
        } else {
            /* Connect procedure timed out. */
            event.connect.status = BLE_HS_ETIMEOUT;
        }
        state.cb(&event, state.cb_arg);
    }
}
#endif

#if NIMBLE_BLE_SCAN
static void
ble_gap_disc_report(void *desc)
{
    struct ble_gap_master_state state;
    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
#if MYNEWT_VAL(BLE_EXT_ADV)
    event.type = BLE_GAP_EVENT_EXT_DISC;
    event.ext_disc = *((struct ble_gap_ext_disc_desc *)desc);
#else
    event.type = BLE_GAP_EVENT_DISC;
    event.disc = *((struct ble_gap_disc_desc *)desc);
#endif

    ble_gap_master_extract_state(&state, 0);
    if (ble_gap_has_client(&state)) {
        state.cb(&event, state.cb_arg);
    }

    ble_gap_event_listener_call(&event);
}

static void
ble_gap_disc_complete(void)
{
    struct ble_gap_master_state state;
    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_DISC_COMPLETE;
    event.disc_complete.reason = 0;

    ble_gap_master_extract_state(&state, 1);
    if (ble_gap_has_client(&state)) {
        ble_gap_call_event_cb(&event, state.cb, state.cb_arg);
    }

    ble_gap_event_listener_call(&event);
}
#endif

static void
ble_gap_update_notify(uint16_t conn_handle, int status)
{
    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_CONN_UPDATE;
    event.conn_update.conn_handle = conn_handle;
    event.conn_update.status = status;

    ble_gap_event_listener_call(&event);
    ble_gap_call_conn_event_cb(&event, conn_handle);

    /* Terminate the connection on procedure timeout. */
    if (status == BLE_HS_ETIMEOUT) {
        ble_gap_terminate(conn_handle, BLE_ERR_REM_USER_CONN_TERM);
    }
}

static uint32_t
ble_gap_master_ticks_until_exp(void)
{
    ble_npl_stime_t ticks;

    if (ble_gap_master.op == BLE_GAP_OP_NULL || !ble_gap_master.exp_set) {
        /* Timer not set; infinite ticks until next event. */
        return BLE_HS_FOREVER;
    }

    ticks = ble_gap_master.exp_os_ticks - ble_npl_time_get();
    if (ticks > 0) {
        /* Timer not expired yet. */
        return ticks;
    }

    /* Timer just expired. */
    return 0;
}

#if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV)
static uint32_t
ble_gap_slave_ticks_until_exp(void)
{
    ble_npl_stime_t ticks;

    if (ble_gap_slave[0].op == BLE_GAP_OP_NULL ||
        !ble_gap_slave[0].exp_set) {
        /* Timer not set; infinite ticks until next event. */
        return BLE_HS_FOREVER;
    }

    ticks = ble_gap_slave[0].exp_os_ticks - ble_npl_time_get();
    if (ticks > 0) {
        /* Timer not expired yet. */
        return ticks;
    }

    /* Timer just expired. */
    return 0;
}
#endif

/**
 * Finds the update procedure that expires soonest.
 *
 * @param out_ticks_from_now    On success, the ticks until the update
 *                                  procedure's expiry time gets written here.
 *
 * @return                      The connection handle of the update procedure
 *                                  that expires soonest, or
 *                                  BLE_HS_CONN_HANDLE_NONE if there are no
 *                                  active update procedures.
 */
static uint16_t
ble_gap_update_next_exp(int32_t *out_ticks_from_now)
{
    struct ble_gap_update_entry *entry;
    ble_npl_time_t now;
    uint16_t conn_handle;
    int32_t best_ticks;
    int32_t ticks;

    BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task());

    conn_handle = BLE_HS_CONN_HANDLE_NONE;
    best_ticks = BLE_HS_FOREVER;
    now = ble_npl_time_get();

    SLIST_FOREACH(entry, &ble_gap_update_entries, next) {
        ticks = entry->exp_os_ticks - now;
        if (ticks <= 0) {
            ticks = 0;
        }

        if (ticks < best_ticks) {
            conn_handle = entry->conn_handle;
            best_ticks = ticks;
        }
    }

    if (out_ticks_from_now != NULL) {
        *out_ticks_from_now = best_ticks;
    }

    return conn_handle;
}

#if NIMBLE_BLE_SCAN
static void
ble_gap_master_set_timer(uint32_t ticks_from_now)
{
    ble_gap_master.exp_os_ticks = ble_npl_time_get() + ticks_from_now;
    ble_gap_master.exp_set = 1;

    ble_hs_timer_resched();
}
#endif

#if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_slave_set_timer(uint32_t ticks_from_now)
{
    ble_gap_slave[0].exp_os_ticks = ble_npl_time_get() + ticks_from_now;
    ble_gap_slave[0].exp_set = 1;

    ble_hs_timer_resched();
}
#endif

#if (NIMBLE_BLE_CONNECT || NIMBLE_BLE_SCAN)
/**
 * Called when an error is encountered while the master-connection-fsm is
 * active.
 */
static void
ble_gap_master_failed(int status)
{
    switch (ble_gap_master.op) {
    case BLE_GAP_OP_M_CONN:
        STATS_INC(ble_gap_stats, initiate_fail);
        ble_gap_master_connect_failure(status);
        break;

#if NIMBLE_BLE_SCAN
    case BLE_GAP_OP_M_DISC:
        STATS_INC(ble_gap_stats, initiate_fail);
        ble_gap_disc_complete();
        ble_gap_master_reset_state();
        break;
#endif

    default:
        BLE_HS_DBG_ASSERT(0);
        break;
    }
}
#endif

#if NIMBLE_BLE_CONNECT
static void
ble_gap_update_failed(uint16_t conn_handle, int status)
{
    struct ble_gap_update_entry *entry;

    STATS_INC(ble_gap_stats, update_fail);

    ble_hs_lock();
    entry = ble_gap_update_entry_remove(conn_handle);
    ble_hs_unlock();

    ble_gap_update_entry_free(entry);

    ble_gap_update_notify(conn_handle, status);
}
#endif

void
ble_gap_conn_broken(uint16_t conn_handle, int reason)
{
    struct ble_gap_update_entry *entry;
    struct ble_gap_snapshot snap;
    struct ble_gap_event event;
    int rc;

    memset(&event, 0, sizeof event);
    snap.desc = &event.disconnect.conn;

    rc = ble_gap_find_snapshot(conn_handle, &snap);
    if (rc != 0) {
        /* No longer connected. */
        return;
    }

    /* If there was a connection update in progress, indicate to the
     * application that it did not complete.
     */
    ble_hs_lock();
    entry = ble_gap_update_entry_remove(conn_handle);
    ble_hs_unlock();

    if (entry != NULL) {
        ble_gap_update_notify(conn_handle, reason);
        ble_gap_update_entry_free(entry);
    }

    /* Indicate the connection termination to each module.  The order matters
     * here: gatts must come before gattc to ensure the application does not
     * get informed of spurious notify-tx events.
     */
    ble_l2cap_sig_conn_broken(conn_handle, reason);
    ble_sm_connection_broken(conn_handle);
    ble_gatts_connection_broken(conn_handle);
    ble_gattc_connection_broken(conn_handle);
    ble_hs_flow_connection_broken(conn_handle);

    ble_hs_atomic_conn_delete(conn_handle);

    event.type = BLE_GAP_EVENT_DISCONNECT;
    event.disconnect.reason = reason;

    ble_gap_event_listener_call(&event);
    ble_gap_call_event_cb(&event, snap.cb, snap.cb_arg);

    STATS_INC(ble_gap_stats, disconnect);
}

#if NIMBLE_BLE_CONNECT
static void
ble_gap_update_to_l2cap(const struct ble_gap_upd_params *params,
                        struct ble_l2cap_sig_update_params *l2cap_params)
{
    l2cap_params->itvl_min = params->itvl_min;
    l2cap_params->itvl_max = params->itvl_max;
    l2cap_params->slave_latency = params->latency;
    l2cap_params->timeout_multiplier = params->supervision_timeout;
}
#endif

void
ble_gap_rx_disconn_complete(const struct ble_hci_ev_disconn_cmp *ev)
{
#if NIMBLE_BLE_CONNECT
    struct ble_gap_event event;
    uint16_t handle = le16toh(ev->conn_handle);

    STATS_INC(ble_gap_stats, rx_disconnect);

    if (ev->status == 0) {
        ble_gap_conn_broken(handle, BLE_HS_HCI_ERR(ev->reason));
    } else {
        memset(&event, 0, sizeof event);
        event.type = BLE_GAP_EVENT_TERM_FAILURE;
        event.term_failure.conn_handle = handle;
        event.term_failure.status = BLE_HS_HCI_ERR(ev->status);
        ble_gap_event_listener_call(&event);
        ble_gap_call_conn_event_cb(&event, handle);
    }
#endif
}

void
ble_gap_rx_update_complete(
    const struct ble_hci_ev_le_subev_conn_upd_complete *ev)
{
#if NIMBLE_BLE_CONNECT
    struct ble_gap_update_entry *entry;
    struct ble_l2cap_sig_update_params l2cap_params;
    struct ble_gap_event event;
    struct ble_hs_conn *conn;
    uint16_t conn_handle;
    int cb_status;
    int call_cb;
    int rc;

    STATS_INC(ble_gap_stats, rx_update_complete);

    memset(&event, 0, sizeof event);
    memset(&l2cap_params, 0, sizeof l2cap_params);

    ble_hs_lock();

    conn_handle = le16toh(ev->conn_handle);
    conn = ble_hs_conn_find(conn_handle);
    if (conn != NULL) {
        switch (ev->status) {
        case 0:
            /* Connection successfully updated. */
            conn->bhc_itvl = le16toh(ev->conn_itvl);
            conn->bhc_latency = le16toh(ev->conn_latency);
            conn->bhc_supervision_timeout =
                le16toh(ev->supervision_timeout);
            break;

        case BLE_ERR_UNSUPP_REM_FEATURE:
            /* Peer reports that it doesn't support the procedure.  This
             * should only happen if our controller sent the 4.1 Connection
             * Parameters Request Procedure.  If we are the slave, fail over
             * to the L2CAP update procedure.
             */
            entry = ble_gap_update_entry_find(conn_handle, NULL);
            if (entry != NULL &&
                !(conn->bhc_flags & BLE_HS_CONN_F_MASTER)) {
                ble_gap_update_to_l2cap(&entry->params, &l2cap_params);
                entry->exp_os_ticks = ble_npl_time_get() +
                    ble_npl_time_ms_to_ticks32(BLE_GAP_UPDATE_TIMEOUT_MS);
            }
            break;

        default:
            break;
        }
    }

    /* We aren't failing over to L2CAP; the update procedure is complete. */
    if (l2cap_params.itvl_min == 0) {
        entry = ble_gap_update_entry_remove(conn_handle);
        ble_gap_update_entry_free(entry);
    }

    ble_hs_unlock();

    if (l2cap_params.itvl_min != 0) {
        rc = ble_l2cap_sig_update(conn_handle, &l2cap_params,
                                  ble_gap_update_l2cap_cb, NULL);
        if (rc == 0) {
            call_cb = 0;
        } else {
            call_cb = 1;
            cb_status = rc;
        }
    } else {
        call_cb = 1;
        cb_status = BLE_HS_HCI_ERR(ev->status);
    }

    if (call_cb) {
        ble_gap_update_notify(conn_handle, cb_status);
    }
#endif
}

/**
 * Tells you if there is an active central GAP procedure (connect or
 * discover).
 */
int
ble_gap_master_in_progress(void)
{
    return ble_gap_master.op != BLE_GAP_OP_NULL;
}

static int
ble_gap_adv_active_instance(uint8_t instance)
{
    /* Assume read is atomic; mutex not necessary. */
    return ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV;
}

/**
 * Clears advertisement and discovery state.  This function is necessary
 * when the controller loses its active state (e.g. on stack reset).
 */
void
ble_gap_reset_state(int reason)
{
    uint16_t conn_handle;

    while (1) {
        conn_handle = ble_hs_atomic_first_conn_handle();
        if (conn_handle == BLE_HS_CONN_HANDLE_NONE) {
            break;
        }

        ble_gap_conn_broken(conn_handle, reason);
    }

#if NIMBLE_BLE_ADVERTISE
#if MYNEWT_VAL(BLE_EXT_ADV)
    uint8_t i;
    for (i = 0; i < BLE_ADV_INSTANCES; i++) {
        if (ble_gap_adv_active_instance(i)) {
            /* Indicate to application that advertising has stopped. */
            ble_gap_adv_finished(i, reason, 0, 0);
        }
    }
#else
    if (ble_gap_adv_active_instance(0)) {
        /* Indicate to application that advertising has stopped. */
        ble_gap_adv_finished(0, reason, 0, 0);
    }
#endif
#endif

#if (NIMBLE_BLE_SCAN || NIMBLE_BLE_CONNECT)
    ble_gap_master_failed(reason);
#endif
}

#if NIMBLE_BLE_CONNECT
static int
ble_gap_accept_master_conn(void)
{
    int rc;

    switch (ble_gap_master.op) {
    case BLE_GAP_OP_NULL:
    case BLE_GAP_OP_M_DISC:
        rc = BLE_HS_ENOENT;
        break;

    case BLE_GAP_OP_M_CONN:
        rc = 0;
        break;

    default:
        BLE_HS_DBG_ASSERT(0);
        rc = BLE_HS_ENOENT;
        break;
    }

    if (rc == 0) {
        STATS_INC(ble_gap_stats, connect_mst);
    }

    return rc;
}

static int
ble_gap_accept_slave_conn(uint8_t instance)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        rc = BLE_HS_ENOENT;
    } else if (!ble_gap_adv_active_instance(instance)) {
        rc = BLE_HS_ENOENT;
    } else {
        if (ble_gap_slave[instance].connectable) {
            rc = 0;
        } else {
            rc = BLE_HS_ENOENT;
        }
    }

    if (rc == 0) {
        STATS_INC(ble_gap_stats, connect_slv);
    }

    return rc;
}
#endif

#if NIMBLE_BLE_SCAN
static int
ble_gap_rx_adv_report_sanity_check(const uint8_t *adv_data,
                                   uint8_t adv_data_len)
{
    const struct ble_hs_adv_field *flags;
    int rc;

    STATS_INC(ble_gap_stats, rx_adv_report);

    if (ble_gap_master.op != BLE_GAP_OP_M_DISC) {
        return -1;
    }

    /* If a limited discovery procedure is active, discard non-limited
     * advertisements.
     */
    if (ble_gap_master.disc.limited) {
        rc = ble_hs_adv_find_field(BLE_HS_ADV_TYPE_FLAGS, adv_data,
                                   adv_data_len, &flags);
        if ((rc == 0) && (flags->length == 2) &&
            !(flags->value[0] & BLE_HS_ADV_F_DISC_LTD)) {
            return -1;
        }
    }

    return 0;
}
#endif

void
ble_gap_rx_adv_report(struct ble_gap_disc_desc *desc)
{
#if NIMBLE_BLE_SCAN
    if (ble_gap_rx_adv_report_sanity_check(desc->data, desc->length_data)) {
        return;
    }

    ble_gap_disc_report(desc);
#endif
}

#if MYNEWT_VAL(BLE_EXT_ADV)
#if NIMBLE_BLE_SCAN
void
ble_gap_rx_le_scan_timeout(void)
{
    ble_gap_disc_complete();
}

void
ble_gap_rx_ext_adv_report(struct ble_gap_ext_disc_desc *desc)
{
    if (ble_gap_rx_adv_report_sanity_check(desc->data, desc->length_data)) {
        return;
    }

    ble_gap_disc_report(desc);
}
#endif

void
ble_gap_rx_adv_set_terminated(
    const struct ble_hci_ev_le_subev_adv_set_terminated *ev)
{
    uint16_t conn_handle;
    int reason;

    /* Currently the spec allows only 0x3c and 0x43 when advertising was
     * stopped due to a timeout or the events limit; map this to a timeout
     * error for now.
     */
    if (ev->status) {
        reason = BLE_HS_ETIMEOUT;
        conn_handle = 0;
    } else {
        reason = 0;
        conn_handle = le16toh(ev->conn_handle);
    }

    ble_gap_adv_finished(ev->adv_handle, reason, conn_handle,
                         ev->num_events);
}

void
ble_gap_rx_scan_req_rcvd(const struct ble_hci_ev_le_subev_scan_req_rcvd *ev)
{
    struct ble_gap_event event;
    ble_gap_event_fn *cb;
    void *cb_arg;

    ble_gap_slave_extract_cb(ev->adv_handle, &cb, &cb_arg);
    if (cb != NULL) {
        memset(&event, 0, sizeof event);
        event.type = BLE_GAP_EVENT_SCAN_REQ_RCVD;
        event.scan_req_rcvd.instance = ev->adv_handle;
        event.scan_req_rcvd.scan_addr.type = ev->peer_addr_type;
        memcpy(event.scan_req_rcvd.scan_addr.val, ev->peer_addr,
               BLE_DEV_ADDR_LEN);

        cb(&event, cb_arg);
    }
}
#endif

/* Periodic adv events */
#if MYNEWT_VAL(BLE_PERIODIC_ADV)
void
ble_gap_rx_peroidic_adv_sync_estab(
    const struct ble_hci_ev_le_subev_periodic_adv_sync_estab *ev)
{
    uint16_t sync_handle;
    struct ble_gap_event event;
    ble_gap_event_fn *cb;
    void *cb_arg;

    memset(&event, 0, sizeof event);

    event.type = BLE_GAP_EVENT_PERIODIC_SYNC;
    event.periodic_sync.status = ev->status;

    ble_hs_lock();

    BLE_HS_DBG_ASSERT(ble_gap_sync.psync);

    if (!ev->status) {
        sync_handle = le16toh(ev->sync_handle);

        ble_gap_sync.psync->sync_handle = sync_handle;
        ble_gap_sync.psync->adv_sid = ev->sid;
        memcpy(ble_gap_sync.psync->advertiser_addr.val, ev->peer_addr, 6);
        ble_gap_sync.psync->advertiser_addr.type = ev->peer_addr_type;

        ble_gap_sync.psync->cb = ble_gap_sync.cb;
        ble_gap_sync.psync->cb_arg = ble_gap_sync.cb_arg;

        event.periodic_sync.sync_handle = sync_handle;
        event.periodic_sync.sid = ev->sid;
        event.periodic_sync.adv_addr = ble_gap_sync.psync->advertiser_addr;
        event.periodic_sync.adv_phy = ev->phy;
        event.periodic_sync.per_adv_ival = ev->interval;
        event.periodic_sync.adv_clk_accuracy = ev->aca;

        ble_hs_periodic_sync_insert(ble_gap_sync.psync);
    } else {
        ble_hs_periodic_sync_free(ble_gap_sync.psync);
    }

    cb = ble_gap_sync.cb;
    cb_arg = ble_gap_sync.cb_arg;

    ble_gap_sync.op = BLE_GAP_OP_NULL;
    ble_gap_sync.cb = NULL;
    ble_gap_sync.cb_arg = NULL;
    ble_gap_sync.psync = NULL;

    ble_hs_unlock();

    ble_gap_event_listener_call(&event);

    if (cb) {
        cb(&event, cb_arg);
    }
}

void
ble_gap_rx_periodic_adv_rpt(
    const struct ble_hci_ev_le_subev_periodic_adv_rpt *ev)
{
    struct ble_hs_periodic_sync *psync;
    struct ble_gap_event event;
    ble_gap_event_fn *cb;
    void *cb_arg;

    ble_hs_lock();
    psync = ble_hs_periodic_sync_find_by_handle(le16toh(ev->sync_handle));
    if (psync) {
        cb = psync->cb;
        cb_arg = psync->cb_arg;
    }
    ble_hs_unlock();

    if (!psync || !cb) {
        return;
    }

    memset(&event, 0, sizeof event);

    event.type = BLE_GAP_EVENT_PERIODIC_REPORT;
    event.periodic_report.sync_handle = psync->sync_handle;
    event.periodic_report.tx_power = ev->tx_power;
    event.periodic_report.rssi = ev->rssi;
    event.periodic_report.data_status = ev->data_status;
    event.periodic_report.data_length = ev->data_len;
    event.periodic_report.data = ev->data;

    /* TODO: should we allow for a listener too?  This can be spammy and is
     * more like ACL data, not a general event.
     */
    cb(&event, cb_arg);
}

void
ble_gap_rx_periodic_adv_sync_lost(
    const struct ble_hci_ev_le_subev_periodic_adv_sync_lost *ev)
{
    struct ble_hs_periodic_sync *psync;
    struct ble_gap_event event;
    ble_gap_event_fn *cb;
    void *cb_arg;

    ble_hs_lock();

    /* The handle must be in the list. */
    psync = ble_hs_periodic_sync_find_by_handle(le16toh(ev->sync_handle));
    BLE_HS_DBG_ASSERT(psync);

    cb = psync->cb;
    cb_arg = psync->cb_arg;

    /* Remove the handle from the list. */
    ble_hs_periodic_sync_remove(psync);

    ble_hs_unlock();

    memset(&event, 0, sizeof event);

    event.type = BLE_GAP_EVENT_PERIODIC_SYNC_LOST;
    event.periodic_sync_lost.sync_handle = psync->sync_handle;
    event.periodic_sync_lost.reason = BLE_HS_ETIMEOUT;

    /* Remove any sync_lost event from the queue. */
    ble_npl_eventq_remove(ble_hs_evq_get(), &psync->lost_ev);

    /* Free the memory occupied by psync as it is no longer needed. */
    ble_hs_periodic_sync_free(psync);

    ble_gap_event_listener_call(&event);

    if (cb) {
        cb(&event, cb_arg);
    }
}
#endif

#if MYNEWT_VAL(BLE_PERIODIC_ADV_SYNC_TRANSFER)
static int
periodic_adv_transfer_disable(uint16_t conn_handle)
{
    struct ble_hci_le_periodic_adv_sync_transfer_params_cp cmd;
    struct ble_hci_le_periodic_adv_sync_transfer_params_rp rsp;
    uint16_t opcode;
    int rc;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE,
                        BLE_HCI_OCF_LE_PERIODIC_ADV_SYNC_TRANSFER_PARAMS);

    cmd.conn_handle = htole16(conn_handle);
    cmd.sync_cte_type = 0x00;
    cmd.mode = 0x00;
    cmd.skip = 0x0000;
    cmd.sync_timeout = 0x000a;

    rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp));
    if (!rc) {
        BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle);
    }

    return rc;
}

void
ble_gap_rx_periodic_adv_sync_transfer(
    const struct ble_hci_ev_le_subev_periodic_adv_sync_transfer *ev)
{
    struct ble_hci_le_periodic_adv_term_sync_cp cmd_term;
    struct ble_gap_event event;
    struct ble_hs_conn *conn;
    ble_gap_event_fn *cb;
    uint16_t sync_handle;
    uint16_t conn_handle;
    uint16_t opcode;
    void *cb_arg;

    conn_handle = le16toh(ev->conn_handle);

    ble_hs_lock();

    /* Unfortunately the spec is lacking here, as it doesn't explicitly stop
     * transfer reception on the first transfer; for now just disable it on
     * every transfer event we get.
     */
*/ periodic_adv_transfer_disable(conn_handle); conn = ble_hs_conn_find(le16toh(ev->conn_handle)); if (!conn || !conn->psync) { /* terminate sync if we didn't expect it */ if (!ev->status) { opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_TERM_SYNC); cmd_term.sync_handle = ev->sync_handle; ble_hs_hci_cmd_tx(opcode, &cmd_term, sizeof(cmd_term), NULL, 0); } ble_hs_unlock(); return; } cb = conn->psync->cb; cb_arg = conn->psync->cb_arg; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PERIODIC_TRANSFER; event.periodic_transfer.status = ev->status; /* only sync handle is not valid on error */ if (ev->status) { sync_handle = 0; ble_hs_periodic_sync_free(conn->psync); } else { sync_handle = le16toh(ev->sync_handle); conn->psync->sync_handle = sync_handle; conn->psync->adv_sid = ev->sid; memcpy(conn->psync->advertiser_addr.val, ev->peer_addr, 6); conn->psync->advertiser_addr.type = ev->peer_addr_type; ble_hs_periodic_sync_insert(conn->psync); } conn->psync = NULL; event.periodic_transfer.sync_handle = sync_handle; event.periodic_transfer.conn_handle = conn_handle; event.periodic_transfer.service_data = le16toh(ev->service_data); event.periodic_transfer.sid = ev->sid; memcpy(event.periodic_transfer.adv_addr.val, ev->peer_addr, 6); event.periodic_transfer.adv_addr.type = ev->peer_addr_type; event.periodic_transfer.adv_phy = ev->phy; event.periodic_transfer.per_adv_itvl = le16toh(ev->interval); event.periodic_transfer.adv_clk_accuracy = ev->aca; ble_hs_unlock(); ble_gap_event_listener_call(&event); if (cb) { cb(&event, cb_arg); } } #endif #if NIMBLE_BLE_CONNECT static int ble_gap_rd_rem_sup_feat_tx(uint16_t handle) { struct ble_hci_le_rd_rem_feat_cp cmd; cmd.conn_handle = htole16(handle); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_RD_REM_FEAT), &cmd, sizeof(cmd), NULL, 0); } #endif /** * Processes an incoming connection-complete HCI event. * instance parameter is valid only for slave connection. */ int ble_gap_rx_conn_complete(struct ble_gap_conn_complete *evt, uint8_t instance) { #if NIMBLE_BLE_CONNECT struct ble_gap_event event; struct ble_hs_conn *conn; int rc; STATS_INC(ble_gap_stats, rx_conn_complete); /* in that case *only* status field is valid so we determine role * based on error code */ if (evt->status != BLE_ERR_SUCCESS) { switch (evt->status) { case BLE_ERR_DIR_ADV_TMO: /* slave role (HD directed advertising) * * with ext advertising this is send from set terminated event */ #if !MYNEWT_VAL(BLE_EXT_ADV) if (ble_gap_adv_active()) { ble_gap_adv_finished(0, 0, 0, 0); } #endif break; case BLE_ERR_UNK_CONN_ID: /* master role */ if (ble_gap_master_in_progress()) { /* Connect procedure successfully cancelled. */ if (ble_gap_master.preempted_op == BLE_GAP_OP_M_CONN) { ble_gap_master_failed(BLE_HS_EPREEMPTED); } else { ble_gap_master_connect_cancelled(); } } break; default: /* this should never happen, unless controller is broken */ BLE_HS_LOG(INFO, "controller reported invalid error code in conn" "complete event: %u", evt->status); assert(0); break; } return 0; } /* Apply the event to the existing connection if it exists. */ if (ble_hs_atomic_conn_flags(evt->connection_handle, NULL) == 0) { /* XXX: Does this ever happen? */ return 0; } /* This event refers to a new connection. 
*/ switch (evt->role) { case BLE_HCI_LE_CONN_COMPLETE_ROLE_MASTER: rc = ble_gap_accept_master_conn(); if (rc != 0) { return rc; } break; case BLE_HCI_LE_CONN_COMPLETE_ROLE_SLAVE: rc = ble_gap_accept_slave_conn(instance); if (rc != 0) { return rc; } break; default: BLE_HS_DBG_ASSERT(0); break; } /* We verified that there is a free connection when the procedure began. */ conn = ble_hs_conn_alloc(evt->connection_handle); BLE_HS_DBG_ASSERT(conn != NULL); conn->bhc_itvl = evt->conn_itvl; conn->bhc_latency = evt->conn_latency; conn->bhc_supervision_timeout = evt->supervision_timeout; conn->bhc_master_clock_accuracy = evt->master_clk_acc; if (evt->role == BLE_HCI_LE_CONN_COMPLETE_ROLE_MASTER) { conn->bhc_cb = ble_gap_master.cb; conn->bhc_cb_arg = ble_gap_master.cb_arg; conn->bhc_flags |= BLE_HS_CONN_F_MASTER; conn->bhc_our_addr_type = ble_gap_master.conn.our_addr_type; ble_gap_master_reset_state(); } else { conn->bhc_cb = ble_gap_slave[instance].cb; conn->bhc_cb_arg = ble_gap_slave[instance].cb_arg; conn->bhc_our_addr_type = ble_gap_slave[instance].our_addr_type; #if MYNEWT_VAL(BLE_EXT_ADV) memcpy(conn->bhc_our_rnd_addr, ble_gap_slave[instance].rnd_addr, 6); #endif ble_gap_slave_reset_state(instance); } conn->bhc_peer_addr.type = evt->peer_addr_type; memcpy(conn->bhc_peer_addr.val, evt->peer_addr, 6); conn->bhc_our_rpa_addr.type = BLE_ADDR_RANDOM; memcpy(conn->bhc_our_rpa_addr.val, evt->local_rpa, 6); /* If peer RPA is not set in the event and peer address * is RPA then store the peer RPA address so when the peer * address is resolved, the RPA is not forgotten. */ if (memcmp(BLE_ADDR_ANY->val, evt->peer_rpa, 6) == 0) { if (BLE_ADDR_IS_RPA(&conn->bhc_peer_addr)) { conn->bhc_peer_rpa_addr = conn->bhc_peer_addr; } } else { conn->bhc_peer_rpa_addr.type = BLE_ADDR_RANDOM; memcpy(conn->bhc_peer_rpa_addr.val, evt->peer_rpa, 6); } ble_hs_lock(); memset(&event, 0, sizeof event); ble_hs_conn_insert(conn); ble_hs_unlock(); event.type = BLE_GAP_EVENT_CONNECT; event.connect.conn_handle = evt->connection_handle; event.connect.status = 0; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, evt->connection_handle); ble_gap_rd_rem_sup_feat_tx(evt->connection_handle); return 0; #else return BLE_HS_ENOTSUP; #endif } void ble_gap_rx_rd_rem_sup_feat_complete(const struct ble_hci_ev_le_subev_rd_rem_used_feat *ev) { #if NIMBLE_BLE_CONNECT struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(le16toh(ev->conn_handle)); if ((conn != NULL) && (ev->status == 0)) { conn->supported_feat = get_le32(ev->features); } ble_hs_unlock(); #endif } int ble_gap_rx_l2cap_update_req(uint16_t conn_handle, struct ble_gap_upd_params *params) { struct ble_gap_event event; int rc; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_L2CAP_UPDATE_REQ; event.conn_update_req.conn_handle = conn_handle; event.conn_update_req.peer_params = params; rc = ble_gap_call_conn_event_cb(&event, conn_handle); return rc; } void ble_gap_rx_phy_update_complete(const struct ble_hci_ev_le_subev_phy_update_complete *ev) { struct ble_gap_event event; uint16_t conn_handle = le16toh(ev->conn_handle); memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PHY_UPDATE_COMPLETE; event.phy_updated.status = ev->status; event.phy_updated.conn_handle = conn_handle; event.phy_updated.tx_phy = ev->tx_phy; event.phy_updated.rx_phy = ev->rx_phy; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); } static int32_t ble_gap_master_timer(void) { uint32_t ticks_until_exp; int rc; ticks_until_exp = 
ble_gap_master_ticks_until_exp(); if (ticks_until_exp != 0) { /* Timer not expired yet. */ return ticks_until_exp; } /*** Timer expired; process event. */ switch (ble_gap_master.op) { case BLE_GAP_OP_M_CONN: rc = ble_gap_conn_cancel_tx(); if (rc != 0) { /* Failed to stop connecting; try again in 100 ms. */ return ble_npl_time_ms_to_ticks32(BLE_GAP_CANCEL_RETRY_TIMEOUT_MS); } else { /* Stop the timer now that the cancel command has been acked. */ ble_gap_master.exp_set = 0; /* Timeout gets reported when we receive a connection complete * event indicating the connect procedure has been cancelled. */ /* XXX: Set a timer to reset the controller if a connection * complete event isn't received within a reasonable interval. */ } break; case BLE_GAP_OP_M_DISC: #if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV) /* When a discovery procedure times out, it is not a failure. */ rc = ble_gap_disc_enable_tx(0, 0); if (rc != 0) { /* Failed to stop discovery; try again in 100 ms. */ return ble_npl_time_ms_to_ticks32(BLE_GAP_CANCEL_RETRY_TIMEOUT_MS); } ble_gap_disc_complete(); #else assert(0); #endif break; default: BLE_HS_DBG_ASSERT(0); break; } return BLE_HS_FOREVER; } #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static int32_t ble_gap_slave_timer(void) { uint32_t ticks_until_exp; int rc; ticks_until_exp = ble_gap_slave_ticks_until_exp(); if (ticks_until_exp != 0) { /* Timer not expired yet. */ return ticks_until_exp; } /*** Timer expired; process event. */ /* Stop advertising. */ rc = ble_gap_adv_enable_tx(0); if (rc != 0) { /* Failed to stop advertising; try again in 100 ms. */ return 100; } /* Clear the timer and cancel the current procedure. */ ble_gap_slave_reset_state(0); /* Indicate to application that advertising has stopped. */ ble_gap_adv_finished(0, BLE_HS_ETIMEOUT, 0, 0); return BLE_HS_FOREVER; } #endif static int32_t ble_gap_update_timer(void) { struct ble_gap_update_entry *entry; int32_t ticks_until_exp; uint16_t conn_handle; do { ble_hs_lock(); conn_handle = ble_gap_update_next_exp(&ticks_until_exp); if (ticks_until_exp == 0) { entry = ble_gap_update_entry_remove(conn_handle); } else { entry = NULL; } ble_hs_unlock(); if (entry != NULL) { ble_gap_update_entry_free(entry); } } while (entry != NULL); return ticks_until_exp; } int ble_gap_set_event_cb(uint16_t conn_handle, ble_gap_event_fn *cb, void *cb_arg) { struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { conn->bhc_cb = cb; conn->bhc_cb_arg = cb_arg; } ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } return 0; } /** * Handles timed-out GAP procedures. * * @return The number of ticks until this function should * be called again. */ int32_t ble_gap_timer(void) { int32_t update_ticks; int32_t master_ticks; int32_t min_ticks; master_ticks = ble_gap_master_timer(); update_ticks = ble_gap_update_timer(); min_ticks = min(master_ticks, update_ticks); #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) min_ticks = min(min_ticks, ble_gap_slave_timer()); #endif return min_ticks; } /***************************************************************************** * $white list * *****************************************************************************/ #if MYNEWT_VAL(BLE_WHITELIST) static int ble_gap_wl_busy(void) { /* Check if an auto or selective connection establishment procedure is in * progress. 
*/ return ble_gap_master.op == BLE_GAP_OP_M_CONN && ble_gap_master.conn.using_wl; } static int ble_gap_wl_tx_add(const ble_addr_t *addr) { struct ble_hci_le_add_whte_list_cp cmd; if (addr->type > BLE_ADDR_RANDOM) { return BLE_HS_EINVAL; } memcpy(cmd.addr, addr->val, BLE_DEV_ADDR_LEN); cmd.addr_type = addr->type; return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_ADD_WHITE_LIST), &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_wl_tx_clear(void) { return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CLEAR_WHITE_LIST), NULL, 0, NULL, 0 ); } #endif int ble_gap_wl_set(const ble_addr_t *addrs, uint8_t white_list_count) { #if MYNEWT_VAL(BLE_WHITELIST) int rc; int i; STATS_INC(ble_gap_stats, wl_set); ble_hs_lock(); if (white_list_count == 0) { rc = BLE_HS_EINVAL; goto done; } for (i = 0; i < white_list_count; i++) { if (addrs[i].type != BLE_ADDR_PUBLIC && addrs[i].type != BLE_ADDR_RANDOM) { rc = BLE_HS_EINVAL; goto done; } } if (ble_gap_wl_busy()) { rc = BLE_HS_EBUSY; goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: set whitelist; "); ble_gap_log_wl(addrs, white_list_count); BLE_HS_LOG(INFO, "\n"); rc = ble_gap_wl_tx_clear(); if (rc != 0) { goto done; } for (i = 0; i < white_list_count; i++) { rc = ble_gap_wl_tx_add(addrs + i); if (rc != 0) { goto done; } } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, wl_set_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $stop advertise * *****************************************************************************/ #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_adv_enable_tx(int enable) { struct ble_hci_le_set_adv_enable_cp cmd; cmd.enable = !!enable; return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_ADV_ENABLE), &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_adv_stop_no_lock(void) { bool active; int rc; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); STATS_INC(ble_gap_stats, adv_stop); active = ble_gap_adv_active(); BLE_HS_LOG(INFO, "GAP procedure initiated: stop advertising.\n"); rc = ble_gap_adv_enable_tx(0); if (rc != 0) { goto done; } ble_gap_slave_reset_state(0); if (!active) { rc = BLE_HS_EALREADY; } else { rc = 0; } done: if (rc != 0) { STATS_INC(ble_gap_stats, adv_stop_fail); } return rc; } #endif int ble_gap_adv_stop(void) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) int rc; ble_hs_lock(); rc = ble_gap_adv_stop_no_lock(); ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $advertise * *****************************************************************************/ #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_adv_type(const struct ble_gap_adv_params *adv_params) { switch (adv_params->conn_mode) { case BLE_GAP_CONN_MODE_NON: if (adv_params->disc_mode == BLE_GAP_DISC_MODE_NON) { return BLE_HCI_ADV_TYPE_ADV_NONCONN_IND; } else { return BLE_HCI_ADV_TYPE_ADV_SCAN_IND; } case BLE_GAP_CONN_MODE_UND: return BLE_HCI_ADV_TYPE_ADV_IND; case BLE_GAP_CONN_MODE_DIR: if (adv_params->high_duty_cycle) { return BLE_HCI_ADV_TYPE_ADV_DIRECT_IND_HD; } else { return BLE_HCI_ADV_TYPE_ADV_DIRECT_IND_LD; } default: BLE_HS_DBG_ASSERT(0); return BLE_HCI_ADV_TYPE_ADV_IND; } } static void ble_gap_adv_dflt_itvls(uint8_t conn_mode, uint16_t *out_itvl_min, uint16_t *out_itvl_max) { switch (conn_mode) { case BLE_GAP_CONN_MODE_NON: *out_itvl_min = 
BLE_GAP_ADV_FAST_INTERVAL2_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL2_MAX; break; case BLE_GAP_CONN_MODE_UND: *out_itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL1_MAX; break; case BLE_GAP_CONN_MODE_DIR: *out_itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL1_MAX; break; default: BLE_HS_DBG_ASSERT(0); *out_itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL1_MAX; break; } } static int ble_gap_adv_params_tx(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_adv_params *adv_params) { const ble_addr_t *peer_any = BLE_ADDR_ANY; struct ble_hci_le_set_adv_params_cp cmd; uint16_t opcode; uint16_t min; uint16_t max; /* Fill optional fields if application did not specify them. */ if ((adv_params->itvl_min == 0) && (adv_params->itvl_max == 0)) { ble_gap_adv_dflt_itvls(adv_params->conn_mode, &min, &max); cmd.min_interval = htole16(min); cmd.max_interval = htole16(max); } else { cmd.min_interval = htole16(adv_params->itvl_min); cmd.max_interval = htole16(adv_params->itvl_max); } cmd.type = ble_gap_adv_type(adv_params); cmd.own_addr_type = own_addr_type; if (peer_addr == NULL) { peer_addr = peer_any; } cmd.peer_addr_type = peer_addr->type; memcpy(&cmd.peer_addr, peer_addr->val, sizeof(cmd.peer_addr)); if (adv_params->channel_map == 0) { cmd.chan_map = BLE_GAP_ADV_DFLT_CHANNEL_MAP; } else { cmd.chan_map = adv_params->channel_map; } /* Zero is the default value for filter policy and high duty cycle */ cmd.filter_policy = adv_params->filter_policy; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_ADV_PARAMS); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_adv_validate(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_adv_params *adv_params) { if (adv_params == NULL) { return BLE_HS_EINVAL; } if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_HS_EINVAL; } if (adv_params->disc_mode >= BLE_GAP_DISC_MODE_MAX) { return BLE_HS_EINVAL; } if (ble_gap_slave[0].op != BLE_GAP_OP_NULL) { return BLE_HS_EALREADY; } switch (adv_params->conn_mode) { case BLE_GAP_CONN_MODE_NON: /* High duty cycle only allowed for directed advertising. */ if (adv_params->high_duty_cycle) { return BLE_HS_EINVAL; } break; case BLE_GAP_CONN_MODE_UND: /* High duty cycle only allowed for directed advertising. */ if (adv_params->high_duty_cycle) { return BLE_HS_EINVAL; } /* Don't allow connectable advertising if we won't be able to allocate * a new connection. */ if (!ble_hs_conn_can_alloc()) { return BLE_HS_ENOMEM; } break; case BLE_GAP_CONN_MODE_DIR: if (peer_addr == NULL) { return BLE_HS_EINVAL; } if (peer_addr->type != BLE_ADDR_PUBLIC && peer_addr->type != BLE_ADDR_RANDOM && peer_addr->type != BLE_ADDR_PUBLIC_ID && peer_addr->type != BLE_ADDR_RANDOM_ID) { return BLE_HS_EINVAL; } /* Don't allow connectable advertising if we won't be able to allocate * a new connection. 
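         * Otherwise the peer could complete the connection and the host
         * would have no free connection object to back it; the
         * connect-complete path above asserts that ble_hs_conn_alloc()
         * succeeds.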
*/ if (!ble_hs_conn_can_alloc()) { return BLE_HS_ENOMEM; } break; default: return BLE_HS_EINVAL; } return 0; } #endif int ble_gap_adv_start(uint8_t own_addr_type, const ble_addr_t *direct_addr, int32_t duration_ms, const struct ble_gap_adv_params *adv_params, ble_gap_event_fn *cb, void *cb_arg) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) uint32_t duration_ticks; int rc; STATS_INC(ble_gap_stats, adv_start); ble_hs_lock(); rc = ble_gap_adv_validate(own_addr_type, direct_addr, adv_params); if (rc != 0) { goto done; } if (duration_ms != BLE_HS_FOREVER) { rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks); if (rc != 0) { /* Duration too great. */ rc = BLE_HS_EINVAL; goto done; } } if (!ble_hs_is_enabled()) { rc = BLE_HS_EDISABLED; goto done; } if (ble_gap_is_preempted()) { rc = BLE_HS_EPREEMPTED; goto done; } rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: advertise; "); ble_gap_log_adv(own_addr_type, direct_addr, adv_params); BLE_HS_LOG(INFO, "\n"); ble_gap_slave[0].cb = cb; ble_gap_slave[0].cb_arg = cb_arg; ble_gap_slave[0].our_addr_type = own_addr_type; if (adv_params->conn_mode != BLE_GAP_CONN_MODE_NON) { ble_gap_slave[0].connectable = 1; } else { ble_gap_slave[0].connectable = 0; } rc = ble_gap_adv_params_tx(own_addr_type, direct_addr, adv_params); if (rc != 0) { goto done; } ble_gap_slave[0].op = BLE_GAP_OP_S_ADV; rc = ble_gap_adv_enable_tx(1); if (rc != 0) { ble_gap_slave_reset_state(0); goto done; } if (duration_ms != BLE_HS_FOREVER) { ble_gap_slave_set_timer(duration_ticks); } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, adv_start_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_set_data(const uint8_t *data, int data_len) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) struct ble_hci_le_set_adv_data_cp cmd; uint16_t opcode; STATS_INC(ble_gap_stats, adv_set_data); /* Check for valid parameters */ if (((data == NULL) && (data_len != 0)) || (data_len > BLE_HCI_MAX_ADV_DATA_LEN)) { return BLE_ERR_INV_HCI_CMD_PARMS; } memcpy(cmd.adv_data, data, data_len); cmd.adv_data_len = data_len; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_ADV_DATA); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_rsp_set_data(const uint8_t *data, int data_len) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) struct ble_hci_le_set_scan_rsp_data_cp cmd; uint16_t opcode; /* Check for valid parameters */ if (((data == NULL) && (data_len != 0)) || (data_len > BLE_HCI_MAX_SCAN_RSP_DATA_LEN)) { return BLE_HS_EINVAL; } memcpy(cmd.scan_rsp, data, data_len); cmd.scan_rsp_len = data_len; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_SCAN_RSP_DATA); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_set_fields(const struct ble_hs_adv_fields *adv_fields) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) uint8_t buf[BLE_HS_ADV_MAX_SZ]; uint8_t buf_sz; int rc; rc = ble_hs_adv_set_fields(adv_fields, buf, &buf_sz, sizeof buf); if (rc != 0) { return rc; } rc = ble_gap_adv_set_data(buf, buf_sz); if (rc != 0) { return rc; } return 0; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_rsp_set_fields(const struct ble_hs_adv_fields *rsp_fields) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) uint8_t buf[BLE_HS_ADV_MAX_SZ]; uint8_t buf_sz; int rc; rc = ble_hs_adv_set_fields(rsp_fields, buf, &buf_sz, sizeof buf); if (rc != 0) { return 
rc; } rc = ble_gap_adv_rsp_set_data(buf, buf_sz); if (rc != 0) { return rc; } return 0; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_active(void) { return ble_gap_adv_active_instance(0); } #if MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_ext_adv_params_tx(uint8_t instance, const struct ble_gap_ext_adv_params *params, int8_t *selected_tx_power) { struct ble_hci_le_set_ext_adv_params_cp cmd; struct ble_hci_le_set_ext_adv_params_rp rsp; int rc; memset(&cmd, 0, sizeof(cmd)); cmd.adv_handle = instance; if (params->connectable) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_CONNECTABLE; } if (params->scannable) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_SCANNABLE; } if (params->directed) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_DIRECTED; cmd.peer_addr_type = params->peer.type; memcpy(cmd.peer_addr, params->peer.val, BLE_DEV_ADDR_LEN); } if (params->high_duty_directed) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_HD_DIRECTED; } if (params->legacy_pdu) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_LEGACY; } if (params->anonymous) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_ANON_ADV; } if (params->include_tx_power) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_INC_TX_PWR; } /* Fill optional fields if application did not specify them. */ if (params->itvl_min == 0 && params->itvl_max == 0) { /* TODO for now limited to legacy values*/ put_le24(cmd.pri_itvl_min, BLE_GAP_ADV_FAST_INTERVAL1_MIN); put_le24(cmd.pri_itvl_max, BLE_GAP_ADV_FAST_INTERVAL2_MAX); } else { put_le24(cmd.pri_itvl_min, params->itvl_min); put_le24(cmd.pri_itvl_max, params->itvl_max); } if (params->channel_map == 0) { cmd.pri_chan_map = BLE_GAP_ADV_DFLT_CHANNEL_MAP; } else { cmd.pri_chan_map = params->channel_map; } /* Zero is the default value for filter policy and high duty cycle */ cmd.filter_policy = params->filter_policy; cmd.tx_power = params->tx_power; if (params->legacy_pdu) { cmd.pri_phy = BLE_HCI_LE_PHY_1M; cmd.sec_phy = BLE_HCI_LE_PHY_1M; } else { cmd.pri_phy = params->primary_phy; cmd.sec_phy = params->secondary_phy; } cmd.own_addr_type = params->own_addr_type; cmd.sec_max_skip = 0; cmd.sid = params->sid; cmd.scan_req_notif = params->scan_req_notif; rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_ADV_PARAM), &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (rc != 0) { return rc; } if (selected_tx_power) { *selected_tx_power = rsp.tx_power; } return 0; } static int ble_gap_ext_adv_params_validate(const struct ble_gap_ext_adv_params *params) { if (!params) { return BLE_HS_EINVAL; } if (params->own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_HS_EINVAL; } /* Don't allow connectable advertising if we won't be able to allocate * a new connection. 
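     * (The connect-complete handler above assumes it can always allocate a
     * connection object for an incoming connection.)
     *
     * For reference, the usual extended-advertising bring-up with the
     * functions below is configure -> set data -> start.  A minimal sketch
     * (hypothetical application code; adv_mbuf and app_gap_event_cb are
     * assumptions, error handling elided):
     *
     *     struct ble_gap_ext_adv_params p = {0};
     *     p.own_addr_type = BLE_OWN_ADDR_PUBLIC;
     *     p.connectable = 1;
     *     p.primary_phy = BLE_HCI_LE_PHY_1M;
     *     p.secondary_phy = BLE_HCI_LE_PHY_1M;
     *     rc = ble_gap_ext_adv_configure(0, &p, NULL, app_gap_event_cb, NULL);
     *     rc = ble_gap_ext_adv_set_data(0, adv_mbuf);
     *     rc = ble_gap_ext_adv_start(0, 0, 0);
     *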
*/
    if (params->connectable && !ble_hs_conn_can_alloc()) {
        return BLE_HS_ENOMEM;
    }

    if (params->legacy_pdu) {
        /* not allowed for legacy PDUs */
        if (params->anonymous || params->include_tx_power) {
            return BLE_HS_EINVAL;
        }
    }

    if (params->directed) {
        if (params->scannable && params->connectable) {
            return BLE_HS_EINVAL;
        }
    }

    if (!params->legacy_pdu) {
        /* not allowed for extended advertising PDUs */
        if (params->connectable && params->scannable) {
            return BLE_HS_EINVAL;
        }

        /* HD directed advertising allowed only for legacy PDUs */
        if (params->high_duty_directed) {
            return BLE_HS_EINVAL;
        }
    }

    return 0;
}

int
ble_gap_ext_adv_configure(uint8_t instance,
                          const struct ble_gap_ext_adv_params *params,
                          int8_t *selected_tx_power,
                          ble_gap_event_fn *cb, void *cb_arg)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    rc = ble_gap_ext_adv_params_validate(params);
    if (rc) {
        return rc;
    }

    ble_hs_lock();

    if (ble_gap_adv_active_instance(instance)) {
        ble_hs_unlock();
        return BLE_HS_EBUSY;
    }

    rc = ble_gap_ext_adv_params_tx(instance, params, selected_tx_power);
    if (rc) {
        ble_hs_unlock();
        return rc;
    }

    ble_gap_slave[instance].configured = 1;
    ble_gap_slave[instance].cb = cb;
    ble_gap_slave[instance].cb_arg = cb_arg;
    ble_gap_slave[instance].our_addr_type = params->own_addr_type;

    ble_gap_slave[instance].connectable = params->connectable;
    ble_gap_slave[instance].scannable = params->scannable;
    ble_gap_slave[instance].directed = params->directed;
    ble_gap_slave[instance].high_duty_directed = params->high_duty_directed;
    ble_gap_slave[instance].legacy_pdu = params->legacy_pdu;

    ble_hs_unlock();
    return 0;
}

static int
ble_gap_ext_adv_set_addr_no_lock(uint8_t instance, const uint8_t *addr)
{
    struct ble_hci_le_set_adv_set_rnd_addr_cp cmd;
    int rc;

    cmd.adv_handle = instance;
    memcpy(cmd.addr, addr, BLE_DEV_ADDR_LEN);

    rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                      BLE_HCI_OCF_LE_SET_ADV_SET_RND_ADDR),
                           &cmd, sizeof(cmd), NULL, 0);
    if (rc != 0) {
        return rc;
    }

    ble_gap_slave[instance].rnd_addr_set = 1;
    memcpy(ble_gap_slave[instance].rnd_addr, addr, 6);

    return 0;
}

int
ble_gap_ext_adv_set_addr(uint8_t instance, const ble_addr_t *addr)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES || addr->type != BLE_ADDR_RANDOM) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();
    rc = ble_gap_ext_adv_set_addr_no_lock(instance, addr->val);
    ble_hs_unlock();

    return rc;
}

int
ble_gap_ext_adv_start(uint8_t instance, int duration, int max_events)
{
    struct ble_hci_le_set_ext_adv_enable_cp *cmd;
    uint8_t buf[sizeof(*cmd) + sizeof(cmd->sets[0])];
    const uint8_t *rnd_addr;
    uint16_t opcode;
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();
    if (!ble_gap_slave[instance].configured) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    if (ble_gap_slave[instance].op != BLE_GAP_OP_NULL) {
        ble_hs_unlock();
        return BLE_HS_EALREADY;
    }

    /* HD directed duration shall not be 0 or larger than 1.28 s
     * (128 * 10 ms units).
     */
    if (ble_gap_slave[instance].high_duty_directed &&
        ((duration == 0) || (duration > 128))) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    /* verify own address type if random address for instance wasn't explicitly
     * set
     */
    switch (ble_gap_slave[instance].our_addr_type) {
    case BLE_OWN_ADDR_RANDOM:
    case BLE_OWN_ADDR_RPA_RANDOM_DEFAULT:
        if (ble_gap_slave[instance].rnd_addr_set) {
            break;
        }
        /* fall through */
    case BLE_OWN_ADDR_PUBLIC:
    case BLE_OWN_ADDR_RPA_PUBLIC_DEFAULT:
    default:
        rc = ble_hs_id_use_addr(ble_gap_slave[instance].our_addr_type);
        if (rc) {
            ble_hs_unlock();
            return BLE_HS_EINVAL;
        }
        break;
    }

    /* fallback to ID static random address if using random address and
instance * wasn't configured with own address */ if (!ble_gap_slave[instance].rnd_addr_set) { switch (ble_gap_slave[instance].our_addr_type) { case BLE_OWN_ADDR_RANDOM: case BLE_OWN_ADDR_RPA_RANDOM_DEFAULT: rc = ble_hs_id_addr(BLE_ADDR_RANDOM, &rnd_addr, NULL); if (rc != 0) { ble_hs_unlock(); return rc; } rc = ble_gap_ext_adv_set_addr_no_lock(instance, rnd_addr); if (rc != 0) { ble_hs_unlock(); return rc; } break; default: break; } } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_ADV_ENABLE); cmd = (void *) buf; cmd->enable = 0x01; cmd->num_sets = 1; cmd->sets[0].adv_handle = instance; cmd->sets[0].duration = htole16(duration); cmd->sets[0].max_events = max_events; rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(buf), NULL, 0); if (rc != 0) { ble_hs_unlock(); return rc; } ble_gap_slave[instance].op = BLE_GAP_OP_S_ADV; ble_hs_unlock(); return 0; } static int ble_gap_ext_adv_stop_no_lock(uint8_t instance) { struct ble_hci_le_set_ext_adv_enable_cp *cmd; uint8_t buf[sizeof(*cmd) + sizeof(cmd->sets[0])]; uint16_t opcode; bool active; int rc; if (!ble_gap_slave[instance].configured) { return BLE_HS_EINVAL; } active = ble_gap_adv_active_instance(instance); cmd = (void *) buf; cmd->enable = 0x00; cmd->num_sets = 1; cmd->sets[0].adv_handle = instance; cmd->sets[0].duration = 0x0000; cmd->sets[0].max_events = 0x00; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_ADV_ENABLE); rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(buf), NULL, 0); if (rc != 0) { return rc; } ble_gap_slave[instance].op = BLE_GAP_OP_NULL; if (!active) { return BLE_HS_EALREADY; } else { return 0; } } int ble_gap_ext_adv_stop(uint8_t instance) { int rc; if (instance >= BLE_ADV_INSTANCES) { return BLE_HS_EINVAL; } ble_hs_lock(); rc = ble_gap_ext_adv_stop_no_lock(instance); ble_hs_unlock(); return rc; } static int ble_gap_ext_adv_set_data_validate(uint8_t instance, struct os_mbuf *data) { uint16_t len = OS_MBUF_PKTLEN(data); if (!ble_gap_slave[instance].configured) { return BLE_HS_EINVAL; } /* not allowed with directed advertising for legacy*/ if (ble_gap_slave[instance].legacy_pdu && ble_gap_slave[instance].directed) { return BLE_HS_EINVAL; } /* always allowed with legacy PDU but limited to legacy length */ if (ble_gap_slave[instance].legacy_pdu) { if (len > BLE_HS_ADV_MAX_SZ) { return BLE_HS_EINVAL; } return 0; } /* if already advertising, data must fit in single HCI command * as per BT 5.0 Vol 2, Part E, 7.8.54. Don't bother Controller with such * a request. 
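     * (That single-command limit is min(BLE_EXT_ADV_MAX_SIZE, 251) bytes, as
     * enforced below.)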
*/ if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) { if (len > min(MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE), 251)) { return BLE_HS_EINVAL; } } /* not allowed with scannable advertising */ if (ble_gap_slave[instance].scannable) { return BLE_HS_EINVAL; } return 0; } static int ble_gap_ext_adv_set(uint8_t instance, uint16_t opcode, struct os_mbuf **data) { /* in that case we always fit all data in single HCI command */ #if MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE) <= BLE_HCI_MAX_EXT_ADV_DATA_LEN static uint8_t buf[sizeof(struct ble_hci_le_set_ext_adv_data_cp) + \ MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE)]; struct ble_hci_le_set_ext_adv_data_cp *cmd = (void *)buf; uint16_t len = OS_MBUF_PKTLEN(*data); opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, opcode); cmd->adv_handle = instance; cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE; cmd->fragment_pref = 0; cmd->adv_data_len = len; os_mbuf_copydata(*data, 0, len, cmd->adv_data); os_mbuf_adj(*data, len); *data = os_mbuf_trim_front(*data); return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); #else static uint8_t buf[sizeof(struct ble_hci_le_set_ext_adv_data_cp) + \ BLE_HCI_MAX_EXT_ADV_DATA_LEN]; struct ble_hci_le_set_ext_adv_data_cp *cmd = (void *)buf; uint16_t len = OS_MBUF_PKTLEN(*data); uint8_t op; int rc; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, opcode); cmd->adv_handle = instance; /* complete data */ if (len <= BLE_HCI_MAX_EXT_ADV_DATA_LEN) { cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE; cmd->fragment_pref = 0; cmd->adv_data_len = len; os_mbuf_copydata(*data, 0, len, cmd->adv_data); os_mbuf_adj(*data, len); *data = os_mbuf_trim_front(*data); return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); } /* first fragment */ op = BLE_HCI_LE_SET_DATA_OPER_FIRST; do { cmd->operation = op; cmd->fragment_pref = 0; cmd->adv_data_len = BLE_HCI_MAX_EXT_ADV_DATA_LEN; os_mbuf_copydata(*data, 0, BLE_HCI_MAX_EXT_ADV_DATA_LEN, cmd->adv_data); os_mbuf_adj(*data, BLE_HCI_MAX_EXT_ADV_DATA_LEN); *data = os_mbuf_trim_front(*data); rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); if (rc) { return rc; } len -= BLE_HCI_MAX_EXT_ADV_DATA_LEN; op = BLE_HCI_LE_SET_DATA_OPER_INT; } while (len > BLE_HCI_MAX_EXT_ADV_DATA_LEN); /* last fragment */ cmd->operation = BLE_HCI_LE_SET_DATA_OPER_LAST; cmd->fragment_pref = 0; cmd->adv_data_len = len; os_mbuf_copydata(*data, 0, len, cmd->adv_data); os_mbuf_adj(*data, len); *data = os_mbuf_trim_front(*data); return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); #endif } int ble_gap_ext_adv_set_data(uint8_t instance, struct os_mbuf *data) { int rc; if (instance >= BLE_ADV_INSTANCES) { rc = BLE_HS_EINVAL; goto done; } ble_hs_lock(); rc = ble_gap_ext_adv_set_data_validate(instance, data); if (rc != 0) { ble_hs_unlock(); goto done; } rc = ble_gap_ext_adv_set(instance, BLE_HCI_OCF_LE_SET_EXT_ADV_DATA, &data); ble_hs_unlock(); done: os_mbuf_free_chain(data); return rc; } static int ble_gap_ext_adv_rsp_set_validate(uint8_t instance, struct os_mbuf *data) { uint16_t len = OS_MBUF_PKTLEN(data); if (!ble_gap_slave[instance].configured) { return BLE_HS_EINVAL; } /* not allowed with directed advertising */ if (ble_gap_slave[instance].directed && ble_gap_slave[instance].connectable) { return BLE_HS_EINVAL; } /* only allowed with scannable advertising */ if (!ble_gap_slave[instance].scannable) { return BLE_HS_EINVAL; } /* with legacy PDU limited to legacy length */ if (ble_gap_slave[instance].legacy_pdu) { if (len > BLE_HS_ADV_MAX_SZ) { return BLE_HS_EINVAL; } 
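        /* Legacy scan response data always fits in a single HCI command. */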
return 0; } /* if already advertising, data must fit in single HCI command * as per BT 5.0 Vol 2, Part E, 7.8.55. Don't bother Controller with such * a request. */ if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) { if (len > min(MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE), 251)) { return BLE_HS_EINVAL; } } return 0; } int ble_gap_ext_adv_rsp_set_data(uint8_t instance, struct os_mbuf *data) { int rc; if (instance >= BLE_ADV_INSTANCES) { rc = BLE_HS_EINVAL; goto done; } ble_hs_lock(); rc = ble_gap_ext_adv_rsp_set_validate(instance, data); if (rc != 0) { ble_hs_unlock(); goto done; } rc = ble_gap_ext_adv_set(instance, BLE_HCI_OCF_LE_SET_EXT_SCAN_RSP_DATA, &data); ble_hs_unlock(); done: os_mbuf_free_chain(data); return rc; } int ble_gap_ext_adv_remove(uint8_t instance) { struct ble_hci_le_remove_adv_set_cp cmd; uint16_t opcode; int rc; if (instance >= BLE_ADV_INSTANCES) { return BLE_HS_EINVAL; } ble_hs_lock(); if (!ble_gap_slave[instance].configured) { ble_hs_unlock(); return BLE_HS_EALREADY; } if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) { ble_hs_unlock(); return BLE_HS_EBUSY; } cmd.adv_handle = instance; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REMOVE_ADV_SET); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); if (rc != 0) { ble_hs_unlock(); return rc; } memset(&ble_gap_slave[instance], 0, sizeof(struct ble_gap_slave_state)); ble_hs_unlock(); return 0; } int ble_gap_ext_adv_clear(void) { int rc; uint8_t instance; uint16_t opcode; ble_hs_lock(); for (instance = 0; instance < BLE_ADV_INSTANCES; instance++) { /* If there is an active instance or periodic adv instance, * Don't send the command * */ if ((ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV)) { ble_hs_unlock(); return BLE_HS_EBUSY; } #if MYNEWT_VAL(BLE_PERIODIC_ADV) if (ble_gap_slave[instance].periodic_op == BLE_GAP_OP_S_PERIODIC_ADV) { ble_hs_unlock(); return BLE_HS_EBUSY; } #endif } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CLEAR_ADV_SETS); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, NULL, 0); if (rc != 0) { ble_hs_unlock(); return rc; } memset(ble_gap_slave, 0, sizeof(ble_gap_slave)); ble_hs_unlock(); return 0; } #if MYNEWT_VAL(BLE_PERIODIC_ADV) static int ble_gap_periodic_adv_params_tx(uint8_t instance, const struct ble_gap_periodic_adv_params *params) { struct ble_hci_le_set_periodic_adv_params_cp cmd; uint16_t opcode; cmd.adv_handle = instance; /* Fill optional fields if application did not specify them. 
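     * Intervals are in 1.25 ms units; the defaults below correspond to 30 ms
     * and 60 ms.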
*/
    if (params->itvl_min == 0 && params->itvl_max == 0) {
        /* TODO defines for those */
        cmd.min_itvl = htole16(30 / 1.25);   //30 ms
        cmd.max_itvl = htole16(60 / 1.25);   //60 ms
    } else {
        cmd.min_itvl = htole16(params->itvl_min);
        cmd.max_itvl = htole16(params->itvl_max);
    }

    if (params->include_tx_power) {
        cmd.props = BLE_HCI_LE_SET_PERIODIC_ADV_PROP_INC_TX_PWR;
    } else {
        cmd.props = 0;
    }

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE,
                        BLE_HCI_OCF_LE_SET_PERIODIC_ADV_PARAMS);

    return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
}

static int
ble_gap_periodic_adv_params_validate(
        const struct ble_gap_periodic_adv_params *params)
{
    if (!params) {
        return BLE_HS_EINVAL;
    }

    if (params->itvl_min && params->itvl_min < 6) {
        return BLE_HS_EINVAL;
    }

    if (params->itvl_max && params->itvl_max < 6) {
        return BLE_HS_EINVAL;
    }
    return 0;
}

int
ble_gap_periodic_adv_configure(uint8_t instance,
                               const struct ble_gap_periodic_adv_params *params)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    rc = ble_gap_periodic_adv_params_validate(params);
    if (rc) {
        return rc;
    }

    ble_hs_lock();

    /* The corresponding extended advertising instance should be configured */
    if (!ble_gap_slave[instance].configured) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    /* Periodic advertising shall not be configured while it is already
     * running.
     * Bluetooth Core Specification, Section 7.8.61
     */
    if (ble_gap_slave[instance].periodic_op == BLE_GAP_OP_S_PERIODIC_ADV) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    rc = ble_gap_periodic_adv_params_tx(instance, params);
    if (rc) {
        ble_hs_unlock();
        return rc;
    }

    ble_gap_slave[instance].periodic_configured = 1;

    ble_hs_unlock();

    return 0;
}

int
ble_gap_periodic_adv_start(uint8_t instance)
{
    struct ble_hci_le_set_periodic_adv_enable_cp cmd;
    uint16_t opcode;
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();

    /* Periodic advertising cannot start unless it is configured before */
    if (!ble_gap_slave[instance].periodic_configured) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE,
                        BLE_HCI_OCF_LE_SET_PERIODIC_ADV_ENABLE);

    cmd.enable = 0x01;
    cmd.adv_handle = instance;

    rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
    if (rc != 0) {
        ble_hs_unlock();
        return rc;
    }

    ble_gap_slave[instance].periodic_op = BLE_GAP_OP_S_PERIODIC_ADV;

    ble_hs_unlock();
    return 0;
}

static int
ble_gap_periodic_adv_set(uint8_t instance, struct os_mbuf **data)
{
    /* In that case we always fit all data in single HCI command */
#if MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE) <= BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN
    static uint8_t buf[sizeof(struct ble_hci_le_set_periodic_adv_data_cp) +
                       MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE)];
    struct ble_hci_le_set_periodic_adv_data_cp *cmd = (void *) buf;
    uint16_t len = OS_MBUF_PKTLEN(*data);
    uint16_t opcode;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_DATA);

    cmd->adv_handle = instance;
    cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE;
    cmd->adv_data_len = len;
    os_mbuf_copydata(*data, 0, len, cmd->adv_data);

    os_mbuf_adj(*data, len);
    *data = os_mbuf_trim_front(*data);

    return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len,
                             NULL, 0);
#else
    static uint8_t buf[sizeof(struct ble_hci_le_set_periodic_adv_data_cp) +
                       BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN];
    struct ble_hci_le_set_periodic_adv_data_cp *cmd = (void *) buf;
    uint16_t len = OS_MBUF_PKTLEN(*data);
    uint16_t opcode;
    uint8_t op;
    int rc;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_DATA);

    cmd->adv_handle = instance;

    /* Complete data */
    if (len <=
BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN) {
        cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE;
        cmd->adv_data_len = len;
        os_mbuf_copydata(*data, 0, len, cmd->adv_data);

        os_mbuf_adj(*data, len);
        *data = os_mbuf_trim_front(*data);

        return ble_hs_hci_cmd_tx(opcode, cmd,
                                 sizeof(*cmd) + cmd->adv_data_len, NULL, 0);
    }

    /* If periodic advertising is already enabled, the data must be sent as a
     * single operation with op code 0x03 (Complete); fragmented updates are
     * not permitted.
     * Bluetooth Core Specification, section 7.8.62
     */
    if (ble_gap_slave[instance].periodic_op == BLE_GAP_OP_S_PERIODIC_ADV) {
        return BLE_HS_EINVAL;
    }

    /* First fragment */
    op = BLE_HCI_LE_SET_DATA_OPER_FIRST;

    do {
        cmd->operation = op;
        cmd->adv_data_len = BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN;
        os_mbuf_copydata(*data, 0, BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN,
                         cmd->adv_data);

        os_mbuf_adj(*data, BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN);
        *data = os_mbuf_trim_front(*data);

        rc = ble_hs_hci_cmd_tx(opcode, cmd,
                               sizeof(*cmd) + cmd->adv_data_len, NULL, 0);
        if (rc) {
            return rc;
        }

        len -= BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN;
        op = BLE_HCI_LE_SET_DATA_OPER_INT;
    } while (len > BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN);

    /* Last fragment */
    cmd->operation = BLE_HCI_LE_SET_DATA_OPER_LAST;
    cmd->adv_data_len = len;
    os_mbuf_copydata(*data, 0, len, cmd->adv_data);

    os_mbuf_adj(*data, len);
    *data = os_mbuf_trim_front(*data);

    return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len,
                             NULL, 0);
#endif
}

static int
ble_gap_periodic_adv_set_data_validate(uint8_t instance,
                                       struct os_mbuf *data)
{
    /* The corresponding extended advertising instance should be configured */
    if (!ble_gap_slave[instance].configured) {
        return BLE_HS_EINVAL;
    }

    if (ble_gap_slave[instance].legacy_pdu) {
        return BLE_HS_EINVAL;
    }

    /* One more check states that if the periodic advertising is already
     * enabled, the operation shall be 0x03 (Complete).
* This check is handled during sending the data to the controller, as the * length checks are already checked there, so this saves duplicate code */ return 0; } int ble_gap_periodic_adv_set_data(uint8_t instance, struct os_mbuf *data) { int rc; if (instance >= BLE_ADV_INSTANCES) { rc = BLE_HS_EINVAL; goto done; } ble_hs_lock(); rc = ble_gap_periodic_adv_set_data_validate(instance, data); if (rc != 0) { ble_hs_unlock(); goto done; } rc = ble_gap_periodic_adv_set(instance, &data); ble_hs_unlock(); done: os_mbuf_free_chain(data); return rc; } static int ble_gap_periodic_adv_stop_no_lock(uint8_t instance) { struct ble_hci_le_set_periodic_adv_enable_cp cmd; uint16_t opcode; int rc; cmd.enable = 0x00; cmd.adv_handle = instance; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_ENABLE); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); if (rc != 0) { return rc; } ble_gap_slave[instance].periodic_op = BLE_GAP_OP_NULL; return 0; } int ble_gap_periodic_adv_stop(uint8_t instance) { int rc; if (instance >= BLE_ADV_INSTANCES) { return BLE_HS_EINVAL; } ble_hs_lock(); rc = ble_gap_periodic_adv_stop_no_lock(instance); ble_hs_unlock(); return rc; } static void ble_gap_npl_sync_lost(struct ble_npl_event *ev) { struct ble_hs_periodic_sync *psync; struct ble_gap_event event; ble_gap_event_fn *cb; void *cb_arg; /* this psync is no longer on list so no lock needed */ psync = ble_npl_event_get_arg(ev); cb = psync->cb; cb_arg = psync->cb_arg; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PERIODIC_SYNC_LOST; event.periodic_sync_lost.sync_handle = psync->sync_handle; event.periodic_sync_lost.reason = BLE_HS_EDONE; /* Free the memory occupied by psync as it is no longer needed */ ble_hs_periodic_sync_free(psync); ble_gap_event_listener_call(&event); if (cb) { cb(&event, cb_arg); } } int ble_gap_periodic_adv_sync_create(const ble_addr_t *addr, uint8_t adv_sid, const struct ble_gap_periodic_sync_params *params, ble_gap_event_fn *cb, void *cb_arg) { struct ble_hci_le_periodic_adv_create_sync_cp cmd; struct ble_hs_periodic_sync *psync; uint16_t opcode; int rc; if (addr && (addr->type > BLE_ADDR_RANDOM)) { return BLE_HS_EINVAL; } if (adv_sid > 0x0f) { return BLE_HS_EINVAL; } if ((params->skip > 0x1f3) || (params->sync_timeout > 0x4000) || (params->sync_timeout < 0x0A)) { return BLE_HS_EINVAL; } ble_hs_lock(); /* No sync can be created if another sync is still pending */ if (ble_gap_sync.op == BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } /* cannot create another sync if already synchronized */ if (ble_hs_periodic_sync_find(addr, adv_sid)) { ble_hs_unlock(); return BLE_HS_EALREADY; } /* preallocate sync element */ psync = ble_hs_periodic_sync_alloc(); if (!psync) { ble_hs_unlock(); return BLE_HS_ENOMEM; } ble_npl_event_init(&psync->lost_ev, ble_gap_npl_sync_lost, psync); if (addr) { cmd.options = 0x00; cmd.peer_addr_type = addr->type; memcpy(cmd.peer_addr, addr->val, BLE_DEV_ADDR_LEN); } else { cmd.options = 0x01; cmd.peer_addr_type = BLE_ADDR_ANY->type; memcpy(cmd.peer_addr, BLE_ADDR_ANY->val, BLE_DEV_ADDR_LEN); } cmd.sid = adv_sid; cmd.skip = params->skip; cmd.sync_timeout = htole16(params->sync_timeout); cmd.sync_cte_type = 0x00; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_CREATE_SYNC); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); if (!rc) { /* This shall be reset upon receiving sync_established event, * or if the sync is cancelled before receiving that event. 
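         * Until that event arrives, the pending sync can be aborted with
         * ble_gap_periodic_adv_sync_create_cancel().  A minimal usage sketch
         * (hypothetical application code; peer and app_sync_cb are
         * assumptions):
         *
         *     struct ble_gap_periodic_sync_params sp = {
         *         .skip = 0,
         *         .sync_timeout = 0x0100,   // 10 ms units -> 2.56 s
         *     };
         *     rc = ble_gap_periodic_adv_sync_create(&peer, 0, &sp,
         *                                           app_sync_cb, NULL);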
*/ ble_gap_sync.op = BLE_GAP_OP_SYNC; ble_gap_sync.cb = cb; ble_gap_sync.cb_arg = cb_arg; ble_gap_sync.psync = psync; } else { ble_hs_periodic_sync_free(psync); } ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_create_cancel(void) { uint16_t opcode; int rc = 0; ble_hs_lock(); if (ble_gap_sync.op != BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_CREATE_SYNC_CANCEL); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, NULL, 0); ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_terminate(uint16_t sync_handle) { struct ble_hci_le_periodic_adv_term_sync_cp cmd; struct ble_hs_periodic_sync *psync; uint16_t opcode; int rc; ble_hs_lock(); if (ble_gap_sync.op == BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } /* The handle must be in the list. If it doesn't exist, it means * that the sync may have been lost at the same moment in which * the app wants to terminate that sync handle */ psync = ble_hs_periodic_sync_find_by_handle(sync_handle); if (!psync) { /* Sync already terminated.*/ ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_TERM_SYNC); cmd.sync_handle = htole16(sync_handle); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); if (rc == 0) { /* Remove the handle from the list */ ble_hs_periodic_sync_remove(psync); /* send sync_lost event, this is to mimic connection behavior and thus * simplify application error handling */ ble_npl_eventq_put(ble_hs_evq_get(), &psync->lost_ev); } ble_hs_unlock(); return rc; } #if MYNEWT_VAL(BLE_PERIODIC_ADV_SYNC_TRANSFER) int ble_gap_periodic_adv_sync_reporting(uint16_t sync_handle, bool enable) { struct ble_hci_le_periodic_adv_receive_enable_cp cmd; struct ble_hs_periodic_sync *psync; uint16_t opcode; int rc; ble_hs_lock(); if (ble_gap_sync.op == BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } psync = ble_hs_periodic_sync_find_by_handle(sync_handle); if (!psync) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_RECEIVE_ENABLE); cmd.sync_handle = htole16(sync_handle); cmd.enable = enable ? 
0x01 : 0x00; rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_transfer(uint16_t sync_handle, uint16_t conn_handle, uint16_t service_data) { struct ble_hci_le_periodic_adv_sync_transfer_cp cmd; struct ble_hci_le_periodic_adv_sync_transfer_rp rsp; struct ble_hs_periodic_sync *psync; struct ble_hs_conn *conn; uint16_t opcode; int rc; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (!conn) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } psync = ble_hs_periodic_sync_find_by_handle(sync_handle); if (!psync) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SYNC_TRANSFER); cmd.conn_handle = htole16(conn_handle); cmd.sync_handle = htole16(sync_handle); cmd.service_data = htole16(service_data); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_set_info(uint8_t instance, uint16_t conn_handle, uint16_t service_data) { struct ble_hci_le_periodic_adv_set_info_transfer_cp cmd; struct ble_hci_le_periodic_adv_set_info_transfer_rp rsp; struct ble_hs_conn *conn; uint16_t opcode; int rc; if (instance >= BLE_ADV_INSTANCES) { return BLE_HS_EINVAL; } ble_hs_lock(); if (ble_gap_slave[instance].periodic_op != BLE_GAP_OP_S_PERIODIC_ADV) { /* periodic adv not enabled */ ble_hs_unlock(); return BLE_HS_EINVAL; } conn = ble_hs_conn_find(conn_handle); if (!conn) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SET_INFO_TRANSFER); cmd.conn_handle = htole16(conn_handle); cmd.adv_handle = instance; cmd.service_data = htole16(service_data); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } ble_hs_unlock(); return rc; } static int periodic_adv_transfer_enable(uint16_t conn_handle, const struct ble_gap_periodic_sync_params *params) { struct ble_hci_le_periodic_adv_sync_transfer_params_cp cmd; struct ble_hci_le_periodic_adv_sync_transfer_params_rp rsp; uint16_t opcode; int rc; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SYNC_TRANSFER_PARAMS); cmd.conn_handle = htole16(conn_handle); cmd.sync_cte_type = 0x00; cmd.mode = params->reports_disabled ? 
0x01 : 0x02; cmd.skip = htole16(params->skip); cmd.sync_timeout = htole16(params->sync_timeout); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } return rc; } int ble_gap_periodic_adv_sync_receive(uint16_t conn_handle, const struct ble_gap_periodic_sync_params *params, ble_gap_event_fn *cb, void *cb_arg) { struct ble_hs_conn *conn; int rc; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (!conn) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } if (params) { if (conn->psync) { ble_hs_unlock(); return BLE_HS_EALREADY; } conn->psync = ble_hs_periodic_sync_alloc(); if (!conn->psync) { ble_hs_unlock(); return BLE_HS_ENOMEM; } rc = periodic_adv_transfer_enable(conn_handle, params); if (rc) { ble_hs_periodic_sync_free(conn->psync); conn->psync = NULL; } else { conn->psync->cb = cb; conn->psync->cb_arg = cb_arg; ble_npl_event_init(&conn->psync->lost_ev, ble_gap_npl_sync_lost, conn->psync); } } else { if (!conn->psync) { ble_hs_unlock(); return BLE_HS_EALREADY; } rc = periodic_adv_transfer_disable(conn_handle); if (!rc) { ble_hs_periodic_sync_free(conn->psync); conn->psync = NULL; } } ble_hs_unlock(); return rc; } #endif int ble_gap_add_dev_to_periodic_adv_list(const ble_addr_t *peer_addr, uint8_t adv_sid) { struct ble_hci_le_add_dev_to_periodic_adv_list_cp cmd; uint16_t opcode; if ((peer_addr->type > BLE_ADDR_RANDOM) || (adv_sid > 0x0f)) { return BLE_ERR_INV_HCI_CMD_PARMS; } cmd.peer_addr_type = peer_addr->type; memcpy(cmd.peer_addr, peer_addr->val, BLE_DEV_ADDR_LEN); cmd.sid = adv_sid; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_ADD_DEV_TO_PERIODIC_ADV_LIST); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } int ble_gap_rem_dev_from_periodic_adv_list(const ble_addr_t *peer_addr, uint8_t adv_sid) { struct ble_hci_le_rem_dev_from_periodic_adv_list_cp cmd; uint16_t opcode; if ((peer_addr->type > BLE_ADDR_RANDOM) || (adv_sid > 0x0f)) { return BLE_ERR_INV_HCI_CMD_PARMS; } cmd.peer_addr_type = peer_addr->type; memcpy(cmd.peer_addr, peer_addr->val, BLE_DEV_ADDR_LEN); cmd.sid = adv_sid; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REM_DEV_FROM_PERIODIC_ADV_LIST); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } int ble_gap_clear_periodic_adv_list(void) { uint16_t opcode; int rc = 0; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CLEAR_PERIODIC_ADV_LIST); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, NULL, 0); return rc; } int ble_gap_read_periodic_adv_list_size(uint8_t *per_adv_list_size) { struct ble_hci_le_rd_periodic_adv_list_size_rp rsp; uint16_t opcode; int rc = 0; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_RD_PERIODIC_ADV_LIST_SIZE); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, &rsp, sizeof(rsp)); if (rc != 0) { return rc; } *per_adv_list_size = rsp.list_size; return 0; } #endif /***************************************************************************** * $discovery procedures * *****************************************************************************/ #if MYNEWT_VAL(BLE_EXT_ADV) && NIMBLE_BLE_SCAN static int ble_gap_ext_disc_tx_params(uint8_t own_addr_type, uint8_t filter_policy, const struct ble_hs_hci_ext_scan_param *uncoded_params, const struct ble_hs_hci_ext_scan_param *coded_params) { struct ble_hci_le_set_ext_scan_params_cp *cmd; struct scan_params *params; uint8_t buf[sizeof(*cmd) + 2 * sizeof(*params)]; uint8_t len = sizeof(*cmd); /* Check own addr type */ if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* 
Check scanner filter policy */ if (filter_policy > BLE_HCI_SCAN_FILT_MAX) { return BLE_ERR_INV_HCI_CMD_PARMS; } cmd = (void *) buf; params = cmd->scans; cmd->filter_policy = filter_policy; cmd->own_addr_type = own_addr_type; cmd->phys = 0; if (uncoded_params) { cmd->phys |= BLE_HCI_LE_PHY_1M_PREF_MASK; params->type = uncoded_params->scan_type; params->itvl = htole16(uncoded_params->scan_itvl); params->window = htole16(uncoded_params->scan_window); len += sizeof(*params); params++; } if (coded_params) { cmd->phys |= BLE_HCI_LE_PHY_CODED_PREF_MASK; params->type = coded_params->scan_type; params->itvl = htole16(coded_params->scan_itvl); params->window = htole16(coded_params->scan_window); len += sizeof(*params); params++; } if (!cmd->phys) { return BLE_ERR_INV_HCI_CMD_PARMS; } return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_SCAN_PARAM), cmd, len, NULL, 0); } static int ble_gap_ext_disc_enable_tx(uint8_t enable, uint8_t filter_duplicates, uint16_t duration, uint16_t period) { struct ble_hci_le_set_ext_scan_enable_cp cmd; cmd.enable = enable; cmd.filter_dup = filter_duplicates; cmd.duration = htole16(duration); cmd.period = htole16(period); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_SCAN_ENABLE), &cmd, sizeof(cmd), NULL, 0); } #endif #endif #if NIMBLE_BLE_SCAN #if !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_disc_enable_tx(int enable, int filter_duplicates) { struct ble_hci_le_set_scan_enable_cp cmd; uint16_t opcode; cmd.enable = !!enable; cmd.filter_duplicates = !!filter_duplicates; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_SCAN_ENABLE); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_disc_tx_params(uint8_t own_addr_type, const struct ble_gap_disc_params *disc_params) { struct ble_hci_le_set_scan_params_cp cmd; uint16_t opcode; if (disc_params->passive) { cmd.scan_type = BLE_HCI_SCAN_TYPE_PASSIVE; } else { cmd.scan_type = BLE_HCI_SCAN_TYPE_ACTIVE; } cmd.scan_itvl = htole16(disc_params->itvl); cmd.scan_window = htole16(disc_params->window); cmd.own_addr_type = own_addr_type; cmd.filter_policy = disc_params->filter_policy; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_SCAN_PARAMS); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } #endif static int ble_gap_disc_disable_tx(void) { #if MYNEWT_VAL(BLE_EXT_ADV) return ble_gap_ext_disc_enable_tx(0, 0, 0, 0); #else return ble_gap_disc_enable_tx(0, 0); #endif } static int ble_gap_disc_cancel_no_lock(void) { int rc; STATS_INC(ble_gap_stats, discover_cancel); if (!ble_gap_disc_active()) { rc = BLE_HS_EALREADY; goto done; } rc = ble_gap_disc_disable_tx(); if (rc != 0) { goto done; } ble_gap_master_reset_state(); done: if (rc != 0) { STATS_INC(ble_gap_stats, discover_cancel_fail); } return rc; } #endif int ble_gap_disc_cancel(void) { #if NIMBLE_BLE_SCAN int rc; ble_hs_lock(); rc = ble_gap_disc_cancel_no_lock(); ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } #if NIMBLE_BLE_SCAN static int ble_gap_disc_ext_validate(uint8_t own_addr_type) { if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_HS_EINVAL; } if (ble_gap_conn_active()) { return BLE_HS_EBUSY; } if (ble_gap_disc_active()) { return BLE_HS_EALREADY; } if (!ble_hs_is_enabled()) { return BLE_HS_EDISABLED; } if (ble_gap_is_preempted()) { return BLE_HS_EPREEMPTED; } return 0; } #endif #if MYNEWT_VAL(BLE_EXT_ADV) && NIMBLE_BLE_SCAN static void ble_gap_ext_disc_fill_dflts(uint8_t limited, struct ble_hs_hci_ext_scan_param *disc_params) { if 
(disc_params->scan_itvl == 0) { if (limited) { disc_params->scan_itvl = BLE_GAP_LIM_DISC_SCAN_INT; } else { disc_params->scan_itvl = BLE_GAP_SCAN_FAST_INTERVAL_MIN; } } if (disc_params->scan_window == 0) { if (limited) { disc_params->scan_window = BLE_GAP_LIM_DISC_SCAN_WINDOW; } else { disc_params->scan_window = BLE_GAP_SCAN_FAST_WINDOW; } } } static void ble_gap_ext_scan_params_to_hci(const struct ble_gap_ext_disc_params *params, struct ble_hs_hci_ext_scan_param *hci_params) { memset(hci_params, 0, sizeof(*hci_params)); if (params->passive) { hci_params->scan_type = BLE_HCI_SCAN_TYPE_PASSIVE; } else { hci_params->scan_type = BLE_HCI_SCAN_TYPE_ACTIVE; } hci_params->scan_itvl = params->itvl; hci_params->scan_window = params->window; } #endif int ble_gap_ext_disc(uint8_t own_addr_type, uint16_t duration, uint16_t period, uint8_t filter_duplicates, uint8_t filter_policy, uint8_t limited, const struct ble_gap_ext_disc_params *uncoded_params, const struct ble_gap_ext_disc_params *coded_params, ble_gap_event_fn *cb, void *cb_arg) { #if NIMBLE_BLE_SCAN && MYNEWT_VAL(BLE_EXT_ADV) struct ble_hs_hci_ext_scan_param ucp; struct ble_hs_hci_ext_scan_param cp; int rc; STATS_INC(ble_gap_stats, discover); ble_hs_lock(); rc = ble_gap_disc_ext_validate(own_addr_type); if (rc != 0) { goto done; } /* Make a copy of the parameter structure and fill unspecified values with * defaults. */ if (uncoded_params) { ble_gap_ext_scan_params_to_hci(uncoded_params, &ucp); ble_gap_ext_disc_fill_dflts(limited, &ucp); /* XXX: We should do it only once */ if (!uncoded_params->passive) { rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } } } if (coded_params) { ble_gap_ext_scan_params_to_hci(coded_params, &cp); ble_gap_ext_disc_fill_dflts(limited, &cp); /* XXX: We should do it only once */ if (!coded_params->passive) { rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } } } ble_gap_master.disc.limited = limited; ble_gap_master.cb = cb; ble_gap_master.cb_arg = cb_arg; rc = ble_gap_ext_disc_tx_params(own_addr_type, filter_policy, uncoded_params ? &ucp : NULL, coded_params ? 
&cp : NULL);
    if (rc != 0) {
        goto done;
    }

    ble_gap_master.op = BLE_GAP_OP_M_DISC;

    rc = ble_gap_ext_disc_enable_tx(1, filter_duplicates, duration, period);
    if (rc != 0) {
        ble_gap_master_reset_state();
        goto done;
    }

    rc = 0;

done:
    ble_hs_unlock();
    if (rc != 0) {
        STATS_INC(ble_gap_stats, discover_fail);
    }
    return rc;
#else
    return BLE_HS_ENOTSUP;
#endif
}

#if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_disc_fill_dflts(struct ble_gap_disc_params *disc_params)
{
    if (disc_params->itvl == 0) {
        if (disc_params->limited) {
            disc_params->itvl = BLE_GAP_LIM_DISC_SCAN_INT;
        } else {
            disc_params->itvl = BLE_GAP_SCAN_FAST_INTERVAL_MIN;
        }
    }

    if (disc_params->window == 0) {
        if (disc_params->limited) {
            disc_params->window = BLE_GAP_LIM_DISC_SCAN_WINDOW;
        } else {
            disc_params->window = BLE_GAP_SCAN_FAST_WINDOW;
        }
    }
}

static int
ble_gap_disc_validate(uint8_t own_addr_type,
                      const struct ble_gap_disc_params *disc_params)
{
    if (disc_params == NULL) {
        return BLE_HS_EINVAL;
    }

    /* Check interval and window */
    if ((disc_params->itvl < BLE_HCI_SCAN_ITVL_MIN) ||
        (disc_params->itvl > BLE_HCI_SCAN_ITVL_MAX) ||
        (disc_params->window < BLE_HCI_SCAN_WINDOW_MIN) ||
        (disc_params->window > BLE_HCI_SCAN_WINDOW_MAX) ||
        (disc_params->itvl < disc_params->window)) {
        return BLE_HS_EINVAL;
    }

    /* Check scanner filter policy */
    if (disc_params->filter_policy > BLE_HCI_SCAN_FILT_MAX) {
        return BLE_HS_EINVAL;
    }

    return ble_gap_disc_ext_validate(own_addr_type);
}
#endif

int
ble_gap_disc(uint8_t own_addr_type, int32_t duration_ms,
             const struct ble_gap_disc_params *disc_params,
             ble_gap_event_fn *cb, void *cb_arg)
{
#if NIMBLE_BLE_SCAN
#if MYNEWT_VAL(BLE_EXT_ADV)
    struct ble_gap_ext_disc_params p = {0};

    p.itvl = disc_params->itvl;
    p.passive = disc_params->passive;
    p.window = disc_params->window;

    if (duration_ms == BLE_HS_FOREVER) {
        duration_ms = 0;
    } else if (duration_ms == 0) {
        duration_ms = BLE_GAP_DISC_DUR_DFLT;
    }

    /* Extended discovery duration is expressed in 10 ms units. */
    return ble_gap_ext_disc(own_addr_type, duration_ms / 10, 0,
                            disc_params->filter_duplicates,
                            disc_params->filter_policy,
                            disc_params->limited, &p, NULL, cb, cb_arg);
#else
    struct ble_gap_disc_params params;
    uint32_t duration_ticks = 0;
    int rc;

    STATS_INC(ble_gap_stats, discover);

    ble_hs_lock();

    /* Make a copy of the parameter structure and fill unspecified values with
     * defaults.
     */
    params = *disc_params;
    ble_gap_disc_fill_dflts(&params);

    rc = ble_gap_disc_validate(own_addr_type, &params);
    if (rc != 0) {
        goto done;
    }

    if (duration_ms == 0) {
        duration_ms = BLE_GAP_DISC_DUR_DFLT;
    }

    if (duration_ms != BLE_HS_FOREVER) {
        rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks);
        if (rc != 0) {
            /* Duration too great. */
            rc = BLE_HS_EINVAL;
            goto done;
        }
    }

    if (!params.passive) {
        rc = ble_hs_id_use_addr(own_addr_type);
        if (rc != 0) {
            goto done;
        }
    }

    ble_gap_master.disc.limited = params.limited;
    ble_gap_master.cb = cb;
    ble_gap_master.cb_arg = cb_arg;

    BLE_HS_LOG(INFO, "GAP procedure initiated: discovery; ");
    ble_gap_log_disc(own_addr_type, duration_ms, &params);
    BLE_HS_LOG(INFO, "\n");

    rc = ble_gap_disc_tx_params(own_addr_type, &params);
    if (rc != 0) {
        goto done;
    }

    ble_gap_master.op = BLE_GAP_OP_M_DISC;

    rc = ble_gap_disc_enable_tx(1, params.filter_duplicates);
    if (rc != 0) {
        ble_gap_master_reset_state();
        goto done;
    }

    if (duration_ms != BLE_HS_FOREVER) {
        ble_gap_master_set_timer(duration_ticks);
    }

    rc = 0;

done:
    ble_hs_unlock();
    if (rc != 0) {
        STATS_INC(ble_gap_stats, discover_fail);
    }
    return rc;
#endif
#else
    return BLE_HS_ENOTSUP;
#endif
}

int
ble_gap_disc_active(void)
{
    /* Assume read is atomic; mutex not necessary.
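     * Callers that need this result to stay consistent with other GAP state
     * must perform the check while holding ble_hs_lock, as the internal code
     * paths in this file do.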
*/ return ble_gap_master.op == BLE_GAP_OP_M_DISC; } #if MYNEWT_VAL(BLE_ROLE_CENTRAL) && !MYNEWT_VAL(BLE_EXT_ADV) /***************************************************************************** * $connection establishment procedures * *****************************************************************************/ static int ble_gap_conn_create_tx(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_conn_params *params) { struct ble_hci_le_create_conn_cp cmd; uint16_t opcode; cmd.scan_itvl = htole16(params->scan_itvl); cmd.scan_window = htole16(params->scan_window); if (peer_addr == NULL) { /* Application wants to connect to any device in the white list. The * peer address type and peer address fields are ignored by the * controller; fill them with dummy values. */ cmd.filter_policy = BLE_HCI_CONN_FILT_USE_WL; cmd.peer_addr_type = 0; memset(cmd.peer_addr, 0, sizeof(cmd.peer_addr)); } else { cmd.filter_policy = BLE_HCI_CONN_FILT_NO_WL; cmd.peer_addr_type = peer_addr->type; memcpy(cmd.peer_addr, peer_addr->val, sizeof(cmd.peer_addr)); } cmd.own_addr_type = own_addr_type; cmd.min_conn_itvl = htole16(params->itvl_min); cmd.max_conn_itvl = htole16(params->itvl_max); cmd.conn_latency = htole16(params->latency); cmd.tmo = htole16(params->supervision_timeout); cmd.min_ce = htole16(params->min_ce_len); cmd.max_ce = htole16(params->max_ce_len); opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CREATE_CONN); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } #endif #if MYNEWT_VAL(BLE_EXT_ADV) #if MYNEWT_VAL(BLE_ROLE_CENTRAL) static int ble_gap_check_conn_params(uint8_t phy, const struct ble_gap_conn_params *params) { if (phy != BLE_HCI_LE_PHY_2M) { /* Check scan interval and window */ if ((params->scan_itvl < BLE_HCI_SCAN_ITVL_MIN) || (params->scan_itvl > BLE_HCI_SCAN_ITVL_MAX) || (params->scan_window < BLE_HCI_SCAN_WINDOW_MIN) || (params->scan_window > BLE_HCI_SCAN_WINDOW_MAX) || (params->scan_itvl < params->scan_window)) { return BLE_ERR_INV_HCI_CMD_PARMS; } } /* Check connection interval min */ if ((params->itvl_min < BLE_HCI_CONN_ITVL_MIN) || (params->itvl_min > BLE_HCI_CONN_ITVL_MAX)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check connection interval max */ if ((params->itvl_max < BLE_HCI_CONN_ITVL_MIN) || (params->itvl_max > BLE_HCI_CONN_ITVL_MAX) || (params->itvl_max < params->itvl_min)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check connection latency */ if ((params->latency < BLE_HCI_CONN_LATENCY_MIN) || (params->latency > BLE_HCI_CONN_LATENCY_MAX)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check supervision timeout */ if ((params->supervision_timeout < BLE_HCI_CONN_SPVN_TIMEOUT_MIN) || (params->supervision_timeout > BLE_HCI_CONN_SPVN_TIMEOUT_MAX)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check connection event length */ if (params->min_ce_len > params->max_ce_len) { return BLE_ERR_INV_HCI_CMD_PARMS; } return 0; } static int ble_gap_ext_conn_create_tx( uint8_t own_addr_type, const ble_addr_t *peer_addr, uint8_t phy_mask, const struct ble_gap_conn_params *phy_1m_conn_params, const struct ble_gap_conn_params *phy_2m_conn_params, const struct ble_gap_conn_params *phy_coded_conn_params) { struct ble_hci_le_ext_create_conn_cp *cmd; struct conn_params *params; uint8_t buf[sizeof(*cmd) + 3 * sizeof(*params)]; uint8_t len = sizeof(*cmd); int rc; /* Check own addr type */ if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_ERR_INV_HCI_CMD_PARMS; } if (phy_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK | BLE_HCI_LE_PHY_2M_PREF_MASK | BLE_HCI_LE_PHY_CODED_PREF_MASK)) { return 
BLE_ERR_INV_HCI_CMD_PARMS;
    }

    cmd = (void *) buf;
    params = cmd->conn_params;

    if (peer_addr == NULL) {
        /* Application wants to connect to any device in the white list.  The
         * peer address type and peer address fields are ignored by the
         * controller; fill them with dummy values.
         */
        cmd->filter_policy = BLE_HCI_CONN_FILT_USE_WL;
        cmd->peer_addr_type = 0;
        memset(cmd->peer_addr, 0, sizeof(cmd->peer_addr));
    } else {
        /* Check peer addr type */
        if (peer_addr->type > BLE_HCI_CONN_PEER_ADDR_MAX) {
            return BLE_ERR_INV_HCI_CMD_PARMS;
        }

        cmd->filter_policy = BLE_HCI_CONN_FILT_NO_WL;
        cmd->peer_addr_type = peer_addr->type;
        memcpy(cmd->peer_addr, peer_addr->val, sizeof(cmd->peer_addr));
    }

    cmd->own_addr_type = own_addr_type;
    cmd->init_phy_mask = phy_mask;

    if (phy_mask & BLE_GAP_LE_PHY_1M_MASK) {
        rc = ble_gap_check_conn_params(BLE_HCI_LE_PHY_1M, phy_1m_conn_params);
        if (rc) {
            return rc;
        }

        params->scan_itvl = htole16(phy_1m_conn_params->scan_itvl);
        params->scan_window = htole16(phy_1m_conn_params->scan_window);
        params->conn_min_itvl = htole16(phy_1m_conn_params->itvl_min);
        params->conn_max_itvl = htole16(phy_1m_conn_params->itvl_max);
        params->conn_latency = htole16(phy_1m_conn_params->latency);
        params->supervision_timeout =
            htole16(phy_1m_conn_params->supervision_timeout);
        params->min_ce = htole16(phy_1m_conn_params->min_ce_len);
        params->max_ce = htole16(phy_1m_conn_params->max_ce_len);

        params++;
        len += sizeof(*params);
    }

    if (phy_mask & BLE_GAP_LE_PHY_2M_MASK) {
        rc = ble_gap_check_conn_params(BLE_HCI_LE_PHY_2M, phy_2m_conn_params);
        if (rc) {
            return rc;
        }

        params->scan_itvl = htole16(phy_2m_conn_params->scan_itvl);
        params->scan_window = htole16(phy_2m_conn_params->scan_window);
        params->conn_min_itvl = htole16(phy_2m_conn_params->itvl_min);
        params->conn_max_itvl = htole16(phy_2m_conn_params->itvl_max);
        params->conn_latency = htole16(phy_2m_conn_params->latency);
        params->supervision_timeout =
            htole16(phy_2m_conn_params->supervision_timeout);
        params->min_ce = htole16(phy_2m_conn_params->min_ce_len);
        params->max_ce = htole16(phy_2m_conn_params->max_ce_len);

        params++;
        len += sizeof(*params);
    }

    if (phy_mask & BLE_GAP_LE_PHY_CODED_MASK) {
        rc = ble_gap_check_conn_params(BLE_HCI_LE_PHY_CODED,
                                       phy_coded_conn_params);
        if (rc) {
            return rc;
        }

        params->scan_itvl = htole16(phy_coded_conn_params->scan_itvl);
        params->scan_window = htole16(phy_coded_conn_params->scan_window);
        params->conn_min_itvl = htole16(phy_coded_conn_params->itvl_min);
        params->conn_max_itvl = htole16(phy_coded_conn_params->itvl_max);
        params->conn_latency = htole16(phy_coded_conn_params->latency);
        params->supervision_timeout =
            htole16(phy_coded_conn_params->supervision_timeout);
        params->min_ce = htole16(phy_coded_conn_params->min_ce_len);
        params->max_ce = htole16(phy_coded_conn_params->max_ce_len);

        params++;
        len += sizeof(*params);
    }

    return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                        BLE_HCI_OCF_LE_EXT_CREATE_CONN),
                             cmd, len, NULL, 0);
}
#endif

/**
 * Initiates a connect procedure.
 *
 * @param own_addr_type         The type of address the stack should use for
 *                                  itself during connection establishment.
 *                                      o BLE_OWN_ADDR_PUBLIC
 *                                      o BLE_OWN_ADDR_RANDOM
 *                                      o BLE_OWN_ADDR_RPA_PUBLIC_DEFAULT
 *                                      o BLE_OWN_ADDR_RPA_RANDOM_DEFAULT
 * @param peer_addr             The address of the peer to connect to.
 *                                  If this parameter is NULL, the white list
 *                                  is used.
 * @param duration_ms           The duration of the connect procedure.  On
 *                                  expiration, the attempt is cancelled and a
 *                                  BLE_GAP_EVENT_CONNECT event with a nonzero
 *                                  status is reported.  Units are
 *                                  milliseconds.
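 *                                  Specify BLE_HS_FOREVER for no expiration.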
 * @param phy_mask              Defines on which PHYs the connection attempt
 *                                  should be made.
 * @param phy_1m_conn_params    Additional arguments specifying the
 *                                  particulars of the connect procedure. When
 *                                  BLE_GAP_LE_PHY_1M_MASK is set in phy_mask,
 *                                  this parameter can be set to NULL to use
 *                                  default values.
 * @param phy_2m_conn_params    Additional arguments specifying the
 *                                  particulars of the connect procedure. When
 *                                  BLE_GAP_LE_PHY_2M_MASK is set in phy_mask,
 *                                  this parameter can be set to NULL to use
 *                                  default values.
 * @param phy_coded_conn_params Additional arguments specifying the
 *                                  particulars of the connect procedure. When
 *                                  BLE_GAP_LE_PHY_CODED_MASK is set in
 *                                  phy_mask, this parameter can be set to
 *                                  NULL to use default values.
 * @param cb                    The callback to associate with this connect
 *                                  procedure.  When the connect procedure
 *                                  completes, the result is reported through
 *                                  this callback.  If the connect procedure
 *                                  succeeds, the connection inherits this
 *                                  callback as its event-reporting mechanism.
 * @param cb_arg                The optional argument to pass to the callback
 *                                  function.
 *
 * @return                      0 on success;
 *                              BLE_HS_EALREADY if a connection attempt is
 *                                  already in progress;
 *                              BLE_HS_EBUSY if initiating a connection is not
 *                                  possible because scanning is in progress;
 *                              BLE_HS_EDONE if the specified peer is already
 *                                  connected;
 *                              Other nonzero on error.
 */
int
ble_gap_ext_connect(uint8_t own_addr_type, const ble_addr_t *peer_addr,
                    int32_t duration_ms, uint8_t phy_mask,
                    const struct ble_gap_conn_params *phy_1m_conn_params,
                    const struct ble_gap_conn_params *phy_2m_conn_params,
                    const struct ble_gap_conn_params *phy_coded_conn_params,
                    ble_gap_event_fn *cb, void *cb_arg)
{
#if MYNEWT_VAL(BLE_ROLE_CENTRAL)
    ble_npl_time_t duration_ticks;
    int rc;

    STATS_INC(ble_gap_stats, initiate);

    ble_hs_lock();

    if (ble_gap_conn_active()) {
        rc = BLE_HS_EALREADY;
        goto done;
    }

    if (ble_gap_disc_active()) {
        rc = BLE_HS_EBUSY;
        goto done;
    }

    if (!ble_hs_is_enabled()) {
        rc = BLE_HS_EDISABLED;
        goto done;
    }

    if (ble_gap_is_preempted()) {
        rc = BLE_HS_EPREEMPTED;
        goto done;
    }

    if (!ble_hs_conn_can_alloc()) {
        rc = BLE_HS_ENOMEM;
        goto done;
    }

    if (peer_addr &&
        peer_addr->type != BLE_ADDR_PUBLIC &&
        peer_addr->type != BLE_ADDR_RANDOM &&
        peer_addr->type != BLE_ADDR_PUBLIC_ID &&
        peer_addr->type != BLE_ADDR_RANDOM_ID) {
        rc = BLE_HS_EINVAL;
        goto done;
    }

    if ((phy_mask & BLE_GAP_LE_PHY_1M_MASK) && phy_1m_conn_params == NULL) {
        phy_1m_conn_params = &ble_gap_conn_params_dflt;
    }

    if ((phy_mask & BLE_GAP_LE_PHY_2M_MASK) && phy_2m_conn_params == NULL) {
        phy_2m_conn_params = &ble_gap_conn_params_dflt;
    }

    if ((phy_mask & BLE_GAP_LE_PHY_CODED_MASK) &&
        phy_coded_conn_params == NULL) {
        phy_coded_conn_params = &ble_gap_conn_params_dflt;
    }

    if (duration_ms == 0) {
        duration_ms = BLE_GAP_CONN_DUR_DFLT;
    }

    if (duration_ms != BLE_HS_FOREVER) {
        rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks);
        if (rc != 0) {
            /* Duration too great. */
            rc = BLE_HS_EINVAL;
            goto done;
        }
    }

    /* Verify peer not already connected. */
    if (ble_hs_conn_find_by_addr(peer_addr) != NULL) {
        rc = BLE_HS_EDONE;
        goto done;
    }

    /* XXX: Verify conn_params.
*/ rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } ble_gap_master.cb = cb; ble_gap_master.cb_arg = cb_arg; ble_gap_master.conn.using_wl = peer_addr == NULL; ble_gap_master.conn.our_addr_type = own_addr_type; ble_gap_master.op = BLE_GAP_OP_M_CONN; rc = ble_gap_ext_conn_create_tx(own_addr_type, peer_addr, phy_mask, phy_1m_conn_params, phy_2m_conn_params, phy_coded_conn_params); if (rc != 0) { ble_gap_master_reset_state(); goto done; } if (duration_ms != BLE_HS_FOREVER) { ble_gap_master_set_timer(duration_ticks); } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, initiate_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } #endif int ble_gap_connect(uint8_t own_addr_type, const ble_addr_t *peer_addr, int32_t duration_ms, const struct ble_gap_conn_params *conn_params, ble_gap_event_fn *cb, void *cb_arg) { #if MYNEWT_VAL(BLE_ROLE_CENTRAL) #if MYNEWT_VAL(BLE_EXT_ADV) return ble_gap_ext_connect(own_addr_type, peer_addr, duration_ms, BLE_GAP_LE_PHY_1M_MASK, conn_params, NULL, NULL, cb, cb_arg); #else uint32_t duration_ticks; int rc; STATS_INC(ble_gap_stats, initiate); ble_hs_lock(); if (ble_gap_conn_active()) { rc = BLE_HS_EALREADY; goto done; } if (ble_gap_disc_active()) { rc = BLE_HS_EBUSY; goto done; } if (!ble_hs_is_enabled()) { rc = BLE_HS_EDISABLED; goto done; } if (ble_gap_is_preempted()) { rc = BLE_HS_EPREEMPTED; goto done; } if (!ble_hs_conn_can_alloc()) { rc = BLE_HS_ENOMEM; goto done; } if (peer_addr && peer_addr->type != BLE_ADDR_PUBLIC && peer_addr->type != BLE_ADDR_RANDOM && peer_addr->type != BLE_ADDR_PUBLIC_ID && peer_addr->type != BLE_ADDR_RANDOM_ID) { rc = BLE_HS_EINVAL; goto done; } if (conn_params == NULL) { conn_params = &ble_gap_conn_params_dflt; } if (duration_ms == 0) { duration_ms = BLE_GAP_CONN_DUR_DFLT; } if (duration_ms != BLE_HS_FOREVER) { rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks); if (rc != 0) { /* Duration too great. */ rc = BLE_HS_EINVAL; goto done; } } /* Verify peer not already connected. */ if (ble_hs_conn_find_by_addr(peer_addr) != NULL) { rc = BLE_HS_EDONE; goto done; } /* XXX: Verify conn_params. */ rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: connect; "); ble_gap_log_conn(own_addr_type, peer_addr, conn_params); BLE_HS_LOG(INFO, "\n"); ble_gap_master.cb = cb; ble_gap_master.cb_arg = cb_arg; ble_gap_master.conn.using_wl = peer_addr == NULL; ble_gap_master.conn.our_addr_type = own_addr_type; ble_gap_master.op = BLE_GAP_OP_M_CONN; rc = ble_gap_conn_create_tx(own_addr_type, peer_addr, conn_params); if (rc != 0) { ble_gap_master_reset_state(); goto done; } if (duration_ms != BLE_HS_FOREVER) { ble_gap_master_set_timer(duration_ticks); } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, initiate_fail); } return rc; #endif #else return BLE_HS_ENOTSUP; #endif } int ble_gap_conn_active(void) { /* Assume read is atomic; mutex not necessary. 
*/ return ble_gap_master.op == BLE_GAP_OP_M_CONN; } /***************************************************************************** * $terminate connection procedure * *****************************************************************************/ int ble_gap_terminate_with_conn(struct ble_hs_conn *conn, uint8_t hci_reason) { struct ble_hci_lc_disconnect_cp cmd; int rc; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); if (conn->bhc_flags & BLE_HS_CONN_F_TERMINATING) { return BLE_HS_EALREADY; } BLE_HS_LOG(INFO, "GAP procedure initiated: terminate connection; " "conn_handle=%d hci_reason=%d\n", conn->bhc_handle, hci_reason); cmd.conn_handle = htole16(conn->bhc_handle); cmd.reason = hci_reason; rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LINK_CTRL, BLE_HCI_OCF_DISCONNECT_CMD), &cmd, sizeof(cmd), NULL, 0); if (rc != 0) { return rc; } conn->bhc_flags |= BLE_HS_CONN_F_TERMINATING; return 0; } int ble_gap_terminate(uint16_t conn_handle, uint8_t hci_reason) { struct ble_hs_conn *conn; int rc; STATS_INC(ble_gap_stats, terminate); ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn == NULL) { rc = BLE_HS_ENOTCONN; goto done; } rc = ble_gap_terminate_with_conn(conn, hci_reason); done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, terminate_fail); } return rc; } /***************************************************************************** * $cancel * *****************************************************************************/ static int ble_gap_conn_cancel_tx(void) { int rc; rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CREATE_CONN_CANCEL), NULL, 0, NULL, 0); if (rc != 0) { return rc; } return 0; } #if NIMBLE_BLE_CONNECT static int ble_gap_conn_cancel_no_lock(void) { int rc; STATS_INC(ble_gap_stats, cancel); if (!ble_gap_conn_active()) { rc = BLE_HS_EALREADY; goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: cancel connection\n"); rc = ble_gap_conn_cancel_tx(); if (rc != 0) { goto done; } ble_gap_master.conn.cancel = 1; rc = 0; done: if (rc != 0) { STATS_INC(ble_gap_stats, cancel_fail); } return rc; } #endif int ble_gap_conn_cancel(void) { #if MYNEWT_VAL(BLE_ROLE_CENTRAL) int rc; ble_hs_lock(); rc = ble_gap_conn_cancel_no_lock(); ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $update connection parameters * *****************************************************************************/ #if NIMBLE_BLE_CONNECT static struct ble_gap_update_entry * ble_gap_update_entry_alloc(void) { struct ble_gap_update_entry *entry; entry = os_memblock_get(&ble_gap_update_entry_pool); if (entry != NULL) { memset(entry, 0, sizeof *entry); } return entry; } #endif static void ble_gap_update_entry_free(struct ble_gap_update_entry *entry) { int rc; if (entry != NULL) { #if MYNEWT_VAL(BLE_HS_DEBUG) memset(entry, 0xff, sizeof *entry); #endif rc = os_memblock_put(&ble_gap_update_entry_pool, entry); BLE_HS_DBG_ASSERT_EVAL(rc == 0); } } static struct ble_gap_update_entry * ble_gap_update_entry_find(uint16_t conn_handle, struct ble_gap_update_entry **out_prev) { struct ble_gap_update_entry *entry; struct ble_gap_update_entry *prev; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); prev = NULL; SLIST_FOREACH(entry, &ble_gap_update_entries, next) { if (entry->conn_handle == conn_handle) { break; } prev = entry; } if (out_prev != NULL) { *out_prev = prev; } return entry; } static struct ble_gap_update_entry * ble_gap_update_entry_remove(uint16_t conn_handle) { struct ble_gap_update_entry 
*entry; struct ble_gap_update_entry *prev; entry = ble_gap_update_entry_find(conn_handle, &prev); if (entry != NULL) { if (prev == NULL) { SLIST_REMOVE_HEAD(&ble_gap_update_entries, next); } else { SLIST_NEXT(prev, next) = SLIST_NEXT(entry, next); } ble_hs_timer_resched(); } return entry; } #if NIMBLE_BLE_CONNECT static void ble_gap_update_l2cap_cb(uint16_t conn_handle, int status, void *arg) { struct ble_gap_update_entry *entry; /* Report failures and rejections. Success gets reported when the * controller sends the connection update complete event. */ ble_hs_lock(); entry = ble_gap_update_entry_remove(conn_handle); ble_hs_unlock(); if (entry != NULL) { ble_gap_update_entry_free(entry); if (status != 0) { ble_gap_update_notify(conn_handle, status); } /* On success let's wait for the controller to notify about update */ } } static int ble_gap_tx_param_pos_reply(uint16_t conn_handle, struct ble_gap_upd_params *params) { struct ble_hci_le_rem_conn_param_rr_cp cmd; cmd.conn_handle = htole16(conn_handle); cmd.conn_itvl_min = htole16(params->itvl_min); cmd.conn_itvl_max = htole16(params->itvl_max); cmd.conn_latency = htole16(params->latency); cmd.supervision_timeout = htole16(params->supervision_timeout); cmd.min_ce = htole16(params->min_ce_len); cmd.max_ce = htole16(params->max_ce_len); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REM_CONN_PARAM_RR), &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_tx_param_neg_reply(uint16_t conn_handle, uint8_t reject_reason) { struct ble_hci_le_rem_conn_params_nrr_cp cmd; cmd.conn_handle = htole16(conn_handle); cmd.reason = reject_reason; return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REM_CONN_PARAM_NRR), &cmd, sizeof(cmd), NULL, 0); } #endif void ble_gap_rx_param_req(const struct ble_hci_ev_le_subev_rem_conn_param_req *ev) { #if NIMBLE_BLE_CONNECT struct ble_gap_upd_params peer_params; struct ble_gap_upd_params self_params; struct ble_gap_event event; uint16_t conn_handle; int rc; memset(&event, 0, sizeof event); peer_params.itvl_min = le16toh(ev->min_interval); peer_params.itvl_max = le16toh(ev->max_interval); peer_params.latency = le16toh(ev->latency); peer_params.supervision_timeout = le16toh(ev->timeout); peer_params.min_ce_len = 0; peer_params.max_ce_len = 0; /* Copy the peer params into the self params to make it easy on the * application. The application callback will change only the fields which * it finds unsuitable. 
*/ self_params = peer_params; conn_handle = le16toh(ev->conn_handle); memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_CONN_UPDATE_REQ; event.conn_update_req.conn_handle = conn_handle; event.conn_update_req.self_params = &self_params; event.conn_update_req.peer_params = &peer_params; rc = ble_gap_call_conn_event_cb(&event, conn_handle); if (rc == 0) { rc = ble_gap_tx_param_pos_reply(conn_handle, &self_params); if (rc != 0) { ble_gap_update_failed(conn_handle, rc); } } else { ble_gap_tx_param_neg_reply(conn_handle, rc); } #endif } #if NIMBLE_BLE_CONNECT static int ble_gap_update_tx(uint16_t conn_handle, const struct ble_gap_upd_params *params) { struct ble_hci_le_conn_update_cp cmd; cmd.conn_handle = htole16(conn_handle); cmd.conn_itvl_min = htole16(params->itvl_min); cmd.conn_itvl_max = htole16(params->itvl_max); cmd.conn_latency = htole16(params->latency); cmd.supervision_timeout = htole16(params->supervision_timeout); cmd.min_ce_len = htole16(params->min_ce_len); cmd.max_ce_len = htole16(params->max_ce_len); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CONN_UPDATE), &cmd, sizeof(cmd), NULL, 0); } static bool ble_gap_validate_conn_params(const struct ble_gap_upd_params *params) { /* Requirements from Bluetooth spec. v4.2 [Vol 2, Part E], 7.8.18 */ if (params->itvl_min > params->itvl_max) { return false; } if (params->itvl_min < 0x0006 || params->itvl_max > 0x0C80) { return false; } if (params->latency > 0x01F3) { return false; } /* According to specification mentioned above we should make sure that: * supervision_timeout_ms > (1 + latency) * 2 * max_interval_ms * => * supervision_timeout * 10 ms > (1 + latency) * 2 * itvl_max * 1.25ms */ if (params->supervision_timeout <= (((1 + params->latency) * params->itvl_max) / 4)) { return false; } return true; } #endif int ble_gap_update_params(uint16_t conn_handle, const struct ble_gap_upd_params *params) { #if NIMBLE_BLE_CONNECT struct ble_l2cap_sig_update_params l2cap_params; struct ble_gap_update_entry *entry; struct ble_gap_update_entry *dup; struct ble_hs_conn *conn; int l2cap_update; int rc; l2cap_update = 0; /* Validate parameters with a spec */ if (!ble_gap_validate_conn_params(params)) { return BLE_HS_EINVAL; } STATS_INC(ble_gap_stats, update); memset(&l2cap_params, 0, sizeof l2cap_params); entry = NULL; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn == NULL) { rc = BLE_HS_ENOTCONN; goto done; } /* Don't allow two concurrent updates to the same connection. */ dup = ble_gap_update_entry_find(conn_handle, NULL); if (dup != NULL) { rc = BLE_HS_EALREADY; goto done; } entry = ble_gap_update_entry_alloc(); if (entry == NULL) { rc = BLE_HS_ENOMEM; goto done; } entry->conn_handle = conn_handle; entry->params = *params; entry->exp_os_ticks = ble_npl_time_get() + ble_npl_time_ms_to_ticks32(BLE_GAP_UPDATE_TIMEOUT_MS); BLE_HS_LOG(INFO, "GAP procedure initiated: "); ble_gap_log_update(conn_handle, params); BLE_HS_LOG(INFO, "\n"); /* * If LL update procedure is not supported on this connection and we are * the slave, fail over to the L2CAP update procedure. 
*/ if ((conn->supported_feat & BLE_HS_HCI_LE_FEAT_CONN_PARAM_REQUEST) == 0 && !(conn->bhc_flags & BLE_HS_CONN_F_MASTER)) { l2cap_update = 1; rc = 0; } else { rc = ble_gap_update_tx(conn_handle, params); } done: ble_hs_unlock(); if (!l2cap_update) { ble_hs_timer_resched(); } else { ble_gap_update_to_l2cap(params, &l2cap_params); rc = ble_l2cap_sig_update(conn_handle, &l2cap_params, ble_gap_update_l2cap_cb, NULL); } ble_hs_lock(); if (rc == 0) { SLIST_INSERT_HEAD(&ble_gap_update_entries, entry, next); } else { ble_gap_update_entry_free(entry); STATS_INC(ble_gap_stats, update_fail); } ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $security * *****************************************************************************/ int ble_gap_security_initiate(uint16_t conn_handle) { #if NIMBLE_BLE_SM struct ble_store_value_sec value_sec; struct ble_store_key_sec key_sec; struct ble_hs_conn_addrs addrs; ble_hs_conn_flags_t conn_flags; struct ble_hs_conn *conn; int rc; STATS_INC(ble_gap_stats, security_initiate); ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { conn_flags = conn->bhc_flags; ble_hs_conn_addrs(conn, &addrs); memset(&key_sec, 0, sizeof key_sec); key_sec.peer_addr = addrs.peer_id_addr; } ble_hs_unlock(); if (conn == NULL) { rc = BLE_HS_ENOTCONN; goto done; } if (conn_flags & BLE_HS_CONN_F_MASTER) { /* Search the security database for an LTK for this peer. If one * is found, perform the encryption procedure rather than the pairing * procedure. */ rc = ble_store_read_peer_sec(&key_sec, &value_sec); if (rc == 0 && value_sec.ltk_present) { rc = ble_sm_enc_initiate(conn_handle, value_sec.key_size, value_sec.ltk, value_sec.ediv, value_sec.rand_num, value_sec.authenticated); if (rc != 0) { goto done; } } else { rc = ble_sm_pair_initiate(conn_handle); if (rc != 0) { goto done; } } } else { rc = ble_sm_slave_initiate(conn_handle); if (rc != 0) { goto done; } } rc = 0; done: if (rc != 0) { STATS_INC(ble_gap_stats, security_initiate_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_pair_initiate(uint16_t conn_handle) { int rc; rc = ble_sm_pair_initiate(conn_handle); return rc; } int ble_gap_encryption_initiate(uint16_t conn_handle, uint8_t key_size, const uint8_t *ltk, uint16_t ediv, uint64_t rand_val, int auth) { #if NIMBLE_BLE_SM ble_hs_conn_flags_t conn_flags; int rc; rc = ble_hs_atomic_conn_flags(conn_handle, &conn_flags); if (rc != 0) { return rc; } if (!(conn_flags & BLE_HS_CONN_F_MASTER)) { return BLE_HS_EROLE; } rc = ble_sm_enc_initiate(conn_handle, key_size, ltk, ediv, rand_val, auth); return rc; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_unpair(const ble_addr_t *peer_addr) { struct ble_hs_conn *conn; if (ble_addr_cmp(peer_addr, BLE_ADDR_ANY) == 0) { return BLE_HS_EINVAL; } ble_hs_lock(); conn = ble_hs_conn_find_by_addr(peer_addr); if (conn != NULL) { ble_gap_terminate_with_conn(conn, BLE_ERR_REM_USER_CONN_TERM); } ble_hs_unlock(); ble_hs_pvcy_remove_entry(peer_addr->type, peer_addr->val); return ble_store_util_delete_peer(peer_addr); } int ble_gap_unpair_oldest_peer(void) { ble_addr_t oldest_peer_id_addr; int num_peers; int rc; rc = ble_store_util_bonded_peers( &oldest_peer_id_addr, &num_peers, 1); if (rc != 0) { return rc; } if (num_peers == 0) { return BLE_HS_ENOENT; } rc = ble_gap_unpair(&oldest_peer_id_addr); if (rc != 0) { return rc; } return 0; } int ble_gap_unpair_oldest_except(const ble_addr_t *peer_addr) { ble_addr_t 
peer_id_addrs[MYNEWT_VAL(BLE_STORE_MAX_BONDS)];
    int num_peers;
    int rc, i;

    rc = ble_store_util_bonded_peers(
            &peer_id_addrs[0], &num_peers, MYNEWT_VAL(BLE_STORE_MAX_BONDS));
    if (rc != 0) {
        return rc;
    }

    if (num_peers == 0) {
        return BLE_HS_ENOENT;
    }

    for (i = 0; i < num_peers; i++) {
        if (ble_addr_cmp(peer_addr, &peer_id_addrs[i]) != 0) {
            break;
        }
    }

    if (i >= num_peers) {
        return BLE_HS_ENOMEM;
    }

    return ble_gap_unpair(&peer_id_addrs[i]);
}

void
ble_gap_passkey_event(uint16_t conn_handle,
                      struct ble_gap_passkey_params *passkey_params)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;

    BLE_HS_LOG(DEBUG, "send passkey action request %d\n",
               passkey_params->action);

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_PASSKEY_ACTION;
    event.passkey.conn_handle = conn_handle;
    event.passkey.params = *passkey_params;
    ble_gap_call_conn_event_cb(&event, conn_handle);
#endif
}

void
ble_gap_enc_event(uint16_t conn_handle, int status,
                  int security_restored, int bonded)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_ENC_CHANGE;
    event.enc_change.conn_handle = conn_handle;
    event.enc_change.status = status;

    ble_gap_event_listener_call(&event);
    ble_gap_call_conn_event_cb(&event, conn_handle);

    if (status != 0) {
        return;
    }

    /* If encryption succeeded and security has been restored for a bonded
     * device, notify the GATT server so it has a chance to send a
     * notification/indication if needed.
     */
    if (security_restored) {
        ble_gatts_bonding_restored(conn_handle);
        return;
    }

    /* If this is a fresh pairing and bonding has been established, notify
     * the GATT server so that subscriptions made before bonding can be
     * stored.
     */
    if (bonded) {
        ble_gatts_bonding_established(conn_handle);
    }
#endif
}

void
ble_gap_identity_event(uint16_t conn_handle)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;

    BLE_HS_LOG(DEBUG, "send identity changed\n");

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_IDENTITY_RESOLVED;
    event.identity_resolved.conn_handle = conn_handle;
    ble_gap_call_conn_event_cb(&event, conn_handle);
#endif
}

int
ble_gap_repeat_pairing_event(const struct ble_gap_repeat_pairing *rp)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;
    int rc;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_REPEAT_PAIRING;
    event.repeat_pairing = *rp;
    rc = ble_gap_call_conn_event_cb(&event, rp->conn_handle);
    return rc;
#else
    return 0;
#endif
}

/*****************************************************************************
 * $rssi                                                                     *
 *****************************************************************************/

int
ble_gap_conn_rssi(uint16_t conn_handle, int8_t *out_rssi)
{
    int rc;

    rc = ble_hs_hci_util_read_rssi(conn_handle, out_rssi);
    return rc;
}

/*****************************************************************************
 * $notify                                                                   *
 *****************************************************************************/

void
ble_gap_notify_rx_event(uint16_t conn_handle, uint16_t attr_handle,
                        struct os_mbuf *om, int is_indication)
{
#if !MYNEWT_VAL(BLE_GATT_NOTIFY) && !MYNEWT_VAL(BLE_GATT_INDICATE)
    return;
#endif

    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_NOTIFY_RX;
    event.notify_rx.conn_handle = conn_handle;
    event.notify_rx.attr_handle = attr_handle;
    event.notify_rx.om = om;
    event.notify_rx.indication = is_indication;
    ble_gap_event_listener_call(&event);
    ble_gap_call_conn_event_cb(&event, conn_handle);

    os_mbuf_free_chain(event.notify_rx.om);
}

void
ble_gap_notify_tx_event(int status, uint16_t conn_handle,
                        uint16_t attr_handle, int
is_indication) { #if MYNEWT_VAL(BLE_GATT_NOTIFY) || MYNEWT_VAL(BLE_GATT_INDICATE) struct ble_gap_event event; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_NOTIFY_TX; event.notify_tx.conn_handle = conn_handle; event.notify_tx.status = status; event.notify_tx.attr_handle = attr_handle; event.notify_tx.indication = is_indication; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); #endif } /***************************************************************************** * $subscribe * *****************************************************************************/ void ble_gap_subscribe_event(uint16_t conn_handle, uint16_t attr_handle, uint8_t reason, uint8_t prev_notify, uint8_t cur_notify, uint8_t prev_indicate, uint8_t cur_indicate) { struct ble_gap_event event; BLE_HS_DBG_ASSERT(prev_notify != cur_notify || prev_indicate != cur_indicate); BLE_HS_DBG_ASSERT(reason == BLE_GAP_SUBSCRIBE_REASON_WRITE || reason == BLE_GAP_SUBSCRIBE_REASON_TERM || reason == BLE_GAP_SUBSCRIBE_REASON_RESTORE); memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_SUBSCRIBE; event.subscribe.conn_handle = conn_handle; event.subscribe.attr_handle = attr_handle; event.subscribe.reason = reason; event.subscribe.prev_notify = !!prev_notify; event.subscribe.cur_notify = !!cur_notify; event.subscribe.prev_indicate = !!prev_indicate; event.subscribe.cur_indicate = !!cur_indicate; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); } /***************************************************************************** * $mtu * *****************************************************************************/ void ble_gap_mtu_event(uint16_t conn_handle, uint16_t cid, uint16_t mtu) { struct ble_gap_event event; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_MTU; event.mtu.conn_handle = conn_handle; event.mtu.channel_id = cid; event.mtu.value = mtu; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); } /***************************************************************************** * $preempt * *****************************************************************************/ void ble_gap_preempt_no_lock(void) { int rc; int i; (void)rc; (void)i; #if NIMBLE_BLE_ADVERTISE #if MYNEWT_VAL(BLE_EXT_ADV) for (i = 0; i < BLE_ADV_INSTANCES; i++) { rc = ble_gap_ext_adv_stop_no_lock(i); if (rc == 0) { ble_gap_slave[i].preempted = 1; } } #else rc = ble_gap_adv_stop_no_lock(); if (rc == 0) { ble_gap_slave[0].preempted = 1; } #endif #endif #if NIMBLE_BLE_CONNECT rc = ble_gap_conn_cancel_no_lock(); if (rc == 0) { ble_gap_master.preempted_op = BLE_GAP_OP_M_CONN; } #endif #if NIMBLE_BLE_SCAN rc = ble_gap_disc_cancel_no_lock(); if (rc == 0) { ble_gap_master.preempted_op = BLE_GAP_OP_M_DISC; } #endif } /** * @brief Preempts the GAP if it is not already preempted. * * Aborts all active GAP procedures and prevents new ones from being started. * This function is used to ensure an idle GAP so that the controller's * resolving list can be modified. When done accessing the resolving list, the * caller must call `ble_gap_preempt_done()` to permit new GAP procedures. * * On preemption, all aborted GAP procedures are reported with a status or * reason code of BLE_HS_EPREEMPTED. An attempt to initiate a new GAP * procedure during preemption fails with a return code of BLE_HS_EPREEMPTED. 
*/ void ble_gap_preempt(void) { ble_hs_lock(); if (!ble_gap_is_preempted()) { ble_gap_preempt_no_lock(); } ble_hs_unlock(); } /** * Takes GAP out of the preempted state, allowing new GAP procedures to be * initiated. This function should only be called after a call to * `ble_gap_preempt()`. */ static struct ble_npl_mutex preempt_done_mutex; void ble_gap_preempt_done(void) { struct ble_gap_event event; ble_gap_event_fn *master_cb; void *master_arg; int disc_preempted; int i; static struct { ble_gap_event_fn *cb; void *arg; } slaves[BLE_ADV_INSTANCES]; disc_preempted = 0; /* Protects slaves from accessing by multiple threads */ ble_npl_mutex_pend(&preempt_done_mutex, 0xFFFFFFFF); memset(slaves, 0, sizeof(slaves)); ble_hs_lock(); for (i = 0; i < BLE_ADV_INSTANCES; i++) { if (ble_gap_slave[i].preempted) { ble_gap_slave[i].preempted = 0; slaves[i].cb = ble_gap_slave[i].cb; slaves[i].arg = ble_gap_slave[i].cb_arg; } } if (ble_gap_master.preempted_op == BLE_GAP_OP_M_DISC) { ble_gap_master.preempted_op = BLE_GAP_OP_NULL; disc_preempted = 1; master_cb = ble_gap_master.cb; master_arg = ble_gap_master.cb_arg; } ble_hs_unlock(); event.type = BLE_GAP_EVENT_ADV_COMPLETE; event.adv_complete.reason = BLE_HS_EPREEMPTED; for (i = 0; i < BLE_ADV_INSTANCES; i++) { if (slaves[i].cb) { #if MYNEWT_VAL(BLE_EXT_ADV) event.adv_complete.instance = i; event.adv_complete.conn_handle = i; #endif ble_gap_call_event_cb(&event, slaves[i].cb, slaves[i].arg); } } ble_npl_mutex_release(&preempt_done_mutex); if (disc_preempted) { event.type = BLE_GAP_EVENT_DISC_COMPLETE; event.disc_complete.reason = BLE_HS_EPREEMPTED; ble_gap_call_event_cb(&event, master_cb, master_arg); } } int ble_gap_event_listener_register(struct ble_gap_event_listener *listener, ble_gap_event_fn *fn, void *arg) { struct ble_gap_event_listener *evl = NULL; int rc; SLIST_FOREACH(evl, &ble_gap_event_listener_list, link) { if (evl == listener) { break; } } if (!evl) { if (fn) { memset(listener, 0, sizeof(*listener)); listener->fn = fn; listener->arg = arg; SLIST_INSERT_HEAD(&ble_gap_event_listener_list, listener, link); rc = 0; } else { rc = BLE_HS_EINVAL; } } else { rc = BLE_HS_EALREADY; } return rc; } int ble_gap_event_listener_unregister(struct ble_gap_event_listener *listener) { struct ble_gap_event_listener *evl = NULL; int rc; /* * We check if element exists on the list only for sanity to let caller * know whether it registered its listener before. 
*/ SLIST_FOREACH(evl, &ble_gap_event_listener_list, link) { if (evl == listener) { break; } } if (!evl) { rc = BLE_HS_ENOENT; } else { SLIST_REMOVE(&ble_gap_event_listener_list, listener, ble_gap_event_listener, link); rc = 0; } return rc; } static int ble_gap_event_listener_call(struct ble_gap_event *event) { struct ble_gap_event_listener *evl = NULL; SLIST_FOREACH(evl, &ble_gap_event_listener_list, link) { evl->fn(event, evl->arg); } return 0; } /***************************************************************************** * $init * *****************************************************************************/ int ble_gap_init(void) { int rc; memset(&ble_gap_master, 0, sizeof(ble_gap_master)); memset(ble_gap_slave, 0, sizeof(ble_gap_slave)); #if MYNEWT_VAL(BLE_PERIODIC_ADV) memset(&ble_gap_sync, 0, sizeof(ble_gap_sync)); #endif ble_npl_mutex_init(&preempt_done_mutex); SLIST_INIT(&ble_gap_update_entries); SLIST_INIT(&ble_gap_event_listener_list); rc = os_mempool_init(&ble_gap_update_entry_pool, MYNEWT_VAL(BLE_GAP_MAX_PENDING_CONN_PARAM_UPDATE), sizeof (struct ble_gap_update_entry), ble_gap_update_entry_mem, "ble_gap_update"); switch (rc) { case 0: break; case OS_ENOMEM: rc = BLE_HS_ENOMEM; goto err; default: rc = BLE_HS_EOS; goto err; } rc = stats_init_and_reg( STATS_HDR(ble_gap_stats), STATS_SIZE_INIT_PARMS(ble_gap_stats, STATS_SIZE_32), STATS_NAME_INIT_PARMS(ble_gap_stats), "ble_gap"); if (rc != 0) { goto err; } return 0; err: return rc; }
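/*
 * Editor's sketch (not part of the original file): a minimal central-role
 * caller for the API above, assuming the NimBLE host headers and a running
 * host task. The callback and function names are hypothetical; the error
 * codes match the ble_gap_connect() contract documented earlier. Wrapped in
 * "#if 0" to mark it as illustrative only.
 */
#if 0
#include "host/ble_gap.h"

static int
example_gap_event(struct ble_gap_event *event, void *arg)
{
    switch (event->type) {
    case BLE_GAP_EVENT_CONNECT:
        /* event->connect.status == 0 means the link is up; the connection
         * keeps this callback for all later events. */
        return 0;
    case BLE_GAP_EVENT_DISCONNECT:
        return 0;
    default:
        return 0;
    }
}

static int
example_connect(const ble_addr_t *peer)
{
    /* 30 s timeout; NULL conn_params selects ble_gap_conn_params_dflt. */
    return ble_gap_connect(BLE_OWN_ADDR_PUBLIC, peer, 30000, NULL,
                           example_gap_event, NULL);
}
#endif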
253268.c
#include <math.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <complex.h> #ifdef complex #undef complex #endif #ifdef I #undef I #endif #if defined(_WIN64) typedef long long BLASLONG; typedef unsigned long long BLASULONG; #else typedef long BLASLONG; typedef unsigned long BLASULONG; #endif #ifdef LAPACK_ILP64 typedef BLASLONG blasint; #if defined(_WIN64) #define blasabs(x) llabs(x) #else #define blasabs(x) labs(x) #endif #else typedef int blasint; #define blasabs(x) abs(x) #endif typedef blasint integer; typedef unsigned int uinteger; typedef char *address; typedef short int shortint; typedef float real; typedef double doublereal; typedef struct { real r, i; } complex; typedef struct { doublereal r, i; } doublecomplex; #ifdef _MSC_VER static inline _Fcomplex Cf(complex *z) {_Fcomplex zz={z->r , z->i}; return zz;} static inline _Dcomplex Cd(doublecomplex *z) {_Dcomplex zz={z->r , z->i};return zz;} static inline _Fcomplex * _pCf(complex *z) {return (_Fcomplex*)z;} static inline _Dcomplex * _pCd(doublecomplex *z) {return (_Dcomplex*)z;} #else static inline _Complex float Cf(complex *z) {return z->r + z->i*_Complex_I;} static inline _Complex double Cd(doublecomplex *z) {return z->r + z->i*_Complex_I;} static inline _Complex float * _pCf(complex *z) {return (_Complex float*)z;} static inline _Complex double * _pCd(doublecomplex *z) {return (_Complex double*)z;} #endif #define pCf(z) (*_pCf(z)) #define pCd(z) (*_pCd(z)) typedef int logical; typedef short int shortlogical; typedef char logical1; typedef char integer1; #define TRUE_ (1) #define FALSE_ (0) /* Extern is for use with -E */ #ifndef Extern #define Extern extern #endif /* I/O stuff */ typedef int flag; typedef int ftnlen; typedef int ftnint; /*external read, write*/ typedef struct { flag cierr; ftnint ciunit; flag ciend; char *cifmt; ftnint cirec; } cilist; /*internal read, write*/ typedef struct { flag icierr; char *iciunit; flag iciend; char *icifmt; ftnint icirlen; ftnint icirnum; } icilist; /*open*/ typedef struct { flag oerr; ftnint ounit; char *ofnm; ftnlen ofnmlen; char *osta; char *oacc; char *ofm; ftnint orl; char *oblnk; } olist; /*close*/ typedef struct { flag cerr; ftnint cunit; char *csta; } cllist; /*rewind, backspace, endfile*/ typedef struct { flag aerr; ftnint aunit; } alist; /* inquire */ typedef struct { flag inerr; ftnint inunit; char *infile; ftnlen infilen; ftnint *inex; /*parameters in standard's order*/ ftnint *inopen; ftnint *innum; ftnint *innamed; char *inname; ftnlen innamlen; char *inacc; ftnlen inacclen; char *inseq; ftnlen inseqlen; char *indir; ftnlen indirlen; char *infmt; ftnlen infmtlen; char *inform; ftnint informlen; char *inunf; ftnlen inunflen; ftnint *inrecl; ftnint *innrec; char *inblank; ftnlen inblanklen; } inlist; #define VOID void union Multitype { /* for multiple entry points */ integer1 g; shortint h; integer i; /* longint j; */ real r; doublereal d; complex c; doublecomplex z; }; typedef union Multitype Multitype; struct Vardesc { /* for Namelist */ char *name; char *addr; ftnlen *dims; int type; }; typedef struct Vardesc Vardesc; struct Namelist { char *name; Vardesc **vars; int nvars; }; typedef struct Namelist Namelist; #define abs(x) ((x) >= 0 ? (x) : -(x)) #define dabs(x) (fabs(x)) #define f2cmin(a,b) ((a) <= (b) ? (a) : (b)) #define f2cmax(a,b) ((a) >= (b) ? 
(a) : (b))
#define dmin(a,b) (f2cmin(a,b))
#define dmax(a,b) (f2cmax(a,b))
#define bit_test(a,b)	((a) >> (b) & 1)
#define bit_clear(a,b)	((a) & ~((uinteger)1 << (b)))
#define bit_set(a,b)	((a) |  ((uinteger)1 << (b)))

#define abort_() { sig_die("Fortran abort routine called", 1); }
#define c_abs(z) (cabsf(Cf(z)))
#define c_cos(R,Z) { pCf(R)=ccos(Cf(Z)); }
#ifdef _MSC_VER
#define c_div(c, a, b) {Cf(c)._Val[0] = (Cf(a)._Val[0]/Cf(b)._Val[0]); Cf(c)._Val[1]=(Cf(a)._Val[1]/Cf(b)._Val[1]);}
#define z_div(c, a, b) {Cd(c)._Val[0] = (Cd(a)._Val[0]/Cd(b)._Val[0]); Cd(c)._Val[1]=(Cd(a)._Val[1]/Cd(b)._Val[1]);}
#else
#define c_div(c, a, b) {pCf(c) = Cf(a)/Cf(b);}
#define z_div(c, a, b) {pCd(c) = Cd(a)/Cd(b);}
#endif
#define c_exp(R, Z) {pCf(R) = cexpf(Cf(Z));}
#define c_log(R, Z) {pCf(R) = clogf(Cf(Z));}
#define c_sin(R, Z) {pCf(R) = csinf(Cf(Z));}
//#define c_sqrt(R, Z) {*(R) = csqrtf(Cf(Z));}
#define c_sqrt(R, Z) {pCf(R) = csqrtf(Cf(Z));}
#define d_abs(x) (fabs(*(x)))
#define d_acos(x) (acos(*(x)))
#define d_asin(x) (asin(*(x)))
#define d_atan(x) (atan(*(x)))
#define d_atn2(x, y) (atan2(*(x),*(y)))
#define d_cnjg(R, Z) { pCd(R) = conj(Cd(Z)); }
#define r_cnjg(R, Z) { pCf(R) = conjf(Cf(Z)); }
#define d_cos(x) (cos(*(x)))
#define d_cosh(x) (cosh(*(x)))
#define d_dim(__a, __b) ( *(__a) > *(__b) ? *(__a) - *(__b) : 0.0 )
#define d_exp(x) (exp(*(x)))
#define d_imag(z) (cimag(Cd(z)))
#define r_imag(z) (cimagf(Cf(z)))
#define d_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define r_int(__x) (*(__x)>0 ? floor(*(__x)) : -floor(- *(__x)))
#define d_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define r_lg10(x) ( 0.43429448190325182765 * log(*(x)) )
#define d_log(x) (log(*(x)))
#define d_mod(x, y) (fmod(*(x), *(y)))
#define u_nint(__x) ((__x)>=0 ? floor((__x) + .5) : -floor(.5 - (__x)))
#define d_nint(x) u_nint(*(x))
#define u_sign(__a,__b) ((__b) >= 0 ? ((__a) >= 0 ? (__a) : -(__a)) : -((__a) >= 0 ?
(__a) : -(__a))) #define d_sign(a,b) u_sign(*(a),*(b)) #define r_sign(a,b) u_sign(*(a),*(b)) #define d_sin(x) (sin(*(x))) #define d_sinh(x) (sinh(*(x))) #define d_sqrt(x) (sqrt(*(x))) #define d_tan(x) (tan(*(x))) #define d_tanh(x) (tanh(*(x))) #define i_abs(x) abs(*(x)) #define i_dnnt(x) ((integer)u_nint(*(x))) #define i_len(s, n) (n) #define i_nint(x) ((integer)u_nint(*(x))) #define i_sign(a,b) ((integer)u_sign((integer)*(a),(integer)*(b))) #define pow_dd(ap, bp) ( pow(*(ap), *(bp))) #define pow_si(B,E) spow_ui(*(B),*(E)) #define pow_ri(B,E) spow_ui(*(B),*(E)) #define pow_di(B,E) dpow_ui(*(B),*(E)) #define pow_zi(p, a, b) {pCd(p) = zpow_ui(Cd(a), *(b));} #define pow_ci(p, a, b) {pCf(p) = cpow_ui(Cf(a), *(b));} #define pow_zz(R,A,B) {pCd(R) = cpow(Cd(A),*(B));} #define s_cat(lpp, rpp, rnp, np, llp) { ftnlen i, nc, ll; char *f__rp, *lp; ll = (llp); lp = (lpp); for(i=0; i < (int)*(np); ++i) { nc = ll; if((rnp)[i] < nc) nc = (rnp)[i]; ll -= nc; f__rp = (rpp)[i]; while(--nc >= 0) *lp++ = *(f__rp)++; } while(--ll >= 0) *lp++ = ' '; } #define s_cmp(a,b,c,d) ((integer)strncmp((a),(b),f2cmin((c),(d)))) #define s_copy(A,B,C,D) { int __i,__m; for (__i=0, __m=f2cmin((C),(D)); __i<__m && (B)[__i] != 0; ++__i) (A)[__i] = (B)[__i]; } #define sig_die(s, kill) { exit(1); } #define s_stop(s, n) {exit(0);} static char junk[] = "\n@(#)LIBF77 VERSION 19990503\n"; #define z_abs(z) (cabs(Cd(z))) #define z_exp(R, Z) {pCd(R) = cexp(Cd(Z));} #define z_sqrt(R, Z) {pCd(R) = csqrt(Cd(Z));} #define myexit_() break; #define mycycle() continue; #define myceiling(w) {ceil(w)} #define myhuge(w) {HUGE_VAL} //#define mymaxloc_(w,s,e,n) {if (sizeof(*(w)) == sizeof(double)) dmaxloc_((w),*(s),*(e),n); else dmaxloc_((w),*(s),*(e),n);} #define mymaxloc(w,s,e,n) {dmaxloc_(w,*(s),*(e),n)} /* procedure parameter types for -A and -C++ */ #define F2C_proc_par_types 1 #ifdef __cplusplus typedef logical (*L_fp)(...); #else typedef logical (*L_fp)(); #endif static float spow_ui(float x, integer n) { float pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static double dpow_ui(double x, integer n) { double pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } #ifdef _MSC_VER static _Fcomplex cpow_ui(complex x, integer n) { complex pow={1.0,0.0}; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x.r = 1/x.r, x.i=1/x.i; for(u = n; ; ) { if(u & 01) pow.r *= x.r, pow.i *= x.i; if(u >>= 1) x.r *= x.r, x.i *= x.i; else break; } } _Fcomplex p={pow.r, pow.i}; return p; } #else static _Complex float cpow_ui(_Complex float x, integer n) { _Complex float pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } #endif #ifdef _MSC_VER static _Dcomplex zpow_ui(_Dcomplex x, integer n) { _Dcomplex pow={1.0,0.0}; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x._Val[0] = 1/x._Val[0], x._Val[1] =1/x._Val[1]; for(u = n; ; ) { if(u & 01) pow._Val[0] *= x._Val[0], pow._Val[1] *= x._Val[1]; if(u >>= 1) x._Val[0] *= x._Val[0], x._Val[1] *= x._Val[1]; else break; } } _Dcomplex p = {pow._Val[0], pow._Val[1]}; return p; } #else static _Complex double zpow_ui(_Complex double x, integer n) { _Complex double pow=1.0; unsigned long int u; if(n != 0) { if(n < 0) n = -n, x = 1/x; for(u = n; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } 
return pow; } #endif static integer pow_ii(integer x, integer n) { integer pow; unsigned long int u; if (n <= 0) { if (n == 0 || x == 1) pow = 1; else if (x != -1) pow = x == 0 ? 1/x : 0; else n = -n; } if ((n > 0) || !(n == 0 || x == 1 || x != -1)) { u = n; for(pow = 1; ; ) { if(u & 01) pow *= x; if(u >>= 1) x *= x; else break; } } return pow; } static integer dmaxloc_(double *w, integer s, integer e, integer *n) { double m; integer i, mi; for(m=w[s-1], mi=s, i=s+1; i<=e; i++) if (w[i-1]>m) mi=i ,m=w[i-1]; return mi-s+1; } static integer smaxloc_(float *w, integer s, integer e, integer *n) { float m; integer i, mi; for(m=w[s-1], mi=s, i=s+1; i<=e; i++) if (w[i-1]>m) mi=i ,m=w[i-1]; return mi-s+1; } static inline void cdotc_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; #ifdef _MSC_VER _Fcomplex zdotc = {0.0, 0.0}; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += conjf(Cf(&x[i]))._Val[0] * Cf(&y[i])._Val[0]; zdotc._Val[1] += conjf(Cf(&x[i]))._Val[1] * Cf(&y[i])._Val[1]; } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += conjf(Cf(&x[i*incx]))._Val[0] * Cf(&y[i*incy])._Val[0]; zdotc._Val[1] += conjf(Cf(&x[i*incx]))._Val[1] * Cf(&y[i*incy])._Val[1]; } } pCf(z) = zdotc; } #else _Complex float zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conjf(Cf(&x[i])) * Cf(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conjf(Cf(&x[i*incx])) * Cf(&y[i*incy]); } } pCf(z) = zdotc; } #endif static inline void zdotc_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; #ifdef _MSC_VER _Dcomplex zdotc = {0.0, 0.0}; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += conj(Cd(&x[i]))._Val[0] * Cd(&y[i])._Val[0]; zdotc._Val[1] += conj(Cd(&x[i]))._Val[1] * Cd(&y[i])._Val[1]; } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += conj(Cd(&x[i*incx]))._Val[0] * Cd(&y[i*incy])._Val[0]; zdotc._Val[1] += conj(Cd(&x[i*incx]))._Val[1] * Cd(&y[i*incy])._Val[1]; } } pCd(z) = zdotc; } #else _Complex double zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conj(Cd(&x[i])) * Cd(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += conj(Cd(&x[i*incx])) * Cd(&y[i*incy]); } } pCd(z) = zdotc; } #endif static inline void cdotu_(complex *z, integer *n_, complex *x, integer *incx_, complex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; #ifdef _MSC_VER _Fcomplex zdotc = {0.0, 0.0}; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += Cf(&x[i])._Val[0] * Cf(&y[i])._Val[0]; zdotc._Val[1] += Cf(&x[i])._Val[1] * Cf(&y[i])._Val[1]; } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += Cf(&x[i*incx])._Val[0] * Cf(&y[i*incy])._Val[0]; zdotc._Val[1] += Cf(&x[i*incx])._Val[1] * Cf(&y[i*incy])._Val[1]; } } pCf(z) = zdotc; } #else _Complex float zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cf(&x[i]) * Cf(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += 
Cf(&x[i*incx]) * Cf(&y[i*incy]); } } pCf(z) = zdotc; } #endif static inline void zdotu_(doublecomplex *z, integer *n_, doublecomplex *x, integer *incx_, doublecomplex *y, integer *incy_) { integer n = *n_, incx = *incx_, incy = *incy_, i; #ifdef _MSC_VER _Dcomplex zdotc = {0.0, 0.0}; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += Cd(&x[i])._Val[0] * Cd(&y[i])._Val[0]; zdotc._Val[1] += Cd(&x[i])._Val[1] * Cd(&y[i])._Val[1]; } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc._Val[0] += Cd(&x[i*incx])._Val[0] * Cd(&y[i*incy])._Val[0]; zdotc._Val[1] += Cd(&x[i*incx])._Val[1] * Cd(&y[i*incy])._Val[1]; } } pCd(z) = zdotc; } #else _Complex double zdotc = 0.0; if (incx == 1 && incy == 1) { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cd(&x[i]) * Cd(&y[i]); } } else { for (i=0;i<n;i++) { /* zdotc = zdotc + dconjg(x(i))* y(i) */ zdotc += Cd(&x[i*incx]) * Cd(&y[i*incy]); } } pCd(z) = zdotc; } #endif /* -- translated by f2c (version 20000121). You must link the resulting object file with the libraries: -lf2c -lm (in that order) */ /* Table of constant values */ static integer c__1 = 1; static integer c_n1 = -1; /* > \brief \b DPTTRS */ /* =========== DOCUMENTATION =========== */ /* Online html documentation available at */ /* http://www.netlib.org/lapack/explore-html/ */ /* > \htmlonly */ /* > Download DPTTRS + dependencies */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dpttrs. f"> */ /* > [TGZ]</a> */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dpttrs. f"> */ /* > [ZIP]</a> */ /* > <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dpttrs. f"> */ /* > [TXT]</a> */ /* > \endhtmlonly */ /* Definition: */ /* =========== */ /* SUBROUTINE DPTTRS( N, NRHS, D, E, B, LDB, INFO ) */ /* INTEGER INFO, LDB, N, NRHS */ /* DOUBLE PRECISION B( LDB, * ), D( * ), E( * ) */ /* > \par Purpose: */ /* ============= */ /* > */ /* > \verbatim */ /* > */ /* > DPTTRS solves a tridiagonal system of the form */ /* > A * X = B */ /* > using the L*D*L**T factorization of A computed by DPTTRF. D is a */ /* > diagonal matrix specified in the vector D, L is a unit bidiagonal */ /* > matrix whose subdiagonal is specified in the vector E, and X and B */ /* > are N by NRHS matrices. */ /* > \endverbatim */ /* Arguments: */ /* ========== */ /* > \param[in] N */ /* > \verbatim */ /* > N is INTEGER */ /* > The order of the tridiagonal matrix A. N >= 0. */ /* > \endverbatim */ /* > */ /* > \param[in] NRHS */ /* > \verbatim */ /* > NRHS is INTEGER */ /* > The number of right hand sides, i.e., the number of columns */ /* > of the matrix B. NRHS >= 0. */ /* > \endverbatim */ /* > */ /* > \param[in] D */ /* > \verbatim */ /* > D is DOUBLE PRECISION array, dimension (N) */ /* > The n diagonal elements of the diagonal matrix D from the */ /* > L*D*L**T factorization of A. */ /* > \endverbatim */ /* > */ /* > \param[in] E */ /* > \verbatim */ /* > E is DOUBLE PRECISION array, dimension (N-1) */ /* > The (n-1) subdiagonal elements of the unit bidiagonal factor */ /* > L from the L*D*L**T factorization of A. E can also be regarded */ /* > as the superdiagonal of the unit bidiagonal factor U from the */ /* > factorization A = U**T*D*U. 
*/ /* > \endverbatim */ /* > */ /* > \param[in,out] B */ /* > \verbatim */ /* > B is DOUBLE PRECISION array, dimension (LDB,NRHS) */ /* > On entry, the right hand side vectors B for the system of */ /* > linear equations. */ /* > On exit, the solution vectors, X. */ /* > \endverbatim */ /* > */ /* > \param[in] LDB */ /* > \verbatim */ /* > LDB is INTEGER */ /* > The leading dimension of the array B. LDB >= f2cmax(1,N). */ /* > \endverbatim */ /* > */ /* > \param[out] INFO */ /* > \verbatim */ /* > INFO is INTEGER */ /* > = 0: successful exit */ /* > < 0: if INFO = -k, the k-th argument had an illegal value */ /* > \endverbatim */ /* Authors: */ /* ======== */ /* > \author Univ. of Tennessee */ /* > \author Univ. of California Berkeley */ /* > \author Univ. of Colorado Denver */ /* > \author NAG Ltd. */ /* > \date December 2016 */ /* > \ingroup doublePTcomputational */ /* ===================================================================== */ /* Subroutine */ int dpttrs_(integer *n, integer *nrhs, doublereal *d__, doublereal *e, doublereal *b, integer *ldb, integer *info) { /* System generated locals */ integer b_dim1, b_offset, i__1, i__2, i__3; /* Local variables */ integer j, jb, nb; extern /* Subroutine */ int dptts2_(integer *, integer *, doublereal *, doublereal *, doublereal *, integer *), xerbla_(char *, integer *, ftnlen); extern integer ilaenv_(integer *, char *, char *, integer *, integer *, integer *, integer *, ftnlen, ftnlen); /* -- LAPACK computational routine (version 3.7.0) -- */ /* -- LAPACK is a software package provided by Univ. of Tennessee, -- */ /* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..-- */ /* December 2016 */ /* ===================================================================== */ /* Test the input arguments. */ /* Parameter adjustments */ --d__; --e; b_dim1 = *ldb; b_offset = 1 + b_dim1 * 1; b -= b_offset; /* Function Body */ *info = 0; if (*n < 0) { *info = -1; } else if (*nrhs < 0) { *info = -2; } else if (*ldb < f2cmax(1,*n)) { *info = -6; } if (*info != 0) { i__1 = -(*info); xerbla_("DPTTRS", &i__1, (ftnlen)6); return 0; } /* Quick return if possible */ if (*n == 0 || *nrhs == 0) { return 0; } /* Determine the number of right-hand sides to solve at a time. */ if (*nrhs == 1) { nb = 1; } else { /* Computing MAX */ i__1 = 1, i__2 = ilaenv_(&c__1, "DPTTRS", " ", n, nrhs, &c_n1, &c_n1, (ftnlen)6, (ftnlen)1); nb = f2cmax(i__1,i__2); } if (nb >= *nrhs) { dptts2_(n, nrhs, &d__[1], &e[1], &b[b_offset], ldb); } else { i__1 = *nrhs; i__2 = nb; for (j = 1; i__2 < 0 ? j >= i__1 : j <= i__1; j += i__2) { /* Computing MIN */ i__3 = *nrhs - j + 1; jb = f2cmin(i__3,nb); dptts2_(n, &jb, &d__[1], &e[1], &b[j * b_dim1 + 1], ldb); /* L10: */ } } return 0; /* End of DPTTRS */ } /* dpttrs_ */
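/*
 * Editor's addition (worked example, not part of the translated routine):
 * solve A*x = b for the 3x3 SPD tridiagonal A = tridiag(-1, 2, -1) using
 * dpttrs_ above. The L*D*L**T factors are hand-computed here; normally they
 * come from DPTTRF. Linking requires the rest of the f2c-translated LAPACK
 * (dptts2_, ilaenv_, xerbla_) plus -lf2c -lm, hence the "#if 0" guard.
 */
#if 0
static void example_dpttrs(void)
{
    /* Hand-computed LDL**T of tridiag(-1, 2, -1):
     *   d1 = 2,  e1 = -1/d1 = -1/2,
     *   d2 = 2 - e1*e1*d1 = 3/2,  e2 = -1/d2 = -2/3,
     *   d3 = 2 - e2*e2*d2 = 4/3. */
    doublereal d[3] = {2.0, 1.5, 4.0 / 3.0};
    doublereal e[2] = {-0.5, -2.0 / 3.0};
    doublereal b[3] = {1.0, 0.0, 1.0}; /* A*x = b has solution x = (1,1,1) */
    integer n = 3, nrhs = 1, ldb = 3, info = 0;

    dpttrs_(&n, &nrhs, d, e, b, &ldb, &info);
    /* On return, info == 0 and b holds the solution vector x. */
}
#endif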
866959.c
/* * Copyright (c) 2015-2016 Dmitry V. Levin <[email protected]> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "tests.h" #include <asm/unistd.h> #ifdef __NR_fstat # define TEST_SYSCALL_NR __NR_fstat # define TEST_SYSCALL_STR "fstat" # define SAMPLE_SIZE ((libc_off_t) (kernel_ulong_t) 43147718418ULL) # include "fstatx.c" #else SKIP_MAIN_UNDEFINED("__NR_fstat") #endif
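/*
 * Editor's note (sketch, not from the strace suite): wrappers like this one
 * only pin down the syscall number, its printable name, and a sample size,
 * then include the shared fstatx.c template, which expands TEST_SYSCALL_NR
 * and TEST_SYSCALL_STR into the actual test body. A hypothetical sibling for
 * another stat-family syscall would differ only in those macros:
 *
 *   #ifdef __NR_fstat64
 *   # define TEST_SYSCALL_NR __NR_fstat64
 *   # define TEST_SYSCALL_STR "fstat64"
 *   # include "fstatx.c"
 *   #else
 *   SKIP_MAIN_UNDEFINED("__NR_fstat64")
 *   #endif
 */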
173985.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67b.c Label Definition File: CWE121_Stack_Based_Buffer_Overflow__CWE806.label.xml Template File: sources-sink-67b.tmpl.c */ /* * @description * CWE: 121 Stack Based Buffer Overflow * BadSource: Initialize data as a large string * GoodSource: Initialize data as a small string * Sinks: snprintf * BadSink : Copy data to string using snprintf * Flow Variant: 67 Data flow: data passed in a struct from one function to another in different source files * * */ #include "std_testcase.h" #include <wchar.h> #ifdef _WIN32 #define SNPRINTF _snwprintf #else #define SNPRINTF snprintf #endif typedef struct _CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67_structType { wchar_t * structFirst; } CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67_structType; #ifndef OMITBAD void CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67b_badSink(CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67_structType myStruct) { wchar_t * data = myStruct.structFirst; { wchar_t dest[50] = L""; /* POTENTIAL FLAW: Possible buffer overflow if data is larger than dest */ SNPRINTF(dest, wcslen(data), L"%s", data); printWLine(data); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67b_goodG2BSink(CWE121_Stack_Based_Buffer_Overflow__CWE806_wchar_t_declare_snprintf_67_structType myStruct) { wchar_t * data = myStruct.structFirst; { wchar_t dest[50] = L""; /* POTENTIAL FLAW: Possible buffer overflow if data is larger than dest */ SNPRINTF(dest, wcslen(data), L"%s", data); printWLine(data); } } #endif /* OMITGOOD */
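/*
 * Editor's sketch (not part of the Juliet testcase, whose flaw is
 * intentional): the bug above is that the size argument comes from
 * wcslen(data), the source length, so a long source overruns the 50-element
 * dest. A corrected sink bounds the write by the destination capacity
 * instead; swprintf() from <wchar.h> is used here for portability. The
 * function name is illustrative, not from the suite.
 */
#if 0
static void exampleFixedSink(wchar_t * data)
{
    wchar_t dest[50] = L"";
    /* Bound by the destination size in wide characters; swprintf()
     * NUL-terminates on success. */
    swprintf(dest, sizeof(dest) / sizeof(dest[0]), L"%ls", data);
    printWLine(dest);
}
#endif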
472250.c
/*	$Header: /newbits/usr/lib/libcurses/RCS/beep.c,v 1.2 91/09/30 12:42:42 bin Exp Locker: bin $
 *
 *	The information contained herein is a trade secret of INETCO
 *	Systems, and is confidential information.  It is provided under
 *	a license agreement, and may be copied or disclosed only under
 *	the terms of that agreement.  Any reproduction or disclosure of
 *	this material without the express written authorization of
 *	INETCO Systems or pursuant to the license agreement is unlawful.
 *
 *	Copyright (c) 1989
 *	An unpublished work by INETCO Systems, Ltd.
 *	All rights reserved.
 */

#include <stdio.h>
#include "curses.ext"

beep()
{
	register uchar * s;

# ifdef DEBUG
	fprintf( outf, "BEEP\n");
# endif

	/*
	 * Use audible bell if available, otherwise visible bell.
	 */
	if ( (s = bell) || (s = VB) || (s = "\007") )
		write( 1, s, strlen(s) );
}

flash()
{
	register uchar * s;

# ifdef DEBUG
	fprintf( outf, "FLASH\n");
# endif

	/*
	 * Use visible bell if available, otherwise audible bell.
	 */
	if ( (s = VB) || (s = bell) || (s = "\007") )
		write( 1, s, strlen(s) );
}
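/*
 * Editor's sketch (not from the original library): typical use of the two
 * routines above in a curses program. Assumes the usual initscr()/endwin()
 * bracket; the function name is hypothetical.
 */
#if 0
example()
{
	initscr();
	beep();		/* audible bell, with visible fallback */
	flash();	/* visible bell, with audible fallback */
	endwin();
}
#endif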
455516.c
// $Id: params_dup.c 943 2008-12-12 15:11:28Z mitza $ #include "params_dup.h" #include <openssl/asn1.h> DSA * DSAPARAMS_DUP_WRAPPER_NAME (DSA * dsa) { return DSAparams_dup (dsa); } DH * DHPARAMS_DUP_WRAPPER_NAME (DH * dh) { return DHparams_dup (dh); }
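// Editor's sketch (not part of the original wrapper): duplicating DH
// parameters through the wrapper above. DH_generate_parameters_ex() is slow
// and shown only for completeness; the function name is illustrative.
// DHparams_dup() copies the parameters (p, g, ...) but not any generated
// key material.
#if 0
#include <openssl/dh.h>

static DH *
example_dup_dh_params (void)
{
  DH *dh = DH_new ();
  DH *copy = 0;

  if (dh && DH_generate_parameters_ex (dh, 1024, DH_GENERATOR_2, 0) == 1)
    copy = DHPARAMS_DUP_WRAPPER_NAME (dh);

  DH_free (dh);
  return copy; /* caller frees with DH_free() */
}
#endif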
615744.c
/* * Copyright(c) 2019 Intel Corporation * Copyright (c) 2016, Alliance for Open Media. All rights reserved * * This source code is subject to the terms of the BSD 2 Clause License and * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License * was not distributed with this source code in the LICENSE file, you can * obtain it at https://www.aomedia.org/license/software-license. If the Alliance for Open * Media Patent License 1.0 was not distributed with this source code in the * PATENTS file, you can obtain it at https://www.aomedia.org/license/patent-license. */ #include <stdlib.h> #include "EbTransforms.h" #include "aom_dsp_rtcd.h" static const int8_t *fwd_txfm_range_mult2_list[TXFM_TYPES] = {fdct4_range_mult2, fdct8_range_mult2, fdct16_range_mult2, fdct32_range_mult2, fdct64_range_mult2, fadst4_range_mult2, fadst8_range_mult2, fadst16_range_mult2, fadst32_range_mult2, fidtx4_range_mult2, fidtx8_range_mult2, fidtx16_range_mult2, fidtx32_range_mult2, fidtx64_range_mult2}; static const int8_t *fwd_txfm_shift_ls[TX_SIZES_ALL] = { fwd_shift_4x4, fwd_shift_8x8, fwd_shift_16x16, fwd_shift_32x32, fwd_shift_64x64, fwd_shift_4x8, fwd_shift_8x4, fwd_shift_8x16, fwd_shift_16x8, fwd_shift_16x32, fwd_shift_32x16, fwd_shift_32x64, fwd_shift_64x32, fwd_shift_4x16, fwd_shift_16x4, fwd_shift_8x32, fwd_shift_32x8, fwd_shift_16x64, fwd_shift_64x16, }; /***************************** * Defines *****************************/ #define BETA_P 1 #define BETA_N 3 /******************************************** * Constants ********************************************/ #define ALPHA_0000 0 #define ALPHA_0050 50 #define ALPHA_0100 100 #define ALPHA_0200 200 #define ALPHA_0300 300 #define ALPHA_0500 500 #define ALPHA_1000 1000 void svt_av1_gen_fwd_stage_range(int8_t *stage_range_col, int8_t *stage_range_row, const Txfm2dFlipCfg *cfg, int32_t bd) { // Take the shift from the larger dimension in the rectangular case. 
const int8_t *shift = cfg->shift; // i < MAX_TXFM_STAGE_NUM will mute above array bounds warning for (int32_t i = 0; i < cfg->stage_num_col && i < MAX_TXFM_STAGE_NUM; ++i) stage_range_col[i] = (int8_t)(cfg->stage_range_col[i] + shift[0] + bd + 1); // i < MAX_TXFM_STAGE_NUM will mute above array bounds warning for (int32_t i = 0; i < cfg->stage_num_row && i < MAX_TXFM_STAGE_NUM; ++i) stage_range_row[i] = (int8_t)(cfg->stage_range_row[i] + shift[0] + shift[1] + bd + 1); } #define range_check(stage, input, buf, size, bit) \ do { \ } while (0) void svt_av1_fdct4_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[4]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[3]; bf1[1] = input[1] + input[2]; bf1[2] = -input[2] + input[1]; bf1[3] = -input[3] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit); // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[2]; bf1[2] = bf0[1]; bf1[3] = bf0[3]; } void svt_av1_fdct8_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[8]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[7]; bf1[1] = input[1] + input[6]; bf1[2] = input[2] + input[5]; bf1[3] = input[3] + input[4]; bf1[4] = -input[4] + input[3]; bf1[5] = -input[5] + input[2]; bf1[6] = -input[6] + input[1]; bf1[7] = -input[7] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4]; bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit); bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit); bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[4]; bf1[2] = bf0[2]; bf1[3] = bf0[6]; bf1[4] = bf0[1]; bf1[5] = bf0[5]; bf1[6] = bf0[3]; bf1[7] = bf0[7]; } void svt_av1_fdct16_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[16]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[15]; bf1[1] = input[1] + input[14]; bf1[2] = input[2] + input[13]; bf1[3] = input[3] + input[12]; bf1[4] = input[4] + input[11]; bf1[5] = input[5] + input[10]; 
void svt_av1_fdct16_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    const int32_t *cospi;
    int32_t *bf0, *bf1;
    int32_t step[16];
    // stage 0
    // stage 1
    bf1 = output;
    bf1[0] = input[0] + input[15]; bf1[1] = input[1] + input[14];
    bf1[2] = input[2] + input[13]; bf1[3] = input[3] + input[12];
    bf1[4] = input[4] + input[11]; bf1[5] = input[5] + input[10];
    bf1[6] = input[6] + input[9]; bf1[7] = input[7] + input[8];
    bf1[8] = -input[8] + input[7]; bf1[9] = -input[9] + input[6];
    bf1[10] = -input[10] + input[5]; bf1[11] = -input[11] + input[4];
    bf1[12] = -input[12] + input[3]; bf1[13] = -input[13] + input[2];
    bf1[14] = -input[14] + input[1]; bf1[15] = -input[15] + input[0];
    // stage 2
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6];
    bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4];
    bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2];
    bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0];
    bf1[8] = bf0[8]; bf1[9] = bf0[9];
    bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit);
    bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit);
    bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit);
    bf1[14] = bf0[14]; bf1[15] = bf0[15];
    // stage 3
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2];
    bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0];
    bf1[4] = bf0[4];
    bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
    bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit);
    bf1[7] = bf0[7];
    bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10];
    bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8];
    bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14];
    bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12];
    // stage 4
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
    bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit);
    bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit);
    bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4];
    bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6];
    bf1[8] = bf0[8];
    bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit);
    bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit);
    bf1[11] = bf0[11]; bf1[12] = bf0[12];
    bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit);
    bf1[15] = bf0[15];
    // stage 5
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit);
    bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit);
    bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit);
    bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit);
    bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8];
    bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10];
    bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12];
    bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14];
    // stage 6
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit);
    bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit);
    bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit);
    bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit);
    bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit);
    bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit);
    // stage 7
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[8]; bf1[2] = bf0[4]; bf1[3] = bf0[12];
    bf1[4] = bf0[2]; bf1[5] = bf0[10]; bf1[6] = bf0[6]; bf1[7] = bf0[14];
    bf1[8] = bf0[1]; bf1[9] = bf0[9]; bf1[10] = bf0[5]; bf1[11] = bf0[13];
    bf1[12] = bf0[3]; bf1[13] = bf0[11]; bf1[14] = bf0[7]; bf1[15] = bf0[15];
}
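// Forward 32-point DCT-II: nine butterfly stages with the same output/step
// ping-pong layout as the 16-point kernel; stage 9 is the bit-reversal
// output permutation.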
void svt_av1_fdct32_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    const int32_t *cospi;
    int32_t *bf0, *bf1;
    int32_t step[32];
    // stage 0
    // stage 1
    bf1 = output;
    bf1[0] = input[0] + input[31]; bf1[1] = input[1] + input[30];
    bf1[2] = input[2] + input[29]; bf1[3] = input[3] + input[28];
    bf1[4] = input[4] + input[27]; bf1[5] = input[5] + input[26];
    bf1[6] = input[6] + input[25]; bf1[7] = input[7] + input[24];
    bf1[8] = input[8] + input[23]; bf1[9] = input[9] + input[22];
    bf1[10] = input[10] + input[21]; bf1[11] = input[11] + input[20];
    bf1[12] = input[12] + input[19]; bf1[13] = input[13] + input[18];
    bf1[14] = input[14] + input[17]; bf1[15] = input[15] + input[16];
    bf1[16] = -input[16] + input[15]; bf1[17] = -input[17] + input[14];
    bf1[18] = -input[18] + input[13]; bf1[19] = -input[19] + input[12];
    bf1[20] = -input[20] + input[11]; bf1[21] = -input[21] + input[10];
    bf1[22] = -input[22] + input[9]; bf1[23] = -input[23] + input[8];
    bf1[24] = -input[24] + input[7]; bf1[25] = -input[25] + input[6];
    bf1[26] = -input[26] + input[5]; bf1[27] = -input[27] + input[4];
    bf1[28] = -input[28] + input[3]; bf1[29] = -input[29] + input[2];
    bf1[30] = -input[30] + input[1]; bf1[31] = -input[31] + input[0];
    // stage 2
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14];
    bf1[2] = bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12];
    bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10];
    bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8];
    bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6];
    bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4];
    bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2];
    bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0];
    bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19];
    bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit);
    bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit);
    bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit);
    bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit);
    bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit);
    bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit);
    bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit);
    bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31];
    // stage 3
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6];
    bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4];
    bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2];
    bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0];
    bf1[8] = bf0[8]; bf1[9] = bf0[9];
    bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit);
    bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit);
    bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit);
    bf1[14] = bf0[14]; bf1[15] = bf0[15];
    bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22];
    bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20];
    bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18];
    bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16];
    bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + bf0[30];
    bf1[26] = -bf0[26] + bf0[29]; bf1[27] = -bf0[27] + bf0[28];
    bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26];
    bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24];
    // stage 4
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2];
    bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0];
    bf1[4] = bf0[4];
    bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
    bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit);
    bf1[7] = bf0[7];
    bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10];
    bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8];
    bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14];
    bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12];
    bf1[16] = bf0[16]; bf1[17] = bf0[17];
    bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit);
    bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit);
    bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit);
    bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit);
    bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25];
    bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit);
    bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit);
    bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit);
    bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit);
    bf1[30] = bf0[30]; bf1[31] = bf0[31];
    // stage 5
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
    bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit);
    bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit);
    bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4];
    bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6];
    bf1[8] = bf0[8];
    bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit);
    bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit);
    bf1[11] = bf0[11]; bf1[12] = bf0[12];
    bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit);
    bf1[15] = bf0[15];
    bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18];
    bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16];
    bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22];
    bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20];
    bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26];
    bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24];
    bf1[28] = -bf0[28] + bf0[31]; bf1[29] = -bf0[29] + bf0[30];
    bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28];
    // stage 6
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit);
    bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit);
    bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit);
    bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit);
    bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8];
    bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10];
    bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12];
    bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14];
    bf1[16] = bf0[16];
    bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit);
    bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit);
    bf1[19] = bf0[19]; bf1[20] = bf0[20];
    bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit);
    bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit);
    bf1[23] = bf0[23]; bf1[24] = bf0[24];
    bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit);
    bf1[27] = bf0[27]; bf1[28] = bf0[28];
    bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit);
    bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit);
    bf1[31] = bf0[31];
    // stage 7
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit);
    bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit);
    bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit);
    bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit);
    bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit);
    bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit);
    bf1[16] = bf0[16] + bf0[17]; bf1[17] = -bf0[17] + bf0[16];
    bf1[18] = -bf0[18] + bf0[19]; bf1[19] = bf0[19] + bf0[18];
    bf1[20] = bf0[20] + bf0[21]; bf1[21] = -bf0[21] + bf0[20];
    bf1[22] = -bf0[22] + bf0[23]; bf1[23] = bf0[23] + bf0[22];
    bf1[24] = bf0[24] + bf0[25]; bf1[25] = -bf0[25] + bf0[24];
    bf1[26] = -bf0[26] + bf0[27]; bf1[27] = bf0[27] + bf0[26];
    bf1[28] = bf0[28] + bf0[29]; bf1[29] = -bf0[29] + bf0[28];
    bf1[30] = -bf0[30] + bf0[31]; bf1[31] = bf0[31] + bf0[30];
    // stage 8
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11];
    bf1[12] = bf0[12]; bf1[13] = bf0[13]; bf1[14] = bf0[14]; bf1[15] = bf0[15];
    bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit);
    bf1[17] = half_btf(cospi[30], bf0[17], cospi[34], bf0[30], cos_bit);
    bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit);
    bf1[19] = half_btf(cospi[14], bf0[19], cospi[50], bf0[28], cos_bit);
    bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit);
    bf1[21] = half_btf(cospi[22], bf0[21], cospi[42], bf0[26], cos_bit);
    bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit);
    bf1[23] = half_btf(cospi[6], bf0[23], cospi[58], bf0[24], cos_bit);
    bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit);
    bf1[25] = half_btf(cospi[38], bf0[25], -cospi[26], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit);
    bf1[27] = half_btf(cospi[54], bf0[27], -cospi[10], bf0[20], cos_bit);
    bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit);
    bf1[29] = half_btf(cospi[46], bf0[29], -cospi[18], bf0[18], cos_bit);
    bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit);
    bf1[31] = half_btf(cospi[62], bf0[31], -cospi[2], bf0[16], cos_bit);
    // stage 9
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[16]; bf1[2] = bf0[8]; bf1[3] = bf0[24];
    bf1[4] = bf0[4]; bf1[5] = bf0[20]; bf1[6] = bf0[12]; bf1[7] = bf0[28];
    bf1[8] = bf0[2]; bf1[9] = bf0[18]; bf1[10] = bf0[10]; bf1[11] = bf0[26];
    bf1[12] = bf0[6]; bf1[13] = bf0[22]; bf1[14] = bf0[14]; bf1[15] = bf0[30];
    bf1[16] = bf0[1]; bf1[17] = bf0[17]; bf1[18] = bf0[9]; bf1[19] = bf0[25];
    bf1[20] = bf0[5]; bf1[21] = bf0[21]; bf1[22] = bf0[13]; bf1[23] = bf0[29];
    bf1[24] = bf0[3]; bf1[25] = bf0[19]; bf1[26] = bf0[11]; bf1[27] = bf0[27];
    bf1[28] = bf0[7]; bf1[29] = bf0[23]; bf1[30] = bf0[15]; bf1[31] = bf0[31];
}
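// Forward 64-point DCT-II: eleven butterfly stages; the rotations for the
// odd half of the coefficients use the full cosine table up to cospi[63],
// and stage 11 is the bit-reversal output permutation.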
void svt_av1_fdct64_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    const int32_t *cospi;
    int32_t *bf0, *bf1;
    int32_t step[64];
    // stage 0
    // stage 1
    bf1 = output;
    bf1[0] = input[0] + input[63]; bf1[1] = input[1] + input[62];
    bf1[2] = input[2] + input[61]; bf1[3] = input[3] + input[60];
    bf1[4] = input[4] + input[59]; bf1[5] = input[5] + input[58];
    bf1[6] = input[6] + input[57]; bf1[7] = input[7] + input[56];
    bf1[8] = input[8] + input[55]; bf1[9] = input[9] + input[54];
    bf1[10] = input[10] + input[53]; bf1[11] = input[11] + input[52];
    bf1[12] = input[12] + input[51]; bf1[13] = input[13] + input[50];
    bf1[14] = input[14] + input[49]; bf1[15] = input[15] + input[48];
    bf1[16] = input[16] + input[47]; bf1[17] = input[17] + input[46];
    bf1[18] = input[18] + input[45]; bf1[19] = input[19] + input[44];
    bf1[20] = input[20] + input[43]; bf1[21] = input[21] + input[42];
    bf1[22] = input[22] + input[41]; bf1[23] = input[23] + input[40];
    bf1[24] = input[24] + input[39]; bf1[25] = input[25] + input[38];
    bf1[26] = input[26] + input[37]; bf1[27] = input[27] + input[36];
    bf1[28] = input[28] + input[35]; bf1[29] = input[29] + input[34];
    bf1[30] = input[30] + input[33]; bf1[31] = input[31] + input[32];
    bf1[32] = -input[32] + input[31]; bf1[33] = -input[33] + input[30];
    bf1[34] = -input[34] + input[29]; bf1[35] = -input[35] + input[28];
    bf1[36] = -input[36] + input[27]; bf1[37] = -input[37] + input[26];
    bf1[38] = -input[38] + input[25]; bf1[39] = -input[39] + input[24];
    bf1[40] = -input[40] + input[23]; bf1[41] = -input[41] + input[22];
    bf1[42] = -input[42] + input[21]; bf1[43] = -input[43] + input[20];
    bf1[44] = -input[44] + input[19]; bf1[45] = -input[45] + input[18];
    bf1[46] = -input[46] + input[17]; bf1[47] = -input[47] + input[16];
    bf1[48] = -input[48] + input[15]; bf1[49] = -input[49] + input[14];
    bf1[50] = -input[50] + input[13]; bf1[51] = -input[51] + input[12];
    bf1[52] = -input[52] + input[11]; bf1[53] = -input[53] + input[10];
    bf1[54] = -input[54] + input[9]; bf1[55] = -input[55] + input[8];
    bf1[56] = -input[56] + input[7]; bf1[57] = -input[57] + input[6];
    bf1[58] = -input[58] + input[5]; bf1[59] = -input[59] + input[4];
    bf1[60] = -input[60] + input[3]; bf1[61] = -input[61] + input[2];
    bf1[62] = -input[62] + input[1]; bf1[63] = -input[63] + input[0];
    // stage 2
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0] + bf0[31]; bf1[1] = bf0[1] + bf0[30];
    bf1[2] = bf0[2] + bf0[29]; bf1[3] = bf0[3] + bf0[28];
    bf1[4] = bf0[4] + bf0[27]; bf1[5] = bf0[5] + bf0[26];
    bf1[6] = bf0[6] + bf0[25]; bf1[7] = bf0[7] + bf0[24];
    bf1[8] = bf0[8] + bf0[23]; bf1[9] = bf0[9] + bf0[22];
    bf1[10] = bf0[10] + bf0[21]; bf1[11] = bf0[11] + bf0[20];
    bf1[12] = bf0[12] + bf0[19]; bf1[13] = bf0[13] + bf0[18];
    bf1[14] = bf0[14] + bf0[17]; bf1[15] = bf0[15] + bf0[16];
    bf1[16] = -bf0[16] + bf0[15]; bf1[17] = -bf0[17] + bf0[14];
    bf1[18] = -bf0[18] + bf0[13]; bf1[19] = -bf0[19] + bf0[12];
    bf1[20] = -bf0[20] + bf0[11]; bf1[21] = -bf0[21] + bf0[10];
    bf1[22] = -bf0[22] + bf0[9]; bf1[23] = -bf0[23] + bf0[8];
    bf1[24] = -bf0[24] + bf0[7]; bf1[25] = -bf0[25] + bf0[6];
    bf1[26] = -bf0[26] + bf0[5]; bf1[27] = -bf0[27] + bf0[4];
    bf1[28] = -bf0[28] + bf0[3]; bf1[29] = -bf0[29] + bf0[2];
    bf1[30] = -bf0[30] + bf0[1]; bf1[31] = -bf0[31] + bf0[0];
    bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = bf0[34]; bf1[35] = bf0[35];
    bf1[36] = bf0[36]; bf1[37] = bf0[37]; bf1[38] = bf0[38]; bf1[39] = bf0[39];
    bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit);
    bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit);
    bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit);
    bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit);
    bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit);
    bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit);
    bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit);
    bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit);
    bf1[48] = half_btf(cospi[32], bf0[48], cospi[32], bf0[47], cos_bit);
    bf1[49] = half_btf(cospi[32], bf0[49], cospi[32], bf0[46], cos_bit);
    bf1[50] = half_btf(cospi[32], bf0[50], cospi[32], bf0[45], cos_bit);
    bf1[51] = half_btf(cospi[32], bf0[51], cospi[32], bf0[44], cos_bit);
    bf1[52] = half_btf(cospi[32], bf0[52], cospi[32], bf0[43], cos_bit);
    bf1[53] = half_btf(cospi[32], bf0[53], cospi[32], bf0[42], cos_bit);
    bf1[54] = half_btf(cospi[32], bf0[54], cospi[32], bf0[41], cos_bit);
    bf1[55] = half_btf(cospi[32], bf0[55], cospi[32], bf0[40], cos_bit);
    bf1[56] = bf0[56]; bf1[57] = bf0[57]; bf1[58] = bf0[58]; bf1[59] = bf0[59];
    bf1[60] = bf0[60]; bf1[61] = bf0[61]; bf1[62] = bf0[62]; bf1[63] = bf0[63];
    // stage 3
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14];
    bf1[2] = bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12];
    bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10];
    bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8];
    bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6];
    bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4];
    bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2];
    bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0];
    bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19];
    bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit);
    bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit);
    bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit);
    bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit);
    bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit);
    bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit);
    bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit);
    bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31];
    bf1[32] = bf0[32] + bf0[47]; bf1[33] = bf0[33] + bf0[46];
    bf1[34] = bf0[34] + bf0[45]; bf1[35] = bf0[35] + bf0[44];
    bf1[36] = bf0[36] + bf0[43]; bf1[37] = bf0[37] + bf0[42];
    bf1[38] = bf0[38] + bf0[41]; bf1[39] = bf0[39] + bf0[40];
    bf1[40] = -bf0[40] + bf0[39]; bf1[41] = -bf0[41] + bf0[38];
    bf1[42] = -bf0[42] + bf0[37]; bf1[43] = -bf0[43] + bf0[36];
    bf1[44] = -bf0[44] + bf0[35]; bf1[45] = -bf0[45] + bf0[34];
    bf1[46] = -bf0[46] + bf0[33]; bf1[47] = -bf0[47] + bf0[32];
    bf1[48] = -bf0[48] + bf0[63]; bf1[49] = -bf0[49] + bf0[62];
    bf1[50] = -bf0[50] + bf0[61]; bf1[51] = -bf0[51] + bf0[60];
    bf1[52] = -bf0[52] + bf0[59]; bf1[53] = -bf0[53] + bf0[58];
    bf1[54] = -bf0[54] + bf0[57]; bf1[55] = -bf0[55] + bf0[56];
    bf1[56] = bf0[56] + bf0[55]; bf1[57] = bf0[57] + bf0[54];
    bf1[58] = bf0[58] + bf0[53]; bf1[59] = bf0[59] + bf0[52];
    bf1[60] = bf0[60] + bf0[51]; bf1[61] = bf0[61] + bf0[50];
    bf1[62] = bf0[62] + bf0[49]; bf1[63] = bf0[63] + bf0[48];
    // stage 4
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6];
    bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4];
    bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2];
    bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0];
    bf1[8] = bf0[8]; bf1[9] = bf0[9];
    bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit);
    bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit);
    bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit);
    bf1[14] = bf0[14]; bf1[15] = bf0[15];
    bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22];
    bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20];
    bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18];
    bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16];
    bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + bf0[30];
    bf1[26] = -bf0[26] + bf0[29]; bf1[27] = -bf0[27] + bf0[28];
    bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26];
    bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24];
    bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = bf0[34]; bf1[35] = bf0[35];
    bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit);
    bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit);
    bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit);
    bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit);
    bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], cos_bit);
    bf1[41] = half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit);
    bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit);
    bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit);
    bf1[44] = bf0[44]; bf1[45] = bf0[45]; bf1[46] = bf0[46]; bf1[47] = bf0[47];
    bf1[48] = bf0[48]; bf1[49] = bf0[49]; bf1[50] = bf0[50]; bf1[51] = bf0[51];
    bf1[52] = half_btf(cospi[48], bf0[52], -cospi[16], bf0[43], cos_bit);
    bf1[53] = half_btf(cospi[48], bf0[53], -cospi[16], bf0[42], cos_bit);
    bf1[54] = half_btf(cospi[48], bf0[54], -cospi[16], bf0[41], cos_bit);
    bf1[55] = half_btf(cospi[48], bf0[55], -cospi[16], bf0[40], cos_bit);
    bf1[56] = half_btf(cospi[16], bf0[56], cospi[48], bf0[39], cos_bit);
    bf1[57] = half_btf(cospi[16], bf0[57], cospi[48], bf0[38], cos_bit);
    bf1[58] = half_btf(cospi[16], bf0[58], cospi[48], bf0[37], cos_bit);
    bf1[59] = half_btf(cospi[16], bf0[59], cospi[48], bf0[36], cos_bit);
    bf1[60] = bf0[60]; bf1[61] = bf0[61]; bf1[62] = bf0[62]; bf1[63] = bf0[63];
    // stage 5
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2];
    bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0];
    bf1[4] = bf0[4];
    bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit);
    bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit);
    bf1[7] = bf0[7];
    bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10];
    bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8];
    bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14];
    bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12];
    bf1[16] = bf0[16]; bf1[17] = bf0[17];
    bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit);
    bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit);
    bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit);
    bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit);
    bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25];
    bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit);
    bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit);
    bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit);
    bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit);
    bf1[30] = bf0[30]; bf1[31] = bf0[31];
    bf1[32] = bf0[32] + bf0[39]; bf1[33] = bf0[33] + bf0[38];
    bf1[34] = bf0[34] + bf0[37]; bf1[35] = bf0[35] + bf0[36];
    bf1[36] = -bf0[36] + bf0[35]; bf1[37] = -bf0[37] + bf0[34];
    bf1[38] = -bf0[38] + bf0[33]; bf1[39] = -bf0[39] + bf0[32];
    bf1[40] = -bf0[40] + bf0[47]; bf1[41] = -bf0[41] + bf0[46];
    bf1[42] = -bf0[42] + bf0[45]; bf1[43] = -bf0[43] + bf0[44];
    bf1[44] = bf0[44] + bf0[43]; bf1[45] = bf0[45] + bf0[42];
    bf1[46] = bf0[46] + bf0[41]; bf1[47] = bf0[47] + bf0[40];
    bf1[48] = bf0[48] + bf0[55]; bf1[49] = bf0[49] + bf0[54];
    bf1[50] = bf0[50] + bf0[53]; bf1[51] = bf0[51] + bf0[52];
    bf1[52] = -bf0[52] + bf0[51]; bf1[53] = -bf0[53] + bf0[50];
    bf1[54] = -bf0[54] + bf0[49]; bf1[55] = -bf0[55] + bf0[48];
    bf1[56] = -bf0[56] + bf0[63]; bf1[57] = -bf0[57] + bf0[62];
    bf1[58] = -bf0[58] + bf0[61]; bf1[59] = -bf0[59] + bf0[60];
    bf1[60] = bf0[60] + bf0[59]; bf1[61] = bf0[61] + bf0[58];
    bf1[62] = bf0[62] + bf0[57]; bf1[63] = bf0[63] + bf0[56];
    // stage 6
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit);
    bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit);
    bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit);
    bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4];
    bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6];
    bf1[8] = bf0[8];
    bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit);
    bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit);
    bf1[11] = bf0[11]; bf1[12] = bf0[12];
    bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit);
    bf1[15] = bf0[15];
    bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18];
    bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16];
    bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22];
    bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20];
    bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26];
    bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24];
    bf1[28] = -bf0[28] + bf0[31]; bf1[29] = -bf0[29] + bf0[30];
    bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28];
    bf1[32] = bf0[32]; bf1[33] = bf0[33];
    bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit);
    bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit);
    bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit);
    bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit);
    bf1[38] = bf0[38]; bf1[39] = bf0[39]; bf1[40] = bf0[40]; bf1[41] = bf0[41];
    bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit);
    bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit);
    bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit);
    bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit);
    bf1[46] = bf0[46]; bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = bf0[49];
    bf1[50] = half_btf(cospi[24], bf0[50], -cospi[40], bf0[45], cos_bit);
    bf1[51] = half_btf(cospi[24], bf0[51], -cospi[40], bf0[44], cos_bit);
    bf1[52] = half_btf(cospi[40], bf0[52], cospi[24], bf0[43], cos_bit);
    bf1[53] = half_btf(cospi[40], bf0[53], cospi[24], bf0[42], cos_bit);
    bf1[54] = bf0[54]; bf1[55] = bf0[55]; bf1[56] = bf0[56]; bf1[57] = bf0[57];
    bf1[58] = half_btf(cospi[56], bf0[58], -cospi[8], bf0[37], cos_bit);
    bf1[59] = half_btf(cospi[56], bf0[59], -cospi[8], bf0[36], cos_bit);
    bf1[60] = half_btf(cospi[8], bf0[60], cospi[56], bf0[35], cos_bit);
    bf1[61] = half_btf(cospi[8], bf0[61], cospi[56], bf0[34], cos_bit);
    bf1[62] = bf0[62]; bf1[63] = bf0[63];
    // stage 7
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit);
    bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit);
    bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit);
    bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit);
    bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8];
    bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10];
    bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12];
    bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14];
    bf1[16] = bf0[16];
    bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit);
    bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit);
    bf1[19] = bf0[19]; bf1[20] = bf0[20];
    bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit);
    bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit);
    bf1[23] = bf0[23]; bf1[24] = bf0[24];
    bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit);
    bf1[27] = bf0[27]; bf1[28] = bf0[28];
    bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit);
    bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit);
    bf1[31] = bf0[31];
    bf1[32] = bf0[32] + bf0[35]; bf1[33] = bf0[33] + bf0[34];
    bf1[34] = -bf0[34] + bf0[33]; bf1[35] = -bf0[35] + bf0[32];
    bf1[36] = -bf0[36] + bf0[39]; bf1[37] = -bf0[37] + bf0[38];
    bf1[38] = bf0[38] + bf0[37]; bf1[39] = bf0[39] + bf0[36];
    bf1[40] = bf0[40] + bf0[43]; bf1[41] = bf0[41] + bf0[42];
    bf1[42] = -bf0[42] + bf0[41]; bf1[43] = -bf0[43] + bf0[40];
    bf1[44] = -bf0[44] + bf0[47]; bf1[45] = -bf0[45] + bf0[46];
    bf1[46] = bf0[46] + bf0[45]; bf1[47] = bf0[47] + bf0[44];
    bf1[48] = bf0[48] + bf0[51]; bf1[49] = bf0[49] + bf0[50];
    bf1[50] = -bf0[50] + bf0[49]; bf1[51] = -bf0[51] + bf0[48];
    bf1[52] = -bf0[52] + bf0[55]; bf1[53] = -bf0[53] + bf0[54];
    bf1[54] = bf0[54] + bf0[53]; bf1[55] = bf0[55] + bf0[52];
    bf1[56] = bf0[56] + bf0[59]; bf1[57] = bf0[57] + bf0[58];
    bf1[58] = -bf0[58] + bf0[57]; bf1[59] = -bf0[59] + bf0[56];
    bf1[60] = -bf0[60] + bf0[63]; bf1[61] = -bf0[61] + bf0[62];
    bf1[62] = bf0[62] + bf0[61]; bf1[63] = bf0[63] + bf0[60];
    // stage 8
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit);
    bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit);
    bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit);
    bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit);
    bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit);
    bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit);
    bf1[16] = bf0[16] + bf0[17]; bf1[17] = -bf0[17] + bf0[16];
    bf1[18] = -bf0[18] + bf0[19]; bf1[19] = bf0[19] + bf0[18];
    bf1[20] = bf0[20] + bf0[21]; bf1[21] = -bf0[21] + bf0[20];
    bf1[22] = -bf0[22] + bf0[23]; bf1[23] = bf0[23] + bf0[22];
    bf1[24] = bf0[24] + bf0[25]; bf1[25] = -bf0[25] + bf0[24];
    bf1[26] = -bf0[26] + bf0[27]; bf1[27] = bf0[27] + bf0[26];
    bf1[28] = bf0[28] + bf0[29]; bf1[29] = -bf0[29] + bf0[28];
    bf1[30] = -bf0[30] + bf0[31]; bf1[31] = bf0[31] + bf0[30];
    bf1[32] = bf0[32];
    bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit);
    bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit);
    bf1[35] = bf0[35]; bf1[36] = bf0[36];
    bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit);
    bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit);
    bf1[39] = bf0[39]; bf1[40] = bf0[40];
    bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit);
    bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit);
    bf1[43] = bf0[43]; bf1[44] = bf0[44];
    bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit);
    bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit);
    bf1[47] = bf0[47]; bf1[48] = bf0[48];
    bf1[49] = half_btf(cospi[12], bf0[49], -cospi[52], bf0[46], cos_bit);
    bf1[50] = half_btf(cospi[52], bf0[50], cospi[12], bf0[45], cos_bit);
    bf1[51] = bf0[51]; bf1[52] = bf0[52];
    bf1[53] = half_btf(cospi[44], bf0[53], -cospi[20], bf0[42], cos_bit);
    bf1[54] = half_btf(cospi[20], bf0[54], cospi[44], bf0[41], cos_bit);
    bf1[55] = bf0[55]; bf1[56] = bf0[56];
    bf1[57] = half_btf(cospi[28], bf0[57], -cospi[36], bf0[38], cos_bit);
    bf1[58] = half_btf(cospi[36], bf0[58], cospi[28], bf0[37], cos_bit);
    bf1[59] = bf0[59]; bf1[60] = bf0[60];
    bf1[61] = half_btf(cospi[60], bf0[61], -cospi[4], bf0[34], cos_bit);
    bf1[62] = half_btf(cospi[4], bf0[62], cospi[60], bf0[33], cos_bit);
    bf1[63] = bf0[63];
    // stage 9
    cospi = cospi_arr(cos_bit);
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11];
    bf1[12] = bf0[12]; bf1[13] = bf0[13]; bf1[14] = bf0[14]; bf1[15] = bf0[15];
    bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit);
    bf1[17] = half_btf(cospi[30], bf0[17], cospi[34], bf0[30], cos_bit);
    bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit);
    bf1[19] = half_btf(cospi[14], bf0[19], cospi[50], bf0[28], cos_bit);
    bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit);
    bf1[21] = half_btf(cospi[22], bf0[21], cospi[42], bf0[26], cos_bit);
    bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit);
    bf1[23] = half_btf(cospi[6], bf0[23], cospi[58], bf0[24], cos_bit);
    bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit);
    bf1[25] = half_btf(cospi[38], bf0[25], -cospi[26], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit);
    bf1[27] = half_btf(cospi[54], bf0[27], -cospi[10], bf0[20], cos_bit);
    bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit);
    bf1[29] = half_btf(cospi[46], bf0[29], -cospi[18], bf0[18], cos_bit);
    bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit);
    bf1[31] = half_btf(cospi[62], bf0[31], -cospi[2], bf0[16], cos_bit);
    bf1[32] = bf0[32] + bf0[33]; bf1[33] = -bf0[33] + bf0[32];
    bf1[34] = -bf0[34] + bf0[35]; bf1[35] = bf0[35] + bf0[34];
    bf1[36] = bf0[36] + bf0[37]; bf1[37] = -bf0[37] + bf0[36];
    bf1[38] = -bf0[38] + bf0[39]; bf1[39] = bf0[39] + bf0[38];
    bf1[40] = bf0[40] + bf0[41]; bf1[41] = -bf0[41] + bf0[40];
    bf1[42] = -bf0[42] + bf0[43]; bf1[43] = bf0[43] + bf0[42];
    bf1[44] = bf0[44] + bf0[45]; bf1[45] = -bf0[45] + bf0[44];
    bf1[46] = -bf0[46] + bf0[47]; bf1[47] = bf0[47] + bf0[46];
    bf1[48] = bf0[48] + bf0[49]; bf1[49] = -bf0[49] + bf0[48];
    bf1[50] = -bf0[50] + bf0[51]; bf1[51] = bf0[51] + bf0[50];
    bf1[52] = bf0[52] + bf0[53]; bf1[53] = -bf0[53] + bf0[52];
    bf1[54] = -bf0[54] + bf0[55]; bf1[55] = bf0[55] + bf0[54];
    bf1[56] = bf0[56] + bf0[57]; bf1[57] = -bf0[57] + bf0[56];
    bf1[58] = -bf0[58] + bf0[59]; bf1[59] = bf0[59] + bf0[58];
    bf1[60] = bf0[60] + bf0[61]; bf1[61] = -bf0[61] + bf0[60];
    bf1[62] = -bf0[62] + bf0[63]; bf1[63] = bf0[63] + bf0[62];
    // stage 10
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11];
    bf1[12] = bf0[12]; bf1[13] = bf0[13]; bf1[14] = bf0[14]; bf1[15] = bf0[15];
    bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19];
    bf1[20] = bf0[20]; bf1[21] = bf0[21]; bf1[22] = bf0[22]; bf1[23] = bf0[23];
    bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = bf0[26]; bf1[27] = bf0[27];
    bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31];
    bf1[32] = half_btf(cospi[63], bf0[32], cospi[1], bf0[63], cos_bit);
    bf1[33] = half_btf(cospi[31], bf0[33], cospi[33], bf0[62], cos_bit);
    bf1[34] = half_btf(cospi[47], bf0[34], cospi[17], bf0[61], cos_bit);
    bf1[35] = half_btf(cospi[15], bf0[35], cospi[49], bf0[60], cos_bit);
    bf1[36] = half_btf(cospi[55], bf0[36], cospi[9], bf0[59], cos_bit);
    bf1[37] = half_btf(cospi[23], bf0[37], cospi[41], bf0[58], cos_bit);
    bf1[38] = half_btf(cospi[39], bf0[38], cospi[25], bf0[57], cos_bit);
    bf1[39] = half_btf(cospi[7], bf0[39], cospi[57], bf0[56], cos_bit);
    bf1[40] = half_btf(cospi[59], bf0[40], cospi[5], bf0[55], cos_bit);
    bf1[41] = half_btf(cospi[27], bf0[41], cospi[37], bf0[54], cos_bit);
    bf1[42] = half_btf(cospi[43], bf0[42], cospi[21], bf0[53], cos_bit);
    bf1[43] = half_btf(cospi[11], bf0[43], cospi[53], bf0[52], cos_bit);
    bf1[44] = half_btf(cospi[51], bf0[44], cospi[13], bf0[51], cos_bit);
    bf1[45] = half_btf(cospi[19], bf0[45], cospi[45], bf0[50], cos_bit);
    bf1[46] = half_btf(cospi[35], bf0[46], cospi[29], bf0[49], cos_bit);
    bf1[47] = half_btf(cospi[3], bf0[47], cospi[61], bf0[48], cos_bit);
    bf1[48] = half_btf(cospi[3], bf0[48], -cospi[61], bf0[47], cos_bit);
    bf1[49] = half_btf(cospi[35], bf0[49], -cospi[29], bf0[46], cos_bit);
    bf1[50] = half_btf(cospi[19], bf0[50], -cospi[45], bf0[45], cos_bit);
    bf1[51] = half_btf(cospi[51], bf0[51], -cospi[13], bf0[44], cos_bit);
    bf1[52] = half_btf(cospi[11], bf0[52], -cospi[53], bf0[43], cos_bit);
    bf1[53] = half_btf(cospi[43], bf0[53], -cospi[21], bf0[42], cos_bit);
    bf1[54] = half_btf(cospi[27], bf0[54], -cospi[37], bf0[41], cos_bit);
    bf1[55] = half_btf(cospi[59], bf0[55], -cospi[5], bf0[40], cos_bit);
    bf1[56] = half_btf(cospi[7], bf0[56], -cospi[57], bf0[39], cos_bit);
    bf1[57] = half_btf(cospi[39], bf0[57], -cospi[25], bf0[38], cos_bit);
    bf1[58] = half_btf(cospi[23], bf0[58], -cospi[41], bf0[37], cos_bit);
    bf1[59] = half_btf(cospi[55], bf0[59], -cospi[9], bf0[36], cos_bit);
    bf1[60] = half_btf(cospi[15], bf0[60], -cospi[49], bf0[35], cos_bit);
    bf1[61] = half_btf(cospi[47], bf0[61], -cospi[17], bf0[34], cos_bit);
    bf1[62] = half_btf(cospi[31], bf0[62], -cospi[33], bf0[33], cos_bit);
    bf1[63] = half_btf(cospi[63], bf0[63], -cospi[1], bf0[32], cos_bit);
    // stage 11
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = bf0[32]; bf1[2] = bf0[16]; bf1[3] = bf0[48];
    bf1[4] = bf0[8]; bf1[5] = bf0[40]; bf1[6] = bf0[24]; bf1[7] = bf0[56];
    bf1[8] = bf0[4]; bf1[9] = bf0[36]; bf1[10] = bf0[20]; bf1[11] = bf0[52];
    bf1[12] = bf0[12]; bf1[13] = bf0[44]; bf1[14] = bf0[28]; bf1[15] = bf0[60];
    bf1[16] = bf0[2]; bf1[17] = bf0[34]; bf1[18] = bf0[18]; bf1[19] = bf0[50];
    bf1[20] = bf0[10]; bf1[21] = bf0[42]; bf1[22] = bf0[26]; bf1[23] = bf0[58];
    bf1[24] = bf0[6]; bf1[25] = bf0[38]; bf1[26] = bf0[22]; bf1[27] = bf0[54];
    bf1[28] = bf0[14]; bf1[29] = bf0[46]; bf1[30] = bf0[30]; bf1[31] = bf0[62];
    bf1[32] = bf0[1]; bf1[33] = bf0[33]; bf1[34] = bf0[17]; bf1[35] = bf0[49];
    bf1[36] = bf0[9]; bf1[37] = bf0[41]; bf1[38] = bf0[25]; bf1[39] = bf0[57];
    bf1[40] = bf0[5]; bf1[41] = bf0[37]; bf1[42] = bf0[21]; bf1[43] = bf0[53];
    bf1[44] = bf0[13]; bf1[45] = bf0[45]; bf1[46] = bf0[29]; bf1[47] = bf0[61];
    bf1[48] = bf0[3]; bf1[49] = bf0[35]; bf1[50] = bf0[19]; bf1[51] = bf0[51];
    bf1[52] = bf0[11]; bf1[53] = bf0[43]; bf1[54] = bf0[27]; bf1[55] = bf0[59];
    bf1[56] = bf0[7]; bf1[57] = bf0[39]; bf1[58] = bf0[23]; bf1[59] = bf0[55];
    bf1[60] = bf0[15]; bf1[61] = bf0[47]; bf1[62] = bf0[31]; bf1[63] = bf0[63];
}
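// Forward 4-point ADST, built on the fixed-point sine table from sinpi_arr()
// rather than on butterflies. The all-zero early-out is safe because the
// transform is linear, and the commented-out block preserves the
// range-checked reference version of the same computation.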
void svt_av1_fadst4_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    int32_t bit = cos_bit;
    const int32_t *sinpi = sinpi_arr(bit);
    int32_t x0, x1, x2, x3;
    int32_t s0, s1, s2, s3, s4, s5, s6, s7;
    // stage 0
    x0 = input[0];
    x1 = input[1];
    x2 = input[2];
    x3 = input[3];
    if (!(x0 | x1 | x2 | x3)) {
        output[0] = output[1] = output[2] = output[3] = 0;
        return;
    }
    //// stage 1
    //s0 = range_check_value(sinpi[1] * x0, bit + stage_range[1]);
    //s1 = range_check_value(sinpi[4] * x0, bit + stage_range[1]);
    //s2 = range_check_value(sinpi[2] * x1, bit + stage_range[1]);
    //s3 = range_check_value(sinpi[1] * x1, bit + stage_range[1]);
    //s4 = range_check_value(sinpi[3] * x2, bit + stage_range[1]);
    //s5 = range_check_value(sinpi[4] * x3, bit + stage_range[1]);
    //s6 = range_check_value(sinpi[2] * x3, bit + stage_range[1]);
    //s7 = range_check_value(x0 + x1, stage_range[1]);
    //// stage 2
    //s7 = range_check_value(s7 - x3, stage_range[2]);
    //// stage 3
    //x0 = range_check_value(s0 + s2, bit + stage_range[3]);
    //x1 = range_check_value(sinpi[3] * s7, bit + stage_range[3]);
    //x2 = range_check_value(s1 - s3, bit + stage_range[3]);
    //x3 = range_check_value(s4, bit + stage_range[3]);
    //// stage 4
    //x0 = range_check_value(x0 + s5, bit + stage_range[4]);
    //x2 = range_check_value(x2 + s6, bit + stage_range[4]);
    //// stage 5
    //s0 = range_check_value(x0 + x3, bit + stage_range[5]);
    //s1 = range_check_value(x1, bit + stage_range[5]);
    //s2 = range_check_value(x2 - x3, bit + stage_range[5]);
    //s3 = range_check_value(x2 - x0, bit + stage_range[5]);
    //// stage 6
    //s3 = range_check_value(s3 + x3, bit + stage_range[6]);
    // stage 1
    s0 = sinpi[1] * x0;
    s1 = sinpi[4] * x0;
    s2 = sinpi[2] * x1;
    s3 = sinpi[1] * x1;
    s4 = sinpi[3] * x2;
    s5 = sinpi[4] * x3;
    s6 = sinpi[2] * x3;
    s7 = x0 + x1;
    // stage 2
    s7 = s7 - x3;
    // stage 3
    x0 = s0 + s2;
    x1 = sinpi[3] * s7;
    x2 = s1 - s3;
    x3 = s4;
    // stage 4
    x0 = x0 + s5;
    x2 = x2 + s6;
    // stage 5
    s0 = x0 + x3;
    s1 = x1;
    s2 = x2 - x3;
    s3 = x2 - x0;
    // stage 6
    s3 = s3 + x3;
    // 1-D transform scaling factor is sqrt(2).
    output[0] = round_shift(s0, bit);
    output[1] = round_shift(s1, bit);
    output[2] = round_shift(s2, bit);
    output[3] = round_shift(s3, bit);
}

// Forward 8-point ADST: a sign-flipping input permutation followed by
// alternating rotation and add/sub stages. The assert documents that this
// kernel does not support in-place operation.
void svt_av1_fadst8_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    const int32_t *cospi;
    int32_t *bf0, *bf1;
    int32_t step[8];
    // stage 0
    // stage 1
    assert(output != input);
    bf1 = output;
    bf1[0] = input[0]; bf1[1] = -input[7]; bf1[2] = -input[3]; bf1[3] = input[4];
    bf1[4] = -input[1]; bf1[5] = input[6]; bf1[6] = input[2]; bf1[7] = -input[5];
    // stage 2
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1];
    bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit);
    bf1[4] = bf0[4]; bf1[5] = bf0[5];
    bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit);
    // stage 3
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3];
    bf1[2] = bf0[0] - bf0[2]; bf1[3] = bf0[1] - bf0[3];
    bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7];
    bf1[6] = bf0[4] - bf0[6]; bf1[7] = bf0[5] - bf0[7];
    // stage 4
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit);
    bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit);
    bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit);
    // stage 5
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5];
    bf1[2] = bf0[2] + bf0[6]; bf1[3] = bf0[3] + bf0[7];
    bf1[4] = bf0[0] - bf0[4]; bf1[5] = bf0[1] - bf0[5];
    bf1[6] = bf0[2] - bf0[6]; bf1[7] = bf0[3] - bf0[7];
    // stage 6
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = half_btf(cospi[4], bf0[0], cospi[60], bf0[1], cos_bit);
    bf1[1] = half_btf(cospi[60], bf0[0], -cospi[4], bf0[1], cos_bit);
    bf1[2] = half_btf(cospi[20], bf0[2], cospi[44], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[44], bf0[2], -cospi[20], bf0[3], cos_bit);
    bf1[4] = half_btf(cospi[36], bf0[4], cospi[28], bf0[5], cos_bit);
    bf1[5] = half_btf(cospi[28], bf0[4], -cospi[36], bf0[5], cos_bit);
    bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[12], bf0[6], -cospi[52], bf0[7], cos_bit);
    // stage 7
    bf0 = step; bf1 = output;
    bf1[0] = bf0[1]; bf1[1] = bf0[6]; bf1[2] = bf0[3]; bf1[3] = bf0[4];
    bf1[4] = bf0[5]; bf1[5] = bf0[2]; bf1[6] = bf0[7]; bf1[7] = bf0[0];
}
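// Forward 16-point ADST: the same structure as the 8-point ADST scaled up,
// with a sign-flipping input permutation in stage 1 and the odd-angle
// rotations in stage 8.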
void svt_av1_fadst16_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    const int32_t *cospi;
    int32_t *bf0, *bf1;
    int32_t step[16];
    // stage 0
    // stage 1
    assert(output != input);
    bf1 = output;
    bf1[0] = input[0]; bf1[1] = -input[15]; bf1[2] = -input[7]; bf1[3] = input[8];
    bf1[4] = -input[3]; bf1[5] = input[12]; bf1[6] = input[4]; bf1[7] = -input[11];
    bf1[8] = -input[1]; bf1[9] = input[14]; bf1[10] = input[6]; bf1[11] = -input[9];
    bf1[12] = input[2]; bf1[13] = -input[13]; bf1[14] = -input[5]; bf1[15] = input[10];
    // stage 2
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1];
    bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit);
    bf1[4] = bf0[4]; bf1[5] = bf0[5];
    bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit);
    bf1[8] = bf0[8]; bf1[9] = bf0[9];
    bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit);
    bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit);
    bf1[12] = bf0[12]; bf1[13] = bf0[13];
    bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit);
    bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit);
    // stage 3
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3];
    bf1[2] = bf0[0] - bf0[2]; bf1[3] = bf0[1] - bf0[3];
    bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7];
    bf1[6] = bf0[4] - bf0[6]; bf1[7] = bf0[5] - bf0[7];
    bf1[8] = bf0[8] + bf0[10]; bf1[9] = bf0[9] + bf0[11];
    bf1[10] = bf0[8] - bf0[10]; bf1[11] = bf0[9] - bf0[11];
    bf1[12] = bf0[12] + bf0[14]; bf1[13] = bf0[13] + bf0[15];
    bf1[14] = bf0[12] - bf0[14]; bf1[15] = bf0[13] - bf0[15];
    // stage 4
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit);
    bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit);
    bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit);
    bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11];
    bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit);
    bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit);
    bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit);
    bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit);
    // stage 5
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5];
    bf1[2] = bf0[2] + bf0[6]; bf1[3] = bf0[3] + bf0[7];
    bf1[4] = bf0[0] - bf0[4]; bf1[5] = bf0[1] - bf0[5];
    bf1[6] = bf0[2] - bf0[6]; bf1[7] = bf0[3] - bf0[7];
    bf1[8] = bf0[8] + bf0[12]; bf1[9] = bf0[9] + bf0[13];
    bf1[10] = bf0[10] + bf0[14]; bf1[11] = bf0[11] + bf0[15];
    bf1[12] = bf0[8] - bf0[12]; bf1[13] = bf0[9] - bf0[13];
    bf1[14] = bf0[10] - bf0[14]; bf1[15] = bf0[11] - bf0[15];
    // stage 6
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit);
    bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit);
    bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit);
    bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit);
    bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit);
    bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit);
    bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit);
    bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit);
    // stage 7
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[8]; bf1[1] = bf0[1] + bf0[9];
    bf1[2] = bf0[2] + bf0[10]; bf1[3] = bf0[3] + bf0[11];
    bf1[4] = bf0[4] + bf0[12]; bf1[5] = bf0[5] + bf0[13];
    bf1[6] = bf0[6] + bf0[14]; bf1[7] = bf0[7] + bf0[15];
    bf1[8] = bf0[0] - bf0[8]; bf1[9] = bf0[1] - bf0[9];
    bf1[10] = bf0[2] - bf0[10]; bf1[11] = bf0[3] - bf0[11];
    bf1[12] = bf0[4] - bf0[12]; bf1[13] = bf0[5] - bf0[13];
    bf1[14] = bf0[6] - bf0[14]; bf1[15] = bf0[7] - bf0[15];
    // stage 8
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = half_btf(cospi[2], bf0[0], cospi[62], bf0[1], cos_bit);
    bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit);
    bf1[2] = half_btf(cospi[10], bf0[2], cospi[54], bf0[3], cos_bit);
    bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit);
    bf1[4] = half_btf(cospi[18], bf0[4], cospi[46], bf0[5], cos_bit);
    bf1[5] = half_btf(cospi[46], bf0[4], -cospi[18], bf0[5], cos_bit);
    bf1[6] = half_btf(cospi[26], bf0[6], cospi[38], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[38], bf0[6], -cospi[26], bf0[7], cos_bit);
    bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit);
    bf1[9] = half_btf(cospi[30], bf0[8], -cospi[34], bf0[9], cos_bit);
    bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit);
    bf1[11] = half_btf(cospi[22], bf0[10], -cospi[42], bf0[11], cos_bit);
    bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit);
    bf1[13] = half_btf(cospi[14], bf0[12], -cospi[50], bf0[13], cos_bit);
    bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit);
    bf1[15] = half_btf(cospi[6], bf0[14], -cospi[58], bf0[15], cos_bit);
    // stage 9
    bf0 = step; bf1 = output;
    bf1[0] = bf0[1]; bf1[1] = bf0[14]; bf1[2] = bf0[3]; bf1[3] = bf0[12];
    bf1[4] = bf0[5]; bf1[5] = bf0[10]; bf1[6] = bf0[7]; bf1[7] = bf0[8];
    bf1[8] = bf0[9]; bf1[9] = bf0[6]; bf1[10] = bf0[11]; bf1[11] = bf0[4];
    bf1[12] = bf0[13]; bf1[13] = bf0[2]; bf1[14] = bf0[15]; bf1[15] = bf0[0];
}
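// Forward 32-point ADST: stage 2 applies the odd-angle rotations over the
// full cosine table, and the later stages mirror the DCT-style add/sub and
// rotation pattern before the final sign-flipping output permutation.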
void av1_fadst32_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) {
    (void)stage_range;
    const int32_t *cospi;
    int32_t *bf0, *bf1;
    int32_t step[32];
    // stage 0
    // stage 1
    bf1 = output;
    bf1[0] = input[31]; bf1[1] = input[0]; bf1[2] = input[29]; bf1[3] = input[2];
    bf1[4] = input[27]; bf1[5] = input[4]; bf1[6] = input[25]; bf1[7] = input[6];
    bf1[8] = input[23]; bf1[9] = input[8]; bf1[10] = input[21]; bf1[11] = input[10];
    bf1[12] = input[19]; bf1[13] = input[12]; bf1[14] = input[17]; bf1[15] = input[14];
    bf1[16] = input[15]; bf1[17] = input[16]; bf1[18] = input[13]; bf1[19] = input[18];
    bf1[20] = input[11]; bf1[21] = input[20]; bf1[22] = input[9]; bf1[23] = input[22];
    bf1[24] = input[7]; bf1[25] = input[24]; bf1[26] = input[5]; bf1[27] = input[26];
    bf1[28] = input[3]; bf1[29] = input[28]; bf1[30] = input[1]; bf1[31] = input[30];
    // stage 2
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = half_btf(cospi[1], bf0[0], cospi[63], bf0[1], cos_bit);
    bf1[1] = half_btf(-cospi[1], bf0[1], cospi[63], bf0[0], cos_bit);
    bf1[2] = half_btf(cospi[5], bf0[2], cospi[59], bf0[3], cos_bit);
    bf1[3] = half_btf(-cospi[5], bf0[3], cospi[59], bf0[2], cos_bit);
    bf1[4] = half_btf(cospi[9], bf0[4], cospi[55], bf0[5], cos_bit);
    bf1[5] = half_btf(-cospi[9], bf0[5], cospi[55], bf0[4], cos_bit);
    bf1[6] = half_btf(cospi[13], bf0[6], cospi[51], bf0[7], cos_bit);
    bf1[7] = half_btf(-cospi[13], bf0[7], cospi[51], bf0[6], cos_bit);
    bf1[8] = half_btf(cospi[17], bf0[8], cospi[47], bf0[9], cos_bit);
    bf1[9] = half_btf(-cospi[17], bf0[9], cospi[47], bf0[8], cos_bit);
    bf1[10] = half_btf(cospi[21], bf0[10], cospi[43], bf0[11], cos_bit);
    bf1[11] = half_btf(-cospi[21], bf0[11], cospi[43], bf0[10], cos_bit);
    bf1[12] = half_btf(cospi[25], bf0[12], cospi[39], bf0[13], cos_bit);
    bf1[13] = half_btf(-cospi[25], bf0[13], cospi[39], bf0[12], cos_bit);
    bf1[14] = half_btf(cospi[29], bf0[14], cospi[35], bf0[15], cos_bit);
    bf1[15] = half_btf(-cospi[29], bf0[15], cospi[35], bf0[14], cos_bit);
    bf1[16] = half_btf(cospi[33], bf0[16], cospi[31], bf0[17], cos_bit);
    bf1[17] = half_btf(-cospi[33], bf0[17], cospi[31], bf0[16], cos_bit);
    bf1[18] = half_btf(cospi[37], bf0[18], cospi[27], bf0[19], cos_bit);
    bf1[19] = half_btf(-cospi[37], bf0[19], cospi[27], bf0[18], cos_bit);
    bf1[20] = half_btf(cospi[41], bf0[20], cospi[23], bf0[21], cos_bit);
    bf1[21] = half_btf(-cospi[41], bf0[21], cospi[23], bf0[20], cos_bit);
    bf1[22] = half_btf(cospi[45], bf0[22], cospi[19], bf0[23], cos_bit);
    bf1[23] = half_btf(-cospi[45], bf0[23], cospi[19], bf0[22], cos_bit);
    bf1[24] = half_btf(cospi[49], bf0[24], cospi[15], bf0[25], cos_bit);
    bf1[25] = half_btf(-cospi[49], bf0[25], cospi[15], bf0[24], cos_bit);
    bf1[26] = half_btf(cospi[53], bf0[26], cospi[11], bf0[27], cos_bit);
    bf1[27] = half_btf(-cospi[53], bf0[27], cospi[11], bf0[26], cos_bit);
    bf1[28] = half_btf(cospi[57], bf0[28], cospi[7], bf0[29], cos_bit);
    bf1[29] = half_btf(-cospi[57], bf0[29], cospi[7], bf0[28], cos_bit);
    bf1[30] = half_btf(cospi[61], bf0[30], cospi[3], bf0[31], cos_bit);
    bf1[31] = half_btf(-cospi[61], bf0[31], cospi[3], bf0[30], cos_bit);
    // stage 3
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[16]; bf1[1] = bf0[1] + bf0[17];
    bf1[2] = bf0[2] + bf0[18]; bf1[3] = bf0[3] + bf0[19];
    bf1[4] = bf0[4] + bf0[20]; bf1[5] = bf0[5] + bf0[21];
    bf1[6] = bf0[6] + bf0[22]; bf1[7] = bf0[7] + bf0[23];
    bf1[8] = bf0[8] + bf0[24]; bf1[9] = bf0[9] + bf0[25];
    bf1[10] = bf0[10] + bf0[26]; bf1[11] = bf0[11] + bf0[27];
    bf1[12] = bf0[12] + bf0[28]; bf1[13] = bf0[13] + bf0[29];
    bf1[14] = bf0[14] + bf0[30]; bf1[15] = bf0[15] + bf0[31];
    bf1[16] = -bf0[16] + bf0[0]; bf1[17] = -bf0[17] + bf0[1];
    bf1[18] = -bf0[18] + bf0[2]; bf1[19] = -bf0[19] + bf0[3];
    bf1[20] = -bf0[20] + bf0[4]; bf1[21] = -bf0[21] + bf0[5];
    bf1[22] = -bf0[22] + bf0[6]; bf1[23] = -bf0[23] + bf0[7];
    bf1[24] = -bf0[24] + bf0[8]; bf1[25] = -bf0[25] + bf0[9];
    bf1[26] = -bf0[26] + bf0[10]; bf1[27] = -bf0[27] + bf0[11];
    bf1[28] = -bf0[28] + bf0[12]; bf1[29] = -bf0[29] + bf0[13];
    bf1[30] = -bf0[30] + bf0[14]; bf1[31] = -bf0[31] + bf0[15];
    // stage 4
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11];
    bf1[12] = bf0[12]; bf1[13] = bf0[13]; bf1[14] = bf0[14]; bf1[15] = bf0[15];
    bf1[16] = half_btf(cospi[4], bf0[16], cospi[60], bf0[17], cos_bit);
    bf1[17] = half_btf(-cospi[4], bf0[17], cospi[60], bf0[16], cos_bit);
    bf1[18] = half_btf(cospi[20], bf0[18], cospi[44], bf0[19], cos_bit);
    bf1[19] = half_btf(-cospi[20], bf0[19], cospi[44], bf0[18], cos_bit);
    bf1[20] = half_btf(cospi[36], bf0[20], cospi[28], bf0[21], cos_bit);
    bf1[21] = half_btf(-cospi[36], bf0[21], cospi[28], bf0[20], cos_bit);
    bf1[22] = half_btf(cospi[52], bf0[22], cospi[12], bf0[23], cos_bit);
    bf1[23] = half_btf(-cospi[52], bf0[23], cospi[12], bf0[22], cos_bit);
    bf1[24] = half_btf(-cospi[60], bf0[24], cospi[4], bf0[25], cos_bit);
    bf1[25] = half_btf(cospi[60], bf0[25], cospi[4], bf0[24], cos_bit);
    bf1[26] = half_btf(-cospi[44], bf0[26], cospi[20], bf0[27], cos_bit);
    bf1[27] = half_btf(cospi[44], bf0[27], cospi[20], bf0[26], cos_bit);
    bf1[28] = half_btf(-cospi[28], bf0[28], cospi[36], bf0[29], cos_bit);
    bf1[29] = half_btf(cospi[28], bf0[29], cospi[36], bf0[28], cos_bit);
    bf1[30] = half_btf(-cospi[12], bf0[30], cospi[52], bf0[31], cos_bit);
    bf1[31] = half_btf(cospi[12], bf0[31], cospi[52], bf0[30], cos_bit);
    // stage 5
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[8]; bf1[1] = bf0[1] + bf0[9];
    bf1[2] = bf0[2] + bf0[10]; bf1[3] = bf0[3] + bf0[11];
    bf1[4] = bf0[4] + bf0[12]; bf1[5] = bf0[5] + bf0[13];
    bf1[6] = bf0[6] + bf0[14]; bf1[7] = bf0[7] + bf0[15];
    bf1[8] = -bf0[8] + bf0[0]; bf1[9] = -bf0[9] + bf0[1];
    bf1[10] = -bf0[10] + bf0[2]; bf1[11] = -bf0[11] + bf0[3];
    bf1[12] = -bf0[12] + bf0[4]; bf1[13] = -bf0[13] + bf0[5];
    bf1[14] = -bf0[14] + bf0[6]; bf1[15] = -bf0[15] + bf0[7];
    bf1[16] = bf0[16] + bf0[24]; bf1[17] = bf0[17] + bf0[25];
    bf1[18] = bf0[18] + bf0[26]; bf1[19] = bf0[19] + bf0[27];
    bf1[20] = bf0[20] + bf0[28]; bf1[21] = bf0[21] + bf0[29];
    bf1[22] = bf0[22] + bf0[30]; bf1[23] = bf0[23] + bf0[31];
    bf1[24] = -bf0[24] + bf0[16]; bf1[25] = -bf0[25] + bf0[17];
    bf1[26] = -bf0[26] + bf0[18]; bf1[27] = -bf0[27] + bf0[19];
    bf1[28] = -bf0[28] + bf0[20]; bf1[29] = -bf0[29] + bf0[21];
    bf1[30] = -bf0[30] + bf0[22]; bf1[31] = -bf0[31] + bf0[23];
    // stage 6
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7];
    bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit);
    bf1[9] = half_btf(-cospi[8], bf0[9], cospi[56], bf0[8], cos_bit);
    bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit);
    bf1[11] = half_btf(-cospi[40], bf0[11], cospi[24], bf0[10], cos_bit);
    bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit);
    bf1[13] = half_btf(cospi[56], bf0[13], cospi[8], bf0[12], cos_bit);
    bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit);
    bf1[15] = half_btf(cospi[24], bf0[15], cospi[40], bf0[14], cos_bit);
    bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19];
    bf1[20] = bf0[20]; bf1[21] = bf0[21]; bf1[22] = bf0[22]; bf1[23] = bf0[23];
    bf1[24] = half_btf(cospi[8], bf0[24], cospi[56], bf0[25], cos_bit);
    bf1[25] = half_btf(-cospi[8], bf0[25], cospi[56], bf0[24], cos_bit);
    bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[27], cos_bit);
    bf1[27] = half_btf(-cospi[40], bf0[27], cospi[24], bf0[26], cos_bit);
    bf1[28] = half_btf(-cospi[56], bf0[28], cospi[8], bf0[29], cos_bit);
    bf1[29] = half_btf(cospi[56], bf0[29], cospi[8], bf0[28], cos_bit);
    bf1[30] = half_btf(-cospi[24], bf0[30], cospi[40], bf0[31], cos_bit);
    bf1[31] = half_btf(cospi[24], bf0[31], cospi[40], bf0[30], cos_bit);
    // stage 7
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5];
    bf1[2] = bf0[2] + bf0[6]; bf1[3] = bf0[3] + bf0[7];
    bf1[4] = -bf0[4] + bf0[0]; bf1[5] = -bf0[5] + bf0[1];
    bf1[6] = -bf0[6] + bf0[2]; bf1[7] = -bf0[7] + bf0[3];
    bf1[8] = bf0[8] + bf0[12]; bf1[9] = bf0[9] + bf0[13];
    bf1[10] = bf0[10] + bf0[14]; bf1[11] = bf0[11] + bf0[15];
    bf1[12] = -bf0[12] + bf0[8]; bf1[13] = -bf0[13] + bf0[9];
    bf1[14] = -bf0[14] + bf0[10]; bf1[15] = -bf0[15] + bf0[11];
    bf1[16] = bf0[16] + bf0[20]; bf1[17] = bf0[17] + bf0[21];
    bf1[18] = bf0[18] + bf0[22]; bf1[19] = bf0[19] + bf0[23];
    bf1[20] = -bf0[20] + bf0[16]; bf1[21] = -bf0[21] + bf0[17];
    bf1[22] = -bf0[22] + bf0[18]; bf1[23] = -bf0[23] + bf0[19];
    bf1[24] = bf0[24] + bf0[28]; bf1[25] = bf0[25] + bf0[29];
    bf1[26] = bf0[26] + bf0[30]; bf1[27] = bf0[27] + bf0[31];
    bf1[28] = -bf0[28] + bf0[24]; bf1[29] = -bf0[29] + bf0[25];
    bf1[30] = -bf0[30] + bf0[26]; bf1[31] = -bf0[31] + bf0[27];
    // stage 8
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3];
    bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit);
    bf1[5] = half_btf(-cospi[16], bf0[5], cospi[48], bf0[4], cos_bit);
    bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit);
    bf1[7] = half_btf(cospi[48], bf0[7], cospi[16], bf0[6], cos_bit);
    bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11];
    bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit);
    bf1[13] = half_btf(-cospi[16], bf0[13], cospi[48], bf0[12], cos_bit);
    bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit);
    bf1[15] = half_btf(cospi[48], bf0[15], cospi[16], bf0[14], cos_bit);
    bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19];
    bf1[20] = half_btf(cospi[16], bf0[20], cospi[48], bf0[21], cos_bit);
    bf1[21] = half_btf(-cospi[16], bf0[21], cospi[48], bf0[20], cos_bit);
    bf1[22] = half_btf(-cospi[48], bf0[22], cospi[16], bf0[23], cos_bit);
    bf1[23] = half_btf(cospi[48], bf0[23], cospi[16], bf0[22], cos_bit);
    bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = bf0[26]; bf1[27] = bf0[27];
    bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[29], cos_bit);
    bf1[29] = half_btf(-cospi[16], bf0[29], cospi[48], bf0[28], cos_bit);
    bf1[30] = half_btf(-cospi[48], bf0[30], cospi[16], bf0[31], cos_bit);
    bf1[31] = half_btf(cospi[48], bf0[31], cospi[16], bf0[30], cos_bit);
    // stage 9
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3];
    bf1[2] = -bf0[2] + bf0[0]; bf1[3] = -bf0[3] + bf0[1];
    bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7];
    bf1[6] = -bf0[6] + bf0[4]; bf1[7] = -bf0[7] + bf0[5];
    bf1[8] = bf0[8] + bf0[10]; bf1[9] = bf0[9] + bf0[11];
    bf1[10] = -bf0[10] + bf0[8]; bf1[11] = -bf0[11] + bf0[9];
    bf1[12] = bf0[12] + bf0[14]; bf1[13] = bf0[13] + bf0[15];
    bf1[14] = -bf0[14] + bf0[12]; bf1[15] = -bf0[15] + bf0[13];
    bf1[16] = bf0[16] + bf0[18]; bf1[17] = bf0[17] + bf0[19];
    bf1[18] = -bf0[18] + bf0[16]; bf1[19] = -bf0[19] + bf0[17];
    bf1[20] = bf0[20] + bf0[22]; bf1[21] = bf0[21] + bf0[23];
    bf1[22] = -bf0[22] + bf0[20]; bf1[23] = -bf0[23] + bf0[21];
    bf1[24] = bf0[24] + bf0[26]; bf1[25] = bf0[25] + bf0[27];
    bf1[26] = -bf0[26] + bf0[24]; bf1[27] = -bf0[27] + bf0[25];
    bf1[28] = bf0[28] + bf0[30]; bf1[29] = bf0[29] + bf0[31];
    bf1[30] = -bf0[30] + bf0[28]; bf1[31] = -bf0[31] + bf0[29];
    // stage 10
    cospi = cospi_arr(cos_bit);
    bf0 = output; bf1 = step;
    bf1[0] = bf0[0]; bf1[1] = bf0[1];
    bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit);
    bf1[3] = half_btf(-cospi[32], bf0[3], cospi[32], bf0[2], cos_bit);
    bf1[4] = bf0[4]; bf1[5] = bf0[5];
    bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit);
    bf1[7] = half_btf(-cospi[32], bf0[7], cospi[32], bf0[6], cos_bit);
    bf1[8] = bf0[8]; bf1[9] = bf0[9];
    bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit);
    bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[10], cos_bit);
    bf1[12] = bf0[12]; bf1[13] = bf0[13];
    bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit);
    bf1[15] = half_btf(-cospi[32], bf0[15], cospi[32], bf0[14], cos_bit);
    bf1[16] = bf0[16]; bf1[17] = bf0[17];
    bf1[18] = half_btf(cospi[32], bf0[18], cospi[32], bf0[19], cos_bit);
    bf1[19] = half_btf(-cospi[32], bf0[19], cospi[32], bf0[18], cos_bit);
    bf1[20] = bf0[20]; bf1[21] = bf0[21];
    bf1[22] = half_btf(cospi[32], bf0[22], cospi[32], bf0[23], cos_bit);
    bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[22], cos_bit);
    bf1[24] = bf0[24]; bf1[25] = bf0[25];
    bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[27], cos_bit);
    bf1[27] = half_btf(-cospi[32], bf0[27], cospi[32], bf0[26], cos_bit);
    bf1[28] = bf0[28]; bf1[29] = bf0[29];
    bf1[30] = half_btf(cospi[32], bf0[30], cospi[32], bf0[31], cos_bit);
    bf1[31] = half_btf(-cospi[32], bf0[31], cospi[32], bf0[30], cos_bit);
    // stage 11
    bf0 = step; bf1 = output;
    bf1[0] = bf0[0]; bf1[1] = -bf0[16]; bf1[2] = bf0[24]; bf1[3] = -bf0[8];
    bf1[4] = bf0[12]; bf1[5] = -bf0[28]; bf1[6] = bf0[20]; bf1[7] = -bf0[4];
    bf1[8] = bf0[6]; bf1[9] = -bf0[22]; bf1[10] = bf0[30];
bf1[11] = -bf0[14]; bf1[12] = bf0[10]; bf1[13] = -bf0[26]; bf1[14] = bf0[18]; bf1[15] = -bf0[2]; bf1[16] = bf0[3]; bf1[17] = -bf0[19]; bf1[18] = bf0[27]; bf1[19] = -bf0[11]; bf1[20] = bf0[15]; bf1[21] = -bf0[31]; bf1[22] = bf0[23]; bf1[23] = -bf0[7]; bf1[24] = bf0[5]; bf1[25] = -bf0[21]; bf1[26] = bf0[29]; bf1[27] = -bf0[13]; bf1[28] = bf0[9]; bf1[29] = -bf0[25]; bf1[30] = bf0[17]; bf1[31] = -bf0[1]; } void svt_av1_fidentity4_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 4; ++i) output[i] = round_shift((int64_t)input[i] * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } void svt_av1_fidentity8_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 8; ++i) output[i] = input[i] * 2; } void svt_av1_fidentity16_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 16; ++i) output[i] = round_shift((int64_t)input[i] * 2 * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } void svt_av1_fidentity32_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 32; ++i) output[i] = input[i] * 4; } void av1_fidentity64_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 64; ++i) output[i] = round_shift((int64_t)input[i] * 4 * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } static INLINE TxfmFunc fwd_txfm_type_to_func(TxfmType txfmtype) { switch (txfmtype) { case TXFM_TYPE_DCT4: return svt_av1_fdct4_new; case TXFM_TYPE_DCT8: return svt_av1_fdct8_new; case TXFM_TYPE_DCT16: return svt_av1_fdct16_new; case TXFM_TYPE_DCT32: return svt_av1_fdct32_new; case TXFM_TYPE_DCT64: return svt_av1_fdct64_new; case TXFM_TYPE_ADST4: return svt_av1_fadst4_new; case TXFM_TYPE_ADST8: return svt_av1_fadst8_new; case TXFM_TYPE_ADST16: return svt_av1_fadst16_new; case TXFM_TYPE_ADST32: return av1_fadst32_new; case TXFM_TYPE_IDENTITY4: return svt_av1_fidentity4_c; case TXFM_TYPE_IDENTITY8: return svt_av1_fidentity8_c; case TXFM_TYPE_IDENTITY16: return svt_av1_fidentity16_c; case TXFM_TYPE_IDENTITY32: return svt_av1_fidentity32_c; case TXFM_TYPE_IDENTITY64: return av1_fidentity64_c; default: assert(0); return NULL; } } //fwd_txfm2d_c static INLINE void av1_tranform_two_d_core_c(int16_t *input, uint32_t input_stride, int32_t *output, const Txfm2dFlipCfg *cfg, int32_t *buf, uint8_t bit_depth) { int32_t c, r; // Note when assigning txfm_size_col, we use the txfm_size from the // row configuration and vice versa. This is intentionally done to // accurately perform rectangular transforms. When the transform is // rectangular, the number of columns will be the same as the // txfm_size stored in the row cfg struct. It will make no difference // for square transforms. const int32_t txfm_size_col = tx_size_wide[cfg->tx_size]; const int32_t txfm_size_row = tx_size_high[cfg->tx_size]; // Take the shift from the larger dimension in the rectangular case. 
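    // Worked example (for illustration): for TX_16X8, tx_size_wide[] gives
    // txfm_size_col = 16 and tx_size_high[] gives txfm_size_row = 8, so the
    // column pass below runs 16 transforms of length 8 and the row pass runs
    // 8 transforms of length 16.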
const int8_t *shift = cfg->shift; const int32_t rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); int8_t stage_range_col[MAX_TXFM_STAGE_NUM]; int8_t stage_range_row[MAX_TXFM_STAGE_NUM]; assert(cfg->stage_num_col <= MAX_TXFM_STAGE_NUM); assert(cfg->stage_num_row <= MAX_TXFM_STAGE_NUM); svt_av1_gen_fwd_stage_range(stage_range_col, stage_range_row, cfg, bit_depth); const int8_t cos_bit_col = cfg->cos_bit_col; const int8_t cos_bit_row = cfg->cos_bit_row; const TxfmFunc txfm_func_col = fwd_txfm_type_to_func(cfg->txfm_type_col); const TxfmFunc txfm_func_row = fwd_txfm_type_to_func(cfg->txfm_type_row); ASSERT(txfm_func_col != NULL); ASSERT(txfm_func_row != NULL); // use output buffer as temp buffer int32_t *temp_in = output; int32_t *temp_out = output + txfm_size_row; // Columns for (c = 0; c < txfm_size_col; ++c) { if (cfg->ud_flip == 0) for (r = 0; r < txfm_size_row; ++r) temp_in[r] = input[r * input_stride + c]; else { for (r = 0; r < txfm_size_row; ++r) // flip upside down temp_in[r] = input[(txfm_size_row - r - 1) * input_stride + c]; } svt_av1_round_shift_array_c( temp_in, txfm_size_row, -shift[0]); // NM svt_av1_round_shift_array_c txfm_func_col(temp_in, temp_out, cos_bit_col, stage_range_col); svt_av1_round_shift_array_c( temp_out, txfm_size_row, -shift[1]); // NM svt_av1_round_shift_array_c if (cfg->lr_flip == 0) { for (r = 0; r < txfm_size_row; ++r) buf[r * txfm_size_col + c] = temp_out[r]; } else { for (r = 0; r < txfm_size_row; ++r) // flip from left to right buf[r * txfm_size_col + (txfm_size_col - c - 1)] = temp_out[r]; } } // Rows for (r = 0; r < txfm_size_row; ++r) { txfm_func_row( buf + r * txfm_size_col, output + r * txfm_size_col, cos_bit_row, stage_range_row); svt_av1_round_shift_array_c(output + r * txfm_size_col, txfm_size_col, -shift[2]); if (abs(rect_type) == 1) { // Multiply everything by Sqrt2 if the transform is rectangular and the // size difference is a factor of 2. 
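        // new_sqrt2 represents sqrt(2) in fixed point (in the AV1 reference
        // tables, new_sqrt2 == 5793 with new_sqrt2_bits == 12, i.e.
        // round(2^12 * sqrt(2))), so round_shift(x * new_sqrt2, new_sqrt2_bits)
        // scales x by sqrt(2) with rounding and keeps rectangular transforms at
        // the same norm as square ones.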
for (c = 0; c < txfm_size_col; ++c) { output[r * txfm_size_col + c] = round_shift((int64_t)output[r * txfm_size_col + c] * new_sqrt2, new_sqrt2_bits); } } } } void av1_fdct32_pf_new(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[32]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[31]; bf1[1] = input[1] + input[30]; bf1[2] = input[2] + input[29]; bf1[3] = input[3] + input[28]; bf1[4] = input[4] + input[27]; bf1[5] = input[5] + input[26]; bf1[6] = input[6] + input[25]; bf1[7] = input[7] + input[24]; bf1[8] = input[8] + input[23]; bf1[9] = input[9] + input[22]; bf1[10] = input[10] + input[21]; bf1[11] = input[11] + input[20]; bf1[12] = input[12] + input[19]; bf1[13] = input[13] + input[18]; bf1[14] = input[14] + input[17]; bf1[15] = input[15] + input[16]; bf1[16] = -input[16] + input[15]; bf1[17] = -input[17] + input[14]; bf1[18] = -input[18] + input[13]; bf1[19] = -input[19] + input[12]; bf1[20] = -input[20] + input[11]; bf1[21] = -input[21] + input[10]; bf1[22] = -input[22] + input[9]; bf1[23] = -input[23] + input[8]; bf1[24] = -input[24] + input[7]; bf1[25] = -input[25] + input[6]; bf1[26] = -input[26] + input[5]; bf1[27] = -input[27] + input[4]; bf1[28] = -input[28] + input[3]; bf1[29] = -input[29] + input[2]; bf1[30] = -input[30] + input[1]; bf1[31] = -input[31] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14]; bf1[2] = bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12]; bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10]; bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8]; bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6]; bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4]; bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2]; bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19]; bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit); bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit); bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit); bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit); bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit); bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit); bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22]; bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20]; bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18]; bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16]; bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + 
bf0[30]; bf1[26] = -bf0[26] + bf0[29]; bf1[27] = -bf0[27] + bf0[28]; bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26]; bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit); bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit); bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit); bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit); bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit); bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit); bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit); bf1[30] = bf0[30]; bf1[31] = bf0[31]; // stage 5 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); //bf1[1] = half_btf(-cospi[32], bf0[1], cospi[32], bf0[0], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); //bf1[3] = half_btf(cospi[48], bf0[3], -cospi[16], bf0[2], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4]; bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18]; bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16]; bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22]; bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20]; bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26]; bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24]; bf1[28] = -bf0[28] + bf0[31]; bf1[29] = -bf0[29] + bf0[30]; bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; //bf1[1] = bf0[1]; bf1[2] = bf0[2]; //bf1[3] = bf0[3]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); //bf1[5] = half_btf(cospi[24], bf0[5], cospi[40], bf0[6], cos_bit); bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit); //bf1[7] = half_btf(cospi[56], bf0[7], -cospi[8], bf0[4], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8]; bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12]; bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14]; bf1[16] = bf0[16]; bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit); bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit); bf1[19] = bf0[19]; bf1[20] = 
bf0[20];
    bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit);
    bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit);
    bf1[23] = bf0[23];
    bf1[24] = bf0[24];
    bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit);
    bf1[27] = bf0[27];
    bf1[28] = bf0[28];
    bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit);
    bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit);
    bf1[31] = bf0[31];
    // stage 7
    cospi = cospi_arr(cos_bit);
    bf0 = step;
    bf1 = output;
    bf1[0] = bf0[0];
    //bf1[1] = bf0[1];
    bf1[2] = bf0[2];
    //bf1[3] = bf0[3];
    bf1[4] = bf0[4];
    //bf1[5] = bf0[5];
    bf1[6] = bf0[6];
    //bf1[7] = bf0[7];
    bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit);
    //bf1[9] = half_btf(cospi[28], bf0[9], cospi[36], bf0[14], cos_bit);
    bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit);
    //bf1[11] = half_btf(cospi[12], bf0[11], cospi[52], bf0[12], cos_bit);
    bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit);
    //bf1[13] = half_btf(cospi[44], bf0[13], -cospi[20], bf0[10], cos_bit);
    bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit);
    //bf1[15] = half_btf(cospi[60], bf0[15], -cospi[4], bf0[8], cos_bit);
    bf1[16] = bf0[16] + bf0[17]; bf1[17] = -bf0[17] + bf0[16]; bf1[18] = -bf0[18] + bf0[19]; bf1[19] = bf0[19] + bf0[18]; bf1[20] = bf0[20] + bf0[21]; bf1[21] = -bf0[21] + bf0[20]; bf1[22] = -bf0[22] + bf0[23]; bf1[23] = bf0[23] + bf0[22]; bf1[24] = bf0[24] + bf0[25]; bf1[25] = -bf0[25] + bf0[24]; bf1[26] = -bf0[26] + bf0[27]; bf1[27] = bf0[27] + bf0[26]; bf1[28] = bf0[28] + bf0[29]; bf1[29] = -bf0[29] + bf0[28]; bf1[30] = -bf0[30] + bf0[31]; bf1[31] = bf0[31] + bf0[30];
    // stage 8
    cospi = cospi_arr(cos_bit);
    bf0 = output;
    bf1 = step;
    bf1[0] = bf0[0];
    //bf1[1] = bf0[1];
    bf1[2] = bf0[2];
    //bf1[3] = bf0[3];
    bf1[4] = bf0[4];
    //bf1[5] = bf0[5];
    bf1[6] = bf0[6];
    //bf1[7] = bf0[7];
    bf1[8] = bf0[8];
    //bf1[9] = bf0[9];
    bf1[10] = bf0[10];
    //bf1[11] = bf0[11];
    bf1[12] = bf0[12];
    //bf1[13] = bf0[13];
    bf1[14] = bf0[14];
    //bf1[15] = bf0[15];
    bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit);
    //bf1[17] = half_btf(cospi[30], bf0[17], cospi[34], bf0[30], cos_bit);
    bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit);
    //bf1[19] = half_btf(cospi[14], bf0[19], cospi[50], bf0[28], cos_bit);
    bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit);
    //bf1[21] = half_btf(cospi[22], bf0[21], cospi[42], bf0[26], cos_bit);
    bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit);
    //bf1[23] = half_btf(cospi[6], bf0[23], cospi[58], bf0[24], cos_bit);
    bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit);
    //bf1[25] = half_btf(cospi[38], bf0[25], -cospi[26], bf0[22], cos_bit);
    bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit);
    //bf1[27] = half_btf(cospi[54], bf0[27], -cospi[10], bf0[20], cos_bit);
    bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit);
    //bf1[29] = half_btf(cospi[46], bf0[29], -cospi[18], bf0[18], cos_bit);
    bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit);
    //bf1[31] = half_btf(cospi[62], bf0[31], -cospi[2], bf0[16], cos_bit);
    // stage 9
    bf0 = step;
    bf1 = output;
    // Final bit-reversal reordering. The partial-frequency path computes only
    // the even step[] entries above, and those are exactly the ones that feed
    // the first 16 (low-frequency) outputs, so outputs 16..31 are not produced.
    bf1[0] = bf0[0]; bf1[1] = bf0[16]; bf1[2] = bf0[8]; bf1[3] = bf0[24]; bf1[4] = bf0[4]; bf1[5] = bf0[20]; bf1[6] = bf0[12]; bf1[7] = bf0[28]; bf1[8] = bf0[2]; bf1[9] = bf0[18]; bf1[10] = bf0[10]; bf1[11] = bf0[26]; bf1[12] = bf0[6]; bf1[13] = bf0[22]; bf1[14] = bf0[14]; bf1[15] = bf0[30];
}
// Derive per-stage bit ranges for the non-scaled forward transform path.
static INLINE void set_fwd_txfm_non_scale_range(Txfm2dFlipCfg *cfg) {
    av1_zero(cfg->stage_range_col);
    av1_zero(cfg->stage_range_row);
    const int8_t *range_mult2_col = fwd_txfm_range_mult2_list[cfg->txfm_type_col];
    if (cfg->txfm_type_col != TXFM_TYPE_INVALID) {
        int stage_num_col = cfg->stage_num_col;
        for (int i = 0; i < stage_num_col; ++i)
            cfg->stage_range_col[i] = (range_mult2_col[i] + 1) >> 1;
    }
    if (cfg->txfm_type_row != TXFM_TYPE_INVALID) {
        int stage_num_row = cfg->stage_num_row;
        const int8_t *range_mult2_row = fwd_txfm_range_mult2_list[cfg->txfm_type_row];
        for (int i = 0; i < stage_num_row; ++i) {
            cfg->stage_range_row[i] = (range_mult2_col[cfg->stage_num_col - 1] + range_mult2_row[i] + 1) >> 1;
        }
    }
}
// Build the 2-D transform configuration (flip flags, shifts, cos bits and
// 1-D kernel types) for the given TxType/TxSize pair.
void av1_transform_config(TxType tx_type, TxSize tx_size, Txfm2dFlipCfg *cfg) {
    assert(cfg != NULL);
    cfg->tx_size = tx_size;
    set_flip_cfg(tx_type, cfg);
    const TxType1D tx_type_1d_col = vtx_tab[tx_type];
    const TxType1D tx_type_1d_row = htx_tab[tx_type];
    const int32_t txw_idx = tx_size_wide_log2[tx_size] - tx_size_wide_log2[0];
    const int32_t txh_idx = tx_size_high_log2[tx_size] - tx_size_high_log2[0];
    cfg->shift = fwd_txfm_shift_ls[tx_size];
    cfg->cos_bit_col = fwd_cos_bit_col[txw_idx][txh_idx];
    cfg->cos_bit_row = fwd_cos_bit_row[txw_idx][txh_idx];
    cfg->txfm_type_col = av1_txfm_type_ls[txh_idx][tx_type_1d_col];
    cfg->txfm_type_row = av1_txfm_type_ls[txw_idx][tx_type_1d_row];
    cfg->stage_num_col = av1_txfm_stage_num_list[cfg->txfm_type_col];
    cfg->stage_num_row = av1_txfm_stage_num_list[cfg->txfm_type_row];
    set_fwd_txfm_non_scale_range(cfg);
}
// Sum of squared coefficients over an area_width x area_height block.
static uint64_t energy_computation(int32_t *coeff, uint32_t coeff_stride, uint32_t area_width, uint32_t area_height) {
    uint64_t prediction_distortion = 0;
    for (uint32_t row_index = 0; row_index < area_height; ++row_index) {
        for (uint32_t column_index = 0; column_index < area_width; ++column_index)
            prediction_distortion += (int64_t)SQR((int64_t)(coeff[column_index]));
        coeff += coeff_stride;
    }
    return prediction_distortion;
}
uint64_t svt_handle_transform64x64_c(int32_t *output) {
    uint64_t three_quad_energy;
    // top-right 32x32 area.
    three_quad_energy = energy_computation(output + 32, 64, 32, 32);
    // bottom 64x32 area.
    three_quad_energy += energy_computation(output + 32 * 64, 64, 64, 32);
    // zero out top-right 32x32 area.
    for (int32_t row = 0; row < 32; ++row)
        memset(output + row * 64 + 32, 0, 32 * sizeof(*output));
    // zero out the bottom 64x32 area.
    memset(output + 32 * 64, 0, 32 * 64 * sizeof(*output));
    // Re-pack non-zero coeffs in the first 32x32 indices.
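    // Coefficient layout of the 64x64 block at this point:
    //   +----------------+----------------+
    //   | kept 32x32     | top-right 32x32|  -> energy, then zeroed
    //   +----------------+----------------+
    //   |          bottom 64x32           |  -> energy, then zeroed
    //   +---------------------------------+
    // Only the top-left quadrant survives; the loop below compacts it from
    // stride 64 to stride 32. Row 0 is already in place, so the copy starts
    // at row 1.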
for (int32_t row = 1; row < 32; ++row) svt_memcpy_c(output + row * 32, output + row * 64, 32 * sizeof(*output)); return three_quad_energy; } void svt_av1_transform_two_d_64x64_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 64]; Txfm2dFlipCfg cfg; //av1_get_fwd_txfm_cfg av1_transform_config(transform_type, TX_64X64, &cfg); //fwd_txfm2d_c av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_transform_two_d_32x32_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X32, &cfg); av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_transform_two_d_16x16_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X16, &cfg); av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_transform_two_d_8x8_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X8, &cfg); av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_transform_two_d_4x4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_4X4, &cfg); av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } /********************************************************************* * Calculate CBF *********************************************************************/ void svt_av1_fwd_txfm2d_64x32_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 32]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_64X32, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } uint64_t svt_handle_transform64x32_c(int32_t *output) { // top - right 32x32 area. const uint64_t three_quad_energy = energy_computation(output + 32, 64, 32, 32); // zero out right 32x32 area. for (int32_t row = 0; row < 32; ++row) memset(output + row * 64 + 32, 0, 32 * sizeof(*output)); // Re-pack non-zero coeffs in the first 32x32 indices. for (int32_t row = 1; row < 32; ++row) svt_memcpy_c(output + row * 32, output + row * 64, 32 * sizeof(*output)); return three_quad_energy; } void svt_av1_fwd_txfm2d_32x64_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 64]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_32X64, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } uint64_t svt_handle_transform32x64_c(int32_t *output) { //bottom 32x32 area. 
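    // For 32x64 the kept top 32x32 block is already contiguous at stride 32,
    // so unlike the 64-wide cases there is no repacking step: measure and
    // zero the bottom half only.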
const uint64_t three_quad_energy = energy_computation(output + 32 * 32, 32, 32, 32); // zero out the bottom 32x32 area. memset(output + 32 * 32, 0, 32 * 32 * sizeof(*output)); return three_quad_energy; } void svt_av1_fwd_txfm2d_64x16_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 16]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_64X16, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } uint64_t svt_handle_transform64x16_c(int32_t *output) { // top - right 32x16 area. const uint64_t three_quad_energy = energy_computation(output + 32, 64, 32, 16); // zero out right 32x16 area. for (int32_t row = 0; row < 16; ++row) memset(output + row * 64 + 32, 0, 32 * sizeof(*output)); // Re-pack non-zero coeffs in the first 32x16 indices. for (int32_t row = 1; row < 16; ++row) svt_memcpy_c(output + row * 32, output + row * 64, 32 * sizeof(*output)); return three_quad_energy; } void svt_av1_fwd_txfm2d_16x64_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 64]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_16X64, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } uint64_t svt_handle_transform16x64_c(int32_t *output) { //bottom 16x32 area. const uint64_t three_quad_energy = energy_computation(output + 16 * 32, 16, 16, 32); // zero out the bottom 16x32 area. memset(output + 16 * 32, 0, 16 * 32 * sizeof(*output)); return three_quad_energy; } #if FEATURE_PARTIAL_FREQUENCY uint64_t handle_transform16x64_N2_N4_c(int32_t *output) { (void)output; return 0; } uint64_t handle_transform32x64_N2_N4_c(int32_t *output) { (void)output; return 0; } uint64_t handle_transform64x16_N2_N4_c(int32_t *output) { // Re-pack non-zero coeffs in the first 32x16 indices. for (int32_t row = 1; row < 16; ++row) svt_memcpy_c(output + row * 32, output + row * 64, 32 * sizeof(*output)); return 0; } uint64_t handle_transform64x32_N2_N4_c(int32_t *output) { // Re-pack non-zero coeffs in the first 32x32 indices. for (int32_t row = 1; row < 32; ++row) svt_memcpy_c(output + row * 32, output + row * 64, 32 * sizeof(*output)); return 0; } uint64_t handle_transform64x64_N2_N4_c(int32_t *output) { // Re-pack non-zero coeffs in the first 32x32 indices. 
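    // In the N2/N4 paths the discarded high-frequency regions were never
    // computed, so there is no energy to accumulate (the function returns 0);
    // the loop below only compacts the kept 32x32 block from stride 64 to
    // stride 32, with row 0 already in place.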
for (int32_t row = 1; row < 32; ++row) svt_memcpy_c(output + row * 32, output + row * 64, 32 * sizeof(*output)); return 0; } #endif /*FEATURE_PARTIAL_FREQUENCY*/ void svt_av1_fwd_txfm2d_32x16_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 16]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_32X16, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x32_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 32]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_16X32, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x8_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 8]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_16X8, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x16_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 16]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_8X16, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x8_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 8]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_32X8, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x32_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 32]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_8X32, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 4]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_16X4, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_4x16_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 16]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_4X16, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 4]; Txfm2dFlipCfg cfg; 
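    // Every svt_av1_fwd_txfm2d_WxH_c / svt_av1_transform_two_d_WxW_c wrapper
    // follows the same pattern: a W*H int32_t scratch buffer on the stack, an
    // av1_transform_config() call for the matching TxSize, and a dispatch
    // into the shared 2-D core.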
/*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_8X4, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_4x8_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 8]; Txfm2dFlipCfg cfg; /*av1_get_fwd_txfm_cfg*/ av1_transform_config(transform_type, TX_4X8, &cfg); /*fwd_txfm2d_c*/ av1_tranform_two_d_core_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } #if FEATURE_PARTIAL_FREQUENCY static EbErrorType av1_estimate_transform_N2(int16_t *residual_buffer, uint32_t residual_stride, int32_t *coeff_buffer, uint32_t coeff_stride, TxSize transform_size, uint64_t *three_quad_energy, uint32_t bit_depth, TxType transform_type, PlaneType component_type) { EbErrorType return_error = EB_ErrorNone; (void)coeff_stride; (void)component_type; switch (transform_size) { case TX_64X32: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x32_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x32_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform64x32_N2_N4(coeff_buffer); break; case TX_32X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_32x64_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x64_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform32x64_N2_N4(coeff_buffer); break; case TX_64X16: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x16_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x16_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform64x16_N2_N4(coeff_buffer); break; case TX_16X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_16x64_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x64_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform16x64_N2_N4(coeff_buffer); break; case TX_32X16: // TTK if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x16_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x16_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_16x32_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x32_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X8: svt_av1_fwd_txfm2d_16x8_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X16: svt_av1_fwd_txfm2d_8x16_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_32X8: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x8_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x8_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X32: if ((transform_type == DCT_DCT) || 
(transform_type == IDTX)) svt_av1_fwd_txfm2d_8x32_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_8x32_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X4: svt_av1_fwd_txfm2d_16x4_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X16: svt_av1_fwd_txfm2d_4x16_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X4: svt_av1_fwd_txfm2d_8x4_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X8: svt_av1_fwd_txfm2d_4x8_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_64X64: svt_av1_fwd_txfm2d_64x64_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform64x64_N2_N4(coeff_buffer); break; case TX_32X32: if (transform_type == V_DCT || transform_type == H_DCT || transform_type == V_ADST || transform_type == H_ADST || transform_type == V_FLIPADST || transform_type == H_FLIPADST) // Tahani: I believe those cases are never hit av1_transform_two_d_32x32_N2_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else { svt_av1_fwd_txfm2d_32x32_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); } break; case TX_16X16: svt_av1_fwd_txfm2d_16x16_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X8: svt_av1_fwd_txfm2d_8x8_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X4: svt_av1_fwd_txfm2d_4x4_N2( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; default: assert(0); break; } return return_error; } static EbErrorType av1_estimate_transform_N4(int16_t *residual_buffer, uint32_t residual_stride, int32_t *coeff_buffer, uint32_t coeff_stride, TxSize transform_size, uint64_t *three_quad_energy, uint32_t bit_depth, TxType transform_type, PlaneType component_type) { EbErrorType return_error = EB_ErrorNone; (void)coeff_stride; (void)component_type; switch (transform_size) { case TX_64X32: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x32_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x32_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform64x32_N2_N4(coeff_buffer); break; case TX_32X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_32x64_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x64_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform32x64_N2_N4(coeff_buffer); break; case TX_64X16: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x16_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x16_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform64x16_N2_N4(coeff_buffer); break; case TX_16X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_16x64_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x64_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform16x64_N2_N4(coeff_buffer); break; case TX_32X16: 
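        // N4 path: mirrors av1_estimate_transform_N2 above, but with _N4
        // kernels that compute only the first quarter of the outputs in each
        // dimension; other transform types fall back to the _c references,
        // presumably because only DCT_DCT/IDTX have optimized kernels.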
// TTK if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x16_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x16_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_16x32_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x32_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X8: svt_av1_fwd_txfm2d_16x8_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X16: svt_av1_fwd_txfm2d_8x16_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_32X8: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x8_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x8_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_8x32_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_8x32_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X4: svt_av1_fwd_txfm2d_16x4_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X16: svt_av1_fwd_txfm2d_4x16_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X4: svt_av1_fwd_txfm2d_8x4_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X8: svt_av1_fwd_txfm2d_4x8_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_64X64: svt_av1_fwd_txfm2d_64x64_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = handle_transform64x64_N2_N4(coeff_buffer); break; case TX_32X32: if (transform_type == V_DCT || transform_type == H_DCT || transform_type == V_ADST || transform_type == H_ADST || transform_type == V_FLIPADST || transform_type == H_FLIPADST) // Tahani: I believe those cases are never hit av1_transform_two_d_32x32_N4_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else { svt_av1_fwd_txfm2d_32x32_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); } break; case TX_16X16: svt_av1_fwd_txfm2d_16x16_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X8: svt_av1_fwd_txfm2d_8x8_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X4: svt_av1_fwd_txfm2d_4x4_N4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; default: assert(0); break; } return return_error; } static EbErrorType av1_estimate_transform_ONLY_DC(int16_t *residual_buffer, uint32_t residual_stride, int32_t *coeff_buffer, uint32_t coeff_stride, TxSize transform_size, uint64_t *three_quad_energy, uint32_t bit_depth, TxType transform_type, PlaneType component_type) { EbErrorType return_error = av1_estimate_transform_N4(residual_buffer, residual_stride, coeff_buffer, coeff_stride, transform_size, three_quad_energy, bit_depth, transform_type, component_type); for (int i = 1; i < (tx_size_wide[transform_size] * 
tx_size_high[transform_size]); i++) { if (i % tx_size_wide[transform_size] < (tx_size_wide[transform_size] >> 2) || i / tx_size_wide[transform_size] < (tx_size_high[transform_size] >> 2)) { coeff_buffer[i] = 0; } } return return_error; } EbErrorType av1_estimate_transform_default(int16_t *residual_buffer, uint32_t residual_stride, int32_t *coeff_buffer, uint32_t coeff_stride, TxSize transform_size, uint64_t *three_quad_energy, uint32_t bit_depth, TxType transform_type, PlaneType component_type) { EbErrorType return_error = EB_ErrorNone; (void)coeff_stride; (void)component_type; switch (transform_size) { case TX_64X32: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform64x32(coeff_buffer); break; case TX_32X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_32x64( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x64_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform32x64(coeff_buffer); break; case TX_64X16: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x16_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform64x16(coeff_buffer); break; case TX_16X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_16x64( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x64_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform16x64(coeff_buffer); break; case TX_32X16: // TTK if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x16_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_16x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X8: svt_av1_fwd_txfm2d_16x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X16: svt_av1_fwd_txfm2d_8x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_32X8: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x8_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_8x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_8x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X4: svt_av1_fwd_txfm2d_16x4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X16: svt_av1_fwd_txfm2d_4x16( residual_buffer, coeff_buffer, residual_stride, 
transform_type, bit_depth); break; case TX_8X4: svt_av1_fwd_txfm2d_8x4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X8: svt_av1_fwd_txfm2d_4x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_64X64: svt_av1_fwd_txfm2d_64x64( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform64x64(coeff_buffer); break; case TX_32X32: if (transform_type == V_DCT || transform_type == H_DCT || transform_type == V_ADST || transform_type == H_ADST || transform_type == V_FLIPADST || transform_type == H_FLIPADST) // Tahani: I believe those cases are never hit svt_av1_transform_two_d_32x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else { svt_av1_fwd_txfm2d_32x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); } break; case TX_16X16: svt_av1_fwd_txfm2d_16x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X8: svt_av1_fwd_txfm2d_8x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X4: svt_av1_fwd_txfm2d_4x4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; default: assert(0); break; } return return_error; } #endif /* FEATURE_PARTIAL_FREQUENCY */ /********************************************************************* * Transform * Note there is an implicit assumption that TU Size <= PU Size, * which is different than the HEVC requirements. *********************************************************************/ EbErrorType av1_estimate_transform(int16_t *residual_buffer, uint32_t residual_stride, int32_t *coeff_buffer, uint32_t coeff_stride, TxSize transform_size, uint64_t *three_quad_energy, uint32_t bit_depth, TxType transform_type, PlaneType component_type, EB_TRANS_COEFF_SHAPE trans_coeff_shape) { (void)trans_coeff_shape; #if !FEATURE_PARTIAL_FREQUENCY EbErrorType return_error = EB_ErrorNone; #endif (void)coeff_stride; (void)component_type; #if FEATURE_PARTIAL_FREQUENCY switch (trans_coeff_shape) { case DEFAULT_SHAPE: return av1_estimate_transform_default(residual_buffer, residual_stride, coeff_buffer, coeff_stride, transform_size, three_quad_energy, bit_depth, transform_type, component_type); case N2_SHAPE: return av1_estimate_transform_N2(residual_buffer, residual_stride, coeff_buffer, coeff_stride, transform_size, three_quad_energy, bit_depth, transform_type, component_type); case N4_SHAPE: return av1_estimate_transform_N4(residual_buffer, residual_stride, coeff_buffer, coeff_stride, transform_size, three_quad_energy, bit_depth, transform_type, component_type); case ONLY_DC_SHAPE: return av1_estimate_transform_ONLY_DC(residual_buffer, residual_stride, coeff_buffer, coeff_stride, transform_size, three_quad_energy, bit_depth, transform_type, component_type); } assert(0); return EB_ErrorBadParameter; #else /*FEATURE_PARTIAL_FREQUENCY*/ switch (transform_size) { case TX_64X32: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform64x32(coeff_buffer); break; case TX_32X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_32x64( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x64_c( residual_buffer, 
coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform32x64(coeff_buffer); break; case TX_64X16: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_64x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_64x16_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform64x16(coeff_buffer); break; case TX_16X64: if (transform_type == DCT_DCT) svt_av1_fwd_txfm2d_16x64( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x64_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform16x64(coeff_buffer); break; case TX_32X16: // TTK if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x16_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_16x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_16x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X8: svt_av1_fwd_txfm2d_16x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X16: svt_av1_fwd_txfm2d_8x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_32X8: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_32x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_32x8_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X32: if ((transform_type == DCT_DCT) || (transform_type == IDTX)) svt_av1_fwd_txfm2d_8x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else svt_av1_fwd_txfm2d_8x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_16X4: svt_av1_fwd_txfm2d_16x4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X16: svt_av1_fwd_txfm2d_4x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X4: svt_av1_fwd_txfm2d_8x4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_4X8: svt_av1_fwd_txfm2d_4x8( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_64X64: svt_av1_fwd_txfm2d_64x64( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); *three_quad_energy = svt_handle_transform64x64(coeff_buffer); break; case TX_32X32: if (transform_type == V_DCT || transform_type == H_DCT || transform_type == V_ADST || transform_type == H_ADST || transform_type == V_FLIPADST || transform_type == H_FLIPADST) // Tahani: I believe those cases are never hit svt_av1_transform_two_d_32x32_c( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); else { svt_av1_fwd_txfm2d_32x32( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); } break; case TX_16X16: svt_av1_fwd_txfm2d_16x16( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; case TX_8X8: svt_av1_fwd_txfm2d_8x8( residual_buffer, coeff_buffer, residual_stride, 
transform_type, bit_depth); break; case TX_4X4: svt_av1_fwd_txfm2d_4x4( residual_buffer, coeff_buffer, residual_stride, transform_type, bit_depth); break; default: assert(0); break; } return return_error; #endif /*FEATURE_PARTIAL_FREQUENCY*/ } static void highbd_fwd_txfm_64x64(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { assert(txfm_param->tx_type == DCT_DCT); int32_t *dst_coeff = (int32_t *)coeff; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_64x64(src_diff, dst_coeff, diff_stride, DCT_DCT, bd); } static void highbd_fwd_txfm_32x64(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { assert(txfm_param->tx_type == DCT_DCT); int32_t *dst_coeff = (int32_t *)coeff; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_32x64(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, bd); } static void highbd_fwd_txfm_64x32(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { assert(txfm_param->tx_type == DCT_DCT); int32_t *dst_coeff = (int32_t *)coeff; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_64x32(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, bd); } static void highbd_fwd_txfm_16x64(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { assert(txfm_param->tx_type == DCT_DCT); int32_t *dst_coeff = (int32_t *)coeff; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_16x64(src_diff, dst_coeff, diff_stride, DCT_DCT, bd); } static void highbd_fwd_txfm_64x16(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { assert(txfm_param->tx_type == DCT_DCT); int32_t *dst_coeff = (int32_t *)coeff; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_64x16(src_diff, dst_coeff, diff_stride, DCT_DCT, bd); } static void highbd_fwd_txfm_32x32(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; const TxType tx_type = txfm_param->tx_type; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_32x32(src_diff, dst_coeff, diff_stride, tx_type, bd); } static void highbd_fwd_txfm_16x16(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; const TxType tx_type = txfm_param->tx_type; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_16x16(src_diff, dst_coeff, diff_stride, tx_type, bd); } static void highbd_fwd_txfm_8x8(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; const TxType tx_type = txfm_param->tx_type; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_8x8(src_diff, dst_coeff, diff_stride, tx_type, bd); } static void highbd_fwd_txfm_4x8(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_4x8(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_8x4(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_8x4(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_8x16(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; const TxType tx_type = txfm_param->tx_type; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_8x16(src_diff, dst_coeff, diff_stride, tx_type, bd); } static void highbd_fwd_txfm_16x8(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t 
*dst_coeff = (int32_t *)coeff; const TxType tx_type = txfm_param->tx_type; const int bd = txfm_param->bd; svt_av1_fwd_txfm2d_16x8(src_diff, dst_coeff, diff_stride, tx_type, bd); } static void highbd_fwd_txfm_16x32(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_16x32(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_32x16(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_32x16(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_4x16(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_4x16(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_16x4(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_16x4(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_8x32(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_8x32(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } static void highbd_fwd_txfm_32x8(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { int32_t *dst_coeff = (int32_t *)coeff; svt_av1_fwd_txfm2d_32x8(src_diff, dst_coeff, diff_stride, txfm_param->tx_type, txfm_param->bd); } void svt_av1_highbd_fwd_txfm(int16_t *src_diff, TranLow *coeff, int diff_stride, TxfmParam *txfm_param) { assert(av1_ext_tx_used[txfm_param->tx_set_type][txfm_param->tx_type]); const TxSize tx_size = txfm_param->tx_size; switch (tx_size) { case TX_64X64: highbd_fwd_txfm_64x64(src_diff, coeff, diff_stride, txfm_param); break; case TX_32X64: highbd_fwd_txfm_32x64(src_diff, coeff, diff_stride, txfm_param); break; case TX_64X32: highbd_fwd_txfm_64x32(src_diff, coeff, diff_stride, txfm_param); break; case TX_16X64: highbd_fwd_txfm_16x64(src_diff, coeff, diff_stride, txfm_param); break; case TX_64X16: highbd_fwd_txfm_64x16(src_diff, coeff, diff_stride, txfm_param); break; case TX_32X32: highbd_fwd_txfm_32x32(src_diff, coeff, diff_stride, txfm_param); break; case TX_16X16: highbd_fwd_txfm_16x16(src_diff, coeff, diff_stride, txfm_param); break; case TX_8X8: highbd_fwd_txfm_8x8(src_diff, coeff, diff_stride, txfm_param); break; case TX_4X8: highbd_fwd_txfm_4x8(src_diff, coeff, diff_stride, txfm_param); break; case TX_8X4: highbd_fwd_txfm_8x4(src_diff, coeff, diff_stride, txfm_param); break; case TX_8X16: highbd_fwd_txfm_8x16(src_diff, coeff, diff_stride, txfm_param); break; case TX_16X8: highbd_fwd_txfm_16x8(src_diff, coeff, diff_stride, txfm_param); break; case TX_16X32: highbd_fwd_txfm_16x32(src_diff, coeff, diff_stride, txfm_param); break; case TX_32X16: highbd_fwd_txfm_32x16(src_diff, coeff, diff_stride, txfm_param); break; case TX_4X4: //hack highbd_fwd_txfm_4x4(src_diff, coeff, diff_stride, txfm_param); break; case TX_4X16: highbd_fwd_txfm_4x16(src_diff, coeff, diff_stride, txfm_param); break; case TX_16X4: highbd_fwd_txfm_16x4(src_diff, coeff, diff_stride, txfm_param); break; case TX_8X32: highbd_fwd_txfm_8x32(src_diff, coeff, diff_stride, txfm_param); break; case TX_32X8: highbd_fwd_txfm_32x8(src_diff, coeff, diff_stride, txfm_param); break; default: 
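        // all valid TxSize values are handled above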
assert(0); break; } } void svt_av1_wht_fwd_txfm(int16_t *src_diff, int bw, int32_t *coeff, TxSize tx_size, int bit_depth, int is_hbd) { TxfmParam txfm_param; txfm_param.tx_type = DCT_DCT; txfm_param.tx_size = tx_size; txfm_param.lossless = 0; txfm_param.tx_set_type = EXT_TX_SET_ALL16; txfm_param.bd = bit_depth; txfm_param.is_hbd = is_hbd; svt_av1_highbd_fwd_txfm(src_diff, coeff, bw, &txfm_param); } #if FEATURE_PARTIAL_FREQUENCY void svt_av1_fidentity16_N2_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 8; ++i) output[i] = round_shift((int64_t)input[i] * 2 * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } void svt_av1_fadst16_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[16]; // stage 0; // stage 1; assert(output != input); bf1 = output; bf1[0] = input[0]; bf1[1] = -input[15]; bf1[2] = -input[7]; bf1[3] = input[8]; bf1[4] = -input[3]; bf1[5] = input[12]; bf1[6] = input[4]; bf1[7] = -input[11]; bf1[8] = -input[1]; bf1[9] = input[14]; bf1[10] = input[6]; bf1[11] = -input[9]; bf1[12] = input[2]; bf1[13] = -input[13]; bf1[14] = -input[5]; bf1[15] = input[10]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit); bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit); bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit); bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit); bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit); bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit); bf1[12] = bf0[12]; bf1[13] = bf0[13]; bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit); bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit); // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3]; bf1[2] = bf0[0] - bf0[2]; bf1[3] = bf0[1] - bf0[3]; bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7]; bf1[6] = bf0[4] - bf0[6]; bf1[7] = bf0[5] - bf0[7]; bf1[8] = bf0[8] + bf0[10]; bf1[9] = bf0[9] + bf0[11]; bf1[10] = bf0[8] - bf0[10]; bf1[11] = bf0[9] - bf0[11]; bf1[12] = bf0[12] + bf0[14]; bf1[13] = bf0[13] + bf0[15]; bf1[14] = bf0[12] - bf0[14]; bf1[15] = bf0[13] - bf0[15]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit); bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit); bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit); bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit); bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11]; bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit); bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit); bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit); bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5]; bf1[2] = bf0[2] + bf0[6]; bf1[3] = bf0[3] + bf0[7]; bf1[4] = bf0[0] - bf0[4]; bf1[5] = bf0[1] - bf0[5]; bf1[6] = bf0[2] - bf0[6]; bf1[7] = bf0[3] - bf0[7]; bf1[8] = bf0[8] + 
bf0[12]; bf1[9] = bf0[9] + bf0[13]; bf1[10] = bf0[10] + bf0[14]; bf1[11] = bf0[11] + bf0[15]; bf1[12] = bf0[8] - bf0[12]; bf1[13] = bf0[9] - bf0[13]; bf1[14] = bf0[10] - bf0[14]; bf1[15] = bf0[11] - bf0[15]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7]; bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit); bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit); bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit); bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit); bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit); bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit); bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit); bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit); // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[8]; bf1[1] = bf0[1] + bf0[9]; bf1[2] = bf0[2] + bf0[10]; bf1[3] = bf0[3] + bf0[11]; bf1[4] = bf0[4] + bf0[12]; bf1[5] = bf0[5] + bf0[13]; bf1[6] = bf0[6] + bf0[14]; bf1[7] = bf0[7] + bf0[15]; bf1[8] = bf0[0] - bf0[8]; bf1[9] = bf0[1] - bf0[9]; bf1[10] = bf0[2] - bf0[10]; bf1[11] = bf0[3] - bf0[11]; bf1[12] = bf0[4] - bf0[12]; bf1[13] = bf0[5] - bf0[13]; bf1[14] = bf0[6] - bf0[14]; bf1[15] = bf0[7] - bf0[15]; // stage 8 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit); bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit); bf1[5] = half_btf(cospi[46], bf0[4], -cospi[18], bf0[5], cos_bit); bf1[7] = half_btf(cospi[38], bf0[6], -cospi[26], bf0[7], cos_bit); bf1[8] = half_btf(cospi[34], bf0[8], cospi[30], bf0[9], cos_bit); bf1[10] = half_btf(cospi[42], bf0[10], cospi[22], bf0[11], cos_bit); bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit); bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit); // stage 9 bf0 = step; bf1 = output; bf1[0] = bf0[1]; bf1[1] = bf0[14]; bf1[2] = bf0[3]; bf1[3] = bf0[12]; bf1[4] = bf0[5]; bf1[5] = bf0[10]; bf1[6] = bf0[7]; bf1[7] = bf0[8]; } void svt_av1_fdct16_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[16]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[15]; bf1[1] = input[1] + input[14]; bf1[2] = input[2] + input[13]; bf1[3] = input[3] + input[12]; bf1[4] = input[4] + input[11]; bf1[5] = input[5] + input[10]; bf1[6] = input[6] + input[9]; bf1[7] = input[7] + input[8]; bf1[8] = -input[8] + input[7]; bf1[9] = -input[9] + input[6]; bf1[10] = -input[10] + input[5]; bf1[11] = -input[11] + input[4]; bf1[12] = -input[12] + input[3]; bf1[13] = -input[13] + input[2]; bf1[14] = -input[14] + input[1]; bf1[15] = -input[15] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; 
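    /* half_btf() above is the scaled butterfly primitive shared by all of
     * these transforms. Assuming the libaom-style helper used by SVT-AV1, it
     * is a rounded fixed-point dot product:
     *
     *     half_btf(w0, in0, w1, in1, bit)
     *         == round_shift((int64_t)w0 * in0 + (int64_t)w1 * in1, bit)
     *
     * with the weights w0/w1 drawn from the cospi_arr(cos_bit) table. */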
bf1[15] = bf0[15]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4]; bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; // stage 5 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8]; bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12]; bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = bf0[4]; bf1[6] = bf0[6]; bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit); bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit); bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit); bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit); // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[8]; bf1[2] = bf0[4]; bf1[3] = bf0[12]; bf1[4] = bf0[2]; bf1[5] = bf0[10]; bf1[6] = bf0[6]; bf1[7] = bf0[14]; } void svt_av1_fidentity8_N2_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 4; ++i) output[i] = input[i] * 2; } void svt_av1_fadst8_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[8]; // stage 0; // stage 1; assert(output != input); bf1 = output; bf1[0] = input[0]; bf1[1] = -input[7]; bf1[2] = -input[3]; bf1[3] = input[4]; bf1[4] = -input[1]; bf1[5] = input[6]; bf1[6] = input[2]; bf1[7] = -input[5]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit); bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit); bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit); bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit); // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3]; bf1[2] = bf0[0] - bf0[2]; bf1[3] = bf0[1] - bf0[3]; bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7]; bf1[6] = bf0[4] - bf0[6]; bf1[7] = bf0[5] - bf0[7]; // stage 
4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit); bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit); bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit); bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5]; bf1[2] = bf0[2] + bf0[6]; bf1[3] = bf0[3] + bf0[7]; bf1[4] = bf0[0] - bf0[4]; bf1[5] = bf0[1] - bf0[5]; bf1[6] = bf0[2] - bf0[6]; bf1[7] = bf0[3] - bf0[7]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[1] = half_btf(cospi[60], bf0[0], -cospi[4], bf0[1], cos_bit); bf1[3] = half_btf(cospi[44], bf0[2], -cospi[20], bf0[3], cos_bit); bf1[4] = half_btf(cospi[36], bf0[4], cospi[28], bf0[5], cos_bit); bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit); // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[1]; bf1[1] = bf0[6]; bf1[2] = bf0[3]; bf1[3] = bf0[4]; } void svt_av1_fdct8_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[8]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[7]; bf1[1] = input[1] + input[6]; bf1[2] = input[2] + input[5]; bf1[3] = input[3] + input[4]; bf1[4] = -input[4] + input[3]; bf1[5] = -input[5] + input[2]; bf1[6] = -input[6] + input[1]; bf1[7] = -input[7] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4]; bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[4]; bf1[2] = bf0[2]; bf1[3] = bf0[6]; } void svt_av1_fidentity4_N2_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; output[0] = round_shift((int64_t)input[0] * new_sqrt2, new_sqrt2_bits); output[1] = round_shift((int64_t)input[1] * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } void svt_av1_fadst4_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; int32_t bit = cos_bit; const int32_t *sinpi = sinpi_arr(bit); int32_t x0, x1, x2, x3; int32_t s0, s2, s4, s5, s7; // stage 0 x0 = input[0]; x1 = input[1]; x2 = input[2]; x3 = input[3]; if (!(x0 | x1 | x2 | x3)) { output[0] = output[1] = output[2] = output[3] = 0; return; } // stage 1 s0 = sinpi[1] * x0; s2 = sinpi[2] * x1; s4 = sinpi[3] * x2; s5 = sinpi[4] * x3; s7 = x0 + x1; // stage 2 s7 = s7 - x3; // stage 3 x0 = s0 + s2; x1 = sinpi[3] * s7; // stage 4 x0 = x0 + s5; // stage 5 s0 = x0 + s4; // 1-D transform scaling factor is sqrt(2). 
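    /* Unlike the DCT paths, the 4-point ADST is evaluated directly from the
     * sinpi table instead of through butterfly stages: at this point
     * s0 == sinpi[1]*x0 + sinpi[2]*x1 + sinpi[3]*x2 + sinpi[4]*x3 (the first
     * ADST basis row) and x1 == sinpi[3]*(x0 + x1 - x3). This N2 variant
     * emits only outputs 0 and 1 (the low-frequency half); the N2 2-D core
     * zeroes the coefficients it does not compute. */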
output[0] = round_shift(s0, bit); output[1] = round_shift(x1, bit); } void svt_av1_fdct4_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0; int32_t step[4]; // stage 1; bf0 = step; bf0[0] = input[0] + input[3]; bf0[1] = input[1] + input[2]; bf0[2] = -input[2] + input[1]; bf0[3] = -input[3] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); output[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); output[1] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); } void svt_av1_fdct32_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[32]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[31]; bf1[1] = input[1] + input[30]; bf1[2] = input[2] + input[29]; bf1[3] = input[3] + input[28]; bf1[4] = input[4] + input[27]; bf1[5] = input[5] + input[26]; bf1[6] = input[6] + input[25]; bf1[7] = input[7] + input[24]; bf1[8] = input[8] + input[23]; bf1[9] = input[9] + input[22]; bf1[10] = input[10] + input[21]; bf1[11] = input[11] + input[20]; bf1[12] = input[12] + input[19]; bf1[13] = input[13] + input[18]; bf1[14] = input[14] + input[17]; bf1[15] = input[15] + input[16]; bf1[16] = -input[16] + input[15]; bf1[17] = -input[17] + input[14]; bf1[18] = -input[18] + input[13]; bf1[19] = -input[19] + input[12]; bf1[20] = -input[20] + input[11]; bf1[21] = -input[21] + input[10]; bf1[22] = -input[22] + input[9]; bf1[23] = -input[23] + input[8]; bf1[24] = -input[24] + input[7]; bf1[25] = -input[25] + input[6]; bf1[26] = -input[26] + input[5]; bf1[27] = -input[27] + input[4]; bf1[28] = -input[28] + input[3]; bf1[29] = -input[29] + input[2]; bf1[30] = -input[30] + input[1]; bf1[31] = -input[31] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14]; bf1[2] = bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12]; bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10]; bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8]; bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6]; bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4]; bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2]; bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19]; bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit); bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit); bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit); bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit); bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit); bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit); bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], 
cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22]; bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20]; bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18]; bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16]; bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + bf0[30]; bf1[26] = -bf0[26] + bf0[29]; bf1[27] = -bf0[27] + bf0[28]; bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26]; bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit); bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit); bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit); bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit); bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit); bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit); bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit); bf1[30] = bf0[30]; bf1[31] = bf0[31]; // stage 5 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4]; bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18]; bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16]; bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22]; bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20]; bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26]; bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24]; bf1[28] = -bf0[28] + bf0[31]; bf1[29] = -bf0[29] + bf0[30]; bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8]; bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12]; bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14]; bf1[16] = bf0[16]; bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], 
cos_bit); bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit); bf1[19] = bf0[19]; bf1[20] = bf0[20]; bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit); bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit); bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit); bf1[27] = bf0[27]; bf1[28] = bf0[28]; bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit); bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit); bf1[31] = bf0[31]; // stage 7 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = bf0[4]; bf1[6] = bf0[6]; bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit); bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit); bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit); bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit); bf1[16] = bf0[16] + bf0[17]; bf1[17] = -bf0[17] + bf0[16]; bf1[18] = -bf0[18] + bf0[19]; bf1[19] = bf0[19] + bf0[18]; bf1[20] = bf0[20] + bf0[21]; bf1[21] = -bf0[21] + bf0[20]; bf1[22] = -bf0[22] + bf0[23]; bf1[23] = bf0[23] + bf0[22]; bf1[24] = bf0[24] + bf0[25]; bf1[25] = -bf0[25] + bf0[24]; bf1[26] = -bf0[26] + bf0[27]; bf1[27] = bf0[27] + bf0[26]; bf1[28] = bf0[28] + bf0[29]; bf1[29] = -bf0[29] + bf0[28]; bf1[30] = -bf0[30] + bf0[31]; bf1[31] = bf0[31] + bf0[30]; // stage 8 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = bf0[4]; bf1[6] = bf0[6]; bf1[8] = bf0[8]; bf1[10] = bf0[10]; bf1[12] = bf0[12]; bf1[14] = bf0[14]; bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit); bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit); bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit); bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit); bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit); bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit); bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit); bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit); // stage 9 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[16]; bf1[2] = bf0[8]; bf1[3] = bf0[24]; bf1[4] = bf0[4]; bf1[5] = bf0[20]; bf1[6] = bf0[12]; bf1[7] = bf0[28]; bf1[8] = bf0[2]; bf1[9] = bf0[18]; bf1[10] = bf0[10]; bf1[11] = bf0[26]; bf1[12] = bf0[6]; bf1[13] = bf0[22]; bf1[14] = bf0[14]; bf1[15] = bf0[30]; } void svt_av1_fidentity32_N2_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 16; ++i) output[i] = input[i] * 4; } void svt_av1_fdct64_new_N2(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[64]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[63]; bf1[1] = input[1] + input[62]; bf1[2] = input[2] + input[61]; bf1[3] = input[3] + input[60]; bf1[4] = input[4] + input[59]; bf1[5] = input[5] + input[58]; bf1[6] = input[6] + input[57]; bf1[7] = input[7] + input[56]; bf1[8] = input[8] + input[55]; bf1[9] = input[9] + input[54]; bf1[10] = input[10] + input[53]; bf1[11] = input[11] + input[52]; bf1[12] = input[12] + input[51]; bf1[13] = input[13] + input[50]; bf1[14] = input[14] + input[49]; bf1[15] = input[15] + input[48]; bf1[16] = 
input[16] + input[47]; bf1[17] = input[17] + input[46]; bf1[18] = input[18] + input[45]; bf1[19] = input[19] + input[44]; bf1[20] = input[20] + input[43]; bf1[21] = input[21] + input[42]; bf1[22] = input[22] + input[41]; bf1[23] = input[23] + input[40]; bf1[24] = input[24] + input[39]; bf1[25] = input[25] + input[38]; bf1[26] = input[26] + input[37]; bf1[27] = input[27] + input[36]; bf1[28] = input[28] + input[35]; bf1[29] = input[29] + input[34]; bf1[30] = input[30] + input[33]; bf1[31] = input[31] + input[32]; bf1[32] = -input[32] + input[31]; bf1[33] = -input[33] + input[30]; bf1[34] = -input[34] + input[29]; bf1[35] = -input[35] + input[28]; bf1[36] = -input[36] + input[27]; bf1[37] = -input[37] + input[26]; bf1[38] = -input[38] + input[25]; bf1[39] = -input[39] + input[24]; bf1[40] = -input[40] + input[23]; bf1[41] = -input[41] + input[22]; bf1[42] = -input[42] + input[21]; bf1[43] = -input[43] + input[20]; bf1[44] = -input[44] + input[19]; bf1[45] = -input[45] + input[18]; bf1[46] = -input[46] + input[17]; bf1[47] = -input[47] + input[16]; bf1[48] = -input[48] + input[15]; bf1[49] = -input[49] + input[14]; bf1[50] = -input[50] + input[13]; bf1[51] = -input[51] + input[12]; bf1[52] = -input[52] + input[11]; bf1[53] = -input[53] + input[10]; bf1[54] = -input[54] + input[9]; bf1[55] = -input[55] + input[8]; bf1[56] = -input[56] + input[7]; bf1[57] = -input[57] + input[6]; bf1[58] = -input[58] + input[5]; bf1[59] = -input[59] + input[4]; bf1[60] = -input[60] + input[3]; bf1[61] = -input[61] + input[2]; bf1[62] = -input[62] + input[1]; bf1[63] = -input[63] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[31]; bf1[1] = bf0[1] + bf0[30]; bf1[2] = bf0[2] + bf0[29]; bf1[3] = bf0[3] + bf0[28]; bf1[4] = bf0[4] + bf0[27]; bf1[5] = bf0[5] + bf0[26]; bf1[6] = bf0[6] + bf0[25]; bf1[7] = bf0[7] + bf0[24]; bf1[8] = bf0[8] + bf0[23]; bf1[9] = bf0[9] + bf0[22]; bf1[10] = bf0[10] + bf0[21]; bf1[11] = bf0[11] + bf0[20]; bf1[12] = bf0[12] + bf0[19]; bf1[13] = bf0[13] + bf0[18]; bf1[14] = bf0[14] + bf0[17]; bf1[15] = bf0[15] + bf0[16]; bf1[16] = -bf0[16] + bf0[15]; bf1[17] = -bf0[17] + bf0[14]; bf1[18] = -bf0[18] + bf0[13]; bf1[19] = -bf0[19] + bf0[12]; bf1[20] = -bf0[20] + bf0[11]; bf1[21] = -bf0[21] + bf0[10]; bf1[22] = -bf0[22] + bf0[9]; bf1[23] = -bf0[23] + bf0[8]; bf1[24] = -bf0[24] + bf0[7]; bf1[25] = -bf0[25] + bf0[6]; bf1[26] = -bf0[26] + bf0[5]; bf1[27] = -bf0[27] + bf0[4]; bf1[28] = -bf0[28] + bf0[3]; bf1[29] = -bf0[29] + bf0[2]; bf1[30] = -bf0[30] + bf0[1]; bf1[31] = -bf0[31] + bf0[0]; bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = bf0[34]; bf1[35] = bf0[35]; bf1[36] = bf0[36]; bf1[37] = bf0[37]; bf1[38] = bf0[38]; bf1[39] = bf0[39]; bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit); bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit); bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit); bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit); bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit); bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit); bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit); bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit); bf1[48] = half_btf(cospi[32], bf0[48], cospi[32], bf0[47], cos_bit); bf1[49] = half_btf(cospi[32], bf0[49], cospi[32], bf0[46], cos_bit); bf1[50] = half_btf(cospi[32], bf0[50], cospi[32], bf0[45], cos_bit); bf1[51] = half_btf(cospi[32], bf0[51], cospi[32], bf0[44], 
cos_bit); bf1[52] = half_btf(cospi[32], bf0[52], cospi[32], bf0[43], cos_bit); bf1[53] = half_btf(cospi[32], bf0[53], cospi[32], bf0[42], cos_bit); bf1[54] = half_btf(cospi[32], bf0[54], cospi[32], bf0[41], cos_bit); bf1[55] = half_btf(cospi[32], bf0[55], cospi[32], bf0[40], cos_bit); bf1[56] = bf0[56]; bf1[57] = bf0[57]; bf1[58] = bf0[58]; bf1[59] = bf0[59]; bf1[60] = bf0[60]; bf1[61] = bf0[61]; bf1[62] = bf0[62]; bf1[63] = bf0[63]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14]; bf1[2] = bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12]; bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10]; bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8]; bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6]; bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4]; bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2]; bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19]; bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit); bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit); bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit); bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit); bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit); bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit); bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31]; bf1[32] = bf0[32] + bf0[47]; bf1[33] = bf0[33] + bf0[46]; bf1[34] = bf0[34] + bf0[45]; bf1[35] = bf0[35] + bf0[44]; bf1[36] = bf0[36] + bf0[43]; bf1[37] = bf0[37] + bf0[42]; bf1[38] = bf0[38] + bf0[41]; bf1[39] = bf0[39] + bf0[40]; bf1[40] = -bf0[40] + bf0[39]; bf1[41] = -bf0[41] + bf0[38]; bf1[42] = -bf0[42] + bf0[37]; bf1[43] = -bf0[43] + bf0[36]; bf1[44] = -bf0[44] + bf0[35]; bf1[45] = -bf0[45] + bf0[34]; bf1[46] = -bf0[46] + bf0[33]; bf1[47] = -bf0[47] + bf0[32]; bf1[48] = -bf0[48] + bf0[63]; bf1[49] = -bf0[49] + bf0[62]; bf1[50] = -bf0[50] + bf0[61]; bf1[51] = -bf0[51] + bf0[60]; bf1[52] = -bf0[52] + bf0[59]; bf1[53] = -bf0[53] + bf0[58]; bf1[54] = -bf0[54] + bf0[57]; bf1[55] = -bf0[55] + bf0[56]; bf1[56] = bf0[56] + bf0[55]; bf1[57] = bf0[57] + bf0[54]; bf1[58] = bf0[58] + bf0[53]; bf1[59] = bf0[59] + bf0[52]; bf1[60] = bf0[60] + bf0[51]; bf1[61] = bf0[61] + bf0[50]; bf1[62] = bf0[62] + bf0[49]; bf1[63] = bf0[63] + bf0[48]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22]; bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20]; bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18]; bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16]; bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + bf0[30]; bf1[26] = -bf0[26] + bf0[29]; 
bf1[27] = -bf0[27] + bf0[28]; bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26]; bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24]; bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = bf0[34]; bf1[35] = bf0[35]; bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit); bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit); bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit); bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit); bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], cos_bit); bf1[41] = half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit); bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit); bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit); bf1[44] = bf0[44]; bf1[45] = bf0[45]; bf1[46] = bf0[46]; bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = bf0[49]; bf1[50] = bf0[50]; bf1[51] = bf0[51]; bf1[52] = half_btf(cospi[48], bf0[52], -cospi[16], bf0[43], cos_bit); bf1[53] = half_btf(cospi[48], bf0[53], -cospi[16], bf0[42], cos_bit); bf1[54] = half_btf(cospi[48], bf0[54], -cospi[16], bf0[41], cos_bit); bf1[55] = half_btf(cospi[48], bf0[55], -cospi[16], bf0[40], cos_bit); bf1[56] = half_btf(cospi[16], bf0[56], cospi[48], bf0[39], cos_bit); bf1[57] = half_btf(cospi[16], bf0[57], cospi[48], bf0[38], cos_bit); bf1[58] = half_btf(cospi[16], bf0[58], cospi[48], bf0[37], cos_bit); bf1[59] = half_btf(cospi[16], bf0[59], cospi[48], bf0[36], cos_bit); bf1[60] = bf0[60]; bf1[61] = bf0[61]; bf1[62] = bf0[62]; bf1[63] = bf0[63]; // stage 5 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[2] = -bf0[2] + bf0[1]; bf1[3] = -bf0[3] + bf0[0]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit); bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit); bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit); bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit); bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit); bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit); bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit); bf1[30] = bf0[30]; bf1[31] = bf0[31]; bf1[32] = bf0[32] + bf0[39]; bf1[33] = bf0[33] + bf0[38]; bf1[34] = bf0[34] + bf0[37]; bf1[35] = bf0[35] + bf0[36]; bf1[36] = -bf0[36] + bf0[35]; bf1[37] = -bf0[37] + bf0[34]; bf1[38] = -bf0[38] + bf0[33]; bf1[39] = -bf0[39] + bf0[32]; bf1[40] = -bf0[40] + bf0[47]; bf1[41] = -bf0[41] + bf0[46]; bf1[42] = -bf0[42] + bf0[45]; bf1[43] = -bf0[43] + bf0[44]; bf1[44] = bf0[44] + bf0[43]; bf1[45] = bf0[45] + bf0[42]; bf1[46] = bf0[46] + bf0[41]; bf1[47] = bf0[47] + bf0[40]; bf1[48] = bf0[48] + bf0[55]; bf1[49] = bf0[49] + bf0[54]; bf1[50] = bf0[50] + bf0[53]; bf1[51] = bf0[51] + bf0[52]; bf1[52] = -bf0[52] + bf0[51]; bf1[53] = -bf0[53] + bf0[50]; bf1[54] = -bf0[54] + bf0[49]; bf1[55] = -bf0[55] + 
bf0[48]; bf1[56] = -bf0[56] + bf0[63]; bf1[57] = -bf0[57] + bf0[62]; bf1[58] = -bf0[58] + bf0[61]; bf1[59] = -bf0[59] + bf0[60]; bf1[60] = bf0[60] + bf0[59]; bf1[61] = bf0[61] + bf0[58]; bf1[62] = bf0[62] + bf0[57]; bf1[63] = bf0[63] + bf0[56]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[2] = half_btf(cospi[48], bf0[2], cospi[16], bf0[3], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[5] = -bf0[5] + bf0[4]; bf1[6] = -bf0[6] + bf0[7]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18]; bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16]; bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22]; bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20]; bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26]; bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24]; bf1[28] = -bf0[28] + bf0[31]; bf1[29] = -bf0[29] + bf0[30]; bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28]; bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit); bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit); bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit); bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit); bf1[38] = bf0[38]; bf1[39] = bf0[39]; bf1[40] = bf0[40]; bf1[41] = bf0[41]; bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit); bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit); bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit); bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit); bf1[46] = bf0[46]; bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = bf0[49]; bf1[50] = half_btf(cospi[24], bf0[50], -cospi[40], bf0[45], cos_bit); bf1[51] = half_btf(cospi[24], bf0[51], -cospi[40], bf0[44], cos_bit); bf1[52] = half_btf(cospi[40], bf0[52], cospi[24], bf0[43], cos_bit); bf1[53] = half_btf(cospi[40], bf0[53], cospi[24], bf0[42], cos_bit); bf1[54] = bf0[54]; bf1[55] = bf0[55]; bf1[56] = bf0[56]; bf1[57] = bf0[57]; bf1[58] = half_btf(cospi[56], bf0[58], -cospi[8], bf0[37], cos_bit); bf1[59] = half_btf(cospi[56], bf0[59], -cospi[8], bf0[36], cos_bit); bf1[60] = half_btf(cospi[8], bf0[60], cospi[56], bf0[35], cos_bit); bf1[61] = half_btf(cospi[8], bf0[61], cospi[56], bf0[34], cos_bit); bf1[62] = bf0[62]; bf1[63] = bf0[63]; // stage 7 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[6] = half_btf(cospi[24], bf0[6], -cospi[40], bf0[5], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[9] = -bf0[9] + bf0[8]; bf1[10] = -bf0[10] + bf0[11]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[13] = -bf0[13] + bf0[12]; bf1[14] = -bf0[14] + bf0[15]; bf1[15] = bf0[15] + bf0[14]; bf1[16] = bf0[16]; bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit); bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit); bf1[19] = bf0[19]; bf1[20] = bf0[20]; bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[24], bf0[22], 
-cospi[40], bf0[25], cos_bit); bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit); bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit); bf1[27] = bf0[27]; bf1[28] = bf0[28]; bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit); bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit); bf1[31] = bf0[31]; bf1[32] = bf0[32] + bf0[35]; bf1[33] = bf0[33] + bf0[34]; bf1[34] = -bf0[34] + bf0[33]; bf1[35] = -bf0[35] + bf0[32]; bf1[36] = -bf0[36] + bf0[39]; bf1[37] = -bf0[37] + bf0[38]; bf1[38] = bf0[38] + bf0[37]; bf1[39] = bf0[39] + bf0[36]; bf1[40] = bf0[40] + bf0[43]; bf1[41] = bf0[41] + bf0[42]; bf1[42] = -bf0[42] + bf0[41]; bf1[43] = -bf0[43] + bf0[40]; bf1[44] = -bf0[44] + bf0[47]; bf1[45] = -bf0[45] + bf0[46]; bf1[46] = bf0[46] + bf0[45]; bf1[47] = bf0[47] + bf0[44]; bf1[48] = bf0[48] + bf0[51]; bf1[49] = bf0[49] + bf0[50]; bf1[50] = -bf0[50] + bf0[49]; bf1[51] = -bf0[51] + bf0[48]; bf1[52] = -bf0[52] + bf0[55]; bf1[53] = -bf0[53] + bf0[54]; bf1[54] = bf0[54] + bf0[53]; bf1[55] = bf0[55] + bf0[52]; bf1[56] = bf0[56] + bf0[59]; bf1[57] = bf0[57] + bf0[58]; bf1[58] = -bf0[58] + bf0[57]; bf1[59] = -bf0[59] + bf0[56]; bf1[60] = -bf0[60] + bf0[63]; bf1[61] = -bf0[61] + bf0[62]; bf1[62] = bf0[62] + bf0[61]; bf1[63] = bf0[63] + bf0[60]; // stage 8 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = bf0[4]; bf1[6] = bf0[6]; bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit); bf1[10] = half_btf(cospi[44], bf0[10], cospi[20], bf0[13], cos_bit); bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit); bf1[14] = half_btf(cospi[28], bf0[14], -cospi[36], bf0[9], cos_bit); bf1[16] = bf0[16] + bf0[17]; bf1[17] = -bf0[17] + bf0[16]; bf1[18] = -bf0[18] + bf0[19]; bf1[19] = bf0[19] + bf0[18]; bf1[20] = bf0[20] + bf0[21]; bf1[21] = -bf0[21] + bf0[20]; bf1[22] = -bf0[22] + bf0[23]; bf1[23] = bf0[23] + bf0[22]; bf1[24] = bf0[24] + bf0[25]; bf1[25] = -bf0[25] + bf0[24]; bf1[26] = -bf0[26] + bf0[27]; bf1[27] = bf0[27] + bf0[26]; bf1[28] = bf0[28] + bf0[29]; bf1[29] = -bf0[29] + bf0[28]; bf1[30] = -bf0[30] + bf0[31]; bf1[31] = bf0[31] + bf0[30]; bf1[32] = bf0[32]; bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit); bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit); bf1[35] = bf0[35]; bf1[36] = bf0[36]; bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit); bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit); bf1[39] = bf0[39]; bf1[40] = bf0[40]; bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit); bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit); bf1[43] = bf0[43]; bf1[44] = bf0[44]; bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit); bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit); bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = half_btf(cospi[12], bf0[49], -cospi[52], bf0[46], cos_bit); bf1[50] = half_btf(cospi[52], bf0[50], cospi[12], bf0[45], cos_bit); bf1[51] = bf0[51]; bf1[52] = bf0[52]; bf1[53] = half_btf(cospi[44], bf0[53], -cospi[20], bf0[42], cos_bit); bf1[54] = half_btf(cospi[20], bf0[54], cospi[44], bf0[41], cos_bit); bf1[55] = bf0[55]; bf1[56] = bf0[56]; bf1[57] = half_btf(cospi[28], bf0[57], -cospi[36], bf0[38], cos_bit); bf1[58] = half_btf(cospi[36], bf0[58], cospi[28], bf0[37], cos_bit); bf1[59] = bf0[59]; bf1[60] = bf0[60]; bf1[61] = half_btf(cospi[60], bf0[61], -cospi[4], bf0[34], 
cos_bit); bf1[62] = half_btf(cospi[4], bf0[62], cospi[60], bf0[33], cos_bit); bf1[63] = bf0[63]; // stage 9 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = bf0[4]; bf1[6] = bf0[6]; bf1[8] = bf0[8]; bf1[10] = bf0[10]; bf1[12] = bf0[12]; bf1[14] = bf0[14]; bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit); bf1[18] = half_btf(cospi[46], bf0[18], cospi[18], bf0[29], cos_bit); bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit); bf1[22] = half_btf(cospi[38], bf0[22], cospi[26], bf0[25], cos_bit); bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit); bf1[26] = half_btf(cospi[22], bf0[26], -cospi[42], bf0[21], cos_bit); bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit); bf1[30] = half_btf(cospi[30], bf0[30], -cospi[34], bf0[17], cos_bit); bf1[32] = bf0[32] + bf0[33]; bf1[33] = -bf0[33] + bf0[32]; bf1[34] = -bf0[34] + bf0[35]; bf1[35] = bf0[35] + bf0[34]; bf1[36] = bf0[36] + bf0[37]; bf1[37] = -bf0[37] + bf0[36]; bf1[38] = -bf0[38] + bf0[39]; bf1[39] = bf0[39] + bf0[38]; bf1[40] = bf0[40] + bf0[41]; bf1[41] = -bf0[41] + bf0[40]; bf1[42] = -bf0[42] + bf0[43]; bf1[43] = bf0[43] + bf0[42]; bf1[44] = bf0[44] + bf0[45]; bf1[45] = -bf0[45] + bf0[44]; bf1[46] = -bf0[46] + bf0[47]; bf1[47] = bf0[47] + bf0[46]; bf1[48] = bf0[48] + bf0[49]; bf1[49] = -bf0[49] + bf0[48]; bf1[50] = -bf0[50] + bf0[51]; bf1[51] = bf0[51] + bf0[50]; bf1[52] = bf0[52] + bf0[53]; bf1[53] = -bf0[53] + bf0[52]; bf1[54] = -bf0[54] + bf0[55]; bf1[55] = bf0[55] + bf0[54]; bf1[56] = bf0[56] + bf0[57]; bf1[57] = -bf0[57] + bf0[56]; bf1[58] = -bf0[58] + bf0[59]; bf1[59] = bf0[59] + bf0[58]; bf1[60] = bf0[60] + bf0[61]; bf1[61] = -bf0[61] + bf0[60]; bf1[62] = -bf0[62] + bf0[63]; bf1[63] = bf0[63] + bf0[62]; // stage 10 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[2] = bf0[2]; bf1[4] = bf0[4]; bf1[6] = bf0[6]; bf1[8] = bf0[8]; bf1[10] = bf0[10]; bf1[12] = bf0[12]; bf1[14] = bf0[14]; bf1[16] = bf0[16]; bf1[18] = bf0[18]; bf1[20] = bf0[20]; bf1[22] = bf0[22]; bf1[24] = bf0[24]; bf1[26] = bf0[26]; bf1[28] = bf0[28]; bf1[30] = bf0[30]; bf1[32] = half_btf(cospi[63], bf0[32], cospi[1], bf0[63], cos_bit); bf1[34] = half_btf(cospi[47], bf0[34], cospi[17], bf0[61], cos_bit); bf1[36] = half_btf(cospi[55], bf0[36], cospi[9], bf0[59], cos_bit); bf1[38] = half_btf(cospi[39], bf0[38], cospi[25], bf0[57], cos_bit); bf1[40] = half_btf(cospi[59], bf0[40], cospi[5], bf0[55], cos_bit); bf1[42] = half_btf(cospi[43], bf0[42], cospi[21], bf0[53], cos_bit); bf1[44] = half_btf(cospi[51], bf0[44], cospi[13], bf0[51], cos_bit); bf1[46] = half_btf(cospi[35], bf0[46], cospi[29], bf0[49], cos_bit); bf1[48] = half_btf(cospi[3], bf0[48], -cospi[61], bf0[47], cos_bit); bf1[50] = half_btf(cospi[19], bf0[50], -cospi[45], bf0[45], cos_bit); bf1[52] = half_btf(cospi[11], bf0[52], -cospi[53], bf0[43], cos_bit); bf1[54] = half_btf(cospi[27], bf0[54], -cospi[37], bf0[41], cos_bit); bf1[56] = half_btf(cospi[7], bf0[56], -cospi[57], bf0[39], cos_bit); bf1[58] = half_btf(cospi[23], bf0[58], -cospi[41], bf0[37], cos_bit); bf1[60] = half_btf(cospi[15], bf0[60], -cospi[49], bf0[35], cos_bit); bf1[62] = half_btf(cospi[31], bf0[62], -cospi[33], bf0[33], cos_bit); // stage 11 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[32]; bf1[2] = bf0[16]; bf1[3] = bf0[48]; bf1[4] = bf0[8]; bf1[5] = bf0[40]; bf1[6] = bf0[24]; bf1[7] = bf0[56]; bf1[8] = bf0[4]; bf1[9] = bf0[36]; bf1[10] = bf0[20]; bf1[11] = bf0[52]; bf1[12] = bf0[12]; bf1[13] = 
bf0[44];
    bf1[14] = bf0[28];
    bf1[15] = bf0[60];
    bf1[16] = bf0[2];
    bf1[17] = bf0[34];
    bf1[18] = bf0[18];
    bf1[19] = bf0[50];
    bf1[20] = bf0[10];
    bf1[21] = bf0[42];
    bf1[22] = bf0[26];
    bf1[23] = bf0[58];
    bf1[24] = bf0[6];
    bf1[25] = bf0[38];
    bf1[26] = bf0[22];
    bf1[27] = bf0[54];
    bf1[28] = bf0[14];
    bf1[29] = bf0[46];
    bf1[30] = bf0[30];
    bf1[31] = bf0[62];
}

void av1_fidentity64_N2_c(const int32_t *input, int32_t *output, int8_t cos_bit,
                          const int8_t *stage_range) {
    (void)stage_range;
    (void)cos_bit;
    for (int32_t i = 0; i < 32; ++i)
        output[i] = round_shift((int64_t)input[i] * 4 * new_sqrt2, new_sqrt2_bits);
    assert(stage_range[0] + new_sqrt2_bits <= 32);
}

static INLINE TxfmFunc fwd_txfm_type_to_func_N2(TxfmType txfmtype) {
    switch (txfmtype) {
    case TXFM_TYPE_DCT4: return svt_av1_fdct4_new_N2;
    case TXFM_TYPE_DCT8: return svt_av1_fdct8_new_N2;
    case TXFM_TYPE_DCT16: return svt_av1_fdct16_new_N2;
    case TXFM_TYPE_DCT32: return svt_av1_fdct32_new_N2;
    case TXFM_TYPE_DCT64: return svt_av1_fdct64_new_N2;
    case TXFM_TYPE_ADST4: return svt_av1_fadst4_new_N2;
    case TXFM_TYPE_ADST8: return svt_av1_fadst8_new_N2;
    case TXFM_TYPE_ADST16: return svt_av1_fadst16_new_N2;
    case TXFM_TYPE_ADST32: return av1_fadst32_new;
    case TXFM_TYPE_IDENTITY4: return svt_av1_fidentity4_N2_c;
    case TXFM_TYPE_IDENTITY8: return svt_av1_fidentity8_N2_c;
    case TXFM_TYPE_IDENTITY16: return svt_av1_fidentity16_N2_c;
    case TXFM_TYPE_IDENTITY32: return svt_av1_fidentity32_N2_c;
    case TXFM_TYPE_IDENTITY64: return av1_fidentity64_N2_c;
    default: assert(0); return NULL;
    }
}

static INLINE void av1_tranform_two_d_core_N2_c(int16_t *input, uint32_t input_stride,
                                                int32_t *output, const Txfm2dFlipCfg *cfg,
                                                int32_t *buf, uint8_t bit_depth) {
    int32_t c, r;
    // The number of columns equals the size of the row transform
    // (tx_size_wide) and the number of rows equals the size of the column
    // transform (tx_size_high). The two only differ for rectangular
    // transforms; for square transforms the distinction makes no difference.
    const int32_t txfm_size_col = tx_size_wide[cfg->tx_size];
    const int32_t txfm_size_row = tx_size_high[cfg->tx_size];
    // Take the shift from the larger dimension in the rectangular case.
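    // The 2-D transform is separable: each column is transformed first
    // (shift[0] is applied to the input and shift[1] to the column output),
    // the intermediate result is gathered in buf, and each row of buf is then
    // transformed with shift[2] applied afterwards. In this N2 ("half")
    // variant only txfm_size_row / 2 rows are transformed and only
    // txfm_size_col / 2 coefficients are kept per row; the final loop below
    // zeroes everything outside that top-left quadrant.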
const int8_t *shift = cfg->shift;
    const int32_t rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row);
    int8_t stage_range_col[MAX_TXFM_STAGE_NUM];
    int8_t stage_range_row[MAX_TXFM_STAGE_NUM];
    assert(cfg->stage_num_col <= MAX_TXFM_STAGE_NUM);
    assert(cfg->stage_num_row <= MAX_TXFM_STAGE_NUM);
    svt_av1_gen_fwd_stage_range(stage_range_col, stage_range_row, cfg, bit_depth);
    const int8_t cos_bit_col = cfg->cos_bit_col;
    const int8_t cos_bit_row = cfg->cos_bit_row;
    const TxfmFunc txfm_func_col = fwd_txfm_type_to_func_N2(cfg->txfm_type_col);
    const TxfmFunc txfm_func_row = fwd_txfm_type_to_func_N2(cfg->txfm_type_row);
    ASSERT(txfm_func_col != NULL);
    ASSERT(txfm_func_row != NULL);
    // use output buffer as temp buffer
    int32_t *temp_in = output;
    int32_t *temp_out = output + txfm_size_row;

    // Columns
    for (c = 0; c < txfm_size_col; ++c) {
        if (cfg->ud_flip == 0)
            for (r = 0; r < txfm_size_row; ++r) temp_in[r] = input[r * input_stride + c];
        else {
            for (r = 0; r < txfm_size_row; ++r)
                // flip upside down
                temp_in[r] = input[(txfm_size_row - r - 1) * input_stride + c];
        }
        svt_av1_round_shift_array_c(temp_in, txfm_size_row, -shift[0]);
        txfm_func_col(temp_in, temp_out, cos_bit_col, stage_range_col);
        svt_av1_round_shift_array_c(temp_out, txfm_size_row / 2, -shift[1]);
        if (cfg->lr_flip == 0) {
            for (r = 0; r < txfm_size_row; ++r) buf[r * txfm_size_col + c] = temp_out[r];
        } else {
            for (r = 0; r < txfm_size_row; ++r)
                // flip from left to right
                buf[r * txfm_size_col + (txfm_size_col - c - 1)] = temp_out[r];
        }
    }

    // Rows
    for (r = 0; r < txfm_size_row / 2; ++r) {
        txfm_func_row(buf + r * txfm_size_col, output + r * txfm_size_col, cos_bit_row,
                      stage_range_row);
        svt_av1_round_shift_array_c(output + r * txfm_size_col, txfm_size_col / 2, -shift[2]);
        if (abs(rect_type) == 1) {
            // Multiply everything by Sqrt2 if the transform is rectangular and the
            // size difference is a factor of 2.
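            // A worked example of this correction, assuming the libaom
            // fixed-point constants new_sqrt2 == 5793 and new_sqrt2_bits == 12
            // (5793 / 4096 ~= 1.4142): for a coefficient of 100,
            // round_shift(100 * 5793, 12) == (579300 + 2048) >> 12 == 141,
            // i.e. roughly 100 * sqrt(2). This keeps rectangular transforms
            // with |log2(w/h)| == 1 at the same overall scale as square ones.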
for (c = 0; c < txfm_size_col / 2; ++c) { output[r * txfm_size_col + c] = round_shift( (int64_t)output[r * txfm_size_col + c] * new_sqrt2, new_sqrt2_bits); } } } for (int i = 0; i < (txfm_size_col * txfm_size_row); i++) { if (i % txfm_size_col >= (txfm_size_col >> 1) || i / txfm_size_col >= (txfm_size_row >> 1)) { output[i] = 0; } } } void av1_transform_two_d_64x64_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 64]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_64X64, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_32x32_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X32, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_16x16_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X16, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_8x8_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X8, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_4x4_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_4X4, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_64x32_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_64X32, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x64_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 64]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X64, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_64x16_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_64X16, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x64_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 64]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X64, &cfg); av1_tranform_two_d_core_N2_c( input, 
input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x16_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X16, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x32_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X32, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x8_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X8, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x16_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X16, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x8_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X8, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x32_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X32, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x4_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X4, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_4x16_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_4X16, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x4_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X4, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_4x8_N2_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 8]; Txfm2dFlipCfg cfg; 
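    /* Every svt_av1_fwd_txfm2d_WxH_N2_c wrapper above and below follows the
     * same pattern: stack-allocate a W * H int32_t intermediate buffer, fill
     * the flip/shift configuration for the matching TxSize with
     * av1_transform_config(), and delegate to av1_tranform_two_d_core_N2_c(). */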
av1_transform_config(transform_type, TX_4X8, &cfg); av1_tranform_two_d_core_N2_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fdct4_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi = cospi_arr(cos_bit); int32_t step[2]; // stage 1; step[0] = input[0] + input[3]; step[1] = input[1] + input[2]; output[0] = half_btf(cospi[32], step[0], cospi[32], step[1], cos_bit); } void svt_av1_fadst4_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; int32_t bit = cos_bit; const int32_t *sinpi = sinpi_arr(bit); int32_t x0, x1, x2, x3; int32_t s0, s2, s4, s5; // stage 0 x0 = input[0]; x1 = input[1]; x2 = input[2]; x3 = input[3]; if (!(x0 | x1 | x2 | x3)) { output[0] = output[1] = output[2] = output[3] = 0; return; } // stage 1 s0 = sinpi[1] * x0; s2 = sinpi[2] * x1; s4 = sinpi[3] * x2; s5 = sinpi[4] * x3; // stage 3 x0 = s0 + s2; // stage 4 x0 = x0 + s5; // stage 5 s0 = x0 + s4; // 1-D transform scaling factor is sqrt(2). output[0] = round_shift(s0, bit); } void svt_av1_fidentity4_N4_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; output[0] = round_shift((int64_t)input[0] * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } void svt_av1_fdct8_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[8]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[7]; bf1[1] = input[1] + input[6]; bf1[2] = input[2] + input[5]; bf1[3] = input[3] + input[4]; bf1[4] = -input[4] + input[3]; bf1[5] = -input[5] + input[2]; bf1[6] = -input[6] + input[1]; bf1[7] = -input[7] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; // stage 3 bf0 = step; bf1 = output; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[7] = bf0[7] + bf0[6]; // stage 4 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[4]; } void svt_av1_fadst8_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[8]; // stage 0; // stage 1; assert(output != input); bf1 = output; bf1[0] = input[0]; bf1[1] = -input[7]; bf1[2] = -input[3]; bf1[3] = input[4]; bf1[4] = -input[1]; bf1[5] = input[6]; bf1[6] = input[2]; bf1[7] = -input[5]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit); bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit); bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit); bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit); // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3]; bf1[2] = bf0[0] - bf0[2]; bf1[3] = bf0[1] - bf0[3]; bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7]; bf1[6] = bf0[4] - bf0[6]; bf1[7] = bf0[5] 
- bf0[7]; // stage 4 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit); bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit); bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit); bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5]; bf1[6] = bf0[2] - bf0[6]; bf1[7] = bf0[3] - bf0[7]; // stage 6 bf0 = output; bf1 = step; bf1[1] = half_btf(cospi[60], bf0[0], -cospi[4], bf0[1], cos_bit); bf1[6] = half_btf(cospi[52], bf0[6], cospi[12], bf0[7], cos_bit); // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[1]; bf1[1] = bf0[6]; } void svt_av1_fidentity8_N4_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 2; ++i) output[i] = input[i] * 2; } void svt_av1_fdct16_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[16]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[15]; bf1[1] = input[1] + input[14]; bf1[2] = input[2] + input[13]; bf1[3] = input[3] + input[12]; bf1[4] = input[4] + input[11]; bf1[5] = input[5] + input[10]; bf1[6] = input[6] + input[9]; bf1[7] = input[7] + input[8]; bf1[8] = -input[8] + input[7]; bf1[9] = -input[9] + input[6]; bf1[10] = -input[10] + input[5]; bf1[11] = -input[11] + input[4]; bf1[12] = -input[12] + input[3]; bf1[13] = -input[13] + input[2]; bf1[14] = -input[14] + input[1]; bf1[15] = -input[15] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; bf1[15] = bf0[15]; // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; // stage 4 bf0 = output; bf1 = step; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[15] = bf0[15] + 
bf0[14]; // stage 6 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[4] = bf0[4]; bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit); bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit); // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[8]; bf1[2] = bf0[4]; bf1[3] = bf0[12]; } void svt_av1_fadst16_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[16]; // stage 0; // stage 1; assert(output != input); bf1 = output; bf1[0] = input[0]; bf1[1] = -input[15]; bf1[2] = -input[7]; bf1[3] = input[8]; bf1[4] = -input[3]; bf1[5] = input[12]; bf1[6] = input[4]; bf1[7] = -input[11]; bf1[8] = -input[1]; bf1[9] = input[14]; bf1[10] = input[6]; bf1[11] = -input[9]; bf1[12] = input[2]; bf1[13] = -input[13]; bf1[14] = -input[5]; bf1[15] = input[10]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = half_btf(cospi[32], bf0[2], cospi[32], bf0[3], cos_bit); bf1[3] = half_btf(cospi[32], bf0[2], -cospi[32], bf0[3], cos_bit); bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[7], cos_bit); bf1[7] = half_btf(cospi[32], bf0[6], -cospi[32], bf0[7], cos_bit); bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(cospi[32], bf0[10], cospi[32], bf0[11], cos_bit); bf1[11] = half_btf(cospi[32], bf0[10], -cospi[32], bf0[11], cos_bit); bf1[12] = bf0[12]; bf1[13] = bf0[13]; bf1[14] = half_btf(cospi[32], bf0[14], cospi[32], bf0[15], cos_bit); bf1[15] = half_btf(cospi[32], bf0[14], -cospi[32], bf0[15], cos_bit); // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[2]; bf1[1] = bf0[1] + bf0[3]; bf1[2] = bf0[0] - bf0[2]; bf1[3] = bf0[1] - bf0[3]; bf1[4] = bf0[4] + bf0[6]; bf1[5] = bf0[5] + bf0[7]; bf1[6] = bf0[4] - bf0[6]; bf1[7] = bf0[5] - bf0[7]; bf1[8] = bf0[8] + bf0[10]; bf1[9] = bf0[9] + bf0[11]; bf1[10] = bf0[8] - bf0[10]; bf1[11] = bf0[9] - bf0[11]; bf1[12] = bf0[12] + bf0[14]; bf1[13] = bf0[13] + bf0[15]; bf1[14] = bf0[12] - bf0[14]; bf1[15] = bf0[13] - bf0[15]; // stage 4 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = half_btf(cospi[16], bf0[4], cospi[48], bf0[5], cos_bit); bf1[5] = half_btf(cospi[48], bf0[4], -cospi[16], bf0[5], cos_bit); bf1[6] = half_btf(-cospi[48], bf0[6], cospi[16], bf0[7], cos_bit); bf1[7] = half_btf(cospi[16], bf0[6], cospi[48], bf0[7], cos_bit); bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = bf0[10]; bf1[11] = bf0[11]; bf1[12] = half_btf(cospi[16], bf0[12], cospi[48], bf0[13], cos_bit); bf1[13] = half_btf(cospi[48], bf0[12], -cospi[16], bf0[13], cos_bit); bf1[14] = half_btf(-cospi[48], bf0[14], cospi[16], bf0[15], cos_bit); bf1[15] = half_btf(cospi[16], bf0[14], cospi[48], bf0[15], cos_bit); // stage 5 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[4]; bf1[1] = bf0[1] + bf0[5]; bf1[2] = bf0[2] + bf0[6]; bf1[3] = bf0[3] + bf0[7]; bf1[4] = bf0[0] - bf0[4]; bf1[5] = bf0[1] - bf0[5]; bf1[6] = bf0[2] - bf0[6]; bf1[7] = bf0[3] - bf0[7]; bf1[8] = bf0[8] + bf0[12]; bf1[9] = bf0[9] + bf0[13]; bf1[10] = bf0[10] + bf0[14]; bf1[11] = bf0[11] + bf0[15]; bf1[12] = bf0[8] - bf0[12]; bf1[13] = bf0[9] - bf0[13]; bf1[14] = bf0[10] - bf0[14]; bf1[15] = bf0[11] - bf0[15]; // stage 6 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[1] = bf0[1]; bf1[2] = bf0[2]; bf1[3] = bf0[3]; bf1[4] = bf0[4]; bf1[5] = bf0[5]; bf1[6] = bf0[6]; bf1[7] = bf0[7]; bf1[8] = half_btf(cospi[8], bf0[8], cospi[56], bf0[9], cos_bit); 
bf1[9] = half_btf(cospi[56], bf0[8], -cospi[8], bf0[9], cos_bit); bf1[10] = half_btf(cospi[40], bf0[10], cospi[24], bf0[11], cos_bit); bf1[11] = half_btf(cospi[24], bf0[10], -cospi[40], bf0[11], cos_bit); bf1[12] = half_btf(-cospi[56], bf0[12], cospi[8], bf0[13], cos_bit); bf1[13] = half_btf(cospi[8], bf0[12], cospi[56], bf0[13], cos_bit); bf1[14] = half_btf(-cospi[24], bf0[14], cospi[40], bf0[15], cos_bit); bf1[15] = half_btf(cospi[40], bf0[14], cospi[24], bf0[15], cos_bit); // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[8]; bf1[1] = bf0[1] + bf0[9]; bf1[2] = bf0[2] + bf0[10]; bf1[3] = bf0[3] + bf0[11]; bf1[12] = bf0[4] - bf0[12]; bf1[13] = bf0[5] - bf0[13]; bf1[14] = bf0[6] - bf0[14]; bf1[15] = bf0[7] - bf0[15]; // stage 8 bf0 = output; bf1 = step; bf1[1] = half_btf(cospi[62], bf0[0], -cospi[2], bf0[1], cos_bit); bf1[3] = half_btf(cospi[54], bf0[2], -cospi[10], bf0[3], cos_bit); bf1[12] = half_btf(cospi[50], bf0[12], cospi[14], bf0[13], cos_bit); bf1[14] = half_btf(cospi[58], bf0[14], cospi[6], bf0[15], cos_bit); // stage 9 bf0 = step; bf1 = output; bf1[0] = bf0[1]; bf1[1] = bf0[14]; bf1[2] = bf0[3]; bf1[3] = bf0[12]; } void svt_av1_fidentity16_N4_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 4; ++i) output[i] = round_shift((int64_t)input[i] * 2 * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } void svt_av1_fdct32_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[32]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[31]; bf1[1] = input[1] + input[30]; bf1[2] = input[2] + input[29]; bf1[3] = input[3] + input[28]; bf1[4] = input[4] + input[27]; bf1[5] = input[5] + input[26]; bf1[6] = input[6] + input[25]; bf1[7] = input[7] + input[24]; bf1[8] = input[8] + input[23]; bf1[9] = input[9] + input[22]; bf1[10] = input[10] + input[21]; bf1[11] = input[11] + input[20]; bf1[12] = input[12] + input[19]; bf1[13] = input[13] + input[18]; bf1[14] = input[14] + input[17]; bf1[15] = input[15] + input[16]; bf1[16] = -input[16] + input[15]; bf1[17] = -input[17] + input[14]; bf1[18] = -input[18] + input[13]; bf1[19] = -input[19] + input[12]; bf1[20] = -input[20] + input[11]; bf1[21] = -input[21] + input[10]; bf1[22] = -input[22] + input[9]; bf1[23] = -input[23] + input[8]; bf1[24] = -input[24] + input[7]; bf1[25] = -input[25] + input[6]; bf1[26] = -input[26] + input[5]; bf1[27] = -input[27] + input[4]; bf1[28] = -input[28] + input[3]; bf1[29] = -input[29] + input[2]; bf1[30] = -input[30] + input[1]; bf1[31] = -input[31] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14]; bf1[2] = bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12]; bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10]; bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8]; bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6]; bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4]; bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2]; bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19]; bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit); bf1[23] = half_btf(-cospi[32], 
bf0[23], cospi[32], bf0[24], cos_bit); bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit); bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit); bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit); bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit); bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31]; // stage 3 bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22]; bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20]; bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18]; bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16]; bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + bf0[30]; bf1[26] = -bf0[26] + bf0[29]; bf1[27] = -bf0[27] + bf0[28]; bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26]; bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24]; // stage 4 bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit); bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit); bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit); bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit); bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit); bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit); bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit); bf1[30] = bf0[30]; bf1[31] = bf0[31]; // stage 5 bf0 = step; bf1 = output; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18]; bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16]; bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22]; bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20]; bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26]; bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24]; bf1[28] = -bf0[28] + bf0[31]; 
bf1[29] = -bf0[29] + bf0[30]; bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28]; // stage 6 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[15] = bf0[15] + bf0[14]; bf1[16] = bf0[16]; bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit); bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit); bf1[19] = bf0[19]; bf1[20] = bf0[20]; bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit); bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit); bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit); bf1[27] = bf0[27]; bf1[28] = bf0[28]; bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit); bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit); bf1[31] = bf0[31]; // stage 7 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[4] = bf0[4]; bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit); bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit); bf1[16] = bf0[16] + bf0[17]; bf1[19] = bf0[19] + bf0[18]; bf1[20] = bf0[20] + bf0[21]; bf1[23] = bf0[23] + bf0[22]; bf1[24] = bf0[24] + bf0[25]; bf1[27] = bf0[27] + bf0[26]; bf1[28] = bf0[28] + bf0[29]; bf1[31] = bf0[31] + bf0[30]; // stage 8 bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[4] = bf0[4]; bf1[8] = bf0[8]; bf1[12] = bf0[12]; bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit); bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit); bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit); bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit); // stage 9 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[16]; bf1[2] = bf0[8]; bf1[3] = bf0[24]; bf1[4] = bf0[4]; bf1[5] = bf0[20]; bf1[6] = bf0[12]; bf1[7] = bf0[28]; } void svt_av1_fidentity32_N4_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 8; ++i) output[i] = input[i] * 4; } void svt_av1_fdct64_new_N4(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; const int32_t *cospi; int32_t *bf0, *bf1; int32_t step[64]; // stage 0; // stage 1; bf1 = output; bf1[0] = input[0] + input[63]; bf1[1] = input[1] + input[62]; bf1[2] = input[2] + input[61]; bf1[3] = input[3] + input[60]; bf1[4] = input[4] + input[59]; bf1[5] = input[5] + input[58]; bf1[6] = input[6] + input[57]; bf1[7] = input[7] + input[56]; bf1[8] = input[8] + input[55]; bf1[9] = input[9] + input[54]; bf1[10] = input[10] + input[53]; bf1[11] = input[11] + input[52]; bf1[12] = input[12] + input[51]; bf1[13] = input[13] + input[50]; bf1[14] = input[14] + input[49]; bf1[15] = input[15] + input[48]; bf1[16] = input[16] + input[47]; bf1[17] = input[17] + input[46]; bf1[18] = input[18] + input[45]; bf1[19] = input[19] + input[44]; bf1[20] = input[20] + input[43]; bf1[21] = input[21] + input[42]; bf1[22] = input[22] + input[41]; bf1[23] = input[23] + input[40]; bf1[24] = input[24] + input[39]; bf1[25] = input[25] + input[38]; bf1[26] = input[26] + input[37]; bf1[27] = input[27] + input[36]; bf1[28] = input[28] + input[35]; bf1[29] = input[29] + input[34]; bf1[30] = input[30] + input[33]; bf1[31] = input[31] + input[32]; bf1[32] = -input[32] + input[31]; bf1[33] = 
-input[33] + input[30]; bf1[34] = -input[34] + input[29]; bf1[35] = -input[35] + input[28]; bf1[36] = -input[36] + input[27]; bf1[37] = -input[37] + input[26]; bf1[38] = -input[38] + input[25]; bf1[39] = -input[39] + input[24]; bf1[40] = -input[40] + input[23]; bf1[41] = -input[41] + input[22]; bf1[42] = -input[42] + input[21]; bf1[43] = -input[43] + input[20]; bf1[44] = -input[44] + input[19]; bf1[45] = -input[45] + input[18]; bf1[46] = -input[46] + input[17]; bf1[47] = -input[47] + input[16]; bf1[48] = -input[48] + input[15]; bf1[49] = -input[49] + input[14]; bf1[50] = -input[50] + input[13]; bf1[51] = -input[51] + input[12]; bf1[52] = -input[52] + input[11]; bf1[53] = -input[53] + input[10]; bf1[54] = -input[54] + input[9]; bf1[55] = -input[55] + input[8]; bf1[56] = -input[56] + input[7]; bf1[57] = -input[57] + input[6]; bf1[58] = -input[58] + input[5]; bf1[59] = -input[59] + input[4]; bf1[60] = -input[60] + input[3]; bf1[61] = -input[61] + input[2]; bf1[62] = -input[62] + input[1]; bf1[63] = -input[63] + input[0]; // stage 2 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[31]; bf1[1] = bf0[1] + bf0[30]; bf1[2] = bf0[2] + bf0[29]; bf1[3] = bf0[3] + bf0[28]; bf1[4] = bf0[4] + bf0[27]; bf1[5] = bf0[5] + bf0[26]; bf1[6] = bf0[6] + bf0[25]; bf1[7] = bf0[7] + bf0[24]; bf1[8] = bf0[8] + bf0[23]; bf1[9] = bf0[9] + bf0[22]; bf1[10] = bf0[10] + bf0[21]; bf1[11] = bf0[11] + bf0[20]; bf1[12] = bf0[12] + bf0[19]; bf1[13] = bf0[13] + bf0[18]; bf1[14] = bf0[14] + bf0[17]; bf1[15] = bf0[15] + bf0[16]; bf1[16] = -bf0[16] + bf0[15]; bf1[17] = -bf0[17] + bf0[14]; bf1[18] = -bf0[18] + bf0[13]; bf1[19] = -bf0[19] + bf0[12]; bf1[20] = -bf0[20] + bf0[11]; bf1[21] = -bf0[21] + bf0[10]; bf1[22] = -bf0[22] + bf0[9]; bf1[23] = -bf0[23] + bf0[8]; bf1[24] = -bf0[24] + bf0[7]; bf1[25] = -bf0[25] + bf0[6]; bf1[26] = -bf0[26] + bf0[5]; bf1[27] = -bf0[27] + bf0[4]; bf1[28] = -bf0[28] + bf0[3]; bf1[29] = -bf0[29] + bf0[2]; bf1[30] = -bf0[30] + bf0[1]; bf1[31] = -bf0[31] + bf0[0]; bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = bf0[34]; bf1[35] = bf0[35]; bf1[36] = bf0[36]; bf1[37] = bf0[37]; bf1[38] = bf0[38]; bf1[39] = bf0[39]; bf1[40] = half_btf(-cospi[32], bf0[40], cospi[32], bf0[55], cos_bit); bf1[41] = half_btf(-cospi[32], bf0[41], cospi[32], bf0[54], cos_bit); bf1[42] = half_btf(-cospi[32], bf0[42], cospi[32], bf0[53], cos_bit); bf1[43] = half_btf(-cospi[32], bf0[43], cospi[32], bf0[52], cos_bit); bf1[44] = half_btf(-cospi[32], bf0[44], cospi[32], bf0[51], cos_bit); bf1[45] = half_btf(-cospi[32], bf0[45], cospi[32], bf0[50], cos_bit); bf1[46] = half_btf(-cospi[32], bf0[46], cospi[32], bf0[49], cos_bit); bf1[47] = half_btf(-cospi[32], bf0[47], cospi[32], bf0[48], cos_bit); bf1[48] = half_btf(cospi[32], bf0[48], cospi[32], bf0[47], cos_bit); bf1[49] = half_btf(cospi[32], bf0[49], cospi[32], bf0[46], cos_bit); bf1[50] = half_btf(cospi[32], bf0[50], cospi[32], bf0[45], cos_bit); bf1[51] = half_btf(cospi[32], bf0[51], cospi[32], bf0[44], cos_bit); bf1[52] = half_btf(cospi[32], bf0[52], cospi[32], bf0[43], cos_bit); bf1[53] = half_btf(cospi[32], bf0[53], cospi[32], bf0[42], cos_bit); bf1[54] = half_btf(cospi[32], bf0[54], cospi[32], bf0[41], cos_bit); bf1[55] = half_btf(cospi[32], bf0[55], cospi[32], bf0[40], cos_bit); bf1[56] = bf0[56]; bf1[57] = bf0[57]; bf1[58] = bf0[58]; bf1[59] = bf0[59]; bf1[60] = bf0[60]; bf1[61] = bf0[61]; bf1[62] = bf0[62]; bf1[63] = bf0[63]; // stage 3 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[15]; bf1[1] = bf0[1] + bf0[14]; bf1[2] 
= bf0[2] + bf0[13]; bf1[3] = bf0[3] + bf0[12]; bf1[4] = bf0[4] + bf0[11]; bf1[5] = bf0[5] + bf0[10]; bf1[6] = bf0[6] + bf0[9]; bf1[7] = bf0[7] + bf0[8]; bf1[8] = -bf0[8] + bf0[7]; bf1[9] = -bf0[9] + bf0[6]; bf1[10] = -bf0[10] + bf0[5]; bf1[11] = -bf0[11] + bf0[4]; bf1[12] = -bf0[12] + bf0[3]; bf1[13] = -bf0[13] + bf0[2]; bf1[14] = -bf0[14] + bf0[1]; bf1[15] = -bf0[15] + bf0[0]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = bf0[18]; bf1[19] = bf0[19]; bf1[20] = half_btf(-cospi[32], bf0[20], cospi[32], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[32], bf0[21], cospi[32], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[32], bf0[22], cospi[32], bf0[25], cos_bit); bf1[23] = half_btf(-cospi[32], bf0[23], cospi[32], bf0[24], cos_bit); bf1[24] = half_btf(cospi[32], bf0[24], cospi[32], bf0[23], cos_bit); bf1[25] = half_btf(cospi[32], bf0[25], cospi[32], bf0[22], cos_bit); bf1[26] = half_btf(cospi[32], bf0[26], cospi[32], bf0[21], cos_bit); bf1[27] = half_btf(cospi[32], bf0[27], cospi[32], bf0[20], cos_bit); bf1[28] = bf0[28]; bf1[29] = bf0[29]; bf1[30] = bf0[30]; bf1[31] = bf0[31]; bf1[32] = bf0[32] + bf0[47]; bf1[33] = bf0[33] + bf0[46]; bf1[34] = bf0[34] + bf0[45]; bf1[35] = bf0[35] + bf0[44]; bf1[36] = bf0[36] + bf0[43]; bf1[37] = bf0[37] + bf0[42]; bf1[38] = bf0[38] + bf0[41]; bf1[39] = bf0[39] + bf0[40]; bf1[40] = -bf0[40] + bf0[39]; bf1[41] = -bf0[41] + bf0[38]; bf1[42] = -bf0[42] + bf0[37]; bf1[43] = -bf0[43] + bf0[36]; bf1[44] = -bf0[44] + bf0[35]; bf1[45] = -bf0[45] + bf0[34]; bf1[46] = -bf0[46] + bf0[33]; bf1[47] = -bf0[47] + bf0[32]; bf1[48] = -bf0[48] + bf0[63]; bf1[49] = -bf0[49] + bf0[62]; bf1[50] = -bf0[50] + bf0[61]; bf1[51] = -bf0[51] + bf0[60]; bf1[52] = -bf0[52] + bf0[59]; bf1[53] = -bf0[53] + bf0[58]; bf1[54] = -bf0[54] + bf0[57]; bf1[55] = -bf0[55] + bf0[56]; bf1[56] = bf0[56] + bf0[55]; bf1[57] = bf0[57] + bf0[54]; bf1[58] = bf0[58] + bf0[53]; bf1[59] = bf0[59] + bf0[52]; bf1[60] = bf0[60] + bf0[51]; bf1[61] = bf0[61] + bf0[50]; bf1[62] = bf0[62] + bf0[49]; bf1[63] = bf0[63] + bf0[48]; // stage 4 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0] + bf0[7]; bf1[1] = bf0[1] + bf0[6]; bf1[2] = bf0[2] + bf0[5]; bf1[3] = bf0[3] + bf0[4]; bf1[4] = -bf0[4] + bf0[3]; bf1[5] = -bf0[5] + bf0[2]; bf1[6] = -bf0[6] + bf0[1]; bf1[7] = -bf0[7] + bf0[0]; bf1[8] = bf0[8]; bf1[9] = bf0[9]; bf1[10] = half_btf(-cospi[32], bf0[10], cospi[32], bf0[13], cos_bit); bf1[11] = half_btf(-cospi[32], bf0[11], cospi[32], bf0[12], cos_bit); bf1[12] = half_btf(cospi[32], bf0[12], cospi[32], bf0[11], cos_bit); bf1[13] = half_btf(cospi[32], bf0[13], cospi[32], bf0[10], cos_bit); bf1[14] = bf0[14]; bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[23]; bf1[17] = bf0[17] + bf0[22]; bf1[18] = bf0[18] + bf0[21]; bf1[19] = bf0[19] + bf0[20]; bf1[20] = -bf0[20] + bf0[19]; bf1[21] = -bf0[21] + bf0[18]; bf1[22] = -bf0[22] + bf0[17]; bf1[23] = -bf0[23] + bf0[16]; bf1[24] = -bf0[24] + bf0[31]; bf1[25] = -bf0[25] + bf0[30]; bf1[26] = -bf0[26] + bf0[29]; bf1[27] = -bf0[27] + bf0[28]; bf1[28] = bf0[28] + bf0[27]; bf1[29] = bf0[29] + bf0[26]; bf1[30] = bf0[30] + bf0[25]; bf1[31] = bf0[31] + bf0[24]; bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = bf0[34]; bf1[35] = bf0[35]; bf1[36] = half_btf(-cospi[16], bf0[36], cospi[48], bf0[59], cos_bit); bf1[37] = half_btf(-cospi[16], bf0[37], cospi[48], bf0[58], cos_bit); bf1[38] = half_btf(-cospi[16], bf0[38], cospi[48], bf0[57], cos_bit); bf1[39] = half_btf(-cospi[16], bf0[39], cospi[48], bf0[56], cos_bit); bf1[40] = half_btf(-cospi[48], bf0[40], -cospi[16], bf0[55], 
cos_bit); bf1[41] = half_btf(-cospi[48], bf0[41], -cospi[16], bf0[54], cos_bit); bf1[42] = half_btf(-cospi[48], bf0[42], -cospi[16], bf0[53], cos_bit); bf1[43] = half_btf(-cospi[48], bf0[43], -cospi[16], bf0[52], cos_bit); bf1[44] = bf0[44]; bf1[45] = bf0[45]; bf1[46] = bf0[46]; bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = bf0[49]; bf1[50] = bf0[50]; bf1[51] = bf0[51]; bf1[52] = half_btf(cospi[48], bf0[52], -cospi[16], bf0[43], cos_bit); bf1[53] = half_btf(cospi[48], bf0[53], -cospi[16], bf0[42], cos_bit); bf1[54] = half_btf(cospi[48], bf0[54], -cospi[16], bf0[41], cos_bit); bf1[55] = half_btf(cospi[48], bf0[55], -cospi[16], bf0[40], cos_bit); bf1[56] = half_btf(cospi[16], bf0[56], cospi[48], bf0[39], cos_bit); bf1[57] = half_btf(cospi[16], bf0[57], cospi[48], bf0[38], cos_bit); bf1[58] = half_btf(cospi[16], bf0[58], cospi[48], bf0[37], cos_bit); bf1[59] = half_btf(cospi[16], bf0[59], cospi[48], bf0[36], cos_bit); bf1[60] = bf0[60]; bf1[61] = bf0[61]; bf1[62] = bf0[62]; bf1[63] = bf0[63]; // stage 5 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0] + bf0[3]; bf1[1] = bf0[1] + bf0[2]; bf1[4] = bf0[4]; bf1[5] = half_btf(-cospi[32], bf0[5], cospi[32], bf0[6], cos_bit); bf1[6] = half_btf(cospi[32], bf0[6], cospi[32], bf0[5], cos_bit); bf1[7] = bf0[7]; bf1[8] = bf0[8] + bf0[11]; bf1[9] = bf0[9] + bf0[10]; bf1[10] = -bf0[10] + bf0[9]; bf1[11] = -bf0[11] + bf0[8]; bf1[12] = -bf0[12] + bf0[15]; bf1[13] = -bf0[13] + bf0[14]; bf1[14] = bf0[14] + bf0[13]; bf1[15] = bf0[15] + bf0[12]; bf1[16] = bf0[16]; bf1[17] = bf0[17]; bf1[18] = half_btf(-cospi[16], bf0[18], cospi[48], bf0[29], cos_bit); bf1[19] = half_btf(-cospi[16], bf0[19], cospi[48], bf0[28], cos_bit); bf1[20] = half_btf(-cospi[48], bf0[20], -cospi[16], bf0[27], cos_bit); bf1[21] = half_btf(-cospi[48], bf0[21], -cospi[16], bf0[26], cos_bit); bf1[22] = bf0[22]; bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = bf0[25]; bf1[26] = half_btf(cospi[48], bf0[26], -cospi[16], bf0[21], cos_bit); bf1[27] = half_btf(cospi[48], bf0[27], -cospi[16], bf0[20], cos_bit); bf1[28] = half_btf(cospi[16], bf0[28], cospi[48], bf0[19], cos_bit); bf1[29] = half_btf(cospi[16], bf0[29], cospi[48], bf0[18], cos_bit); bf1[30] = bf0[30]; bf1[31] = bf0[31]; bf1[32] = bf0[32] + bf0[39]; bf1[33] = bf0[33] + bf0[38]; bf1[34] = bf0[34] + bf0[37]; bf1[35] = bf0[35] + bf0[36]; bf1[36] = -bf0[36] + bf0[35]; bf1[37] = -bf0[37] + bf0[34]; bf1[38] = -bf0[38] + bf0[33]; bf1[39] = -bf0[39] + bf0[32]; bf1[40] = -bf0[40] + bf0[47]; bf1[41] = -bf0[41] + bf0[46]; bf1[42] = -bf0[42] + bf0[45]; bf1[43] = -bf0[43] + bf0[44]; bf1[44] = bf0[44] + bf0[43]; bf1[45] = bf0[45] + bf0[42]; bf1[46] = bf0[46] + bf0[41]; bf1[47] = bf0[47] + bf0[40]; bf1[48] = bf0[48] + bf0[55]; bf1[49] = bf0[49] + bf0[54]; bf1[50] = bf0[50] + bf0[53]; bf1[51] = bf0[51] + bf0[52]; bf1[52] = -bf0[52] + bf0[51]; bf1[53] = -bf0[53] + bf0[50]; bf1[54] = -bf0[54] + bf0[49]; bf1[55] = -bf0[55] + bf0[48]; bf1[56] = -bf0[56] + bf0[63]; bf1[57] = -bf0[57] + bf0[62]; bf1[58] = -bf0[58] + bf0[61]; bf1[59] = -bf0[59] + bf0[60]; bf1[60] = bf0[60] + bf0[59]; bf1[61] = bf0[61] + bf0[58]; bf1[62] = bf0[62] + bf0[57]; bf1[63] = bf0[63] + bf0[56]; // stage 6 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = half_btf(cospi[32], bf0[0], cospi[32], bf0[1], cos_bit); bf1[4] = bf0[4] + bf0[5]; bf1[7] = bf0[7] + bf0[6]; bf1[8] = bf0[8]; bf1[9] = half_btf(-cospi[16], bf0[9], cospi[48], bf0[14], cos_bit); bf1[10] = half_btf(-cospi[48], bf0[10], -cospi[16], bf0[13], cos_bit); bf1[11] = bf0[11]; bf1[12] = bf0[12]; 
bf1[13] = half_btf(cospi[48], bf0[13], -cospi[16], bf0[10], cos_bit); bf1[14] = half_btf(cospi[16], bf0[14], cospi[48], bf0[9], cos_bit); bf1[15] = bf0[15]; bf1[16] = bf0[16] + bf0[19]; bf1[17] = bf0[17] + bf0[18]; bf1[18] = -bf0[18] + bf0[17]; bf1[19] = -bf0[19] + bf0[16]; bf1[20] = -bf0[20] + bf0[23]; bf1[21] = -bf0[21] + bf0[22]; bf1[22] = bf0[22] + bf0[21]; bf1[23] = bf0[23] + bf0[20]; bf1[24] = bf0[24] + bf0[27]; bf1[25] = bf0[25] + bf0[26]; bf1[26] = -bf0[26] + bf0[25]; bf1[27] = -bf0[27] + bf0[24]; bf1[28] = -bf0[28] + bf0[31]; bf1[29] = -bf0[29] + bf0[30]; bf1[30] = bf0[30] + bf0[29]; bf1[31] = bf0[31] + bf0[28]; bf1[32] = bf0[32]; bf1[33] = bf0[33]; bf1[34] = half_btf(-cospi[8], bf0[34], cospi[56], bf0[61], cos_bit); bf1[35] = half_btf(-cospi[8], bf0[35], cospi[56], bf0[60], cos_bit); bf1[36] = half_btf(-cospi[56], bf0[36], -cospi[8], bf0[59], cos_bit); bf1[37] = half_btf(-cospi[56], bf0[37], -cospi[8], bf0[58], cos_bit); bf1[38] = bf0[38]; bf1[39] = bf0[39]; bf1[40] = bf0[40]; bf1[41] = bf0[41]; bf1[42] = half_btf(-cospi[40], bf0[42], cospi[24], bf0[53], cos_bit); bf1[43] = half_btf(-cospi[40], bf0[43], cospi[24], bf0[52], cos_bit); bf1[44] = half_btf(-cospi[24], bf0[44], -cospi[40], bf0[51], cos_bit); bf1[45] = half_btf(-cospi[24], bf0[45], -cospi[40], bf0[50], cos_bit); bf1[46] = bf0[46]; bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = bf0[49]; bf1[50] = half_btf(cospi[24], bf0[50], -cospi[40], bf0[45], cos_bit); bf1[51] = half_btf(cospi[24], bf0[51], -cospi[40], bf0[44], cos_bit); bf1[52] = half_btf(cospi[40], bf0[52], cospi[24], bf0[43], cos_bit); bf1[53] = half_btf(cospi[40], bf0[53], cospi[24], bf0[42], cos_bit); bf1[54] = bf0[54]; bf1[55] = bf0[55]; bf1[56] = bf0[56]; bf1[57] = bf0[57]; bf1[58] = half_btf(cospi[56], bf0[58], -cospi[8], bf0[37], cos_bit); bf1[59] = half_btf(cospi[56], bf0[59], -cospi[8], bf0[36], cos_bit); bf1[60] = half_btf(cospi[8], bf0[60], cospi[56], bf0[35], cos_bit); bf1[61] = half_btf(cospi[8], bf0[61], cospi[56], bf0[34], cos_bit); bf1[62] = bf0[62]; bf1[63] = bf0[63]; // stage 7 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[4] = half_btf(cospi[56], bf0[4], cospi[8], bf0[7], cos_bit); bf1[8] = bf0[8] + bf0[9]; bf1[11] = bf0[11] + bf0[10]; bf1[12] = bf0[12] + bf0[13]; bf1[15] = bf0[15] + bf0[14]; bf1[16] = bf0[16]; bf1[17] = half_btf(-cospi[8], bf0[17], cospi[56], bf0[30], cos_bit); bf1[18] = half_btf(-cospi[56], bf0[18], -cospi[8], bf0[29], cos_bit); bf1[19] = bf0[19]; bf1[20] = bf0[20]; bf1[21] = half_btf(-cospi[40], bf0[21], cospi[24], bf0[26], cos_bit); bf1[22] = half_btf(-cospi[24], bf0[22], -cospi[40], bf0[25], cos_bit); bf1[23] = bf0[23]; bf1[24] = bf0[24]; bf1[25] = half_btf(cospi[24], bf0[25], -cospi[40], bf0[22], cos_bit); bf1[26] = half_btf(cospi[40], bf0[26], cospi[24], bf0[21], cos_bit); bf1[27] = bf0[27]; bf1[28] = bf0[28]; bf1[29] = half_btf(cospi[56], bf0[29], -cospi[8], bf0[18], cos_bit); bf1[30] = half_btf(cospi[8], bf0[30], cospi[56], bf0[17], cos_bit); bf1[31] = bf0[31]; bf1[32] = bf0[32] + bf0[35]; bf1[33] = bf0[33] + bf0[34]; bf1[34] = -bf0[34] + bf0[33]; bf1[35] = -bf0[35] + bf0[32]; bf1[36] = -bf0[36] + bf0[39]; bf1[37] = -bf0[37] + bf0[38]; bf1[38] = bf0[38] + bf0[37]; bf1[39] = bf0[39] + bf0[36]; bf1[40] = bf0[40] + bf0[43]; bf1[41] = bf0[41] + bf0[42]; bf1[42] = -bf0[42] + bf0[41]; bf1[43] = -bf0[43] + bf0[40]; bf1[44] = -bf0[44] + bf0[47]; bf1[45] = -bf0[45] + bf0[46]; bf1[46] = bf0[46] + bf0[45]; bf1[47] = bf0[47] + bf0[44]; bf1[48] = bf0[48] + bf0[51]; bf1[49] = bf0[49] + bf0[50]; bf1[50] = 
-bf0[50] + bf0[49]; bf1[51] = -bf0[51] + bf0[48]; bf1[52] = -bf0[52] + bf0[55]; bf1[53] = -bf0[53] + bf0[54]; bf1[54] = bf0[54] + bf0[53]; bf1[55] = bf0[55] + bf0[52]; bf1[56] = bf0[56] + bf0[59]; bf1[57] = bf0[57] + bf0[58]; bf1[58] = -bf0[58] + bf0[57]; bf1[59] = -bf0[59] + bf0[56]; bf1[60] = -bf0[60] + bf0[63]; bf1[61] = -bf0[61] + bf0[62]; bf1[62] = bf0[62] + bf0[61]; bf1[63] = bf0[63] + bf0[60]; // stage 8 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[4] = bf0[4]; bf1[8] = half_btf(cospi[60], bf0[8], cospi[4], bf0[15], cos_bit); bf1[12] = half_btf(cospi[12], bf0[12], -cospi[52], bf0[11], cos_bit); bf1[16] = bf0[16] + bf0[17]; bf1[19] = bf0[19] + bf0[18]; bf1[20] = bf0[20] + bf0[21]; bf1[23] = bf0[23] + bf0[22]; bf1[24] = bf0[24] + bf0[25]; bf1[27] = bf0[27] + bf0[26]; bf1[28] = bf0[28] + bf0[29]; bf1[31] = bf0[31] + bf0[30]; bf1[32] = bf0[32]; bf1[33] = half_btf(-cospi[4], bf0[33], cospi[60], bf0[62], cos_bit); bf1[34] = half_btf(-cospi[60], bf0[34], -cospi[4], bf0[61], cos_bit); bf1[35] = bf0[35]; bf1[36] = bf0[36]; bf1[37] = half_btf(-cospi[36], bf0[37], cospi[28], bf0[58], cos_bit); bf1[38] = half_btf(-cospi[28], bf0[38], -cospi[36], bf0[57], cos_bit); bf1[39] = bf0[39]; bf1[40] = bf0[40]; bf1[41] = half_btf(-cospi[20], bf0[41], cospi[44], bf0[54], cos_bit); bf1[42] = half_btf(-cospi[44], bf0[42], -cospi[20], bf0[53], cos_bit); bf1[43] = bf0[43]; bf1[44] = bf0[44]; bf1[45] = half_btf(-cospi[52], bf0[45], cospi[12], bf0[50], cos_bit); bf1[46] = half_btf(-cospi[12], bf0[46], -cospi[52], bf0[49], cos_bit); bf1[47] = bf0[47]; bf1[48] = bf0[48]; bf1[49] = half_btf(cospi[12], bf0[49], -cospi[52], bf0[46], cos_bit); bf1[50] = half_btf(cospi[52], bf0[50], cospi[12], bf0[45], cos_bit); bf1[51] = bf0[51]; bf1[52] = bf0[52]; bf1[53] = half_btf(cospi[44], bf0[53], -cospi[20], bf0[42], cos_bit); bf1[54] = half_btf(cospi[20], bf0[54], cospi[44], bf0[41], cos_bit); bf1[55] = bf0[55]; bf1[56] = bf0[56]; bf1[57] = half_btf(cospi[28], bf0[57], -cospi[36], bf0[38], cos_bit); bf1[58] = half_btf(cospi[36], bf0[58], cospi[28], bf0[37], cos_bit); bf1[59] = bf0[59]; bf1[60] = bf0[60]; bf1[61] = half_btf(cospi[60], bf0[61], -cospi[4], bf0[34], cos_bit); bf1[62] = half_btf(cospi[4], bf0[62], cospi[60], bf0[33], cos_bit); bf1[63] = bf0[63]; // stage 9 cospi = cospi_arr(cos_bit); bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[4] = bf0[4]; bf1[8] = bf0[8]; bf1[12] = bf0[12]; bf1[16] = half_btf(cospi[62], bf0[16], cospi[2], bf0[31], cos_bit); bf1[20] = half_btf(cospi[54], bf0[20], cospi[10], bf0[27], cos_bit); bf1[24] = half_btf(cospi[6], bf0[24], -cospi[58], bf0[23], cos_bit); bf1[28] = half_btf(cospi[14], bf0[28], -cospi[50], bf0[19], cos_bit); bf1[32] = bf0[32] + bf0[33]; bf1[35] = bf0[35] + bf0[34]; bf1[36] = bf0[36] + bf0[37]; bf1[39] = bf0[39] + bf0[38]; bf1[40] = bf0[40] + bf0[41]; bf1[43] = bf0[43] + bf0[42]; bf1[44] = bf0[44] + bf0[45]; bf1[47] = bf0[47] + bf0[46]; bf1[48] = bf0[48] + bf0[49]; bf1[51] = bf0[51] + bf0[50]; bf1[52] = bf0[52] + bf0[53]; bf1[55] = bf0[55] + bf0[54]; bf1[56] = bf0[56] + bf0[57]; bf1[59] = bf0[59] + bf0[58]; bf1[60] = bf0[60] + bf0[61]; bf1[63] = bf0[63] + bf0[62]; // stage 10 cospi = cospi_arr(cos_bit); bf0 = output; bf1 = step; bf1[0] = bf0[0]; bf1[4] = bf0[4]; bf1[8] = bf0[8]; bf1[12] = bf0[12]; bf1[16] = bf0[16]; bf1[20] = bf0[20]; bf1[24] = bf0[24]; bf1[28] = bf0[28]; bf1[32] = half_btf(cospi[63], bf0[32], cospi[1], bf0[63], cos_bit); bf1[36] = half_btf(cospi[55], bf0[36], cospi[9], bf0[59], cos_bit); bf1[40] = half_btf(cospi[59], bf0[40], 
cospi[5], bf0[55], cos_bit); bf1[44] = half_btf(cospi[51], bf0[44], cospi[13], bf0[51], cos_bit); bf1[48] = half_btf(cospi[3], bf0[48], -cospi[61], bf0[47], cos_bit); bf1[52] = half_btf(cospi[11], bf0[52], -cospi[53], bf0[43], cos_bit); bf1[56] = half_btf(cospi[7], bf0[56], -cospi[57], bf0[39], cos_bit); bf1[60] = half_btf(cospi[15], bf0[60], -cospi[49], bf0[35], cos_bit); // stage 11 bf0 = step; bf1 = output; bf1[0] = bf0[0]; bf1[1] = bf0[32]; bf1[2] = bf0[16]; bf1[3] = bf0[48]; bf1[4] = bf0[8]; bf1[5] = bf0[40]; bf1[6] = bf0[24]; bf1[7] = bf0[56]; bf1[8] = bf0[4]; bf1[9] = bf0[36]; bf1[10] = bf0[20]; bf1[11] = bf0[52]; bf1[12] = bf0[12]; bf1[13] = bf0[44]; bf1[14] = bf0[28]; bf1[15] = bf0[60]; } void av1_fidentity64_N4_c(const int32_t *input, int32_t *output, int8_t cos_bit, const int8_t *stage_range) { (void)stage_range; (void)cos_bit; for (int32_t i = 0; i < 16; ++i) output[i] = round_shift((int64_t)input[i] * 4 * new_sqrt2, new_sqrt2_bits); assert(stage_range[0] + new_sqrt2_bits <= 32); } static INLINE TxfmFunc fwd_txfm_type_to_func_N4(TxfmType txfmtype) { switch (txfmtype) { case TXFM_TYPE_DCT4: return svt_av1_fdct4_new_N4; case TXFM_TYPE_DCT8: return svt_av1_fdct8_new_N4; case TXFM_TYPE_DCT16: return svt_av1_fdct16_new_N4; case TXFM_TYPE_DCT32: return svt_av1_fdct32_new_N4; case TXFM_TYPE_DCT64: return svt_av1_fdct64_new_N4; case TXFM_TYPE_ADST4: return svt_av1_fadst4_new_N4; case TXFM_TYPE_ADST8: return svt_av1_fadst8_new_N4; case TXFM_TYPE_ADST16: return svt_av1_fadst16_new_N4; case TXFM_TYPE_ADST32: return av1_fadst32_new; case TXFM_TYPE_IDENTITY4: return svt_av1_fidentity4_N4_c; case TXFM_TYPE_IDENTITY8: return svt_av1_fidentity8_N4_c; case TXFM_TYPE_IDENTITY16: return svt_av1_fidentity16_N4_c; case TXFM_TYPE_IDENTITY32: return svt_av1_fidentity32_N4_c; case TXFM_TYPE_IDENTITY64: return av1_fidentity64_N4_c; default: assert(0); return NULL; } } static INLINE void av1_tranform_two_d_core_N4_c(int16_t *input, uint32_t input_stride, int32_t *output, const Txfm2dFlipCfg *cfg, int32_t *buf, uint8_t bit_depth) { int32_t c, r; // Note when assigning txfm_size_col, we use the txfm_size from the // row configuration and vice versa. This is intentionally done to // accurately perform rectangular transforms. When the transform is // rectangular, the number of columns will be the same as the // txfm_size stored in the row cfg struct. It will make no difference // for square transforms. const int32_t txfm_size_col = tx_size_wide[cfg->tx_size]; const int32_t txfm_size_row = tx_size_high[cfg->tx_size]; // Take the shift from the larger dimension in the rectangular case. 
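// Added note (not from the upstream sources): the N4 ("quarter") butterfly
// variants above only materialize the first quarter of their outputs, so
// after the column pass just txfm_size_row / 4 rows carry data. The row
// pass below therefore visits only txfm_size_row / 4 rows, keeps
// txfm_size_col / 4 coefficients per row, and the final loop zeroes
// everything outside that top-left quarter block.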
const int8_t *shift = cfg->shift; const int32_t rect_type = get_rect_tx_log_ratio(txfm_size_col, txfm_size_row); int8_t stage_range_col[MAX_TXFM_STAGE_NUM]; int8_t stage_range_row[MAX_TXFM_STAGE_NUM]; assert(cfg->stage_num_col <= MAX_TXFM_STAGE_NUM); assert(cfg->stage_num_row <= MAX_TXFM_STAGE_NUM); svt_av1_gen_fwd_stage_range(stage_range_col, stage_range_row, cfg, bit_depth); const int8_t cos_bit_col = cfg->cos_bit_col; const int8_t cos_bit_row = cfg->cos_bit_row; const TxfmFunc txfm_func_col = fwd_txfm_type_to_func_N4(cfg->txfm_type_col); const TxfmFunc txfm_func_row = fwd_txfm_type_to_func_N4(cfg->txfm_type_row); ASSERT(txfm_func_col != NULL); ASSERT(txfm_func_row != NULL); // use output buffer as temp buffer int32_t *temp_in = output; int32_t *temp_out = output + txfm_size_row; // Columns for (c = 0; c < txfm_size_col; ++c) { if (cfg->ud_flip == 0) for (r = 0; r < txfm_size_row; ++r) temp_in[r] = input[r * input_stride + c]; else { for (r = 0; r < txfm_size_row; ++r) // flip upside down temp_in[r] = input[(txfm_size_row - r - 1) * input_stride + c]; } svt_av1_round_shift_array_c( temp_in, txfm_size_row, -shift[0]); // NM svt_av1_round_shift_array_c txfm_func_col(temp_in, temp_out, cos_bit_col, stage_range_col); svt_av1_round_shift_array_c( temp_out, txfm_size_row / 4, -shift[1]); // NM svt_av1_round_shift_array_c if (cfg->lr_flip == 0) { for (r = 0; r < txfm_size_row; ++r) buf[r * txfm_size_col + c] = temp_out[r]; } else { for (r = 0; r < txfm_size_row; ++r) // flip from left to right buf[r * txfm_size_col + (txfm_size_col - c - 1)] = temp_out[r]; } } // Rows for (r = 0; r < txfm_size_row / 4; ++r) { txfm_func_row( buf + r * txfm_size_col, output + r * txfm_size_col, cos_bit_row, stage_range_row); svt_av1_round_shift_array_c(output + r * txfm_size_col, txfm_size_col / 4, -shift[2]); if (abs(rect_type) == 1) { // Multiply everything by Sqrt2 if the transform is rectangular and the // size difference is a factor of 2. 
for (c = 0; c < txfm_size_col / 4; ++c) { output[r * txfm_size_col + c] = round_shift( (int64_t)output[r * txfm_size_col + c] * new_sqrt2, new_sqrt2_bits); } } } for (int i = 0; i < (txfm_size_col * txfm_size_row); i++) if (i % txfm_size_col >= (txfm_size_col >> 2) || i / txfm_size_col >= (txfm_size_row >> 2)) output[i] = 0; } void av1_transform_two_d_64x64_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 64]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_64X64, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_32x32_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X32, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_16x16_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X16, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_8x8_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X8, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void av1_transform_two_d_4x4_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_4X4, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_64x32_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_64X32, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x64_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 64]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X64, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_64x16_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[64 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_64X16, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x64_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 64]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X64, &cfg); av1_tranform_two_d_core_N4_c( input, 
input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x16_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X16, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x32_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X32, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x8_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X8, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x16_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X16, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_32x8_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[32 * 8]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_32X8, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x32_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 32]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X32, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_16x4_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[16 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_16X4, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_4x16_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 16]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_4X16, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_8x4_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[8 * 4]; Txfm2dFlipCfg cfg; av1_transform_config(transform_type, TX_8X4, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } void svt_av1_fwd_txfm2d_4x8_N4_c(int16_t *input, int32_t *output, uint32_t input_stride, TxType transform_type, uint8_t bit_depth) { int32_t intermediate_transform_buffer[4 * 8]; Txfm2dFlipCfg cfg; 
av1_transform_config(transform_type, TX_4X8, &cfg); av1_tranform_two_d_core_N4_c( input, input_stride, output, &cfg, intermediate_transform_buffer, bit_depth); } #endif /*FEATURE_PARTIAL_FREQUENCY*/ /********************************************************************* * Map Chroma QP *********************************************************************/ uint8_t map_chroma_qp(uint8_t qp) { return qp; }
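/*
 * A minimal sketch (not part of the library) of how the N4 entry points
 * above could be exercised. Assumptions: FEATURE_PARTIAL_FREQUENCY is
 * enabled so the N4 symbols exist, DCT_DCT from the usual AV1 TxType enum
 * is in scope, and the input is 8-bit. It checks the property implemented
 * in av1_tranform_two_d_core_N4_c: every coefficient outside the top-left
 * quarter block is zeroed.
 */
static int check_n4_zeroing_16x16(void) {
    int16_t residual[16 * 16];
    int32_t coeffs[16 * 16];
    int r, c, i;
    for (i = 0; i < 16 * 16; ++i)
        residual[i] = (int16_t)((i * 7) % 255 - 127); /* arbitrary test data */
    av1_transform_two_d_16x16_N4_c(residual, coeffs, 16 /* stride */, DCT_DCT, 8);
    /* Only the top-left 4x4 (txfm_size >> 2 in each dimension) may be non-zero. */
    for (r = 0; r < 16; ++r)
        for (c = 0; c < 16; ++c)
            if ((r >= 4 || c >= 4) && coeffs[r * 16 + c] != 0)
                return 0; /* unexpected non-zero outside the kept quarter */
    return 1;
}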
632002.c
/* compress.c -- compress a memory buffer * Copyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #define ZLIB_INTERNAL #include "zlib.h" /* =========================================================================== Compresses the source buffer into the destination buffer. The level parameter has the same meaning as in deflateInit. sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least 0.1% larger than sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the compressed buffer. compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, Z_STREAM_ERROR if the level parameter is invalid. */ int ZEXPORT compress2(dest, destLen, source, sourceLen, level) Bytef* dest; uLongf* destLen; const Bytef* source; uLong sourceLen; int level; { z_stream stream; int err; const uInt max = (uInt)-1; uLong left; left = *destLen; *destLen = 0; stream.zalloc = (alloc_func)0; stream.zfree = (free_func)0; stream.opaque = (voidpf)0; err = deflateInit(&stream, level); if (err != Z_OK) return err; stream.next_out = dest; stream.avail_out = 0; stream.next_in = (z_const Bytef *)source; stream.avail_in = 0; do { if (stream.avail_out == 0) { stream.avail_out = left > (uLong)max ? max : (uInt)left; left -= stream.avail_out; } if (stream.avail_in == 0) { stream.avail_in = sourceLen > (uLong)max ? max : (uInt)sourceLen; sourceLen -= stream.avail_in; } err = deflate(&stream, sourceLen ? Z_NO_FLUSH : Z_FINISH); } while (err == Z_OK); *destLen = stream.total_out; deflateEnd(&stream); return err == Z_STREAM_END ? Z_OK : err; } /* =========================================================================== */ int ZEXPORT compress(dest, destLen, source, sourceLen) Bytef* dest; uLongf* destLen; const Bytef* source; uLong sourceLen; { return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION); } /* =========================================================================== If the default memLevel or windowBits for deflateInit() is changed, then this function needs to be updated. */ uLong ZEXPORT compressBound(sourceLen) uLong sourceLen; { return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + (sourceLen >> 25) + 13; }
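/*
 * A short caller sketch (not part of zlib) showing the intended pairing of
 * compress2() with compressBound(): sizing the destination buffer with
 * compressBound() guarantees Z_BUF_ERROR cannot occur for a single call.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static int demo_compress(void)
{
    const Bytef src[] = "hello hello hello hello hello hello";
    uLong sourceLen;
    uLongf destLen;
    Bytef *dest;
    int err;

    sourceLen = (uLong)strlen((const char *)src) + 1;
    destLen = compressBound(sourceLen);      /* always sufficient */
    dest = (Bytef *)malloc(destLen);
    if (dest == NULL) return Z_MEM_ERROR;

    err = compress2(dest, &destLen, src, sourceLen, Z_BEST_COMPRESSION);
    if (err == Z_OK)
        printf("compressed %lu bytes into %lu bytes\n",
               (unsigned long)sourceLen, (unsigned long)destLen);
    free(dest);
    return err;
}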
1000254.c
/* * Generated by asn1c-0.9.29 (http://lionet.info/asn1c) * From ASN.1 module "NR-RRC-Definitions" * found in "/home/labadmin/hlal/rrc_15.3_asn.asn1" * `asn1c -D ./15_3_rrc/ -fcompound-names -fno-include-deps -findirect-choice -gen-PER -no-gen-example` */ #include "SRB-ToAddModList.h" #include "SRB-ToAddMod.h" static asn_oer_constraints_t asn_OER_type_SRB_ToAddModList_constr_1 CC_NOTUSED = { { 0, 0 }, -1 /* (SIZE(1..2)) */}; asn_per_constraints_t asn_PER_type_SRB_ToAddModList_constr_1 CC_NOTUSED = { { APC_UNCONSTRAINED, -1, -1, 0, 0 }, { APC_CONSTRAINED, 1, 1, 1, 2 } /* (SIZE(1..2)) */, 0, 0 /* No PER value map */ }; asn_TYPE_member_t asn_MBR_SRB_ToAddModList_1[] = { { ATF_POINTER, 0, 0, (ASN_TAG_CLASS_UNIVERSAL | (16 << 2)), 0, &asn_DEF_SRB_ToAddMod, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "" }, }; static const ber_tlv_tag_t asn_DEF_SRB_ToAddModList_tags_1[] = { (ASN_TAG_CLASS_UNIVERSAL | (16 << 2)) }; asn_SET_OF_specifics_t asn_SPC_SRB_ToAddModList_specs_1 = { sizeof(struct SRB_ToAddModList), offsetof(struct SRB_ToAddModList, _asn_ctx), 0, /* XER encoding is XMLDelimitedItemList */ }; asn_TYPE_descriptor_t asn_DEF_SRB_ToAddModList = { "SRB-ToAddModList", "SRB-ToAddModList", &asn_OP_SEQUENCE_OF, asn_DEF_SRB_ToAddModList_tags_1, sizeof(asn_DEF_SRB_ToAddModList_tags_1) /sizeof(asn_DEF_SRB_ToAddModList_tags_1[0]), /* 1 */ asn_DEF_SRB_ToAddModList_tags_1, /* Same as above */ sizeof(asn_DEF_SRB_ToAddModList_tags_1) /sizeof(asn_DEF_SRB_ToAddModList_tags_1[0]), /* 1 */ { &asn_OER_type_SRB_ToAddModList_constr_1, &asn_PER_type_SRB_ToAddModList_constr_1, SEQUENCE_OF_constraint }, asn_MBR_SRB_ToAddModList_1, 1, /* Single element */ &asn_SPC_SRB_ToAddModList_specs_1 /* Additional specs */ };
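/*
 * A hedged usage sketch (not generated code): building a one-element
 * SRB-ToAddModList with asn1c's sequence-of helper and dumping it as XER.
 * The srb_Identity member name is an assumption read off the ASN.1 module;
 * ASN_SEQUENCE_ADD() and xer_fprint() are standard asn1c runtime helpers
 * pulled in through the generated headers above.
 */
#include <stdio.h>
#include <stdlib.h>

static int build_srb_list(SRB_ToAddModList_t *out) {
    struct SRB_ToAddMod *srb = calloc(1, sizeof(*srb));
    if (srb == NULL) return -1;
    srb->srb_Identity = 1; /* SRB1; the SIZE(1..2) constraint bounds the list length, not this value */
    if (ASN_SEQUENCE_ADD(&out->list, srb) != 0) { /* appends; list takes ownership */
        free(srb);
        return -1;
    }
    xer_fprint(stdout, &asn_DEF_SRB_ToAddModList, out); /* debug dump */
    return 0;
}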
294068.c
/* * Copyright (c) 2004-2007 The Trustees of Indiana University and Indiana * University Research and Technology * Corporation. All rights reserved. * Copyright (c) 2004-2006 The University of Tennessee and The University * of Tennessee Research Foundation. All rights * reserved. * Copyright (c) 2004-2005 High Performance Computing Center Stuttgart, * University of Stuttgart. All rights reserved. * Copyright (c) 2004-2005 The Regents of the University of California. * All rights reserved. * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2009 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2011 Los Alamos National Security, LLC. * All rights reserved. * $COPYRIGHT$ * * Additional copyrights may follow * * $HEADER$ */ /** * @file * * Warning: this is not for the faint of heart -- don't even bother * reading this source code if you don't have a strong understanding * of nested data structures and pointer math (remember that * associativity and order of C operations is *critical* in terms of * pointer math!). */ #include "ompi_config.h" #include <stdio.h> #ifdef HAVE_STRING_H #include <string.h> #endif #ifdef HAVE_SCHED_H #include <sched.h> #endif #include <sys/types.h> #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif /* HAVE_SYS_MMAN_H */ #ifdef HAVE_UNISTD_H #include <unistd.h> #endif /* HAVE_UNISTD_H */ #include "mpi.h" #include "opal_stdint.h" #include "opal/mca/maffinity/maffinity.h" #include "opal/mca/maffinity/base/base.h" #include "opal/util/os_path.h" #include "orte/util/proc_info.h" #include "orte/util/name_fns.h" #include "ompi/communicator/communicator.h" #include "ompi/group/group.h" #include "ompi/mca/coll/coll.h" #include "ompi/mca/coll/base/base.h" #include "ompi/proc/proc.h" #include "coll_sm.h" /* * Global variables */ uint32_t mca_coll_sm_one = 1; /* * Local functions */ static int sm_module_enable(mca_coll_base_module_t *module, struct ompi_communicator_t *comm); static bool have_local_peers(ompi_group_t *group, size_t size); static int bootstrap_comm(ompi_communicator_t *comm, mca_coll_sm_module_t *module); /* * Module constructor */ static void mca_coll_sm_module_construct(mca_coll_sm_module_t *module) { module->enabled = false; module->sm_comm_data = NULL; module->previous_reduce = NULL; module->previous_reduce_module = NULL; } /* * Module destructor */ static void mca_coll_sm_module_destruct(mca_coll_sm_module_t *module) { mca_coll_sm_comm_t *c = module->sm_comm_data; if (NULL != c) { /* Munmap the per-communicator shmem data segment */ if (NULL != c->mcb_mmap) { /* Ignore any errors -- what are we going to do about them? */ mca_common_sm_fini(c->mcb_mmap); } free(c); } /* It should always be non-NULL, but just in case */ if (NULL != module->previous_reduce_module) { OBJ_RELEASE(module->previous_reduce_module); } module->enabled = false; } OBJ_CLASS_INSTANCE(mca_coll_sm_module_t, mca_coll_base_module_t, mca_coll_sm_module_construct, mca_coll_sm_module_destruct); /* * Initial query function that is invoked during MPI_INIT, allowing * this component to disqualify itself if it doesn't support the * required level of thread support. This function is invoked exactly * once. */ int mca_coll_sm_init_query(bool enable_progress_threads, bool enable_mpi_threads) { ompi_proc_t *my_proc, **procs; size_t i, size; /* See if there are other procs in my job on this node. If not, then don't bother going any further. 
*/ if (NULL == (my_proc = ompi_proc_local()) || NULL == (procs = ompi_proc_all(&size))) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:init_query: weirdness on procs; disqualifying myself"); return OMPI_ERR_OUT_OF_RESOURCE; } if (size <= 1) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:init_query: comm size too small; disqualifying myself"); free(procs); return OMPI_ERR_NOT_AVAILABLE; } for (i = 0; i < size; ++i) { if (procs[i] != my_proc && procs[i]->proc_name.jobid == my_proc->proc_name.jobid && OPAL_PROC_ON_LOCAL_NODE(procs[i]->proc_flags)) { break; } } if (i >= size) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:init_query: no other local procs; disqualifying myself"); free(procs); return OMPI_ERR_NOT_AVAILABLE; } free(procs); /* Don't do much here because we don't really want to allocate any shared memory until this component is selected to be used. */ opal_output_verbose(10, mca_coll_base_output, "coll:sm:init_query: pick me! pick me!"); return OMPI_SUCCESS; } /* * Invoked when there's a new communicator that has been created. * Look at the communicator and decide which set of functions and * priority we want to return. */ mca_coll_base_module_t * mca_coll_sm_comm_query(struct ompi_communicator_t *comm, int *priority) { mca_coll_sm_module_t *sm_module; /* If we're intercomm, or if there's only one process in the communicator, or if not all of the processes in the communicator are on this node, then we don't want to run */ if (OMPI_COMM_IS_INTER(comm) || 1 == ompi_comm_size(comm) || !have_local_peers(comm->c_local_group, ompi_comm_size(comm))) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:comm_query (%d/%s): intercomm, comm is too small, or not all peers local; disqualifying myself", comm->c_contextid, comm->c_name); return NULL; } /* Get the priority level attached to this module. If priority is less * than or equal to 0, then the module is unavailable. */ *priority = mca_coll_sm_component.sm_priority; if (mca_coll_sm_component.sm_priority <= 0) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:comm_query (%d/%s): priority too low; disqualifying myself", comm->c_contextid, comm->c_name); return NULL; } /* All is good -- return a module */ sm_module = OBJ_NEW(mca_coll_sm_module_t); sm_module->super.coll_module_enable = sm_module_enable; sm_module->super.ft_event = mca_coll_sm_ft_event; sm_module->super.coll_allgather = NULL; sm_module->super.coll_allgatherv = NULL; sm_module->super.coll_allreduce = mca_coll_sm_allreduce_intra; sm_module->super.coll_alltoall = NULL; sm_module->super.coll_alltoallv = NULL; sm_module->super.coll_alltoallw = NULL; sm_module->super.coll_barrier = mca_coll_sm_barrier_intra; sm_module->super.coll_bcast = mca_coll_sm_bcast_intra; sm_module->super.coll_exscan = NULL; sm_module->super.coll_gather = NULL; sm_module->super.coll_gatherv = NULL; sm_module->super.coll_reduce = mca_coll_sm_reduce_intra; sm_module->super.coll_reduce_scatter = NULL; sm_module->super.coll_scan = NULL; sm_module->super.coll_scatter = NULL; sm_module->super.coll_scatterv = NULL; opal_output_verbose(10, mca_coll_base_output, "coll:sm:comm_query (%d/%s): pick me!
pick me!", comm->c_contextid, comm->c_name); return &(sm_module->super); } /* * Init module on the communicator */ static int sm_module_enable(mca_coll_base_module_t *module, struct ompi_communicator_t *comm) { if (NULL == comm->c_coll.coll_reduce || NULL == comm->c_coll.coll_reduce_module) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable (%d/%s): no underlying reduce; disqualifying myself", comm->c_contextid, comm->c_name); return OMPI_ERROR; } /* We do everything lazily in ompi_coll_sm_enable() */ return OMPI_SUCCESS; } int ompi_coll_sm_lazy_enable(mca_coll_base_module_t *module, struct ompi_communicator_t *comm) { int i, j, root, ret; int rank = ompi_comm_rank(comm); int size = ompi_comm_size(comm); mca_coll_sm_module_t *sm_module = (mca_coll_sm_module_t*) module; mca_coll_sm_comm_t *data = NULL; size_t control_size, frag_size; mca_coll_sm_component_t *c = &mca_coll_sm_component; opal_maffinity_base_segment_t *maffinity; int parent, min_child, max_child, num_children; unsigned char *base = NULL; const int num_barrier_buffers = 2; /* Just make sure we haven't been here already */ if (sm_module->enabled) { return OMPI_SUCCESS; } sm_module->enabled = true; /* Get some space to setup memory affinity (just easier to try to alloc here to handle the error case) */ maffinity = (opal_maffinity_base_segment_t*) malloc(sizeof(opal_maffinity_base_segment_t) * c->sm_comm_num_segments * 3); if (NULL == maffinity) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable (%d/%s): malloc failed (1)", comm->c_contextid, comm->c_name); return OMPI_ERR_OUT_OF_RESOURCE; } /* Allocate data to hang off the communicator. The memory we alloc will be laid out as follows: 1. mca_coll_base_comm_t 2. array of num_segments mca_coll_base_mpool_index_t instances (pointed to by the array in 2) 3. array of ompi_comm_size(comm) mca_coll_sm_tree_node_t instances 4. array of sm_tree_degree pointers to other tree nodes (i.e., this nodes' children) for each instance of mca_coll_sm_tree_node_t */ sm_module->sm_comm_data = data = (mca_coll_sm_comm_t*) malloc(sizeof(mca_coll_sm_comm_t) + (c->sm_comm_num_segments * sizeof(mca_coll_sm_data_index_t)) + (size * (sizeof(mca_coll_sm_tree_node_t) + (sizeof(mca_coll_sm_tree_node_t*) * c->sm_tree_degree)))); if (NULL == data) { free(maffinity); opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable (%d/%s): malloc failed (2)", comm->c_contextid, comm->c_name); return OMPI_ERR_TEMP_OUT_OF_RESOURCE; } data->mcb_operation_count = 0; /* Setup #2: set the array to point immediately beyond the mca_coll_base_comm_t */ data->mcb_data_index = (mca_coll_sm_data_index_t*) (data + 1); /* Setup array of pointers for #3 */ data->mcb_tree = (mca_coll_sm_tree_node_t*) (data->mcb_data_index + c->sm_comm_num_segments); /* Finally, setup the array of children pointers in the instances in #5 to point to their corresponding arrays in #6 */ data->mcb_tree[0].mcstn_children = (mca_coll_sm_tree_node_t**) (data->mcb_tree + size); for (i = 1; i < size; ++i) { data->mcb_tree[i].mcstn_children = data->mcb_tree[i - 1].mcstn_children + c->sm_tree_degree; } /* Pre-compute a tree for a given number of processes and degree. We'll re-use this tree for all possible values of root (i.e., shift everyone's process to be the "0"/root in this tree. */ for (root = 0; root < size; ++root) { parent = (root - 1) / mca_coll_sm_component.sm_tree_degree; num_children = mca_coll_sm_component.sm_tree_degree; /* Do we have children? If so, how many? 
*/ if ((root * num_children) + 1 >= size) { /* Leaves */ min_child = -1; max_child = -1; num_children = 0; } else { /* Interior nodes */ min_child = root * num_children + 1; max_child = root * num_children + num_children; if (max_child >= size) { max_child = size - 1; } num_children = max_child - min_child + 1; } /* Save the values */ data->mcb_tree[root].mcstn_id = root; if (root == 0 && parent == 0) { data->mcb_tree[root].mcstn_parent = NULL; } else { data->mcb_tree[root].mcstn_parent = &data->mcb_tree[parent]; } data->mcb_tree[root].mcstn_num_children = num_children; for (i = 0; i < c->sm_tree_degree; ++i) { data->mcb_tree[root].mcstn_children[i] = (i < num_children) ? &data->mcb_tree[min_child + i] : NULL; } } /* Attach to this communicator's shmem data segment */ if (OMPI_SUCCESS != (ret = bootstrap_comm(comm, sm_module))) { free(data); free(maffinity); sm_module->sm_comm_data = NULL; return ret; } /* Once the communicator is bootstrapped, set up the pointers into the per-communicator shmem data segment. First, set up the barrier buffers. There are 2 sets of barrier buffers (because there can never be more than one outstanding barrier occurring at any time). Set up pointers to my control buffers, my parent's, and [the beginning of] my children's (note that the children are contiguous, so having the first pointer and the num_children from the mcb_tree data is sufficient). */ control_size = c->sm_control_size; base = data->mcb_mmap->module_data_addr; data->mcb_barrier_control_me = (uint32_t*) (base + (rank * control_size * num_barrier_buffers * 2)); if (data->mcb_tree[rank].mcstn_parent) { data->mcb_barrier_control_parent = (uint32_t*) (base + (data->mcb_tree[rank].mcstn_parent->mcstn_id * control_size * num_barrier_buffers * 2)); } else { data->mcb_barrier_control_parent = NULL; } if (data->mcb_tree[rank].mcstn_num_children > 0) { data->mcb_barrier_control_children = (uint32_t*) (base + (data->mcb_tree[rank].mcstn_children[0]->mcstn_id * control_size * num_barrier_buffers * 2)); } else { data->mcb_barrier_control_children = NULL; } data->mcb_barrier_count = 0; /* Next, set up the pointer to the in-use flags. The number of segments will be an even multiple of the number of in-use flags. */ base += (c->sm_control_size * size * num_barrier_buffers * 2); data->mcb_in_use_flags = (mca_coll_sm_in_use_flag_t*) base; /* All things being equal, if we're rank 0, then make the in-use flags be local (memory affinity). Then zero them all out so that they're marked as unused. */ j = 0; if (0 == rank) { maffinity[j].mbs_start_addr = base; maffinity[j].mbs_len = c->sm_control_size * c->sm_comm_num_in_use_flags; /* Set the op counts to 1 (actually any nonzero value will do) so that the first time children/leaf processes come through, they don't see a value of 0 and think that the root/parent has already set the count to their op number (i.e., 0 is the first op count value). */ for (i = 0; i < mca_coll_sm_component.sm_comm_num_in_use_flags; ++i) { ((mca_coll_sm_in_use_flag_t *)base)[i].mcsiuf_operation_count = 1; ((mca_coll_sm_in_use_flag_t *)base)[i].mcsiuf_num_procs_using = 0; } ++j; } /* Next, set up pointers to the control and data portions of the segments, as well as to the relevant in-use flags.
*/ base += (c->sm_comm_num_in_use_flags * c->sm_control_size); control_size = size * c->sm_control_size; frag_size = size * c->sm_fragment_size; for (i = 0; i < c->sm_comm_num_segments; ++i) { data->mcb_data_index[i].mcbmi_control = (uint32_t*) (base + (i * (control_size + frag_size))); data->mcb_data_index[i].mcbmi_data = (((char*) data->mcb_data_index[i].mcbmi_control) + control_size); /* Memory affinity: control */ maffinity[j].mbs_len = c->sm_control_size; maffinity[j].mbs_start_addr = (void *) (data->mcb_data_index[i].mcbmi_control + (rank * c->sm_control_size)); ++j; /* Memory affinity: data */ maffinity[j].mbs_len = c->sm_fragment_size; maffinity[j].mbs_start_addr = data->mcb_data_index[i].mcbmi_data + (rank * c->sm_control_size); ++j; } /* Set up memory affinity so that the pages that belong to this process are local to this process */ opal_maffinity_base_set(maffinity, j); free(maffinity); /* Zero out the control structures that belong to this process */ memset(data->mcb_barrier_control_me, 0, num_barrier_buffers * 2 * c->sm_control_size); for (i = 0; i < c->sm_comm_num_segments; ++i) { memset((void *) data->mcb_data_index[i].mcbmi_control, 0, c->sm_control_size); } /* Save previous component's reduce information */ sm_module->previous_reduce = comm->c_coll.coll_reduce; sm_module->previous_reduce_module = comm->c_coll.coll_reduce_module; OBJ_RETAIN(sm_module->previous_reduce_module); /* Indicate that we have successfully attached and set up */ opal_atomic_add(&(data->mcb_mmap->module_seg->seg_inited), 1); /* Wait for everyone in this communicator to attach and set up */ opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable (%d/%s): waiting for peers to attach", comm->c_contextid, comm->c_name); SPIN_CONDITION(size == data->mcb_mmap->module_seg->seg_inited, seg_init_exit); /* Once we're all here, remove the mmap file; it's not needed anymore */ if (0 == rank) { unlink(data->mcb_mmap->shmem_ds.seg_name); opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable (%d/%s): removed mmap file %s", comm->c_contextid, comm->c_name, data->mcb_mmap->shmem_ds.seg_name); } /* All done */ opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable (%d/%s): success!", comm->c_contextid, comm->c_name); return OMPI_SUCCESS; } static bool have_local_peers(ompi_group_t *group, size_t size) { size_t i; ompi_proc_t *proc; for (i = 0; i < size; ++i) { proc = ompi_group_peer_lookup(group,i); if (!OPAL_PROC_ON_LOCAL_NODE(proc->proc_flags)) { return false; } } return true; } static int bootstrap_comm(ompi_communicator_t *comm, mca_coll_sm_module_t *module) { int i; char *shortpath, *fullpath; mca_coll_sm_component_t *c = &mca_coll_sm_component; mca_coll_sm_comm_t *data = module->sm_comm_data; int comm_size = ompi_comm_size(comm); int num_segments = c->sm_comm_num_segments; int num_in_use = c->sm_comm_num_in_use_flags; int frag_size = c->sm_fragment_size; int control_size = c->sm_control_size; orte_process_name_t *lowest_name = NULL; size_t size; ompi_proc_t *proc; /* Make the rendezvous filename for this communicator's shmem data segment. The CID is not guaranteed to be unique among all procs on this node, so also pair it with the PID of the proc with the lowest ORTE name to form a unique filename.
*/ proc = ompi_group_peer_lookup(comm->c_local_group, 0); lowest_name = &(proc->proc_name); for (i = 1; i < comm_size; ++i) { proc = ompi_group_peer_lookup(comm->c_local_group, i); if (orte_util_compare_name_fields(ORTE_NS_CMP_ALL, &(proc->proc_name), lowest_name) < 0) { lowest_name = &(proc->proc_name); } } asprintf(&shortpath, "coll-sm-cid-%d-name-%s.mmap", comm->c_contextid, ORTE_NAME_PRINT(lowest_name)); if (NULL == shortpath) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable:bootstrap comm (%d/%s): asprintf failed", comm->c_contextid, comm->c_name); return OMPI_ERR_OUT_OF_RESOURCE; } fullpath = opal_os_path(false, orte_process_info.job_session_dir, shortpath, NULL); free(shortpath); if (NULL == fullpath) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable:bootstrap comm (%d/%s): opal_os_path failed", comm->c_contextid, comm->c_name); return OMPI_ERR_OUT_OF_RESOURCE; } /* Calculate how much space we need in the per-communicator shmem data segment. There are several values to add: - size of the barrier data (2 of these): - fan-in data (num_procs * control_size) - fan-out data (num_procs * control_size) - size of the "in use" buffers: - num_in_use_buffers * control_size - size of the message fragment area (one for each segment): - control (num_procs * control_size) - fragment data (num_procs * (frag_size)) So it's: barrier: 2 * control_size + 2 * control_size in use: num_in_use * control_size control: num_segments * (num_procs * control_size * 2 + num_procs * control_size) message: num_segments * (num_procs * frag_size) */ size = 4 * control_size + (num_in_use * control_size) + (num_segments * (comm_size * control_size * 2)) + (num_segments * (comm_size * frag_size)); opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable:bootstrap comm (%d/%s): attaching to %" PRIsize_t " byte mmap: %s", comm->c_contextid, comm->c_name, size, fullpath); data->mcb_mmap = mca_common_sm_init_group(comm->c_local_group, size, fullpath, sizeof(mca_common_sm_seg_header_t), sizeof(void*)); if (NULL == data->mcb_mmap) { opal_output_verbose(10, mca_coll_base_output, "coll:sm:enable:bootstrap comm (%d/%s): common_sm_mmap_init_group failed", comm->c_contextid, comm->c_name); return OMPI_ERR_OUT_OF_RESOURCE; } /* All done */ return OMPI_SUCCESS; } int mca_coll_sm_ft_event(int state) { if(OPAL_CRS_CHECKPOINT == state) { ; } else if(OPAL_CRS_CONTINUE == state) { ; } else if(OPAL_CRS_RESTART == state) { ; } else if(OPAL_CRS_TERM == state ) { ; } else { ; } return OMPI_SUCCESS; }
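The tree pre-computation in ompi_coll_sm_lazy_enable above is easy to lose among the pointer setup, so here is the bare k-ary index arithmetic in isolation. This is an illustrative standalone sketch (tree_node, size and degree are made-up names); the formulas mirror the loop in the module, including the root-as-own-parent special case.

#include <stdio.h>

/* Print parent and child range for node i of a k-ary tree with `size`
   nodes and fan-out `degree`; node 0 is the root. */
static void tree_node(int i, int size, int degree)
{
    int parent = (i - 1) / degree;   /* truncates to 0 for i == 0 */
    int min_child = i * degree + 1;
    int max_child = i * degree + degree;
    int num_children;

    if (min_child >= size) {         /* leaf: no children in range */
        min_child = max_child = -1;
        num_children = 0;
    } else {
        if (max_child >= size) {
            max_child = size - 1;    /* clamp the last interior node */
        }
        num_children = max_child - min_child + 1;
    }
    printf("node %d: parent %d, children [%d..%d] (%d)\n",
           i, (0 == i) ? -1 : parent, min_child, max_child, num_children);
}

int main(void)
{
    int i;

    for (i = 0; i < 10; ++i) {
        tree_node(i, 10, 4);         /* e.g. 10 procs, tree degree 4 */
    }
    return 0;
}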
64817.c
/* * Authors: Jan Zarsky <[email protected]> * * Copyright (C) 2019 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "utilities.h" #include "test_port.h" #define PORT_COUNT 3 #define PORT1_LOW 80 #define PORT1_HIGH 80 #define PORT1_PROTO SEPOL_PROTO_TCP #define PORT2_LOW 1 #define PORT2_HIGH 1023 #define PORT2_PROTO SEPOL_PROTO_UDP #define PORT3_LOW 12345 #define PORT3_HIGH 12345 #define PORT3_PROTO SEPOL_PROTO_TCP /* port_record.h */ void test_port_compare(void); void test_port_compare2(void); void test_port_key_create(void); void test_port_key_extract(void); void test_port_get_set_proto(void); void test_port_get_proto_str(void); void test_port_get_set_port(void); void test_port_get_set_con(void); void test_port_create(void); void test_port_clone(void); /* ports_policy.h */ void test_port_query(void); void test_port_exists(void); void test_port_count(void); void test_port_iterate(void); void test_port_list(void); /* ports_local.h */ void test_port_modify_del_local(void); void test_port_query_local(void); void test_port_exists_local(void); void test_port_count_local(void); void test_port_iterate_local(void); void test_port_list_local(void); /* internal */ void test_port_validate_local(void); extern semanage_handle_t *sh; int port_test_init(void) { if (create_test_store() < 0) { fprintf(stderr, "Could not create test store\n"); return 1; } if (write_test_policy_from_file("test_port.policy") < 0) { fprintf(stderr, "Could not write test policy\n"); return 1; } return 0; } int port_test_cleanup(void) { if (destroy_test_store() < 0) { fprintf(stderr, "Could not destroy test store\n"); return 1; } return 0; } int port_add_tests(CU_pSuite suite) { CU_add_test(suite, "port_compare", test_port_compare); CU_add_test(suite, "port_compare2", test_port_compare2); CU_add_test(suite, "port_key_create", test_port_key_create); CU_add_test(suite, "port_key_extract", test_port_key_extract); CU_add_test(suite, "port_get_set_proto", test_port_get_set_proto); CU_add_test(suite, "port_get_proto_str", test_port_get_proto_str); CU_add_test(suite, "port_get_set_port", test_port_get_set_port); CU_add_test(suite, "port_get_set_con", test_port_get_set_con); CU_add_test(suite, "port_create", test_port_create); CU_add_test(suite, "port_clone", test_port_clone); CU_add_test(suite, "port_query", test_port_query); CU_add_test(suite, "port_exists", test_port_exists); CU_add_test(suite, "port_count", test_port_count); CU_add_test(suite, "port_iterate", test_port_iterate); CU_add_test(suite, "port_list", test_port_list); CU_add_test(suite, "port_modify_del_local", test_port_modify_del_local); CU_add_test(suite, "port_query_local", test_port_query_local); CU_add_test(suite, "port_exists_local", test_port_exists_local); CU_add_test(suite, "port_count_local", test_port_count_local); CU_add_test(suite, "port_iterate_local", 
test_port_iterate_local); CU_add_test(suite, "port_list_local", test_port_list_local); CU_add_test(suite, "port_validate_local", test_port_validate_local); return 0; } /* Helpers */ semanage_port_t *get_port_nth(int idx) { int res; semanage_port_t **records; semanage_port_t *port; unsigned int count; if (idx == I_NULL) return NULL; res = semanage_port_list(sh, &records, &count); CU_ASSERT_FATAL(res >= 0); CU_ASSERT_FATAL(count >= (unsigned int) idx + 1); port = records[idx]; for (unsigned int i = 0; i < count; i++) if (i != (unsigned int) idx) semanage_port_free(records[i]); free(records); return port; } semanage_port_key_t *get_port_key_nth(int idx) { semanage_port_key_t *key; semanage_port_t *port; int res; if (idx == I_NULL) return NULL; port = get_port_nth(idx); res = semanage_port_key_extract(sh, port, &key); CU_ASSERT_FATAL(res >= 0); CU_ASSERT_PTR_NOT_NULL_FATAL(key); return key; } void add_local_port(int port_idx) { semanage_port_t *port; semanage_port_key_t *key = NULL; CU_ASSERT_FATAL(port_idx != I_NULL); port = get_port_nth(port_idx); CU_ASSERT_FATAL(semanage_port_key_extract(sh, port, &key) >= 0); CU_ASSERT_PTR_NOT_NULL_FATAL(key); CU_ASSERT_FATAL(semanage_port_modify_local(sh, key, port) >= 0); } void delete_local_port(int port_idx) { semanage_port_key_t *key = NULL; CU_ASSERT_FATAL(port_idx != I_NULL); key = get_port_key_nth(port_idx); CU_ASSERT_FATAL(semanage_port_del_local(sh, key) >= 0); } /* Function semanage_port_compare */ void helper_port_compare(int idx1, int idx2) { semanage_port_t *port = NULL; semanage_port_key_t *key = NULL; int res = 42; /* setup */ setup_handle(SH_CONNECT); port = get_port_nth(idx1); key = get_port_key_nth(idx2); /* test */ res = semanage_port_compare(port, key); if (idx1 == idx2) { CU_ASSERT(res == 0); } else { CU_ASSERT(res != 0); } /* cleanup */ semanage_port_free(port); semanage_port_key_free(key); cleanup_handle(SH_CONNECT); } void test_port_compare(void) { helper_port_compare(I_FIRST, I_FIRST); helper_port_compare(I_FIRST, I_SECOND); helper_port_compare(I_SECOND, I_FIRST); helper_port_compare(I_SECOND, I_SECOND); } /* Function semanage_port_compare2 */ void helper_port_compare2(int idx1, int idx2) { semanage_port_t *port1 = NULL; semanage_port_t *port2 = NULL; int res = 42; /* setup */ setup_handle(SH_CONNECT); port1 = get_port_nth(idx1); port2 = get_port_nth(idx2); /* test */ res = semanage_port_compare2(port1, port2); if (idx1 == idx2) { CU_ASSERT(res == 0); } else { CU_ASSERT(res != 0); } /* cleanup */ semanage_port_free(port1); semanage_port_free(port2); cleanup_handle(SH_CONNECT); } void test_port_compare2(void) { helper_port_compare2(I_FIRST, I_FIRST); helper_port_compare2(I_FIRST, I_SECOND); helper_port_compare2(I_SECOND, I_FIRST); helper_port_compare2(I_SECOND, I_SECOND); } /* Function semanage_port_key_create */ void test_port_key_create(void) { semanage_port_key_t *key = NULL; /* setup */ setup_handle(SH_CONNECT); /* test */ CU_ASSERT(semanage_port_key_create(sh, 1000, 1200, 0, &key) >= 0); CU_ASSERT_PTR_NOT_NULL(key); /* cleanup */ semanage_port_key_free(key); cleanup_handle(SH_CONNECT); } /* Function semanage_port_key_extract */ void test_port_key_extract(void) { semanage_port_t *port = NULL; semanage_port_key_t *key = NULL; /* setup */ setup_handle(SH_CONNECT); port = get_port_nth(I_FIRST); /* test */ CU_ASSERT(semanage_port_key_extract(sh, port, &key) >= 0); CU_ASSERT_PTR_NOT_NULL(key); /* cleanup */ semanage_port_free(port); semanage_port_key_free(key); cleanup_handle(SH_CONNECT); } /* Function semanage_port_get_proto, semanage_port_set_proto */
void helper_port_get_set_proto(int idx) { semanage_port_t *port = NULL; /* setup */ setup_handle(SH_CONNECT); port = get_port_nth(idx); /* test */ semanage_port_set_proto(port, 0); CU_ASSERT(semanage_port_get_proto(port) == 0); semanage_port_set_proto(port, 1); CU_ASSERT(semanage_port_get_proto(port) == 1); /* cleanup */ semanage_port_free(port); cleanup_handle(SH_CONNECT); } void test_port_get_set_proto(void) { helper_port_get_set_proto(I_FIRST); helper_port_get_set_proto(I_SECOND); } /* Function semanage_port_get_proto_str */ void test_port_get_proto_str(void) { const char *str = NULL; str = semanage_port_get_proto_str(-1); CU_ASSERT_STRING_EQUAL(str, "???"); str = semanage_port_get_proto_str(0); CU_ASSERT_STRING_EQUAL(str, "udp"); str = semanage_port_get_proto_str(1); CU_ASSERT_STRING_EQUAL(str, "tcp"); str = semanage_port_get_proto_str(2); CU_ASSERT_STRING_EQUAL(str, "dccp"); str = semanage_port_get_proto_str(3); CU_ASSERT_STRING_EQUAL(str, "sctp"); str = semanage_port_get_proto_str(4); CU_ASSERT_STRING_EQUAL(str, "???"); } /* Function semanage_port_get_low, semanage_port_get_high, */ /* semanage_port_set_port, semanage_port_set_range */ void test_port_get_set_port(void) { semanage_port_t *port = NULL; /* setup */ setup_handle(SH_CONNECT); port = get_port_nth(I_FIRST); /* test */ semanage_port_set_port(port, 1000); CU_ASSERT(semanage_port_get_low(port) == 1000); CU_ASSERT(semanage_port_get_high(port) == 1000); semanage_port_set_range(port, 1000, 1200); CU_ASSERT(semanage_port_get_low(port) == 1000); CU_ASSERT(semanage_port_get_high(port) == 1200); /* cleanup */ semanage_port_free(port); cleanup_handle(SH_CONNECT); } /* Function semanage_port_get_con, semanage_port_set_con */ void test_port_get_set_con(void) { semanage_port_t *port = NULL; semanage_port_t *port_tmp = NULL; semanage_context_t *con1 = NULL; semanage_context_t *con2 = NULL; /* setup */ setup_handle(SH_CONNECT); port = get_port_nth(I_FIRST); port_tmp = get_port_nth(I_SECOND); con1 = semanage_port_get_con(port_tmp); /* test */ CU_ASSERT(semanage_port_set_con(sh, port, con1) >= 0); con2 = semanage_port_get_con(port); CU_ASSERT_CONTEXT_EQUAL(con1, con2); /* cleanup */ semanage_port_free(port); semanage_port_free(port_tmp); cleanup_handle(SH_CONNECT); } /* Function semanage_port_create */ void test_port_create(void) { semanage_port_t *port = NULL; /* setup */ setup_handle(SH_CONNECT); /* test */ CU_ASSERT(semanage_port_create(sh, &port) >= 0); CU_ASSERT(semanage_port_get_low(port) == 0); CU_ASSERT(semanage_port_get_high(port) == 0); CU_ASSERT(semanage_port_get_con(port) == NULL); CU_ASSERT(semanage_port_get_proto(port) == 0); /* cleanup */ semanage_port_free(port); cleanup_handle(SH_CONNECT); } /* Function semanage_port_clone */ void test_port_clone(void) { semanage_port_t *port = NULL; semanage_port_t *port_clone = NULL; semanage_context_t *con = NULL; semanage_context_t *con2 = NULL; /* setup */ setup_handle(SH_CONNECT); CU_ASSERT(semanage_port_create(sh, &port) >= 0); semanage_port_set_range(port, 1000, 1200); semanage_port_set_proto(port, 1); semanage_context_from_string(sh, "user_u:role_r:type_t:s0", &con); semanage_port_set_con(sh, port, con); /* test */ CU_ASSERT(semanage_port_clone(sh, port, &port_clone) >= 0); CU_ASSERT(semanage_port_get_low(port_clone) == 1000); CU_ASSERT(semanage_port_get_high(port_clone) == 1200); CU_ASSERT(semanage_port_get_proto(port_clone) == 1); con2 = semanage_port_get_con(port_clone); CU_ASSERT_CONTEXT_EQUAL(con, con2); /* cleanup */ semanage_port_free(port); semanage_port_free(port_clone); 
cleanup_handle(SH_CONNECT); } /* Function semanage_port_query */ void test_port_query(void) { semanage_port_t *port = NULL; semanage_port_t *port_exp = NULL; semanage_port_key_t *key = NULL; semanage_context_t *con = NULL; semanage_context_t *con_exp = NULL; /* setup */ setup_handle(SH_CONNECT); key = get_port_key_nth(I_FIRST); port_exp = get_port_nth(I_FIRST); /* test */ CU_ASSERT(semanage_port_query(sh, key, &port) >= 0); CU_ASSERT(semanage_port_get_low(port) == semanage_port_get_low(port_exp)); CU_ASSERT(semanage_port_get_high(port) == semanage_port_get_high(port_exp)); CU_ASSERT(semanage_port_get_proto(port) == semanage_port_get_proto(port_exp)); con = semanage_port_get_con(port); con_exp = semanage_port_get_con(port_exp); CU_ASSERT_CONTEXT_EQUAL(con, con_exp); /* cleanup */ semanage_port_free(port); semanage_port_free(port_exp); cleanup_handle(SH_CONNECT); } /* Function semanage_port_exists */ void test_port_exists(void) { semanage_port_key_t *key1 = NULL; semanage_port_key_t *key2 = NULL; int resp = 42; /* setup */ setup_handle(SH_CONNECT); key1 = get_port_key_nth(I_FIRST); CU_ASSERT(semanage_port_key_create(sh, 123, 456, 0, &key2) >= 0); /* test */ CU_ASSERT(semanage_port_exists(sh, key1, &resp) >= 0); CU_ASSERT(resp); CU_ASSERT(semanage_port_exists(sh, key2, &resp) >= 0); CU_ASSERT(!resp); /* cleanup */ semanage_port_key_free(key1); semanage_port_key_free(key2); cleanup_handle(SH_CONNECT); } /* Function semanage_port_count */ void test_port_count(void) { unsigned int count = 42; /* setup */ setup_handle(SH_CONNECT); /* test */ CU_ASSERT(semanage_port_count(sh, &count) >= 0); CU_ASSERT(count == PORT_COUNT); /* cleanup */ cleanup_handle(SH_CONNECT); } /* Function semanage_port_iterate */ unsigned int counter_port_iterate = 0; int handler_port_iterate(const semanage_port_t *record, void *varg) { counter_port_iterate++; return 0; } void test_port_iterate(void) { /* setup */ setup_handle(SH_CONNECT); /* test */ semanage_port_iterate(sh, handler_port_iterate, NULL); CU_ASSERT(counter_port_iterate == PORT_COUNT); /* cleanup */ cleanup_handle(SH_CONNECT); } /* Function semanage_port_list */ void test_port_list(void) { semanage_port_t **records = NULL; unsigned int count = 42; /* setup */ setup_handle(SH_CONNECT); /* test */ CU_ASSERT(semanage_port_list(sh, &records, &count) >= 0); CU_ASSERT(count == PORT_COUNT); for (unsigned int i = 0; i < count; i++) CU_ASSERT_PTR_NOT_NULL(records[i]); /* cleanup */ for (unsigned int i = 0; i < count; i++) semanage_port_free(records[i]); cleanup_handle(SH_CONNECT); } /* Function semanage_port_modify_local, semanage_port_del_local */ void test_port_modify_del_local(void) { semanage_port_t *port; semanage_port_t *port_local; semanage_port_key_t *key = NULL; semanage_context_t *con = NULL; semanage_context_t *con_local = NULL; /* setup */ setup_handle(SH_TRANS); port = get_port_nth(I_FIRST); semanage_context_from_string(sh, "user_u:role_r:type_t:s0", &con); semanage_port_set_con(sh, port, con); CU_ASSERT(semanage_port_key_extract(sh, port, &key) >= 0); CU_ASSERT_PTR_NOT_NULL(key); /* test */ CU_ASSERT(semanage_port_modify_local(sh, key, port) >= 0); CU_ASSERT(semanage_port_query_local(sh, key, &port_local) >= 0); CU_ASSERT_PTR_NOT_NULL_FATAL(port_local); con_local = semanage_port_get_con(port_local); CU_ASSERT_CONTEXT_EQUAL(con, con_local); CU_ASSERT(semanage_port_del_local(sh, key) >= 0); CU_ASSERT(semanage_port_query_local(sh, key, &port_local) < 0); /* cleanup */ semanage_port_free(port); cleanup_handle(SH_TRANS); } /* Function semanage_port_query_local 
*/ void test_port_query_local(void) { semanage_port_t *port = NULL; semanage_port_t *port_exp = NULL; semanage_port_key_t *key = NULL; semanage_context_t *con = NULL; semanage_context_t *con_exp = NULL; /* setup */ setup_handle(SH_TRANS); add_local_port(I_FIRST); key = get_port_key_nth(I_FIRST); port_exp = get_port_nth(I_FIRST); /* test */ CU_ASSERT(semanage_port_query_local(sh, key, &port) >= 0); CU_ASSERT(semanage_port_get_low(port) == semanage_port_get_low(port_exp)); CU_ASSERT(semanage_port_get_high(port) == semanage_port_get_high(port_exp)); CU_ASSERT(semanage_port_get_proto(port) == semanage_port_get_proto(port_exp)); con = semanage_port_get_con(port); con_exp = semanage_port_get_con(port_exp); CU_ASSERT_CONTEXT_EQUAL(con, con_exp); /* cleanup */ delete_local_port(I_FIRST); semanage_port_free(port); semanage_port_free(port_exp); cleanup_handle(SH_TRANS); } /* Function semanage_port_exists_local */ void test_port_exists_local(void) { semanage_port_key_t *key1 = NULL; semanage_port_key_t *key2 = NULL; int resp = 42; /* setup */ setup_handle(SH_TRANS); add_local_port(I_FIRST); key1 = get_port_key_nth(I_FIRST); key2 = get_port_key_nth(I_SECOND); /* test */ CU_ASSERT(semanage_port_exists_local(sh, key1, &resp) >= 0); CU_ASSERT(resp); CU_ASSERT(semanage_port_exists_local(sh, key2, &resp) >= 0); CU_ASSERT(!resp); /* cleanup */ delete_local_port(I_FIRST); semanage_port_key_free(key1); semanage_port_key_free(key2); cleanup_handle(SH_TRANS); } /* Function semanage_port_count_local */ void test_port_count_local(void) { unsigned int count = 42; /* setup */ setup_handle(SH_TRANS); /* test */ CU_ASSERT(semanage_port_count_local(sh, &count) >= 0); CU_ASSERT(count == 0); add_local_port(I_FIRST); CU_ASSERT(semanage_port_count_local(sh, &count) >= 0); CU_ASSERT(count == 1); add_local_port(I_SECOND); CU_ASSERT(semanage_port_count_local(sh, &count) >= 0); CU_ASSERT(count == 2); delete_local_port(I_SECOND); CU_ASSERT(semanage_port_count_local(sh, &count) >= 0); CU_ASSERT(count == 1); delete_local_port(I_FIRST); CU_ASSERT(semanage_port_count_local(sh, &count) >= 0); CU_ASSERT(count == 0); /* cleanup */ cleanup_handle(SH_TRANS); } /* Function semanage_port_iterate_local */ unsigned int counter_port_iterate_local = 0; int handler_port_iterate_local(const semanage_port_t *record, void *varg) { counter_port_iterate_local++; return 0; } void test_port_iterate_local(void) { /* setup */ setup_handle(SH_TRANS); add_local_port(I_FIRST); add_local_port(I_SECOND); add_local_port(I_THIRD); /* test */ semanage_port_iterate_local(sh, handler_port_iterate_local, NULL); CU_ASSERT(counter_port_iterate_local == 3); /* cleanup */ delete_local_port(I_FIRST); delete_local_port(I_SECOND); delete_local_port(I_THIRD); cleanup_handle(SH_TRANS); } /* Function semanage_port_list_local */ void test_port_list_local(void) { semanage_port_t **records = NULL; unsigned int count = 42; /* setup */ setup_handle(SH_TRANS); add_local_port(I_FIRST); add_local_port(I_SECOND); add_local_port(I_THIRD); /* test */ CU_ASSERT(semanage_port_list_local(sh, &records, &count) >= 0); CU_ASSERT(count == 3); for (unsigned int i = 0; i < count; i++) CU_ASSERT_PTR_NOT_NULL(records[i]); /* cleanup */ for (unsigned int i = 0; i < count; i++) semanage_port_free(records[i]); delete_local_port(I_FIRST); delete_local_port(I_SECOND); delete_local_port(I_THIRD); cleanup_handle(SH_TRANS); } /* Internal function semanage_port_validate_local */ void helper_port_validate_local_noport(void) { semanage_port_key_t *key = NULL; int resp = 42; /* setup */ 
setup_handle(SH_TRANS); add_local_port(I_FIRST); helper_commit(); key = get_port_key_nth(I_FIRST); CU_ASSERT(semanage_port_exists_local(sh, key, &resp) >= 0); CU_ASSERT(resp); /* test */ helper_begin_transaction(); delete_local_port(I_FIRST); helper_commit(); /* cleanup */ helper_begin_transaction(); delete_local_port(I_FIRST); cleanup_handle(SH_TRANS); } void helper_port_validate_local_oneport(void) { /* setup */ setup_handle(SH_TRANS); add_local_port(I_FIRST); /* test */ helper_commit(); /* cleanup */ helper_begin_transaction(); delete_local_port(I_FIRST); cleanup_handle(SH_TRANS); } void helper_port_validate_local_twoports(void) { semanage_port_key_t *key1 = NULL; semanage_port_key_t *key2 = NULL; semanage_port_t *port1 = NULL; semanage_port_t *port2 = NULL; semanage_context_t *con1 = NULL; semanage_context_t *con2 = NULL; /* setup */ setup_handle(SH_TRANS); CU_ASSERT(semanage_port_key_create(sh, 101, 200, 0, &key1) >= 0); CU_ASSERT(semanage_port_key_create(sh, 201, 300, 0, &key2) >= 0); CU_ASSERT(semanage_port_create(sh, &port1) >= 0); CU_ASSERT(semanage_port_create(sh, &port2) >= 0); semanage_port_set_range(port1, 101, 200); semanage_port_set_range(port2, 201, 300); semanage_port_set_proto(port1, 0); semanage_port_set_proto(port2, 0); CU_ASSERT(semanage_context_from_string(sh, "system_u:object_r:user_home_t:s0", &con1) >= 0); CU_ASSERT(semanage_context_from_string(sh, "system_u:object_r:user_tmp_t:s0", &con2) >= 0); semanage_port_set_con(sh, port1, con1); semanage_port_set_con(sh, port2, con2); CU_ASSERT(semanage_port_modify_local(sh, key1, port1) >= 0); CU_ASSERT(semanage_port_modify_local(sh, key2, port2) >= 0); /* test */ helper_commit(); /* cleanup */ helper_begin_transaction(); CU_ASSERT(semanage_port_del_local(sh, key1) >= 0); CU_ASSERT(semanage_port_del_local(sh, key2) >= 0); semanage_port_key_free(key1); semanage_port_key_free(key2); semanage_port_free(port1); semanage_port_free(port2); cleanup_handle(SH_TRANS); } void helper_port_validate_local_proto(void) { semanage_port_key_t *key1 = NULL; semanage_port_key_t *key2 = NULL; semanage_port_key_t *key3 = NULL; semanage_port_t *port1 = NULL; semanage_port_t *port2 = NULL; semanage_port_t *port3 = NULL; semanage_context_t *con1 = NULL; semanage_context_t *con2 = NULL; semanage_context_t *con3 = NULL; /* setup */ setup_handle(SH_TRANS); CU_ASSERT(semanage_port_key_create(sh, 101, 200, 0, &key1) >= 0); CU_ASSERT(semanage_port_key_create(sh, 51, 250, 1, &key2) >= 0); CU_ASSERT(semanage_port_key_create(sh, 201, 300, 0, &key3) >= 0); CU_ASSERT(semanage_port_create(sh, &port1) >= 0); CU_ASSERT(semanage_port_create(sh, &port2) >= 0); CU_ASSERT(semanage_port_create(sh, &port3) >= 0); semanage_port_set_range(port1, 101, 200); semanage_port_set_range(port2, 51, 250); semanage_port_set_range(port3, 201, 300); semanage_port_set_proto(port1, 0); semanage_port_set_proto(port2, 0); semanage_port_set_proto(port3, 0); CU_ASSERT(semanage_context_from_string(sh, "system_u:object_r:user_home_t:s0", &con1) >= 0); CU_ASSERT(semanage_context_from_string(sh, "system_u:object_r:user_home_t:s0", &con2) >= 0); CU_ASSERT(semanage_context_from_string(sh, "system_u:object_r:user_tmp_t:s0", &con3) >= 0); semanage_port_set_con(sh, port1, con1); semanage_port_set_con(sh, port2, con2); semanage_port_set_con(sh, port3, con3); CU_ASSERT(semanage_port_modify_local(sh, key1, port1) >= 0); CU_ASSERT(semanage_port_modify_local(sh, key2, port2) >= 0); CU_ASSERT(semanage_port_modify_local(sh, key3, port3) >= 0); /* test */ helper_commit(); /* cleanup */ 
CU_ASSERT(semanage_port_del_local(sh, key1) >= 0); CU_ASSERT(semanage_port_del_local(sh, key2) >= 0); CU_ASSERT(semanage_port_del_local(sh, key3) >= 0); semanage_port_key_free(key1); semanage_port_key_free(key2); semanage_port_key_free(key3); semanage_port_free(port1); semanage_port_free(port2); semanage_port_free(port3); cleanup_handle(SH_TRANS); } void test_port_validate_local(void) { helper_port_validate_local_noport(); helper_port_validate_local_oneport(); helper_port_validate_local_twoports(); }
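Everything above drives the API through the CUnit harness and the shared sh handle from utilities.h. For orientation, a standalone libsemanage client exercising the same read-only calls looks roughly like the hedged sketch below (link with -lsemanage); it queries the active policy store rather than the test store the suite creates.

#include <stdio.h>
#include <stdlib.h>
#include <semanage/semanage.h>

/* List every port context in the active store. */
int list_ports(void)
{
    semanage_handle_t *handle;
    semanage_port_t **records = NULL;
    unsigned int count = 0;

    handle = semanage_handle_create();
    if (NULL == handle)
        return 1;
    if (semanage_connect(handle) < 0) {
        semanage_handle_destroy(handle);
        return 1;
    }
    if (semanage_port_list(handle, &records, &count) >= 0) {
        for (unsigned int i = 0; i < count; i++) {
            printf("%5d-%5d/%s\n",
                   semanage_port_get_low(records[i]),
                   semanage_port_get_high(records[i]),
                   semanage_port_get_proto_str(
                       semanage_port_get_proto(records[i])));
            semanage_port_free(records[i]);
        }
        free(records);
    }
    semanage_disconnect(handle);
    semanage_handle_destroy(handle);
    return 0;
}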
513867.c
/*- * Copyright (c) 2017 Broadcom. All rights reserved. * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /** * @file * Generic state machine framework. */ #include "ocs_os.h" #include "ocs_sm.h" const char *ocs_sm_id[] = { "common", "domain", "login" }; /** * @brief Post an event to a context. * * @param ctx State machine context * @param evt Event to post * @param data Event-specific data (if any) * * @return 0 if successfully posted event; -1 if state machine * is disabled */ int ocs_sm_post_event(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data) { if (ctx->current_state) { ctx->current_state(ctx, evt, data); return 0; } else { return -1; } } /** * @brief Transition to a new state. */ void ocs_sm_transition(ocs_sm_ctx_t *ctx, ocs_sm_function_t state, void *data) { if (ctx->current_state == state) { ocs_sm_post_event(ctx, OCS_EVT_REENTER, data); } else { ocs_sm_post_event(ctx, OCS_EVT_EXIT, data); ctx->current_state = state; ocs_sm_post_event(ctx, OCS_EVT_ENTER, data); } } /** * @brief Disable further state machine processing. 
*/ void ocs_sm_disable(ocs_sm_ctx_t *ctx) { ctx->current_state = NULL; } const char *ocs_sm_event_name(ocs_sm_event_t evt) { switch (evt) { #define RETEVT(x) case x: return #x; RETEVT(OCS_EVT_ENTER) RETEVT(OCS_EVT_REENTER) RETEVT(OCS_EVT_EXIT) RETEVT(OCS_EVT_SHUTDOWN) RETEVT(OCS_EVT_RESPONSE) RETEVT(OCS_EVT_RESUME) RETEVT(OCS_EVT_TIMER_EXPIRED) RETEVT(OCS_EVT_ERROR) RETEVT(OCS_EVT_SRRS_ELS_REQ_OK) RETEVT(OCS_EVT_SRRS_ELS_CMPL_OK) RETEVT(OCS_EVT_SRRS_ELS_REQ_FAIL) RETEVT(OCS_EVT_SRRS_ELS_CMPL_FAIL) RETEVT(OCS_EVT_SRRS_ELS_REQ_RJT) RETEVT(OCS_EVT_NODE_ATTACH_OK) RETEVT(OCS_EVT_NODE_ATTACH_FAIL) RETEVT(OCS_EVT_NODE_FREE_OK) RETEVT(OCS_EVT_ELS_REQ_TIMEOUT) RETEVT(OCS_EVT_ELS_REQ_ABORTED) RETEVT(OCS_EVT_ABORT_ELS) RETEVT(OCS_EVT_ELS_ABORT_CMPL) RETEVT(OCS_EVT_DOMAIN_FOUND) RETEVT(OCS_EVT_DOMAIN_ALLOC_OK) RETEVT(OCS_EVT_DOMAIN_ALLOC_FAIL) RETEVT(OCS_EVT_DOMAIN_REQ_ATTACH) RETEVT(OCS_EVT_DOMAIN_ATTACH_OK) RETEVT(OCS_EVT_DOMAIN_ATTACH_FAIL) RETEVT(OCS_EVT_DOMAIN_LOST) RETEVT(OCS_EVT_DOMAIN_FREE_OK) RETEVT(OCS_EVT_DOMAIN_FREE_FAIL) RETEVT(OCS_EVT_HW_DOMAIN_REQ_ATTACH) RETEVT(OCS_EVT_HW_DOMAIN_REQ_FREE) RETEVT(OCS_EVT_ALL_CHILD_NODES_FREE) RETEVT(OCS_EVT_SPORT_ALLOC_OK) RETEVT(OCS_EVT_SPORT_ALLOC_FAIL) RETEVT(OCS_EVT_SPORT_ATTACH_OK) RETEVT(OCS_EVT_SPORT_ATTACH_FAIL) RETEVT(OCS_EVT_SPORT_FREE_OK) RETEVT(OCS_EVT_SPORT_FREE_FAIL) RETEVT(OCS_EVT_SPORT_TOPOLOGY_NOTIFY) RETEVT(OCS_EVT_HW_PORT_ALLOC_OK) RETEVT(OCS_EVT_HW_PORT_ALLOC_FAIL) RETEVT(OCS_EVT_HW_PORT_ATTACH_OK) RETEVT(OCS_EVT_HW_PORT_REQ_ATTACH) RETEVT(OCS_EVT_HW_PORT_REQ_FREE) RETEVT(OCS_EVT_HW_PORT_FREE_OK) RETEVT(OCS_EVT_NODE_FREE_FAIL) RETEVT(OCS_EVT_ABTS_RCVD) RETEVT(OCS_EVT_NODE_MISSING) RETEVT(OCS_EVT_NODE_REFOUND) RETEVT(OCS_EVT_SHUTDOWN_IMPLICIT_LOGO) RETEVT(OCS_EVT_SHUTDOWN_EXPLICIT_LOGO) RETEVT(OCS_EVT_ELS_FRAME) RETEVT(OCS_EVT_PLOGI_RCVD) RETEVT(OCS_EVT_FLOGI_RCVD) RETEVT(OCS_EVT_LOGO_RCVD) RETEVT(OCS_EVT_PRLI_RCVD) RETEVT(OCS_EVT_PRLO_RCVD) RETEVT(OCS_EVT_PDISC_RCVD) RETEVT(OCS_EVT_FDISC_RCVD) RETEVT(OCS_EVT_ADISC_RCVD) RETEVT(OCS_EVT_RSCN_RCVD) RETEVT(OCS_EVT_SCR_RCVD) RETEVT(OCS_EVT_ELS_RCVD) RETEVT(OCS_EVT_LAST) RETEVT(OCS_EVT_FCP_CMD_RCVD) RETEVT(OCS_EVT_RFT_ID_RCVD) RETEVT(OCS_EVT_RFF_ID_RCVD) RETEVT(OCS_EVT_GNN_ID_RCVD) RETEVT(OCS_EVT_GPN_ID_RCVD) RETEVT(OCS_EVT_GFPN_ID_RCVD) RETEVT(OCS_EVT_GFF_ID_RCVD) RETEVT(OCS_EVT_GID_FT_RCVD) RETEVT(OCS_EVT_GID_PT_RCVD) RETEVT(OCS_EVT_RPN_ID_RCVD) RETEVT(OCS_EVT_RNN_ID_RCVD) RETEVT(OCS_EVT_RCS_ID_RCVD) RETEVT(OCS_EVT_RSNN_NN_RCVD) RETEVT(OCS_EVT_RSPN_ID_RCVD) RETEVT(OCS_EVT_RHBA_RCVD) RETEVT(OCS_EVT_RPA_RCVD) RETEVT(OCS_EVT_GIDPT_DELAY_EXPIRED) RETEVT(OCS_EVT_ABORT_IO) RETEVT(OCS_EVT_ABORT_IO_NO_RESP) RETEVT(OCS_EVT_IO_CMPL) RETEVT(OCS_EVT_IO_CMPL_ERRORS) RETEVT(OCS_EVT_RESP_CMPL) RETEVT(OCS_EVT_ABORT_CMPL) RETEVT(OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY) RETEVT(OCS_EVT_NODE_DEL_INI_COMPLETE) RETEVT(OCS_EVT_NODE_DEL_TGT_COMPLETE) RETEVT(OCS_EVT_IO_ABORTED_BY_TMF) RETEVT(OCS_EVT_IO_ABORT_IGNORED) RETEVT(OCS_EVT_IO_FIRST_BURST) RETEVT(OCS_EVT_IO_FIRST_BURST_ERR) RETEVT(OCS_EVT_IO_FIRST_BURST_ABORTED) default: break; #undef RETEVT } return "unknown"; }
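A state in this framework is just a function with the ocs_sm_function_t shape; ocs_sm_transition above guarantees that the old state sees OCS_EVT_EXIT and the new one OCS_EVT_ENTER (or OCS_EVT_REENTER on a self-transition). The sketch below shows the expected structure of such a function. It is illustrative only: the demo_* state names are invented, and the void * return follows the ocs_sm_function_t typedef in ocs_sm.h (the return value is not used by ocs_sm_post_event above).

#include "ocs_os.h"
#include "ocs_sm.h"

static void *demo_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
static void *demo_busy(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);

/* Idle: wait for work, then hand off to the busy state. */
static void *
demo_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	switch (evt) {
	case OCS_EVT_ENTER:
		/* per-state setup goes here */
		break;
	case OCS_EVT_RESUME:
		/* EXIT (to us) and ENTER (to demo_busy) are posted for us */
		ocs_sm_transition(ctx, demo_busy, data);
		break;
	case OCS_EVT_EXIT:
		/* per-state teardown goes here */
		break;
	default:
		break;
	}
	return NULL;
}

/* Busy: return to idle once the response arrives. */
static void *
demo_busy(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
{
	if (OCS_EVT_RESPONSE == evt) {
		ocs_sm_transition(ctx, demo_idle, data);
	}
	return NULL;
}

A caller would seed the machine with ocs_sm_transition(&ctx, demo_idle, NULL) and then drive it with ocs_sm_post_event().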
492716.c
/* * RELIC is an Efficient LIbrary for Cryptography * Copyright (C) 2007-2017 RELIC Authors * * This file is part of RELIC. RELIC is legal property of its developers, * whose names are not listed here. Please refer to the COPYRIGHT file * for contact information. * * RELIC is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * RELIC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with RELIC. If not, see <http://www.gnu.org/licenses/>. */ /** * @file * * Implementation of the low-level binary field addition and subtraction * functions. * * @ingroup fb */ #include <gmp.h> #include "relic_fb.h" #include "relic_fb_low.h" /*============================================================================*/ /* Public definitions */ /*============================================================================*/ void fb_add1_low(dig_t *c, const dig_t *a, dig_t digit) { int i; (*c) = (*a) ^ digit; c++; a++; for (i = 0; i < FB_DIGS - 1; i++, a++, c++) (*c) = (*a); } void fb_addn_low(dig_t *c, const dig_t *a, const dig_t *b) { mpn_xor_n(c, a, b, FB_DIGS); } void fb_addd_low(dig_t *c, const dig_t *a, const dig_t *b, int size) { mpn_xor_n(c, a, b, size); }
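Because GF(2^m) elements are polynomials with coefficients in GF(2), addition is carry-free and identical to subtraction: mpn_xor_n above just XORs FB_DIGS GMP limbs. For builds without the GMP backend, an equivalent portable loop would look like this sketch (fb_addn_low_portable is an illustrative name, not a RELIC symbol):

#include "relic_fb.h"

/* Word-wise XOR: c = a + b = a - b in GF(2^m). */
static void fb_addn_low_portable(dig_t *c, const dig_t *a, const dig_t *b)
{
	int i;

	for (i = 0; i < FB_DIGS; i++) {
		c[i] = a[i] ^ b[i];
	}
}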
920428.c
#include <stdio.h> /* memoized (top-down) Fibonacci: dp[] caches results, -1 marks "not yet computed" */ long double fib(int n, long double dp[]) { if (n == 1 || n == 2) { return(1); } if (dp[n - 1] == -1) { dp[n - 1] = fib(n - 1, dp) + fib(n - 2, dp); } return(dp[n - 1]); } int main() { int ith; while (scanf("%d", &ith) != EOF) { if (ith < 1) { continue; /* avoid a zero- or negative-length VLA below */ } long double programacaoDinamica[ith]; int i; for (i = 0; i < ith; i ++) { programacaoDinamica[i] = -1; } printf("%.0Lf\n", fib(ith, programacaoDinamica)); } return(0); }
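The program above memoizes the recursion top-down through dp[]. A bottom-up loop yields the same sequence without recursion or the sentinel-initialization pass; this is an alternative sketch, not a replacement for the original, and it shares the original's long double precision limit for large n.

#include <stdio.h>

/* Iterative Fibonacci: fib(1) = fib(2) = 1. */
static long double fib_iter(int n)
{
    long double prev = 1, cur = 1, next;
    int i;

    for (i = 3; i <= n; i++) {
        next = prev + cur;
        prev = cur;
        cur = next;
    }
    return cur;
}

int main(void)
{
    int n;

    while (scanf("%d", &n) != EOF) {
        if (n >= 1) {
            printf("%.0Lf\n", fib_iter(n));
        }
    }
    return 0;
}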
837699.c
/* * Xpost - a Level-2 Postscript interpreter * Copyright (C) 2013-2016, Michael Joshua Ryan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of the Xpost software product nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H # include <config.h> #endif #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "xpost.h" #include "xpost_log.h" #include "xpost_memory.h" // name structures live in mfiles #include "xpost_object.h" // names are objects, with associated hidden string objects #include "xpost_stack.h" // name strings live on a stack #include "xpost_context.h" //#include "xpost_interpreter.h" // initialize interpreter to test #include "xpost_error.h" #include "xpost_string.h" // access string objects #include "xpost_name.h" // double-check prototypes #define CNT_STR(s) sizeof(s)-1, s /* print a dump of the name string stacks, global and local */ void xpost_name_dump_names(Xpost_Context *ctx) { unsigned int stk; unsigned int cnt, i; Xpost_Object str; char *s; xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &stk); cnt = xpost_stack_count(ctx->gl, stk); printf("global names:\n"); for (i=0; i < cnt; i++){ str = xpost_stack_bottomup_fetch(ctx->gl, stk, i); s = xpost_string_get_pointer(ctx, str); printf("%u: %*s\n", i, str.comp_.sz, s); } xpost_memory_table_get_addr(ctx->lo, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &stk); cnt = xpost_stack_count(ctx->lo, stk); printf("local names:\n"); for (i=0; i < cnt; i++) { str = xpost_stack_bottomup_fetch(ctx->lo, stk, i); s = xpost_string_get_pointer(ctx, str); printf("%u: %*s\n", i, str.comp_.sz, s); } } /* initialize the name special entities XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, NAME_TREE */ int xpost_name_init(Xpost_Context *ctx) { Xpost_Memory_Table *tab; unsigned int ent; unsigned int t; unsigned int mode; unsigned int nstk; int ret; mode = ctx->vmmode; ctx->vmmode = GLOBAL; ret = xpost_memory_table_alloc(ctx->gl, 0, 0, &ent); //gl:NAMES if (!ret) { return 0; } //assert(ent == XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK); if (ent != XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK) XPOST_LOG_ERR("Warning: name stack is not in special 
position"); ret = xpost_memory_table_alloc(ctx->gl, 0, 0, &ent); //gl:NAMET if (!ret) { return 0; } //assert(ent == XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE); if (ent != XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE) XPOST_LOG_ERR("Warning: name tree is not in special position"); xpost_stack_init(ctx->gl, &t); tab = &ctx->gl->table; //recalc pointer tab->tab[XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK].adr = t; tab->tab[XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE].adr = 0; xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &nstk); xpost_stack_push(ctx->gl, nstk, xpost_string_cons(ctx, CNT_STR("_not_a_name_"))); assert (xpost_object_get_ent(xpost_stack_topdown_fetch(ctx->gl, nstk, 0)) == XPOST_MEMORY_TABLE_SPECIAL_BOGUS_NAME); ctx->vmmode = LOCAL; ret = xpost_memory_table_alloc(ctx->lo, 0, 0, &ent); //lo:NAMES if (!ret) { return 0; } //assert(ent == XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK); if (ent != XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK) XPOST_LOG_ERR("Warning: name stack is not in special position"); ret = xpost_memory_table_alloc(ctx->lo, 0, 0, &ent); //lo:NAMET if (!ret) { return 0; } //assert(ent == XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE); if (ent != XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE) XPOST_LOG_ERR("Warning: name tree is not in special position"); xpost_stack_init(ctx->lo, &t); tab = &ctx->lo->table; //recalc pointer tab->tab[XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK].adr = t; tab->tab[XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE].adr = 0; xpost_memory_table_get_addr(ctx->lo, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &nstk); xpost_stack_push(ctx->lo, nstk, xpost_string_cons(ctx, CNT_STR("_not_a_name_"))); //assert (xpost_object_get_ent(xpost_stack_topdown_fetch(ctx->lo, nstk, 0)) == XPOST_MEMORY_TABLE_SPECIAL_BOGUS_NAME); if (xpost_object_get_ent(xpost_stack_topdown_fetch(ctx->lo, nstk, 0)) != XPOST_MEMORY_TABLE_SPECIAL_BOGUS_NAME) XPOST_LOG_ERR("Warning: bogus name not in special position"); ctx->vmmode = mode; return 1; } /* perform a search using the ternary search tree */ static unsigned int tstsearch(Xpost_Memory_File *mem, unsigned int tadr, const char *s) { while (tadr) { tst *p = (void *)(mem->base + tadr); if ((unsigned int)*s < p->val) { tadr = p->lo; } else if ((unsigned int)*s == p->val) { if (*s++ == 0) return p->eq; /* payload when val == '\0' */ tadr = p->eq; } else { tadr = p->hi; } } return 0; } /* add a string to the ternary search tree */ static int tstinsert(Xpost_Memory_File *mem, unsigned int tadr, const char *s, unsigned int *retval) { tst *p; unsigned int t; //temporary unsigned int nstk; int ret; if (!tadr) { if (!xpost_memory_file_alloc(mem, sizeof(tst), &tadr)) { XPOST_LOG_ERR("cannot allocate tree node"); return VMerror; } p = (void *)(mem->base + tadr); p->val = *s; p->lo = p->eq = p->hi = 0; } p = (void *)(mem->base + tadr); if ((unsigned int)*s < p->val) { ret = tstinsert(mem, p->lo, s, &t); if (ret) return ret; p = (void *)(mem->base + tadr); //recalc pointer p->lo = t; } else if ((unsigned int)*s == p->val) { if (*s) { ret = tstinsert(mem, p->eq, ++s, &t); if (ret) return ret; p = (void *)(mem->base + tadr); //recalc pointer p->eq = t; }else { xpost_memory_table_get_addr(mem, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &nstk); p->eq = xpost_stack_count(mem, nstk); /* payload when val == '\0' */ } } else { ret = tstinsert(mem, p->hi, s, &t); if (ret) return ret; p = (void *)(mem->base + tadr); //recalc pointer p->hi = t; } //return tadr; *retval = tadr; return 0; } /* add the name to the name stack, return index */ static unsigned int addname(Xpost_Context *ctx, const char 
*s) { Xpost_Memory_File *mem = ctx->vmmode==GLOBAL?ctx->gl:ctx->lo; unsigned int names; unsigned int u; Xpost_Object str; xpost_memory_table_get_addr(mem, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &names); u = xpost_stack_count(mem, names); //xpost_memory_file_dump(ctx->gl); //dumpmtab(ctx->gl, 0); //unsigned int vmmode = ctx->vmmode; //ctx->vmmode = GLOBAL; str = xpost_string_cons(ctx, strlen(s), s); if (xpost_object_get_type(str) == nulltype) { XPOST_LOG_ERR("cannot allocate name string"); return 0; } xpost_stack_push(mem, names, str); //ctx->vmmode = vmmode; return u; } /* construct a name object from a string searches and if necessary installs string in ternary search tree, adding string to stack if so. returns a generic object with nametype tag with FBANK flag, mark_.pad0 set to zero mark_.padw contains XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK stack index */ Xpost_Object xpost_name_cons(Xpost_Context *ctx, const char *s) { unsigned int u; unsigned int t; Xpost_Object o; unsigned int tstk; int ret; xpost_memory_table_get_addr(ctx->lo, XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE, &tstk); u = tstsearch(ctx->lo, tstk, s); if (!u) { xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE, &tstk); u = tstsearch(ctx->gl, tstk, s); if (!u) { Xpost_Memory_File *mem = ctx->vmmode==GLOBAL?ctx->gl:ctx->lo; Xpost_Memory_Table *tab = &mem->table; ret = tstinsert(mem, tab->tab[XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE].adr, s, &t); if (ret) { //this can only be a VMerror return invalid; } tab = &mem->table; //recalc pointer tab->tab[XPOST_MEMORY_TABLE_SPECIAL_NAME_TREE].adr = t; u = addname(ctx, s); // obeys vmmode o.mark_.tag = nametype | (ctx->vmmode==GLOBAL?XPOST_OBJECT_TAG_DATA_FLAG_BANK:0); o.mark_.pad0 = 0; o.mark_.padw = u; } else { o.mark_.tag = nametype | XPOST_OBJECT_TAG_DATA_FLAG_BANK; // global o.mark_.pad0 = 0; o.mark_.padw = u; } } else { o.mark_.tag = nametype; // local o.mark_.pad0 = 0; o.mark_.padw = u; } return o; } /* yield the string object from the name string stack */ Xpost_Object xpost_name_get_string(Xpost_Context *ctx, Xpost_Object n) { Xpost_Memory_File *mem = xpost_context_select_memory(ctx, n); unsigned int names; Xpost_Object str; xpost_memory_table_get_addr(mem, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK, &names); str = xpost_stack_bottomup_fetch(mem, names, n.mark_.padw); //str.tag |= XPOST_OBJECT_TAG_DATA_FLAG_BANK; return str; } #ifdef TESTMODULE_NM #include <stdio.h> #include <unistd.h> /* void init(Xpost_Context *ctx) { ctx->gl = malloc(sizeof(Xpost_Memory_File)); xpost_memory_file_init(ctx->gl, "x.mem"); (void)xpost_memory_table_init(ctx->gl); // create mtab at address zero //(void)xpost_memory_table_alloc(ctx->gl, 0, 0, 0); //FREE xpost_free_init(ctx->gl); (void)xpost_memory_table_alloc(ctx->gl, 0, 0, 0); //VS xpost_context_init_ctxlist(ctx->gl); xpost_name_init(ctx); } Xpost_Context ctx; */ Xpost_Context *ctx; void init(void) { itpdata = malloc(sizeof*itpdata); memset(itpdata, 0, sizeof*itpdata); xpost_interpreter_init(itpdata); ctx = &itpdata->ctab[0]; ctx->vmmode = GLOBAL; } int main(void) { if (!xpost_init()) { fprintf(stderr, "Fail to initialize xpost name test\n"); return -1; } printf("\n^test nm\n"); //init(&ctx); init(); ctx->vmmode = LOCAL; printf("pop "); xpost_object_dump(xpost_name_cons(ctx, "pop")); printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); //xpost_stack_dump(ctx->gl, xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); puts(""); printf("apple "); 
xpost_object_dump(xpost_name_cons(ctx, "apple")); xpost_object_dump(xpost_name_cons(ctx, "apple")); //printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); //xpost_stack_dump(ctx->gl, xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); puts(""); printf("banana "); xpost_object_dump(xpost_name_cons(ctx, "banana")); xpost_object_dump(xpost_name_cons(ctx, "banana")); //printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); //xpost_stack_dump(ctx->gl, xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); puts(""); printf("currant "); xpost_object_dump(xpost_name_cons(ctx, "currant")); xpost_object_dump(xpost_name_cons(ctx, "currant")); //printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); //xpost_stack_dump(ctx->gl, xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); puts(""); printf("apple "); xpost_object_dump(xpost_name_cons(ctx, "apple")); printf("banana "); xpost_object_dump(xpost_name_cons(ctx, "banana")); printf("currant "); xpost_object_dump(xpost_name_cons(ctx, "currant")); printf("date "); //printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); xpost_object_dump(xpost_name_cons(ctx, "date")); //printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); xpost_stack_dump(ctx->gl, xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); puts(""); //printf("NAMES at %u\n", xpost_memory_table_get_addr(ctx->gl, XPOST_MEMORY_TABLE_SPECIAL_NAME_STACK)); printf("elderberry "); xpost_object_dump(xpost_name_cons(ctx, "elderberry")); printf("pop "); xpost_object_dump(xpost_name_cons(ctx, "pop")); //xpost_memory_file_dump(ctx->gl); //dumpmtab(ctx->gl, 0); puts(""); xpost_quit(); return 0; } #endif
13319.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* ft_clamp_d.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: lpaulo-m <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2021/04/03 13:46:42 by lpaulo-m #+# #+# */ /* Updated: 2021/04/03 13:47:10 by lpaulo-m ### ########.fr */ /* */ /* ************************************************************************** */ #include <libft.h> double ft_clamp_d(double x, double min, double max) { if (x < min) return (min); if (x > max) return (max); return (x); }
564369.c
#include "seatest.h" void test_asserting() { assert_true( 1 == 1); assert_false(1 == 2); assert_int_equal(1, 1); } void test_assert_fails() { assert_true( 1 == 2); assert_true( 3 == 5); // for checking END_TEST_IF_ASSERT_FAIL option (will be skipped) } void test_fixture_two( void ) { test_fixture_start(); run_test(test_asserting); run_test(test_assert_fails); test_fixture_end(); }
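/*
 * A minimal sketch of what a seatest-style fixture boils down to: a fixture
 * is a function that brackets a list of run_test() calls, and run_test() is
 * a function-pointer call plus pass/fail bookkeeping. The toy_* names are
 * illustrative, not seatest's real internals.
 */
#include <stdio.h>

static int toy_failures;

static void toy_assert_true(int cond, const char *expr)
{
    if (!cond)
    {
        toy_failures++;
        printf("FAIL: %s\n", expr);
    }
}

static void toy_run_test(void (*test)(void), const char *name)
{
    int before = toy_failures;
    test();
    printf("%s: %s\n", name, toy_failures == before ? "ok" : "FAILED");
}

static void sample_test(void)
{
    toy_assert_true(1 == 1, "1 == 1");
}

int main(void)
{
    toy_run_test(sample_test, "sample_test");
    return toy_failures != 0;
}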
879815.c
// Copyright 2021 XMOS LIMITED. // This Software is subject to the terms of the XMOS Public Licence: Version 1. /* System headers */ #include <platform.h> #include <xs1.h> /* Library headers */ #include "rtos/osal/api/rtos_osal.h" #include "rtos/drivers/intertile/api/rtos_intertile.h" /* App headers */ #include "app_conf.h" #include "individual_tests/intertile/intertile_test.h" static const char* test_name = "fixed_len_tx_test"; #define local_printf( FMT, ... ) intertile_printf("%s|" FMT, test_name, ##__VA_ARGS__) #define INTERTILE_TX_TILE 0 #define INTERTILE_RX_TILE 1 typedef struct test_params { size_t len; uint8_t *data; } test_params_t; #define INTERTILE_TEST_ITERS 3 #define INTERTILE_RX_BUF_SIZE 1000 #define INTERTILE_TEST_VECTOR_2_LEN 1000 static uint8_t test_vector_0[] = {0x00, 0xFF, 0xAA, 0x55}; static uint8_t test_vector_1[] = {0xDE, 0xAD, 0xBE, 0xEF}; static uint8_t test_vector_2[INTERTILE_TEST_VECTOR_2_LEN] = {0}; static test_params_t intertile_tests[INTERTILE_TEST_ITERS] = { {sizeof(test_vector_0), test_vector_0}, {sizeof(test_vector_1), test_vector_1}, {sizeof(test_vector_2), test_vector_2}, }; INTERTILE_MAIN_TEST_ATTR static int main_test(intertile_test_ctx_t *ctx) { local_printf("Start"); for (int i=0; i<INTERTILE_TEST_VECTOR_2_LEN; i++) { test_vector_2[i] = (uint8_t)(0xFF & i); } for (int i=0; i<INTERTILE_TEST_ITERS; i++) { size_t test_len = intertile_tests[i].len; uint8_t *test_buf = intertile_tests[i].data; local_printf("Test iteration %d", i); #if ON_TILE(INTERTILE_TX_TILE) { local_printf("TX %u", test_len); rtos_intertile_tx(ctx->intertile_ctx, INTERTILE_RPC_PORT, test_buf, test_len); local_printf("TX done"); } #endif #if ON_TILE(INTERTILE_RX_TILE) { uint8_t *rx_buf = NULL; size_t bytes_rx = rtos_intertile_rx(ctx->intertile_ctx, INTERTILE_RPC_PORT, (void**)&rx_buf, RTOS_OSAL_WAIT_MS(10)); if (rx_buf == NULL) { local_printf("RX returned NULL buffer"); return -1; } if (bytes_rx != test_len) { local_printf("RX timed out. Got %u expected %u", bytes_rx, test_len); rtos_osal_free(rx_buf); return -1; } else { local_printf("RX passed. Got %u expected %u", bytes_rx, test_len); } for (size_t j=0; j< bytes_rx; j++) { if (test_buf[j] != rx_buf[j]) { local_printf("RX failed at index %u. Got %u expected %u", j, rx_buf[j], test_buf[j]); rtos_osal_free(rx_buf); return -1; } } rtos_osal_free(rx_buf); } #endif } local_printf("Done"); return 0; } void register_fixed_len_tx_test(intertile_test_ctx_t *test_ctx) { uint32_t this_test_num = test_ctx->test_cnt; local_printf("Register to test num %d", this_test_num); test_ctx->name[this_test_num] = (char*)test_name; test_ctx->main_test[this_test_num] = main_test; test_ctx->test_cnt++; } #undef local_printf
362417.c
#ifndef lint static const char RCSid[] = "$Id: caldefn.c,v 2.25 2012/07/29 22:10:45 greg Exp $"; #endif /* * Store variable definitions. * * 7/1/85 Greg Ward * * 11/11/85 Added conditional compiles (OUTCHAN) for control output. * * 4/2/86 Added conditional compiles for function definitions (FUNCTION). * * 1/15/88 Added clock for caching of variable values. * * 11/16/88 Added VARDEF structure for hard linking. * * 5/31/90 Added conditional compile (REDEFW) for redefinition warning. * * 4/23/91 Added ':' assignment for constant expressions * * 8/7/91 Added optional context path to append to variable names * * 5/17/2001 Fixed clock counter wrapping behavior * * 2/19/03 Eliminated conditional compiles in favor of esupport extern. */ #include "copyright.h" #include <stdio.h> #include <string.h> #include <ctype.h> #include "rterror.h" #include "rtio.h" #include "rtmisc.h" #include "calcomp.h" #ifndef NHASH #define NHASH 521 /* hash size (a prime!) */ #endif #define hash(s) (shash(s)%NHASH) #define newnode() (EPNODE *)ecalloc(1, sizeof(EPNODE)) static double dvalue(char *name, EPNODE *d); #define MAXCLOCK (1L<<31) /* clock wrap value */ unsigned long eclock = 0; /* value storage timer */ #define MAXCNTX 1023 /* maximum context length */ static char context[MAXCNTX+1]; /* current context path */ static VARDEF *hashtbl[NHASH]; /* definition list */ static int htndx; /* index for */ static VARDEF *htpos; /* ...dfirst() and */ static EPNODE *ochpos; /* ...dnext */ static EPNODE *outchan; EPNODE *curfunc = NULL; #define dname(ep) ((ep)->v.kid->type == SYM ? \ (ep)->v.kid->v.name : \ (ep)->v.kid->v.kid->v.name) void fcompile( /* get definitions from a file */ char *fname ) { FILE *fp; if (fname == NULL) fp = stdin; else if ((fp = fopen(fname, "r")) == NULL) { eputs(fname); eputs(": cannot open\n"); quit(1); } initfile(fp, fname, 0); while (nextc != EOF) getstatement(); if (fname != NULL) fclose(fp); } void scompile( /* get definitions from a string */ char *str, char *fn, int ln ) { initstr(str, fn, ln); while (nextc != EOF) getstatement(); } double varvalue( /* return a variable's value */ char *vname ) { return(dvalue(vname, dlookup(vname))); } double evariable( /* evaluate a variable */ EPNODE *ep ) { VARDEF *dp = ep->v.ln; return(dvalue(dp->name, dp->def)); } void varset( /* set a variable's value */ char *vname, int assign, double val ) { char *qname; EPNODE *ep1, *ep2; /* get qualified name */ qname = qualname(vname, 0); /* check for quick set */ if ((ep1 = dlookup(qname)) != NULL && ep1->v.kid->type == SYM && (ep1->type == ':') <= (assign == ':')) { ep2 = ep1->v.kid->sibling; if (ep2->type == NUM) { ep2->v.num = val; ep1->type = assign; return; } } /* hand build definition */ ep1 = newnode(); ep1->type = assign; ep2 = newnode(); ep2->type = SYM; ep2->v.name = savestr(vname); addekid(ep1, ep2); ep2 = newnode(); ep2->type = NUM; ep2->v.num = val; addekid(ep1, ep2); if (assign == ':') dremove(qname); else dclear(qname); dpush(qname, ep1); } void dclear( /* delete variable definitions of name */ char *name ) { EPNODE *ep; while ((ep = dpop(name)) != NULL) { if (ep->type == ':') { dpush(name, ep); /* don't clear constants */ return; } epfree(ep); } } void dremove( /* delete all definitions of name */ char *name ) { EPNODE *ep; while ((ep = dpop(name)) != NULL) epfree(ep); } int vardefined( /* return non-zero if variable defined */ char *name ) { EPNODE *dp; return((dp = dlookup(name)) != NULL && dp->v.kid->type == SYM); } char * setcontext( /* set a new context path */ char *ctx ) { char *cpp; if 
(ctx == NULL) return(context); /* just asking */ while (*ctx == CNTXMARK) ctx++; /* skip past marks */ if (!*ctx) { context[0] = '\0'; /* empty means clear context */ return(context); } cpp = context; /* start context with mark */ *cpp++ = CNTXMARK; do { /* carefully copy new context */ if (cpp >= context+MAXCNTX) break; /* just copy what we can */ if (isid(*ctx)) *cpp++ = *ctx++; else { *cpp++ = '_'; ctx++; } } while (*ctx); while (cpp[-1] == CNTXMARK) /* cannot end in context mark */ cpp--; *cpp = '\0'; return(context); } char * pushcontext( /* push on another context */ char *ctx ) { char oldcontext[MAXCNTX+1]; int n; strcpy(oldcontext, context); /* save old context */ setcontext(ctx); /* set new context */ n = strlen(context); /* tack on old */ if (n+strlen(oldcontext) > MAXCNTX) { strncpy(context+n, oldcontext, MAXCNTX-n); context[MAXCNTX] = '\0'; } else strcpy(context+n, oldcontext); return(context); } char * popcontext(void) /* pop off top context */ { char *cp1, *cp2; if (!context[0]) /* nothing left to pop */ return(context); cp2 = context; /* find mark */ while (*++cp2 && *cp2 != CNTXMARK) ; cp1 = context; /* copy tail to front */ while ( (*cp1++ = *cp2++) ) ; return(context); } char * qualname( /* get qualified name */ char *nam, int lvl ) { static char nambuf[RMAXWORD+1]; char *cp = nambuf, *cpp; /* check for explicit local */ if (*nam == CNTXMARK) if (lvl > 0) /* only action is to refuse search */ return(NULL); else nam++; else if (nam == nambuf) /* check for repeat call */ return(lvl > 0 ? NULL : nam); /* copy name to static buffer */ while (*nam) { if (cp >= nambuf+RMAXWORD) goto toolong; *cp++ = *nam++; } /* check for explicit global */ if (cp > nambuf && cp[-1] == CNTXMARK) { if (lvl > 0) return(NULL); *--cp = '\0'; return(nambuf); /* already qualified */ } cpp = context; /* else skip the requested levels */ while (lvl-- > 0) { if (!*cpp) return(NULL); /* return NULL if past global level */ while (*++cpp && *cpp != CNTXMARK) ; } while (*cpp) { /* copy context to static buffer */ if (cp >= nambuf+RMAXWORD) goto toolong; *cp++ = *cpp++; } toolong: *cp = '\0'; return(nambuf); /* return qualified name */ } int incontext( /* is qualified name in current context? 
*/ char *qn ) { if (!context[0]) /* global context accepts all */ return(1); while (*qn && *qn != CNTXMARK) /* find context mark */ qn++; return(!strcmp(qn, context)); } void chanout( /* set output channels */ void (*cs)(int n, double v) ) { EPNODE *ep; for (ep = outchan; ep != NULL; ep = ep->sibling) (*cs)(ep->v.kid->v.chan, evalue(ep->v.kid->sibling)); } void dcleanup( /* clear definitions (0->vars,1->output,2->consts) */ int lvl ) { int i; VARDEF *vp; EPNODE *ep; /* if context is global, clear all */ for (i = 0; i < NHASH; i++) for (vp = hashtbl[i]; vp != NULL; vp = vp->next) if (incontext(vp->name)) { if (lvl >= 2) dremove(vp->name); else dclear(vp->name); } if (lvl >= 1) { for (ep = outchan; ep != NULL; ep = ep->sibling) epfree(ep); outchan = NULL; } } EPNODE * dlookup( /* look up a definition */ char *name ) { VARDEF *vp; if ((vp = varlookup(name)) == NULL) return(NULL); return(vp->def); } VARDEF * varlookup( /* look up a variable */ char *name ) { int lvl = 0; char *qname; VARDEF *vp; /* find most qualified match */ while ((qname = qualname(name, lvl++)) != NULL) for (vp = hashtbl[hash(qname)]; vp != NULL; vp = vp->next) if (!strcmp(vp->name, qname)) return(vp); return(NULL); } VARDEF * varinsert( /* get a link to a variable */ char *name ) { VARDEF *vp; int hv; if ((vp = varlookup(name)) != NULL) { vp->nlinks++; return(vp); } vp = (VARDEF *)emalloc(sizeof(VARDEF)); vp->lib = liblookup(name); if (vp->lib == NULL) /* if name not in library */ name = qualname(name, 0); /* use fully qualified version */ hv = hash(name); vp->name = savestr(name); vp->nlinks = 1; vp->def = NULL; vp->next = hashtbl[hv]; hashtbl[hv] = vp; return(vp); } void libupdate( /* update library links */ char *fn ) { int i; VARDEF *vp; /* if fn is NULL then relink all */ for (i = 0; i < NHASH; i++) for (vp = hashtbl[i]; vp != NULL; vp = vp->next) if (vp->lib != NULL || fn == NULL || !strcmp(fn, vp->name)) vp->lib = liblookup(vp->name); } void varfree( /* release link to variable */ VARDEF *ln ) { VARDEF *vp; int hv; if (--ln->nlinks > 0) return; /* still active */ hv = hash(ln->name); vp = hashtbl[hv]; if (vp == ln) hashtbl[hv] = vp->next; else { while (vp->next != ln) /* must be in list */ vp = vp->next; vp->next = ln->next; } freestr(ln->name); efree((char *)ln); } EPNODE * dfirst(void) /* return pointer to first definition */ { htndx = 0; htpos = NULL; ochpos = outchan; return(dnext()); } EPNODE * dnext(void) /* return pointer to next definition */ { EPNODE *ep; char *nm; while (htndx < NHASH) { if (htpos == NULL) htpos = hashtbl[htndx++]; while (htpos != NULL) { ep = htpos->def; nm = htpos->name; htpos = htpos->next; if (ep != NULL && incontext(nm)) return(ep); } } if ((ep = ochpos) != NULL) ochpos = ep->sibling; return(ep); } EPNODE * dpop( /* pop a definition */ char *name ) { VARDEF *vp; EPNODE *dp; if ((vp = varlookup(name)) == NULL || vp->def == NULL) return(NULL); dp = vp->def; vp->def = dp->sibling; varfree(vp); return(dp); } void dpush( /* push on a definition */ char *nm, EPNODE *ep ) { VARDEF *vp; vp = varinsert(nm); ep->sibling = vp->def; vp->def = ep; } void addchan( /* add an output channel assignment */ EPNODE *sp ) { int ch = sp->v.kid->v.chan; EPNODE *ep, *epl; for (epl = NULL, ep = outchan; ep != NULL; epl = ep, ep = ep->sibling) if (ep->v.kid->v.chan >= ch) { if (epl != NULL) epl->sibling = sp; else outchan = sp; if (ep->v.kid->v.chan > ch) sp->sibling = ep; else { sp->sibling = ep->sibling; epfree(ep); } return; } if (epl != NULL) epl->sibling = sp; else outchan = sp; sp->sibling = NULL; } 
void getstatement(void) /* get next statement */ { EPNODE *ep; char *qname; VARDEF *vdef; if (nextc == ';') { /* empty statement */ scan(); return; } if (esupport&E_OUTCHAN && nextc == '$') { /* channel assignment */ ep = getchan(); addchan(ep); } else { /* ordinary definition */ ep = getdefn(); qname = qualname(dname(ep), 0); if (esupport&E_REDEFW && (vdef = varlookup(qname)) != NULL) { if (vdef->def != NULL && epcmp(ep, vdef->def)) { wputs(qname); if (vdef->def->type == ':') wputs(": redefined constant expression\n"); else wputs(": redefined\n"); } else if (ep->v.kid->type == FUNC && vdef->lib != NULL) { wputs(qname); wputs(": definition hides library function\n"); } } if (ep->type == ':') dremove(qname); else dclear(qname); dpush(qname, ep); } if (nextc != EOF) { if (nextc != ';') syntax("';' expected"); scan(); } } EPNODE * getdefn(void) /* A -> SYM = E1 */ /* SYM : E1 */ /* FUNC(SYM,..) = E1 */ /* FUNC(SYM,..) : E1 */ { EPNODE *ep1, *ep2; if (!isalpha(nextc) && nextc != CNTXMARK) syntax("illegal variable name"); ep1 = newnode(); ep1->type = SYM; ep1->v.name = savestr(getname()); if (esupport&E_FUNCTION && nextc == '(') { ep2 = newnode(); ep2->type = FUNC; addekid(ep2, ep1); ep1 = ep2; do { scan(); if (!isalpha(nextc)) syntax("illegal parameter name"); ep2 = newnode(); ep2->type = SYM; ep2->v.name = savestr(getname()); addekid(ep1, ep2); } while (nextc == ','); if (nextc != ')') syntax("')' expected"); scan(); curfunc = ep1; } if (nextc != '=' && nextc != ':') syntax("'=' or ':' expected"); ep2 = newnode(); ep2->type = nextc; scan(); addekid(ep2, ep1); addekid(ep2, getE1()); if (ep1->type == SYM && ep1->sibling->type != NUM) { ep1 = newnode(); ep1->type = CLKT; ep1->v.tick = 0; addekid(ep2, ep1); ep1 = newnode(); ep1->type = NUM; addekid(ep2, ep1); } curfunc = NULL; return(ep2); } EPNODE * getchan(void) /* A -> $N = E1 */ { EPNODE *ep1, *ep2; if (nextc != '$') syntax("missing '$'"); scan(); ep1 = newnode(); ep1->type = CHAN; ep1->v.chan = getinum(); if (nextc != '=') syntax("'=' expected"); scan(); ep2 = newnode(); ep2->type = '='; addekid(ep2, ep1); addekid(ep2, getE1()); return(ep2); } /* * The following routines are for internal use only: */ static double /* evaluate a variable */ dvalue(char *name, EPNODE *d) { EPNODE *ep1, *ep2; if (d == NULL || d->v.kid->type != SYM) { eputs(name); eputs(": undefined variable\n"); quit(1); } ep1 = d->v.kid->sibling; /* get expression */ if (ep1->type == NUM) return(ep1->v.num); /* return if number */ ep2 = ep1->sibling; /* check time */ if (eclock >= MAXCLOCK) eclock = 1; /* wrap clock counter */ if (ep2->v.tick < MAXCLOCK && (ep2->v.tick == 0) | (ep2->v.tick != eclock)) { ep2->v.tick = d->type == ':' ? MAXCLOCK : eclock; ep2 = ep2->sibling; ep2->v.num = evalue(ep1); /* needs new value */ } else ep2 = ep2->sibling; /* else reuse old value */ return(ep2->v.num); }
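/*
 * A minimal sketch of the clock-cache idea dvalue() implements above: each
 * definition carries the eclock tick at which its value was last computed,
 * and is re-evaluated only when the global clock has moved on (caldefn also
 * pins ':' constants at MAXCLOCK so they are never re-evaluated). The names
 * cached_expr and expr_value are illustrative, not part of calcomp.
 */
#include <stdio.h>

static unsigned long eclock = 1;   /* bumped whenever inputs may change */

typedef struct {
    double (*eval)(void);          /* the (possibly expensive) expression */
    unsigned long tick;            /* clock at last evaluation; 0 = never */
    double cached;
} cached_expr;

static double expr_value(cached_expr *e)
{
    if (e->tick == 0 || e->tick != eclock) {
        e->cached = e->eval();     /* stale (or first use): recompute */
        e->tick = eclock;
    }
    return e->cached;              /* fresh: reuse */
}

static int evaluations = 0;
static double slow_eval(void) { evaluations++; return 42.0; }

int main(void)
{
    cached_expr e = { slow_eval, 0, 0.0 };
    expr_value(&e);
    expr_value(&e);                /* same tick: served from cache */
    eclock++;                      /* pretend an input changed */
    expr_value(&e);
    printf("evaluations: %d\n", evaluations);  /* prints 2 */
    return 0;
}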
970860.c
#include "stdlib.h" #include "stdbool.h" #include "driverDataDispatcher.h" #include "driverDataDispatcherList.h" #include "i2cDriverDataDispatcher.h" #include "remoteDriverDataDispatcher.h" // Drivers #include "../../drivers/driver.h" // I2C management #include "../../common/i2c/master/i2cMaster.h" #include "../../common/i2c/master/i2cMasterInputStream.h" #include "../../common/i2c/master/i2cMasterOutputStream.h" #include "../../common/io/buffer.h" #include "../../common/io/inputStream.h" #include "../../common/io/outputStream.h" #include "../../common/io/ioUtils.h" #include "../../common/log/logger.h" #include "../../common/log/logLevel.h" #include "../../device/transmitMode.h" /** * The stream to send data through I2C */ // temp buffer, shared by every i2cDriverDataDispatcher // useful only for copyInputToOutputStream ! static unsigned char i2cTempBufferArray[I2C_DRIVER_DATA_DISPATCHER_BUFFER_LENGTH]; static Buffer i2cTempBuffer; static I2cMasterOutputStream i2cMasterOutputStream; static I2cMasterInputStream i2cMasterInputStream; DriverDataDispatcher* addI2CDriverDataDispatcher( const char* dispatcherName, Buffer* i2cMasterInputBuffer, unsigned char (*i2cMasterInputBufferArray)[], unsigned char i2cMasterInputBufferLength, OutputStream* outputStream, InputStream* inputStream, I2cBusConnection* i2cBusConnection) { // Init the output Stream : I2C Master -> I2C Slave(address) initBuffer(&i2cTempBuffer, (unsigned char(*)[]) & i2cTempBufferArray, I2C_DRIVER_DATA_DISPATCHER_BUFFER_LENGTH, "I2C Master Output", "global"); initMasterI2cOutputStream(&i2cMasterOutputStream, i2cBusConnection, outputStream, &i2cTempBuffer); // Init the input Stream : I2C Slave (address) -> I2C Master initBuffer(i2cMasterInputBuffer, (unsigned char(*)[]) i2cMasterInputBufferArray, i2cMasterInputBufferLength, "I2C Master Input", dispatcherName); initMasterI2cInputStream(&i2cMasterInputStream, i2cBusConnection, i2cMasterInputBuffer, inputStream); // Clear previous data to avoid buffer from the other board provided by error at the initialization // TODO : Clarify this, to avoid to read some bad data until we are ready ! clearInputStream(inputStream); DriverDataDispatcher* result = addDriverDataDispatcher( TRANSMIT_I2C, dispatcherName, NULL, i2cBusConnection->i2cAddress, inputStream, outputStream, remoteDriverDataDispatcherTransmit ); return result; }
345351.c
/* Copyright (c) 2011 Stanford University. * Copyright (c) 2014 Cryptography Research, Inc. * Released under the MIT License. See LICENSE.txt for license information. */ /* Chacha random number generator code copied from crandom */ #include "crandom.h" #include "intrinsics.h" #include "config.h" #include "magic.h" #include <stdio.h> volatile unsigned int crandom_features = 0; unsigned int crandom_detect_features(void) { unsigned int out = GEN; # if (defined(__i386__) || defined(__x86_64__)) u_int32_t a,b,c,d; a=1; __asm__("cpuid" : "+a"(a), "=b"(b), "=c"(c), "=d"(d)); out |= GEN; if (d & 1<<26) out |= SSE2; if (d & 1<< 9) out |= SSSE3; if (c & 1<<25) out |= AESNI; if (c & 1<<28) out |= AVX; if (b & 1<<5) out |= AVX2; a=0x80000001; __asm__("cpuid" : "+a"(a), "=b"(b), "=c"(c), "=d"(d)); if (c & 1<<11) out |= XOP; if (c & 1<<30) out |= RDRAND; # endif return out; } INTRINSIC u_int64_t rdrand(int abort_on_fail) { uint64_t out = 0; int tries = 1000; if (HAVE(RDRAND)) { # if defined(__x86_64__) u_int64_t out, a=0; for (; tries && !a; tries--) { __asm__ __volatile__ ( "rdrand %0\n\tsetc %%al" : "=r"(out), "+a"(a) :: "cc" ); } # elif (defined(__i386__)) u_int32_t reg, a=0; uint64_t out; for (; tries && !a; tries--) { __asm__ __volatile__ ( "rdrand %0\n\tsetc %%al" : "=r"(reg), "+a"(a) :: "cc" ); } out = reg; a = 0; for (; tries && !a; tries--) { __asm__ __volatile__ ( "rdrand %0\n\tsetc %%al" : "=r"(reg), "+a"(a) :: "cc" ); } out = out << 32 | reg; return out; # else abort(); /* whut */ # endif } else { tries = 0; } if (abort_on_fail && !tries) { abort(); } return out; } /* ------------------------------- Vectorized code ------------------------------- */ #define shuffle(x,i) _mm_shuffle_epi32(x, \ i + ((i+1)&3)*4 + ((i+2)&3)*16 + ((i+3)&3)*64) #define add _mm_add_epi32 #define add64 _mm_add_epi64 #define NEED_XOP (MIGHT_HAVE(XOP)) #define NEED_SSSE3 (MIGHT_HAVE(SSSE3) && !MUST_HAVE(XOP)) #define NEED_SSE2 (MIGHT_HAVE(SSE2) && !MUST_HAVE(SSSE3)) #define NEED_CONV (!MUST_HAVE(SSE2)) #if NEED_XOP static __inline__ void quarter_round_xop( ssereg *a, ssereg *b, ssereg *c, ssereg *d ) { *a = add(*a,*b); *d = xop_rotate(16, *d ^ *a); *c = add(*c,*d); *b = xop_rotate(12, *b ^ *c); *a = add(*a,*b); *d = xop_rotate(8, *d ^ *a); *c = add(*c,*d); *b = xop_rotate(7, *b ^ *c); } #endif #if NEED_SSSE3 static const ssereg shuffle8 = { 0x0605040702010003ull, 0x0E0D0C0F0A09080Bull }; static const ssereg shuffle16 = { 0x0504070601000302ull, 0x0D0C0F0E09080B0Aull }; INTRINSIC ssereg ssse3_rotate_8(ssereg a) { return _mm_shuffle_epi8(a, shuffle8); } INTRINSIC ssereg ssse3_rotate_16(ssereg a) { return _mm_shuffle_epi8(a, shuffle16); } static __inline__ void quarter_round_ssse3( ssereg *a, ssereg *b, ssereg *c, ssereg *d ) { *a = add(*a,*b); *d = ssse3_rotate_16(*d ^ *a); *c = add(*c,*d); *b = sse2_rotate(12, *b ^ *c); *a = add(*a,*b); *d = ssse3_rotate_8( *d ^ *a); *c = add(*c,*d); *b = sse2_rotate(7, *b ^ *c); } #endif /* MIGHT_HAVE(SSSE3) && !MUST_HAVE(XOP) */ #if NEED_SSE2 static __inline__ void quarter_round_sse2( ssereg *a, ssereg *b, ssereg *c, ssereg *d ) { *a = add(*a,*b); *d = sse2_rotate(16, *d ^ *a); *c = add(*c,*d); *b = sse2_rotate(12, *b ^ *c); *a = add(*a,*b); *d = sse2_rotate(8, *d ^ *a); *c = add(*c,*d); *b = sse2_rotate(7, *b ^ *c); } #endif #define DOUBLE_ROUND(qrf) { \ qrf(&a1,&b1,&c1,&d1); \ qrf(&a2,&b2,&c2,&d2); \ b1 = shuffle(b1,1); \ c1 = shuffle(c1,2); \ d1 = shuffle(d1,3); \ b2 = shuffle(b2,1); \ c2 = shuffle(c2,2); \ d2 = shuffle(d2,3); \ \ qrf(&a1,&b1,&c1,&d1); \ qrf(&a2,&b2,&c2,&d2); \ b1 
= shuffle(b1,3); \ c1 = shuffle(c1,2); \ d1 = shuffle(d1,1); \ b2 = shuffle(b2,3); \ c2 = shuffle(c2,2); \ d2 = shuffle(d2,1); \ } #define OUTPUT_FUNCTION { \ output[0] = add(a1,aa); \ output[1] = add(b1,bb); \ output[2] = add(c1,cc); \ output[3] = add(d1,dd); \ output[4] = add(a2,aa); \ output[5] = add(b2,bb); \ output[6] = add(c2,add(cc,p)); \ output[7] = add(d2,dd); \ \ output += 8; \ \ cc = add64(add64(cc,p), p); \ a1 = a2 = aa; \ b1 = b2 = bb; \ c1 = cc; c2 = add64(cc,p);\ d1 = d2 = dd; \ } /* ------------------------------------------------------------------------------- */ INTRINSIC u_int32_t rotate(int r, u_int32_t a) { return a<<r ^ a>>(32-r); } static __inline__ __attribute__((unused)) void quarter_round(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d) { *a = *a + *b; *d = rotate(16, *d^*a); *c = *c + *d; *b = rotate(12, *b^*c); *a = *a + *b; *d = rotate(8, *d^*a); *c = *c + *d; *b = rotate(7, *b^*c); } static void crandom_chacha_expand(u_int64_t iv, u_int64_t ctr, int nr, int output_size, const unsigned char *key_, unsigned char *output_) { # if MIGHT_HAVE_SSE2 if (HAVE(SSE2)) { ssereg *key = (ssereg *)key_; ssereg *output = (ssereg *)output_; ssereg a1 = key[0], a2 = a1, aa = a1, b1 = key[1], b2 = b1, bb = b1, c1 = {iv, ctr}, c2 = {iv, ctr+1}, cc = c1, d1 = {0x3320646e61707865ull, 0x6b20657479622d32ull}, d2 = d1, dd = d1, p = {0, 1}; int i,r; # if (NEED_XOP) if (HAVE(XOP)) { for (i=0; i<output_size; i+=128) { for (r=nr; r>0; r-=2) DOUBLE_ROUND(quarter_round_xop); OUTPUT_FUNCTION; } return; } # endif # if (NEED_SSSE3) if (HAVE(SSSE3)) { for (i=0; i<output_size; i+=128) { for (r=nr; r>0; r-=2) DOUBLE_ROUND(quarter_round_ssse3); OUTPUT_FUNCTION; } return; } # endif # if (NEED_SSE2) if (HAVE(SSE2)) { for (i=0; i<output_size; i+=128) { for (r=nr; r>0; r-=2) DOUBLE_ROUND(quarter_round_sse2); OUTPUT_FUNCTION; } return; } # endif } # endif # if NEED_CONV { const u_int32_t *key = (const u_int32_t *)key_; u_int32_t x[16], input[16] = { key[0], key[1], key[2], key[3], key[4], key[5], key[6], key[7], iv, iv>>32, ctr, ctr>>32, 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 }, *output = (u_int32_t *)output_; int i, r; for (i=0; i<output_size; i+= 64) { for (r=0; r<16; r++) { x[r] = input[r]; } for (r=nr; r>0; r-=2) { quarter_round(&x[0], &x[4], &x[8], &x[12]); quarter_round(&x[1], &x[5], &x[9], &x[13]); quarter_round(&x[2], &x[6], &x[10], &x[14]); quarter_round(&x[3], &x[7], &x[11], &x[15]); quarter_round(&x[0], &x[5], &x[10], &x[15]); quarter_round(&x[1], &x[6], &x[11], &x[12]); quarter_round(&x[2], &x[7], &x[8], &x[13]); quarter_round(&x[3], &x[4], &x[9], &x[14]); } for (r=0; r<16; r++) { output[r] = x[r] + input[r]; } output += 16; input[11] ++; if (!input[11]) input[12]++; } } #endif /* NEED_CONV */ } int crandom_init_from_file( struct crandom_state_t *state, const char *filename, int reseed_interval, int reseeds_mandatory ) { state->fill = 0; state->reseed_countdown = reseed_interval; state->reseed_interval = reseed_interval; state->ctr = 0; state->randomfd = open(filename, O_RDONLY); if (state->randomfd == -1) { int err = errno; return err ? err : -1; } ssize_t offset = 0, red; do { red = read(state->randomfd, state->seed + offset, 32 - offset); if (red > 0) offset += red; } while (red > 0 && offset < 32); if (offset < 32) { int err = errno; return err ? 
err : -1; } memset(state->buffer, 0, 96); state->magic = CRANDOM_MAGIC; state->reseeds_mandatory = reseeds_mandatory; return 0; } void crandom_init_from_buffer( struct crandom_state_t *state, const char initial_seed[32] ) { memcpy(state->seed, initial_seed, 32); memset(state->buffer, 0, 96); state->reseed_countdown = state->reseed_interval = state->fill = state->ctr = state->reseeds_mandatory = 0; state->randomfd = -1; state->magic = CRANDOM_MAGIC; } int crandom_generate( struct crandom_state_t *state, unsigned char *output, unsigned long long length ) { /* the generator isn't seeded; maybe they ignored the return value of init_from_file */ if (unlikely(state->magic != CRANDOM_MAGIC)) { abort(); } int ret = 0; /* * Addition 5/21/2014. * * If this is used in an application inside a VM, and the VM * is snapshotted and restored, then crandom_generate() would * produce the same output. * * Of course, the real defense against this is "don't do that", * but we mitigate it by the RDRAND and/or rdtsc() in the refilling * code. Since chacha is pseudorandom, when the attacker doesn't * know the state, it's good enough if RDRAND/rdtsc() return * different results. However, if (part of) the request is filled * from the buffer, this won't help. * * So, add a flag EXPERIMENT_CRANDOM_BUFFER_CUTOFF_BYTES which * disables the buffer for requests larger than this size. * * Suggest EXPERIMENT_CRANDOM_BUFFER_CUTOFF_BYTES = 0, which * disables the buffer. But instead you can set it to say 16, * so that pulls of at least 128 bits will be stirred. This * could still be a problem for eg 64-bit nonces, but those * aren't entirely collision-resistant anyway. * * Heuristic: large requests are more likely to be * cryptographically important, and the buffer doesn't impact * their performance as much. So if the request is bigger * than a certain size, just drop the buffer on the floor. * * This code isn't activated if state->reseed_interval == 0, * because then the PRNG is deterministic anyway. * * TODO: sample 128 bits out of RDRAND() instead of 64 bits. * TODO: option to completely remove the buffer and fill? * FUTURE: come up with a less band-aid-y solution to this problem. */ #ifdef EXPERIMENT_CRANDOM_BUFFER_CUTOFF_BYTES if (state->reseed_interval #if EXPERIMENT_CRANDOM_CUTOFF_BYTES > 0 /* #if'd to a warning from -Wtype-limits in GCC when it's zero */ && length >= EXPERIMENT_CRANDOM_BUFFER_CUTOFF_BYTES #endif ) { state->fill = 0; } #endif while (length) { if (unlikely(state->fill <= 0)) { uint64_t iv = 0; if (state->reseed_interval) { /* it's nondeterministic, stir in some rdrand() or rdtsc() */ if (HAVE(RDRAND)) { iv = rdrand(0); if (!iv) iv = rdtsc(); } else { iv = rdtsc(); } state->reseed_countdown--; if (unlikely(state->reseed_countdown <= 0)) { /* reseed by xoring in random state */ state->reseed_countdown = state->reseed_interval; ssize_t offset = 0, red; do { red = read(state->randomfd, state->buffer + offset, 32 - offset); if (red > 0) offset += red; } while (red > 0 && offset < 32); if (offset < 32) { /* The read failed. Signal an error with the return code. * * If reseeds are mandatory, crash. * * If not, the generator is still probably safe to use, because reseeding * is basically over-engineering for caution. Also, the user might ignore * the return code, so we still need to fill the request. * * Set reseed_countdown = 1 so we'll try again later. If the user's * performance sucks as a result of ignoring the error code while calling * us in a loop, well, that's life. 
*/ if (state->reseeds_mandatory) { abort(); } ret = errno; if (ret == 0) ret = -1; state->reseed_countdown = 1; } int i; for (i=0; i<32; i++) { /* Stir in the buffer. If somehow the read failed, it'll be zeros. */ state->seed[i] ^= state->buffer[i]; } } } crandom_chacha_expand(iv,state->ctr,20,128,state->seed,state->seed); state->ctr++; state->fill = sizeof(state->buffer); } unsigned long long copy = (length > state->fill) ? state->fill : length; state->fill -= copy; memcpy(output, state->buffer + state->fill, copy); really_memset(state->buffer + state->fill, 0, copy); output += copy; length -= copy; } return ret; } void crandom_destroy( struct crandom_state_t *state ) { if (state->magic == CRANDOM_MAGIC && state->randomfd) { (void) close(state->randomfd); /* Ignore the return value from close(), because what would it mean? * "Your random device, which you were reading over NFS, lost some data"? */ } really_memset(state, 0, sizeof(*state)); }
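/*
 * A sketch of the buffering discipline crandom_generate() uses above: expand
 * a block of keystream, serve requests from the tail of the buffer, and zero
 * each byte as it is handed out so a later memory disclosure cannot reveal
 * past outputs. refill() is a hypothetical stand-in for
 * crandom_chacha_expand(); it is NOT random and exists only to make the
 * sketch self-contained. Note that crandom wipes with really_memset()
 * because a plain memset() of a dying buffer may be optimized away.
 */
#include <stddef.h>
#include <string.h>

#define TOY_BUF 96

struct toy_rng {
    unsigned char buffer[TOY_BUF];
    size_t fill;                       /* bytes not yet served */
};

static void refill(struct toy_rng *st)
{
    size_t i;
    for (i = 0; i < TOY_BUF; i++)
        st->buffer[i] = (unsigned char)(i * 131u + 17u);  /* placeholder */
    st->fill = TOY_BUF;
}

static void toy_generate(struct toy_rng *st, unsigned char *out, size_t length)
{
    while (length) {
        if (st->fill == 0)
            refill(st);
        size_t copy = (length > st->fill) ? st->fill : length;
        st->fill -= copy;
        memcpy(out, st->buffer + st->fill, copy);  /* serve from the tail */
        memset(st->buffer + st->fill, 0, copy);    /* wipe what was served */
        out += copy;
        length -= copy;
    }
}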
644884.c
/* Scan the whole array for key; if key is present, advance to key + 1 and
   search again. Returns the smallest value >= key absent from a[0..n-1]. */
int search(int *a, int n, int key)
{
    for (int i = 0; i < n; i++)
    {
        if (a[i] == key)
        {
            key++;
            return search(a, n, key);
        }
    }
    return key;
}

/* Smallest missing positive integer (LeetCode-style signature).
   Bubble-sorts the array, then probes 1, 2, 3, ... with search() above.
   Correct, but O(n^2) overall; an O(n) index-marking alternative is
   sketched after this function. */
int firstMissingPositive(int* nums, int numsSize)
{
    int i, j, key = 1;

    for (i = 0; i < numsSize; i++)
    {
        for (j = 0; j < numsSize - i - 1; j++)
        {
            if (nums[j] > nums[j + 1])
            {
                int temp = nums[j];
                nums[j] = nums[j + 1];
                nums[j + 1] = temp;
            }
        }
    }
    int value = search(nums, numsSize, key);
    return value;
}
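/*
 * The solution above sorts and then probes candidates one at a time, which
 * is O(n^2) in the worst case. A common O(n) time / O(1) space alternative,
 * sketched here, swaps each value v into slot v-1 and then scans for the
 * first slot that does not hold its own index + 1.
 */
int firstMissingPositiveLinear(int *nums, int numsSize)
{
    for (int i = 0; i < numsSize; i++)
    {
        /* Keep swapping until the value here is out of range, a duplicate,
           or already in its home slot. Each swap homes one value, so the
           total work stays linear. */
        while (nums[i] > 0 && nums[i] <= numsSize
                && nums[nums[i] - 1] != nums[i])
        {
            int t = nums[nums[i] - 1];
            nums[nums[i] - 1] = nums[i];
            nums[i] = t;
        }
    }
    for (int i = 0; i < numsSize; i++)
        if (nums[i] != i + 1)
            return i + 1;
    return numsSize + 1;
}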
105103.c
/* * BlueALSA - utils.c * Copyright (c) 2016-2020 Arkadiusz Bokowy * * This file is a part of bluez-alsa. * * This project is licensed under the terms of the MIT license. * */ #include "utils.h" #include <ctype.h> #include <stdbool.h> #include <stdlib.h> #include <string.h> #include <bluetooth/bluetooth.h> #if ENABLE_LDAC # include "ldacBT.h" #endif #include "a2dp-codecs.h" #include "hfp.h" #include "shared/defs.h" #include "shared/log.h" /** * Extract HCI device ID from the BlueZ D-Bus object path. * * @param path BlueZ D-Bus object path. * @return On success this function returns ID of the HCI device. * Otherwise, -1 is returned. */ int g_dbus_bluez_object_path_to_hci_dev_id(const char *path) { if ((path = strstr(path, "/hci")) == NULL || path[4] == '\0') return -1; return atoi(&path[4]); } /** * Extract BT address from the BlueZ D-Bus object path. * * @param path BlueZ D-Bus object path. * @param addr Address where the parsed BT address will be stored. * @return On success this function returns pointer to the BT address. On * error, NULL is returned. */ bdaddr_t *g_dbus_bluez_object_path_to_bdaddr(const char *path, bdaddr_t *addr) { char tmp[sizeof("00:00:00:00:00:00")] = { 0 }; size_t i; if ((path = strstr(path, "/dev_")) != NULL) strncpy(tmp, path + 5, sizeof(tmp) - 1); for (i = 0; i < sizeof(tmp); i++) if (tmp[i] == '_') tmp[i] = ':'; if (str2ba(tmp, addr) == -1) return NULL; return addr; } /** * Get BlueZ D-Bus object path for given transport type. * * @param type Transport type structure. * @return This function returns BlueZ D-Bus object path. */ const char *g_dbus_transport_type_to_bluez_object_path(struct ba_transport_type type) { switch (type.profile) { case BA_TRANSPORT_PROFILE_A2DP_SOURCE: switch (type.codec) { case A2DP_CODEC_SBC: return "/A2DP/SBC/Source"; #if ENABLE_MPEG case A2DP_CODEC_MPEG12: return "/A2DP/MPEG/Source"; #endif #if ENABLE_AAC case A2DP_CODEC_MPEG24: return "/A2DP/AAC/Source"; #endif #if ENABLE_APTX case A2DP_CODEC_VENDOR_APTX: return "/A2DP/aptX/Source"; #endif #if ENABLE_APTX_HD case A2DP_CODEC_VENDOR_APTX_HD: return "/A2DP/aptXHD/Source"; #endif #if ENABLE_FASTSTREAM case A2DP_CODEC_VENDOR_FASTSTREAM: return "/A2DP/FastStream/Source"; #endif #if ENABLE_LDAC case A2DP_CODEC_VENDOR_LDAC: return "/A2DP/LDAC/Source"; #endif default: error("Unsupported A2DP codec: %#x", type.codec); g_assert_not_reached(); } case BA_TRANSPORT_PROFILE_A2DP_SINK: switch (type.codec) { case A2DP_CODEC_SBC: return "/A2DP/SBC/Sink"; #if ENABLE_MPEG case A2DP_CODEC_MPEG12: return "/A2DP/MPEG/Sink"; #endif #if ENABLE_AAC case A2DP_CODEC_MPEG24: return "/A2DP/AAC/Sink"; #endif #if ENABLE_APTX case A2DP_CODEC_VENDOR_APTX: return "/A2DP/aptX/Sink"; #endif #if ENABLE_APTX_HD case A2DP_CODEC_VENDOR_APTX_HD: return "/A2DP/aptXHD/Sink"; #endif #if ENABLE_FASTSTREAM case A2DP_CODEC_VENDOR_FASTSTREAM: return "/A2DP/FastStream/Sink"; #endif #if ENABLE_LDAC case A2DP_CODEC_VENDOR_LDAC: return "/A2DP/LDAC/Sink"; #endif default: error("Unsupported A2DP codec: %#x", type.codec); g_assert_not_reached(); } case BA_TRANSPORT_PROFILE_HFP_HF: return "/HFP/HandsFree"; case BA_TRANSPORT_PROFILE_HFP_AG: return "/HFP/AudioGateway"; case BA_TRANSPORT_PROFILE_HSP_HS: return "/HSP/Headset"; case BA_TRANSPORT_PROFILE_HSP_AG: return "/HSP/AudioGateway"; } return "/"; } /** * Sanitize D-Bus object path. * * @param path D-Bus object path. * @return Pointer to the object path string. 
*/ char *g_variant_sanitize_object_path(char *path) { char *tmp = path - 1; while (*(++tmp) != '\0') if (!(*tmp == '/' || isalnum(*tmp))) *tmp = '_'; return path; } /** * Convenience wrapper around g_variant_is_of_type(). * * @param value Variant for validation. * @param type Expected variant type. * @param name Variant name for logging. * @return If variant matches type, this function returns true. */ bool g_variant_validate_value(GVariant *value, const GVariantType *type, const char *name) { if (g_variant_is_of_type(value, type)) return true; warn("Invalid variant type: %s: %s != %s", name, g_variant_get_type_string(value), (const char *)type); return false; } /** * Convert a pointer to BT address to a hash value. * * @param v A pointer to bdaddr_t structure. * @return Hash value compatible with GHashTable. */ unsigned int g_bdaddr_hash(const void *v) { const bdaddr_t *ba = (const bdaddr_t *)v; return ((uint32_t *)ba->b)[0] * ((uint16_t *)ba->b)[2]; } /** * Compare two BT addresses. * * @param v1 A pointer to first bdaddr_t structure. * @param v2 A pointer to second bdaddr_t structure. * @return Comparision value compatible with GHashTable. */ gboolean g_bdaddr_equal(const void *v1, const void *v2) { return bacmp(v1, v2) == 0; } /** * Get BlueALSA A2DP codec from string representation. * * @param codec String representation of BlueALSA audio codec. * @return BlueALSA audio codec or 0xFFFF for not supported value. */ uint16_t ba_transport_codecs_a2dp_from_string(const char *str) { static const uint16_t codecs[] = { A2DP_CODEC_SBC, #if ENABLE_MPEG A2DP_CODEC_MPEG12, #endif #if ENABLE_AAC A2DP_CODEC_MPEG24, #endif #if ENABLE_APTX A2DP_CODEC_VENDOR_APTX, #endif #if ENABLE_APTX_HD A2DP_CODEC_VENDOR_APTX_HD, #endif #if ENABLE_FASTSTREAM A2DP_CODEC_VENDOR_FASTSTREAM, #endif #if ENABLE_LDAC A2DP_CODEC_VENDOR_LDAC, #endif }; size_t i; for (i = 0; i < ARRAYSIZE(codecs); i++) if (strcmp(str, ba_transport_codecs_a2dp_to_string(codecs[i])) == 0) return codecs[i]; return 0xFFFF; } /** * Convert BlueALSA A2DP codec into a human-readable string. * * @param codec BlueALSA A2DP audio codec. * @return Human-readable string or NULL for unknown codec. */ const char *ba_transport_codecs_a2dp_to_string(uint16_t codec) { switch (codec) { case A2DP_CODEC_SBC: return "SBC"; case A2DP_CODEC_MPEG12: return "MP3"; case A2DP_CODEC_MPEG24: return "AAC"; case A2DP_CODEC_ATRAC: return "ATRAC"; case A2DP_CODEC_VENDOR_APTX: return "aptX"; case A2DP_CODEC_VENDOR_APTX_AD: return "aptX-AD"; case A2DP_CODEC_VENDOR_APTX_HD: return "aptX-HD"; case A2DP_CODEC_VENDOR_APTX_LL: return "aptX-LL"; case A2DP_CODEC_VENDOR_APTX_TWS: return "aptX-TWS"; case A2DP_CODEC_VENDOR_FASTSTREAM: return "FastStream"; case A2DP_CODEC_VENDOR_LDAC: return "LDAC"; case A2DP_CODEC_VENDOR_LHDC: return "LHDC"; case A2DP_CODEC_VENDOR_LHDC_V1: return "LHDCv1"; case A2DP_CODEC_VENDOR_LLAC: return "LLAC"; case A2DP_CODEC_VENDOR_SAMSUNG_HD: return "samsung-HD"; case A2DP_CODEC_VENDOR_SAMSUNG_SC: return "samsung-SC"; default: return NULL; } } /** * Get BlueALSA HFP codec from string representation. * * @param codec String representation of BlueALSA audio codec. * @return BlueALSA audio codec or 0xFFFF for not supported value. 
*/ uint16_t ba_transport_codecs_hfp_from_string(const char *str) { static const uint16_t codecs[] = { HFP_CODEC_CVSD, #if ENABLE_MSBC HFP_CODEC_MSBC, #endif }; size_t i; for (i = 0; i < ARRAYSIZE(codecs); i++) if (strcmp(str, ba_transport_codecs_hfp_to_string(codecs[i])) == 0) return codecs[i]; return 0xFFFF; } /** * Convert HFP audio codec into a human-readable string. * * @param codec HFP audio codec. * @return Human-readable string or NULL for unknown codec. */ const char *ba_transport_codecs_hfp_to_string(uint16_t codec) { switch (codec) { case HFP_CODEC_CVSD: return "CVSD"; case HFP_CODEC_MSBC: return "mSBC"; default: return NULL; } } /** * Convert BlueALSA transport type into a human-readable string. * * @param type Transport type structure. * @return Human-readable string. */ const char *ba_transport_type_to_string(struct ba_transport_type type) { switch (type.profile) { case BA_TRANSPORT_PROFILE_A2DP_SOURCE: switch (type.codec) { case A2DP_CODEC_SBC: return "A2DP Source (SBC)"; #if ENABLE_MPEG case A2DP_CODEC_MPEG12: return "A2DP Source (MP3)"; #endif #if ENABLE_AAC case A2DP_CODEC_MPEG24: return "A2DP Source (AAC)"; #endif #if ENABLE_APTX case A2DP_CODEC_VENDOR_APTX: return "A2DP Source (aptX)"; #endif #if ENABLE_APTX_HD case A2DP_CODEC_VENDOR_APTX_HD: return "A2DP Source (aptX HD)"; #endif #if ENABLE_FASTSTREAM case A2DP_CODEC_VENDOR_FASTSTREAM: return "A2DP Source (FastStream)"; #endif #if ENABLE_LDAC case A2DP_CODEC_VENDOR_LDAC: return "A2DP Source (LDAC)"; #endif default: return "A2DP Source"; } case BA_TRANSPORT_PROFILE_A2DP_SINK: switch (type.codec) { case A2DP_CODEC_SBC: return "A2DP Sink (SBC)"; #if ENABLE_MPEG case A2DP_CODEC_MPEG12: return "A2DP Sink (MP3)"; #endif #if ENABLE_AAC case A2DP_CODEC_MPEG24: return "A2DP Sink (AAC)"; #endif #if ENABLE_APTX case A2DP_CODEC_VENDOR_APTX: return "A2DP Sink (aptX)"; #endif #if ENABLE_APTX_HD case A2DP_CODEC_VENDOR_APTX_HD: return "A2DP Sink (aptX HD)"; #endif #if ENABLE_FASTSTREAM case A2DP_CODEC_VENDOR_FASTSTREAM: return "A2DP Sink (FastStream)"; #endif #if ENABLE_LDAC case A2DP_CODEC_VENDOR_LDAC: return "A2DP Sink (LDAC)"; #endif default: return "A2DP Sink"; } case BA_TRANSPORT_PROFILE_HFP_HF: switch (type.codec) { case HFP_CODEC_CVSD: return "HFP Hands-Free (CVSD)"; case HFP_CODEC_MSBC: return "HFP Hands-Free (mSBC)"; default: return "HFP Hands-Free"; } case BA_TRANSPORT_PROFILE_HFP_AG: switch (type.codec) { case HFP_CODEC_CVSD: return "HFP Audio Gateway (CVSD)"; case HFP_CODEC_MSBC: return "HFP Audio Gateway (mSBC)"; default: return "HFP Audio Gateway"; } case BA_TRANSPORT_PROFILE_HSP_HS: return "HSP Headset"; case BA_TRANSPORT_PROFILE_HSP_AG: return "HSP Audio Gateway"; } debug("Unknown transport type: %#x %#x", type.profile, type.codec); return "N/A"; } #if ENABLE_MP3LAME /** * Get maximum possible bit-rate for the given bit-rate mask. * * @param mask MPEG-1 layer III bit-rate mask. * @return Bit-rate in kilobits per second. */ int a2dp_mpeg1_mp3_get_max_bitrate(uint16_t mask) { static int bitrates[] = { 320, 256, 224, 192, 160, 128, 112, 96, 80, 64, 56, 48, 40, 32 }; size_t i = 0; while (i < ARRAYSIZE(bitrates)) { if (mask & (1 << (14 - i))) return bitrates[i]; i++; } return -1; } #endif #if ENABLE_MP3LAME /** * Get string representation of LAME error code. * * @param error LAME encoder error code. * @return Human-readable string. 
*/ const char *lame_encode_strerror(int err) { switch (err) { case -1: return "Too small output buffer"; case -2: return "Out of memory"; case -3: return "Params not initialized"; case -4: return "Psycho acoustic error"; default: debug("Unknown error code: %#x", err); return "Unknown error"; } } #endif #if ENABLE_AAC /** * Get string representation of the FDK-AAC decoder error code. * * @param err FDK-AAC decoder error code. * @return Human-readable string. */ const char *aacdec_strerror(AAC_DECODER_ERROR err) { switch (err) { case AAC_DEC_OK: return "Success"; case AAC_DEC_OUT_OF_MEMORY: return "Out of memory"; case AAC_DEC_TRANSPORT_SYNC_ERROR: return "Transport sync error"; case AAC_DEC_NOT_ENOUGH_BITS: return "Not enough bits"; case AAC_DEC_INVALID_HANDLE: return "Invalid handle"; case AAC_DEC_UNSUPPORTED_AOT: return "Unsupported AOT"; case AAC_DEC_UNSUPPORTED_FORMAT: return "Unsupported format"; case AAC_DEC_UNSUPPORTED_ER_FORMAT: return "Unsupported ER format"; case AAC_DEC_UNSUPPORTED_EPCONFIG: return "Unsupported EP format"; case AAC_DEC_UNSUPPORTED_MULTILAYER: return "Unsupported multilayer"; case AAC_DEC_UNSUPPORTED_CHANNELCONFIG: return "Unsupported channels"; case AAC_DEC_UNSUPPORTED_SAMPLINGRATE: return "Unsupported sample rate"; case AAC_DEC_INVALID_SBR_CONFIG: return "Unsupported SBR"; case AAC_DEC_SET_PARAM_FAIL: return "Unsupported parameter"; case AAC_DEC_NEED_TO_RESTART: return "Restart required"; case AAC_DEC_TRANSPORT_ERROR: return "Transport error"; case AAC_DEC_PARSE_ERROR: return "Parse error"; case AAC_DEC_UNSUPPORTED_EXTENSION_PAYLOAD: return "Unsupported extension payload"; case AAC_DEC_DECODE_FRAME_ERROR: return "Bitstream corrupted"; case AAC_DEC_CRC_ERROR: return "CRC mismatch"; case AAC_DEC_INVALID_CODE_BOOK: return "Invalid codebook"; case AAC_DEC_UNSUPPORTED_PREDICTION: return "Unsupported prediction"; case AAC_DEC_UNSUPPORTED_CCE: return "Unsupported CCE"; case AAC_DEC_UNSUPPORTED_LFE: return "Unsupported LFE"; case AAC_DEC_UNSUPPORTED_GAIN_CONTROL_DATA: return "Unsupported gain control data"; case AAC_DEC_UNSUPPORTED_SBA: return "Unsupported SBA"; case AAC_DEC_TNS_READ_ERROR: return "TNS read error"; case AAC_DEC_RVLC_ERROR: return "RVLC decode error"; case AAC_DEC_ANC_DATA_ERROR: return "Ancillary data error"; case AAC_DEC_TOO_SMALL_ANC_BUFFER: return "Too small ancillary buffer"; case AAC_DEC_TOO_MANY_ANC_ELEMENTS: return "Too many ancillary elements"; default: debug("Unknown error code: %#x", err); return "Unknown error"; } } #endif #if ENABLE_AAC /** * Get string representation of the FDK-AAC encoder error code. * * @param err FDK-AAC encoder error code. * @return Human-readable string. 
*/ const char *aacenc_strerror(AACENC_ERROR err) { switch (err) { case AACENC_OK: return "Success"; case AACENC_INVALID_HANDLE: return "Invalid handle"; case AACENC_MEMORY_ERROR: return "Out of memory"; case AACENC_UNSUPPORTED_PARAMETER: return "Unsupported parameter"; case AACENC_INVALID_CONFIG: return "Invalid config"; case AACENC_INIT_ERROR: return "Initialization error"; case AACENC_INIT_AAC_ERROR: return "AAC library initialization error"; case AACENC_INIT_SBR_ERROR: return "SBR library initialization error"; case AACENC_INIT_TP_ERROR: return "Transport library initialization error"; case AACENC_INIT_META_ERROR: return "Metadata library initialization error"; case AACENC_ENCODE_ERROR: return "Encoding error"; case AACENC_ENCODE_EOF: return "End of file"; default: debug("Unknown error code: %#x", err); return "Unknown error"; } } #endif #if ENABLE_LDAC /** * Get string representation of the LDAC error code. * * @param err LDAC error code. * @return Human-readable string. */ const char *ldacBT_strerror(int err) { int code = LDACBT_HANDLE_ERR(err); switch (code != 0 ? code : LDACBT_API_ERR(err)) { case LDACBT_ERR_NONE: return "Success"; case LDACBT_ERR_FATAL_HANDLE: return "Invalid handle"; case LDACBT_ERR_HANDLE_NOT_INIT: return "Handle not initialized"; case LDACBT_ERR_ENC_INIT_ALLOC: case LDACBT_ERR_DEC_INIT_ALLOC: return "Out of memory"; case LDACBT_ERR_ASSERT_SAMPLING_FREQ: case LDACBT_ERR_ASSERT_SUP_SAMPLING_FREQ: case LDACBT_ERR_CHECK_SAMPLING_FREQ: return "Invalid sample rate"; case LDACBT_ERR_ASSERT_CHANNEL_CONFIG: case LDACBT_ERR_CHECK_CHANNEL_CONFIG: return "Invalid channel config"; case LDACBT_ERR_ASSERT_FRAME_LENGTH: case LDACBT_ERR_ASSERT_SUP_FRAME_LENGTH: case LDACBT_ERR_ASSERT_FRAME_STATUS: case LDACBT_ERR_FRAME_LENGTH_OVER: case LDACBT_ERR_FRAME_ALIGN_OVER: return "Invalid frame"; case LDACBT_ERR_ASSERT_NSHIFT: return "Invalid N-shift"; case LDACBT_ERR_ASSERT_CHANNEL_MODE: return "Invalid channel mode"; case LDACBT_ERR_ENC_ILL_GRADMODE: case LDACBT_ERR_ENC_ILL_GRADPAR_A: case LDACBT_ERR_ENC_ILL_GRADPAR_B: case LDACBT_ERR_ENC_ILL_GRADPAR_C: case LDACBT_ERR_ENC_ILL_GRADPAR_D: return "Invalid gradient parameter"; case LDACBT_ERR_ENC_ILL_NBANDS: return "Invalid N-bands"; case LDACBT_ERR_PACK_BLOCK_FAILED: return "Block packing error"; case LDACBT_ERR_INPUT_BUFFER_SIZE: return "Too small input buffer"; case LDACBT_ERR_UNPACK_BLOCK_FAILED: case LDACBT_ERR_UNPACK_BLOCK_ALIGN: case LDACBT_ERR_UNPACK_FRAME_ALIGN: return "Block unpacking error"; case LDACBT_ERR_ILL_SYNCWORD: return "Invalid sync-word"; case LDACBT_ERR_ILL_SMPL_FORMAT: return "Invalid sample format"; case LDACBT_ERR_ILL_PARAM: return "Invalid parameter"; case LDACBT_ERR_ILL_EQMID: return "Unsupported EQMID"; case LDACBT_ERR_ILL_SAMPLING_FREQ: return "Unsupported sample rate"; case LDACBT_ERR_ILL_NUM_CHANNEL: return "Unsupported channels"; case LDACBT_ERR_ILL_MTU_SIZE: return "Unsupported MTU"; case LDACBT_ERR_ALTER_EQMID_LIMITED: return "EQMID limited"; case LDACBT_ERR_DEC_CONFIG_UPDATED: return "Configuration updated"; default: debug("Unknown error code: %#x (API: %u, handle: %u, block: %u)", err, LDACBT_API_ERR(err), LDACBT_HANDLE_ERR(err), LDACBT_BLOCK_ERR(err)); return "Unknown error"; } } #endif
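/*
 * Design note on the *_from_string() helpers above: only the id -> name
 * switch is authoritative, and the reverse direction is derived by probing
 * it over a table of known ids, so the two mappings cannot drift apart.
 * A stripped-down sketch of the pattern (the toy_* names are illustrative):
 */
#include <stdint.h>
#include <string.h>

enum { TOY_SBC = 1, TOY_AAC = 2, TOY_LDAC = 3 };

static const char *toy_codec_to_string(uint16_t codec)
{
    switch (codec) {
    case TOY_SBC:  return "SBC";
    case TOY_AAC:  return "AAC";
    case TOY_LDAC: return "LDAC";
    default:       return NULL;
    }
}

static uint16_t toy_codec_from_string(const char *str)
{
    static const uint16_t known[] = { TOY_SBC, TOY_AAC, TOY_LDAC };
    size_t i;
    for (i = 0; i < sizeof(known) / sizeof(known[0]); i++)
        if (strcmp(str, toy_codec_to_string(known[i])) == 0)
            return known[i];
    return 0xFFFF;  /* same "not supported" sentinel as the code above */
}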
122645.c
/*
    Write a program that calculates the user's weight on other planets by
    scaling the weight on Earth with each planet's surface gravity
    (weight_planet = weight_earth * g_planet / g_earth).
    For example:

        What's your weight on earth?: 71.5
        Your weight in jupiter is: 179.114796
*/
#include <stdio.h>

/* Surface gravity of each body, in m/s^2 */
#define JUPITER 24.55
#define VENUS 8.87
#define URANO 8.887
#define MARTE 3.71
#define TIERRA 9.8
#define SATURNO 10.44
#define NEPTUNO 11.15
#define MERCURIO 3.7

int main()
{
    double peso_final, peso_tierra;

    printf("--------Know your weight on other planets-------\n");
    printf("What's your weight on earth?: \n");
    scanf("%lf", &peso_tierra);

    peso_final = (peso_tierra * JUPITER) / TIERRA;
    printf("Your weight in jupiter is: %lf\n", peso_final);

    peso_final = (peso_tierra * VENUS) / TIERRA;
    printf("Your weight in venus is: %lf\n", peso_final);

    peso_final = (peso_tierra * URANO) / TIERRA;
    printf("Your weight in uranus is: %lf\n", peso_final);

    peso_final = (peso_tierra * MARTE) / TIERRA;
    printf("Your weight in mars is: %lf\n", peso_final);

    peso_final = (peso_tierra * MERCURIO) / TIERRA;
    printf("Your weight in mercury is: %lf\n", peso_final);

    peso_final = (peso_tierra * SATURNO) / TIERRA;
    printf("Your weight in saturn is: %lf\n", peso_final);

    peso_final = (peso_tierra * NEPTUNO) / TIERRA;
    printf("Your weight in neptune is: %lf\n", peso_final);

    return 0;
}
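/*
 * The same computation factored through a helper, to make the shared formula
 * (scale by g_planet / g_earth) explicit. TIERRA is the Earth gravity
 * constant defined above; weight_on() is an illustrative name.
 */
static double weight_on(double peso_tierra, double gravity)
{
    return (peso_tierra * gravity) / TIERRA;
}
/* usage: printf("Your weight in jupiter is: %lf\n", weight_on(peso_tierra, JUPITER)); */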
4847.c
/**
 * Test the C++ compiler interface of the
 * $(LINK2 http://www.dlang.org, D programming language).
 *
 * Copyright:   Copyright (C) 2017-2018 by The D Language Foundation, All Rights Reserved
 * Authors:     Iain Buclaw
 * License:     $(LINK2 http://www.boost.org/LICENSE_1_0.txt, Boost License 1.0)
 * Source:      $(LINK2 https://github.com/dlang/dmd/blob/master/src/tests/cxxfrontend.c, _cxxfrontend.c)
 */

#include "array.h"
#include "ctfloat.h"
#include "file.h"
#include "filename.h"
#include "longdouble.h"
#include "object.h"
#include "outbuffer.h"
#include "port.h"
#include "rmem.h"
#include "root.h"
#include "stringtable.h"
#include "thread.h"

#include "aggregate.h"
#include "aliasthis.h"
#include "arraytypes.h"
#include "attrib.h"
#include "compiler.h"
#include "complex_t.h"
#include "cond.h"
#include "ctfe.h"
#include "declaration.h"
#include "dsymbol.h"
#include "enum.h"
#include "errors.h"
#include "expression.h"
#include "globals.h"
#include "hdrgen.h"
#include "identifier.h"
#include "id.h"
#include "import.h"
#include "init.h"
#include "intrange.h"
#include "json.h"
#include "mars.h"
#include "module.h"
#include "mtype.h"
#include "nspace.h"
#include "objc.h"
#include "scope.h"
#include "statement.h"
#include "staticassert.h"
#include "target.h"
#include "template.h"
#include "tokens.h"
#include "version.h"
#include "visitor.h"

/**********************************/

extern "C" int rt_init();
extern "C" void gc_disable();

static void frontend_init()
{
    rt_init();
    gc_disable();

    global._init();
    global.params.isLinux = true;
    Type::_init();
    Id::initialize();
    Module::_init();
    Expression::_init();
    Objc::_init();
    Target::_init();
}

/**********************************/

extern "C" int rt_term();
extern "C" void gc_enable();

static void frontend_term()
{
    gc_enable();
    rt_term();
}

/**********************************/

class TestVisitor : public Visitor
{
  public:
    bool expr;
    bool package;
    bool stmt;
    bool type;
    bool aggr;
    bool attrib;
    bool decl;
    bool typeinfo;

    TestVisitor() : expr(false), package(false), stmt(false), type(false),
        aggr(false), attrib(false), decl(false), typeinfo(false)
    {
    }

    void visit(Expression *) { expr = true; }
    void visit(Package *) { package = true; }
    void visit(Statement *) { stmt = true; }
    void visit(AttribDeclaration *) { attrib = true; }
    void visit(Declaration *) { decl = true; }
    void visit(AggregateDeclaration *) { aggr = true; }
    void visit(TypeNext *) { type = true; }
    void visit(TypeInfoDeclaration *) { typeinfo = true; }
};

void test_visitors()
{
    TestVisitor tv;
    Loc loc;
    Identifier *ident = Identifier::idPool("test");

    IntegerExp *ie = IntegerExp::createi(loc, 42, Type::tint32);
    ie->accept(&tv);
    assert(tv.expr == true);

    Module *mod = Module::create("test", ident, 0, 0);
    mod->accept(&tv);
    assert(tv.package == true);

    ExpStatement *es = ExpStatement::create(loc, ie);
    es->accept(&tv);
    assert(tv.stmt == true);

    TypePointer *tp = TypePointer::create(Type::tvoid);
    tp->accept(&tv);
    assert(tv.type == true);

    LinkDeclaration *ld = LinkDeclaration::create(LINKd, NULL);
    ld->accept(&tv);
    assert(tv.attrib == true);

    ClassDeclaration *cd = ClassDeclaration::create(loc, Identifier::idPool("TypeInfo"), NULL, NULL, true);
    cd->accept(&tv);
    assert(tv.aggr == true);

    AliasDeclaration *ad = AliasDeclaration::create(loc, ident, tp);
    ad->accept(&tv);
    assert(tv.decl == true);

    cd = ClassDeclaration::create(loc, Identifier::idPool("TypeInfo_Pointer"), NULL, NULL, true);
    TypeInfoPointerDeclaration *ti = TypeInfoPointerDeclaration::create(tp);
    ti->accept(&tv);
    assert(tv.typeinfo == true);
}
/**********************************/ void test_semantic() { /* Mini object.d source. Module::parse will add internal members also. */ const char *buf = "module object;\n" "class Object { }\n" "class Throwable { }\n" "class Error : Throwable { this(immutable(char)[]); }"; Module *m = Module::create("object.d", Identifier::idPool("object"), 0, 0); unsigned errors = global.startGagging(); m->srcfile->setbuffer((void*)buf, strlen(buf)); m->srcfile->ref = 1; m->parse(); m->importedFrom = m; m->importAll(NULL); dsymbolSemantic(m, NULL); semantic2(m, NULL); semantic3(m, NULL); assert(!global.endGagging(errors)); } /**********************************/ int main(int argc, char **argv) { frontend_init(); test_visitors(); test_semantic(); frontend_term(); return 0; }
12091.c
/*----------------------------------------
 * runTimeStatsTimer.c
 *
 *  Created on: Feb 12, 2015
 *      Author: garbez
 *
 *  Description: To support the run time stats,
 *  a timer must be configured to update a counter
 *  for periods shorter than the 1 ms tick period
 *  of the OS.
 *
 *  This module uses 16-bit Timer 1 configured in
 *  normal mode with a 16 micro-second increment.
 *  Thus the counter will not overflow before
 *  65536 * 16 us = 1.05 sec. Run time stats must be
 *  recorded before then.
 *
 *  Configure Timer 1 normal mode with control registers:
 *
 *  TCCR1A: 0x00, no Output Compare mode enabled (Chan A-B7:6, Chan B-5:4, Chan C-3:2),
 *          WGM1:0 Normal mode (Bits 1:0) set to 00
 *  TCCR1B: 0b00000100
 *          No Input Capture enabled, B7:6 = 00
 *          WGM3:2 Normal mode = 00
 *          System clock for the ATmega2560 = 16 MHz
 *          Clock prescaler: one of four prescalers is available. Selecting 256
 *          gives a time base for run time stats of 0.0625 us * 256 = 16 us,
 *          and enables the clock
 *          B2:0 = 100, for 256 prescaler
 *  TCCR1C: 0x00, no force output compare for channels Chan A B7, Chan B B6, Chan C B5
 *  TCNT1:  counter register for Timer 1
 *  OCR1A, OCR1B, OCR1C: Output Compare Registers
 *  ICR1:   Input Capture register
 *  TIMSK1: Interrupt mask register: Bit 0 is the overflow interrupt enable
 *  TIFR1:  Timer Interrupt Flag Register: Bit 0 is the overflow flag
 *
 *---------------------------------------*/
#include <avr/io.h> // To get hardware register defines

/*--------------------------------------------------
 * Function: xconfigTimerForRunTimeStats
 *
 * Description: defined as a function rather than a MACRO (see
 * portCONFIGURE_TIMER_FOR_RUN_TIME_STATS) - does not require changing
 * the FreeRTOSConfig.h file.
 *
 * This function configures Timer 1 with 16 microsecond increments. TCNT1 is
 * used as the counter value to return (see portGET_RUN_TIME_COUNTER_VALUE()
 * below).
 *-----------------------------------------------------------*/
void xconfigTimerForRunTimeStats()
{
    TCCR1A = 0; // No OC channel enabled, WGM1:0==00, normal mode
    TCCR1B = 0; // Sets prescaler to 000 - disables the timer
    TCNT1 = 0;  // set timer count register to 0
    TCCR1B = 0b00000100; // Input capture disabled, WGM3:2=00 (normal mode), B2:0=100, for 256 prescaler
}

/*--------------------------------------------------
 * Function: portGET_RUN_TIME_COUNTER_VALUE
 *
 * Description: returns the current Timer 1 count, i.e. the run time
 * stats counter with 16 microsecond resolution.
 *-----------------------------------------------------------*/
//inline
uint16_t portGET_RUN_TIME_COUNTER_VALUE() { return TCNT1; }
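/*
 * A stand-alone host-side check of the timing arithmetic claimed in the
 * header above (not part of the AVR firmware): a 16 MHz clock divided by a
 * 256 prescaler ticks every 16 us, so the 16-bit counter wraps after about
 * 1.05 s.
 */
#include <stdio.h>

int main(void)
{
    const double f_cpu = 16000000.0;    /* 16 MHz system clock */
    const double tick = 256.0 / f_cpu;  /* prescaler 256 -> 16e-6 s */
    const double wrap = tick * 65536.0; /* 16-bit counter overflow */
    printf("tick = %g us, wrap after %g s\n", tick * 1e6, wrap);
    /* prints: tick = 16 us, wrap after 1.048576 s */
    return 0;
}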
224966.c
/* * capabilities.c - A DNS server for testing server capabilities * * Copyright (c) 2016, NLnet Labs. All rights reserved. * * This software is open source. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the NLNET LABS nor the names of its contributors may * be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <getdns/getdns_extra.h> #include <stdio.h> #include <string.h> void handler(getdns_context *context, getdns_callback_type_t callback_type, getdns_dict *request, void *userarg, getdns_transaction_t request_id) { getdns_bindata *qname; char ans_str[] = "Some answer"; getdns_bindata ans_bd = { sizeof(ans_str) - 1, (void *)ans_str }; (void) userarg; (void)callback_type; if (getdns_dict_get_bindata(request, "/question/qname", &qname) || getdns_dict_set_bindata(request, "/answer/0/name", qname) || getdns_dict_set_int(request, "/answer/0/type", GETDNS_RRTYPE_TXT) || getdns_dict_set_int(request, "/header/tc", 1) || getdns_dict_set_bindata(request, "/answer/0/rdata/txt_strings/-", &ans_bd)) fprintf(stderr, "Request init error\n"); else if (qname->size >= 8 && qname->data[0] == 6 && qname->data[1] == 'c' && qname->data[2] == 'a' && qname->data[3] == 'n' && qname->data[4] == 'c' && qname->data[5] == 'e' && qname->data[6] == 'l') { (void) getdns_reply(context, NULL, request_id); getdns_dict_destroy(request); return; } else if (qname->size >= 6 && qname->data[0] == 4 && qname->data[1] == 'q' && qname->data[2] == 'u' && qname->data[3] == 'i' && qname->data[4] == 't') { (void) getdns_dict_set_int(request, "/header/tc", 0); (void) getdns_reply(context, request, request_id); (void) getdns_context_set_listen_addresses(context, NULL, NULL, NULL); getdns_dict_destroy(request); return; } else { if (getdns_reply(context, request, request_id)) getdns_reply(context, NULL, request_id); getdns_dict_destroy(request); return; } getdns_dict_destroy(request); exit(EXIT_FAILURE); } int main() { getdns_context *context = NULL; getdns_list *listeners = NULL; getdns_dict *address = NULL; getdns_dict *address2 = NULL; uint32_t port1 = 18000; uint32_t port2 = 18000; getdns_return_t r; if ((r = getdns_str2list("[ 127.0.0.1:18000 ]", &listeners)) || (r = getdns_str2dict("127.0.0.1:18000", &address2)) || (r = 
getdns_list_get_dict(listeners, 0, &address)) || (r = getdns_context_create(&context, 0))) fprintf(stderr, "Error initializing: "); else while (++port1 < 18200 && !(r = getdns_dict_set_int(address, "port", port1)) && (r = getdns_context_set_listen_addresses( context, listeners, NULL, handler))) ; /* pass */ if (!r && ((r = getdns_list_set_dict(listeners, 1, address2)) || (r = getdns_list_get_dict(listeners, 1, &address)))) fprintf(stderr, "Error initializing 2nd address: "); if (r) fprintf(stderr, "%s\n", getdns_get_errorstr_by_id(r)); else { port2 = port1; while (++port2 < 18200 && !(r = getdns_dict_set_int(address, "port", port2)) && (r = getdns_context_set_listen_addresses( context, listeners, NULL, handler))) ; /* pass */ fprintf(stdout, "%d\n", (int)port1); fprintf(stdout, "%d\n", (int)port2); fflush(stdout); getdns_context_run(context); } getdns_list_destroy(listeners); getdns_dict_destroy(address2); getdns_context_destroy(context); return r; }
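/* A minimal companion sketch (not part of capabilities.c): a stub client that
 * queries the test server above using the standard getdns API. The address and
 * port are assumptions - substitute the first port number the server prints. */
#include <getdns/getdns_extra.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	getdns_context *ctx = NULL;
	getdns_list *upstreams = NULL;
	getdns_dict *response = NULL;
	getdns_return_t r;

	if ((r = getdns_context_create(&ctx, 1)) ||
	    (r = getdns_context_set_resolution_type(ctx, GETDNS_RESOLUTION_STUB)) ||
	    (r = getdns_str2list("[ 127.0.0.1:18001 ]", &upstreams)) ||  /* assumed port */
	    (r = getdns_context_set_upstream_recursive_servers(ctx, upstreams)) ||
	    (r = getdns_general_sync(ctx, "test.", GETDNS_RRTYPE_TXT, NULL, &response)))
		fprintf(stderr, "error: %s\n", getdns_get_errorstr_by_id(r));
	else {
		char *str = getdns_pretty_print_dict(response);
		printf("%s\n", str);
		free(str);
	}
	if (response) getdns_dict_destroy(response);
	if (upstreams) getdns_list_destroy(upstreams);
	if (ctx) getdns_context_destroy(ctx);
	return r;
}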
811257.c
//##############################################
//#                   UESC                    #
//#                    LPI                    #
//#           Jauberth w. Abijaude            #
//#           [email protected]            #
//##############################################
#include<stdio.h>
#include<stdlib.h>
#include<stdbool.h>

int n;

int main(){
    printf("Enter a value: \n");
    scanf("%d", &n);
    if(n%2==0){
        printf("The number %d is even!", n);
    } else {
        printf("The number %d is odd!", n);
    }
    printf("\n");
    return 0;
}
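/* The same parity test with a bitwise AND instead of the modulo operator: the
 * lowest bit of an integer is 0 exactly when the number is even. A small
 * illustrative variant, not part of the original exercise. */
#include <stdio.h>

int main(void){
    int n;
    printf("Enter a value: \n");
    if (scanf("%d", &n) != 1)
        return 1;
    if ((n & 1) == 0)
        printf("The number %d is even!\n", n);
    else
        printf("The number %d is odd!\n", n);
    return 0;
}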
836511.c
/*
** LIBC PROJECT, 2018
** Unit tests
** File description:
** Unit tests for ez_sqrt()
*/

#include <criterion/criterion.h>
#include "ez_math.h"

Test(ez_sqrt, zero)
{
    cr_expect_float_eq(ez_sqrt(0), 0, 0.00000001);
}

Test(ez_sqrt, zero1)
{
    cr_expect_float_eq(ez_sqrt(-0), 0, 0.00000001);
}

Test(ez_sqrt, one)
{
    cr_expect_float_eq(ez_sqrt(1), 1, 0.00000001);
}

Test(ez_sqrt, basic)
{
    cr_expect_float_eq(ez_sqrt(25), 5, 0.00000001);
}

Test(ez_sqrt, big)
{
    cr_expect_float_eq(ez_sqrt(1524157875019052100), 1234567890, 0.0000001);
}

Test(ez_sqrt, neg)
{
    cr_expect_float_eq(ez_sqrt(-25), 0, 0.00000001);
}

Test(ez_sqrt, floating)
{
    cr_expect_float_eq(ez_sqrt(0.1), 0.31622776601, 0.00000001);
}

Test(ez_sqrt, floating1)
{
    cr_expect_float_eq(ez_sqrt(0.01), 0.1, 0.00000001);
}
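/* A minimal sketch of how ez_sqrt() might be implemented so that the tests
 * above pass: Newton-Raphson iteration on x_{n+1} = (x_n + v/x_n) / 2, with
 * negative inputs clamped to 0 (see the "neg" test). This is an illustrative
 * assumption, not the project's actual implementation. */
double ez_sqrt(double value)
{
    double guess;
    double next;
    int i;

    if (value <= 0)
        return 0;
    guess = value > 1 ? value : 1;
    for (i = 0; i < 200; i++) {
        next = 0.5 * (guess + value / guess);
        if (next == guess)       /* converged to machine precision */
            break;
        guess = next;
    }
    return guess;
}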
918222.c
/** @file SPDM common library. It follows the SPDM Specification. Copyright (c) 2020, Intel Corporation. All rights reserved.<BR> SPDX-License-Identifier: BSD-2-Clause-Patent **/ #include "SpdmResponderLibInternal.h" /** Get the SPDM encapsulated request. @param SpdmContext A pointer to the SPDM context. @param EncapRequestSize Size in bytes of the encapsulated request data. On input, it means the size in bytes of encapsulated request data buffer. On output, it means the size in bytes of copied encapsulated request data buffer if RETURN_SUCCESS is returned, and means the size in bytes of desired encapsulated request data buffer if RETURN_BUFFER_TOO_SMALL is returned. @param EncapRequest A pointer to the encapsulated request data. @retval RETURN_SUCCESS The encapsulated request is returned. @retval RETURN_BUFFER_TOO_SMALL The buffer is too small to hold the data. **/ typedef RETURN_STATUS (EFIAPI *SPDM_GET_ENCAP_REQUEST) ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN OUT UINTN *EncapRequestSize, OUT VOID *EncapRequest ); /** Process the SPDM encapsulated response. @param SpdmContext A pointer to the SPDM context. @param EncapResponseSize Size in bytes of the encapsulated response data. @param EncapResponse A pointer to the encapsulated response data. @param Continue Indicate if encapsulated communication need continue. @retval RETURN_SUCCESS The encapsulated response is processed. @retval RETURN_BUFFER_TOO_SMALL The buffer is too small to hold the data. @retval RETURN_SECURITY_VIOLATION Any verification fails. **/ typedef RETURN_STATUS (EFIAPI *SPDM_PROCESS_ENCAP_RESPONSE) ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN UINTN EncapResponseSize, IN VOID *EncapResponse, OUT BOOLEAN *Continue ); typedef struct { UINT8 RequestOpCode; SPDM_GET_ENCAP_REQUEST GetEncapRequest; SPDM_PROCESS_ENCAP_RESPONSE ProcessEncapResponse; } SPDM_ENCAP_RESPONSE_STRUCT; SPDM_ENCAP_RESPONSE_STRUCT mEncapResponsestruct[] = { {SPDM_GET_DIGESTS, SpdmGetEncapReqestGetDigest, SpdmProcessEncapResponseDigest}, {SPDM_GET_CERTIFICATE, SpdmGetEncapReqestGetCertificate, SpdmProcessEncapResponseCertificate}, {SPDM_CHALLENGE, SpdmGetEncapReqestChallenge, SpdmProcessEncapResponseChallengeAuth}, {SPDM_KEY_UPDATE, SpdmGetEncapReqestKeyUpdate, SpdmProcessEncapResponseKeyUpdate}, }; SPDM_ENCAP_RESPONSE_STRUCT * SpdmGetEncapStructViaOpCode ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN UINT8 RequestOpCode ) { UINTN Index; for (Index = 0; Index < ARRAY_SIZE(mEncapResponsestruct); Index++) { if (mEncapResponsestruct[Index].RequestOpCode == RequestOpCode) { return &mEncapResponsestruct[Index]; } } ASSERT (FALSE); return NULL; } VOID SpdmEncapMoveToNextOpCode ( IN SPDM_DEVICE_CONTEXT *SpdmContext ) { UINT8 Index; ASSERT (SpdmContext->EncapContext.RequestOpCodeCount <= MAX_ENCAP_REQUEST_OP_CODE_SEQUENCE_COUNT); if (SpdmContext->EncapContext.CurrentRequestOpCode == 0) { SpdmContext->EncapContext.CurrentRequestOpCode = SpdmContext->EncapContext.RequestOpCodeSequence[0]; return ; } for (Index = 0; Index < SpdmContext->EncapContext.RequestOpCodeCount; Index ++) { if (SpdmContext->EncapContext.CurrentRequestOpCode == SpdmContext->EncapContext.RequestOpCodeSequence[Index]) { SpdmContext->EncapContext.CurrentRequestOpCode = SpdmContext->EncapContext.RequestOpCodeSequence[Index + 1]; return ; } } ASSERT (FALSE); } /** Process a SPDM encapsulated response. @param SpdmContext The SPDM context for the device. @param EncapResponseSize Size in bytes of the request data. @param EncapResponse A pointer to the request data. 
@param EncapRequestSize Size in bytes of the response data. @param EncapRequest A pointer to the response data. @retval RETURN_SUCCESS The SPDM encapsulated request is generated successfully. @retval RETURN_UNSUPPORTED Do not know how to process the request. **/ RETURN_STATUS SpdmProcessEncapsulatedResponse ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN UINTN EncapResponseSize, IN VOID *EncapResponse, IN OUT UINTN *EncapRequestSize, OUT VOID *EncapRequest ) { RETURN_STATUS Status; BOOLEAN Continue; SPDM_ENCAP_RESPONSE_STRUCT *EncapResponseStruct; // Process previous response Continue = FALSE; if (SpdmContext->EncapContext.CurrentRequestOpCode != 0) { EncapResponseStruct = SpdmGetEncapStructViaOpCode (SpdmContext, SpdmContext->EncapContext.CurrentRequestOpCode); ASSERT (EncapResponseStruct != NULL); if (EncapResponseStruct == NULL) { return RETURN_UNSUPPORTED; } ASSERT (EncapResponseStruct->ProcessEncapResponse != NULL); if (EncapResponseStruct->ProcessEncapResponse == NULL) { return RETURN_UNSUPPORTED; } Status = EncapResponseStruct->ProcessEncapResponse (SpdmContext, EncapResponseSize, EncapResponse, &Continue); if (RETURN_ERROR(Status)) { return Status; } } SpdmContext->EncapContext.RequestId += 1; // Move to next request if (!Continue) { SpdmEncapMoveToNextOpCode (SpdmContext); } if (SpdmContext->EncapContext.CurrentRequestOpCode == 0) { // No more work to do - stop *EncapRequestSize = 0; SpdmContext->EncapContext.CurrentRequestOpCode = 0; return RETURN_SUCCESS; } // Process the next request EncapResponseStruct = SpdmGetEncapStructViaOpCode (SpdmContext, SpdmContext->EncapContext.CurrentRequestOpCode); ASSERT (EncapResponseStruct != NULL); if (EncapResponseStruct == NULL) { return RETURN_UNSUPPORTED; } ASSERT (EncapResponseStruct->GetEncapRequest != NULL); if (EncapResponseStruct->GetEncapRequest == NULL) { return RETURN_UNSUPPORTED; } Status = EncapResponseStruct->GetEncapRequest (SpdmContext, EncapRequestSize, EncapRequest); return Status; } /** This function initializes the mut_auth encapsulated state. @param SpdmContext A pointer to the SPDM context. @param MutAuthRequested Indicate of the MutAuthRequested through KEY_EXCHANGE or CHALLENG response. **/ VOID SpdmInitMutAuthEncapState ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN UINT8 MutAuthRequested ) { SpdmContext->EncapContext.ErrorState = 0; SpdmContext->EncapContext.CurrentRequestOpCode = 0x00; if (MutAuthRequested == SPDM_KEY_EXCHANGE_RESPONSE_MUT_AUTH_REQUESTED_WITH_GET_DIGESTS) { SpdmContext->EncapContext.CurrentRequestOpCode = SPDM_GET_DIGESTS; } SpdmContext->EncapContext.RequestId = 0; SpdmContext->EncapContext.LastEncapRequestSize = 0; ZeroMem (&SpdmContext->EncapContext.LastEncapRequestHeader, sizeof(SpdmContext->EncapContext.LastEncapRequestHeader)); SpdmContext->EncapContext.CertificateChainBuffer.BufferSize = 0; SpdmContext->ResponseState = SpdmResponseStateProcessingEncap; // // Clear Cache // ResetManagedBuffer (&SpdmContext->Transcript.MessageMutB); ResetManagedBuffer (&SpdmContext->Transcript.MessageMutC); // // Possible Sequence: // 2. 
Session Mutual Auth: (SpdmContext->LastSpdmRequestSessionIdValid) // 2.1 GET_DIGEST/GET_CERTIFICATE (SpdmContext->EncapContext.ReqSlotNum != 0xFF) // 2.2 GET_DIGEST (SpdmContext->EncapContext.ReqSlotNum == 0xFF) // 2.3 N/A (SPDM_GET_CAPABILITIES_REQUEST_FLAGS_PUB_KEY_ID_CAP) // ZeroMem (SpdmContext->EncapContext.RequestOpCodeSequence, sizeof(SpdmContext->EncapContext.RequestOpCodeSequence)); // Session Mutual Auth if (SpdmIsCapabilitiesFlagSupported(SpdmContext, FALSE, SPDM_GET_CAPABILITIES_REQUEST_FLAGS_PUB_KEY_ID_CAP, 0) || (MutAuthRequested == SPDM_KEY_EXCHANGE_RESPONSE_MUT_AUTH_REQUESTED)) { // no encap is required SpdmContext->EncapContext.RequestOpCodeCount = 0; } else if (SpdmContext->EncapContext.ReqSlotNum != 0xFF) { SpdmContext->EncapContext.RequestOpCodeCount = 2; SpdmContext->EncapContext.RequestOpCodeSequence[0] = SPDM_GET_DIGESTS; SpdmContext->EncapContext.RequestOpCodeSequence[1] = SPDM_GET_CERTIFICATE; } else { SpdmContext->EncapContext.RequestOpCodeCount = 1; SpdmContext->EncapContext.RequestOpCodeSequence[0] = SPDM_GET_DIGESTS; } } /** This function initializes the basic_mut_auth encapsulated state. @param SpdmContext A pointer to the SPDM context. @param BasicMutAuthRequested Indicate of the MutAuthRequested through CHALLENG response. **/ VOID SpdmInitBasicMutAuthEncapState ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN UINT8 BasicMutAuthRequested ) { SpdmContext->EncapContext.ErrorState = 0; SpdmContext->EncapContext.CurrentRequestOpCode = 0x00; SpdmContext->EncapContext.RequestId = 0; SpdmContext->EncapContext.LastEncapRequestSize = 0; ZeroMem (&SpdmContext->EncapContext.LastEncapRequestHeader, sizeof(SpdmContext->EncapContext.LastEncapRequestHeader)); SpdmContext->EncapContext.CertificateChainBuffer.BufferSize = 0; SpdmContext->ResponseState = SpdmResponseStateProcessingEncap; // // Clear Cache // ResetManagedBuffer (&SpdmContext->Transcript.MessageMutB); ResetManagedBuffer (&SpdmContext->Transcript.MessageMutC); // // Possible Sequence: // 1. Basic Mutual Auth: // 1.1 GET_DIGEST/GET_CERTIFICATE/CHALLENGE (SpdmContext->EncapContext.ReqSlotNum != 0xFF) // 1.2 GET_DIGEST/CHALLENGE (SpdmContext->EncapContext.ReqSlotNum == 0xFF) // 1.3 CHALLENGE (SPDM_GET_CAPABILITIES_REQUEST_FLAGS_PUB_KEY_ID_CAP) // ZeroMem (SpdmContext->EncapContext.RequestOpCodeSequence, sizeof(SpdmContext->EncapContext.RequestOpCodeSequence)); // Basic Mutual Auth if (SpdmIsCapabilitiesFlagSupported(SpdmContext, FALSE, SPDM_GET_CAPABILITIES_REQUEST_FLAGS_PUB_KEY_ID_CAP, 0)) { SpdmContext->EncapContext.RequestOpCodeCount = 1; SpdmContext->EncapContext.RequestOpCodeSequence[0] = SPDM_CHALLENGE; } else if (SpdmContext->EncapContext.ReqSlotNum != 0xFF) { SpdmContext->EncapContext.RequestOpCodeCount = 3; SpdmContext->EncapContext.RequestOpCodeSequence[0] = SPDM_GET_DIGESTS; SpdmContext->EncapContext.RequestOpCodeSequence[1] = SPDM_GET_CERTIFICATE; SpdmContext->EncapContext.RequestOpCodeSequence[2] = SPDM_CHALLENGE; } else { SpdmContext->EncapContext.RequestOpCodeCount = 2; SpdmContext->EncapContext.RequestOpCodeSequence[0] = SPDM_GET_DIGESTS; SpdmContext->EncapContext.RequestOpCodeSequence[1] = SPDM_CHALLENGE; } } /** This function initializes the key_update encapsulated state. @param SpdmContext A pointer to the SPDM context. 
**/ VOID EFIAPI SpdmInitKeyUpdateEncapState ( IN VOID *Context ) { SPDM_DEVICE_CONTEXT *SpdmContext; SpdmContext = Context; SpdmContext->EncapContext.ErrorState = 0; SpdmContext->EncapContext.CurrentRequestOpCode = 0x00; SpdmContext->EncapContext.RequestId = 0; SpdmContext->EncapContext.LastEncapRequestSize = 0; ZeroMem (&SpdmContext->EncapContext.LastEncapRequestHeader, sizeof(SpdmContext->EncapContext.LastEncapRequestHeader)); SpdmContext->EncapContext.CertificateChainBuffer.BufferSize = 0; SpdmContext->ResponseState = SpdmResponseStateProcessingEncap; ResetManagedBuffer (&SpdmContext->Transcript.MessageMutB); ResetManagedBuffer (&SpdmContext->Transcript.MessageMutC); ZeroMem (SpdmContext->EncapContext.RequestOpCodeSequence, sizeof(SpdmContext->EncapContext.RequestOpCodeSequence)); SpdmContext->EncapContext.RequestOpCodeCount = 1; SpdmContext->EncapContext.RequestOpCodeSequence[0] = SPDM_KEY_UPDATE; } /** Process the SPDM ENCAPSULATED_REQUEST request and return the response. @param SpdmContext A pointer to the SPDM context. @param RequestSize Size in bytes of the request data. @param Request A pointer to the request data. @param ResponseSize Size in bytes of the response data. On input, it means the size in bytes of response data buffer. On output, it means the size in bytes of copied response data buffer if RETURN_SUCCESS is returned, and means the size in bytes of desired response data buffer if RETURN_BUFFER_TOO_SMALL is returned. @param Response A pointer to the response data. @retval RETURN_SUCCESS The request is processed and the response is returned. @retval RETURN_BUFFER_TOO_SMALL The buffer is too small to hold the data. @retval RETURN_DEVICE_ERROR A device error occurs when communicates with the device. @retval RETURN_SECURITY_VIOLATION Any verification fails. 
**/ RETURN_STATUS EFIAPI SpdmGetResponseEncapsulatedRequest ( IN VOID *Context, IN UINTN RequestSize, IN VOID *Request, IN OUT UINTN *ResponseSize, OUT VOID *Response ) { SPDM_ENCAPSULATED_REQUEST_RESPONSE *SpdmResponse; SPDM_DEVICE_CONTEXT *SpdmContext; VOID *EncapRequest; UINTN EncapRequestSize; RETURN_STATUS Status; SPDM_GET_ENCAPSULATED_REQUEST_REQUEST *SpdmRequest; SpdmContext = Context; SpdmRequest = Request; if (!SpdmIsCapabilitiesFlagSupported(SpdmContext, FALSE, SPDM_GET_CAPABILITIES_REQUEST_FLAGS_ENCAP_CAP, SPDM_GET_CAPABILITIES_RESPONSE_FLAGS_ENCAP_CAP)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_UNSUPPORTED_REQUEST, SPDM_GET_ENCAPSULATED_REQUEST, ResponseSize, Response); return RETURN_SUCCESS; } if (SpdmContext->ResponseState != SpdmResponseStateProcessingEncap) { if (SpdmContext->ResponseState == SpdmResponseStateNormal) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_UNEXPECTED_REQUEST, 0, ResponseSize, Response); return RETURN_SUCCESS; } return SpdmResponderHandleResponseState(SpdmContext, SpdmRequest->Header.RequestResponseCode, ResponseSize, Response); } if (RequestSize != sizeof(SPDM_GET_ENCAPSULATED_REQUEST_REQUEST)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_INVALID_REQUEST, 0, ResponseSize, Response); return RETURN_SUCCESS; } ASSERT (*ResponseSize > sizeof(SPDM_ENCAPSULATED_REQUEST_RESPONSE)); ZeroMem (Response, *ResponseSize); SpdmResponse = Response; SpdmResponse->Header.SPDMVersion = SPDM_MESSAGE_VERSION_11; SpdmResponse->Header.RequestResponseCode = SPDM_ENCAPSULATED_REQUEST; SpdmResponse->Header.Param1 = 0; SpdmResponse->Header.Param2 = 0; EncapRequestSize = *ResponseSize - sizeof(SPDM_ENCAPSULATED_REQUEST_RESPONSE); EncapRequest = SpdmResponse + 1; Status = SpdmProcessEncapsulatedResponse (Context, 0, NULL, &EncapRequestSize, EncapRequest); if (RETURN_ERROR(Status)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_INVALID_RESPONSE_CODE, 0, ResponseSize, Response); SpdmContext->ResponseState = SpdmResponseStateNormal; return RETURN_SUCCESS; } *ResponseSize = sizeof(SPDM_ENCAPSULATED_REQUEST_RESPONSE) + EncapRequestSize; SpdmResponse->Header.Param1 = SpdmContext->EncapContext.RequestId; if (EncapRequestSize == 0) { SpdmContext->ResponseState = SpdmResponseStateNormal; } return RETURN_SUCCESS; } /** Process the SPDM ENCAPSULATED_RESPONSE_ACK request and return the response. @param SpdmContext A pointer to the SPDM context. @param RequestSize Size in bytes of the request data. @param Request A pointer to the request data. @param ResponseSize Size in bytes of the response data. On input, it means the size in bytes of response data buffer. On output, it means the size in bytes of copied response data buffer if RETURN_SUCCESS is returned, and means the size in bytes of desired response data buffer if RETURN_BUFFER_TOO_SMALL is returned. @param Response A pointer to the response data. @retval RETURN_SUCCESS The request is processed and the response is returned. @retval RETURN_BUFFER_TOO_SMALL The buffer is too small to hold the data. @retval RETURN_DEVICE_ERROR A device error occurs when communicates with the device. @retval RETURN_SECURITY_VIOLATION Any verification fails. 
**/ RETURN_STATUS EFIAPI SpdmGetResponseEncapsulatedResponseAck ( IN VOID *Context, IN UINTN RequestSize, IN VOID *Request, IN OUT UINTN *ResponseSize, OUT VOID *Response ) { SPDM_DELIVER_ENCAPSULATED_RESPONSE_REQUEST *SpdmRequest; UINTN SpdmRequestSize; SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE *SpdmResponse; SPDM_DEVICE_CONTEXT *SpdmContext; VOID *EncapResponse; UINTN EncapResponseSize; VOID *EncapRequest; UINTN EncapRequestSize; RETURN_STATUS Status; SpdmContext = Context; SpdmRequest = Request; if (!SpdmIsCapabilitiesFlagSupported(SpdmContext, FALSE, SPDM_GET_CAPABILITIES_REQUEST_FLAGS_ENCAP_CAP, SPDM_GET_CAPABILITIES_RESPONSE_FLAGS_ENCAP_CAP)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_UNSUPPORTED_REQUEST, SPDM_DELIVER_ENCAPSULATED_RESPONSE, ResponseSize, Response); return RETURN_SUCCESS; } if (SpdmContext->ResponseState != SpdmResponseStateProcessingEncap) { if (SpdmContext->ResponseState == SpdmResponseStateNormal) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_UNEXPECTED_REQUEST, 0, ResponseSize, Response); return RETURN_SUCCESS; } return SpdmResponderHandleResponseState(SpdmContext, SpdmRequest->Header.RequestResponseCode, ResponseSize, Response); } if (RequestSize <= sizeof(SPDM_DELIVER_ENCAPSULATED_RESPONSE_REQUEST)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_INVALID_REQUEST, 0, ResponseSize, Response); return RETURN_SUCCESS; } SpdmRequestSize = RequestSize; if (SpdmRequest->Header.Param1 != SpdmContext->EncapContext.RequestId) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_INVALID_REQUEST, 0, ResponseSize, Response); return RETURN_SUCCESS; } EncapResponse = (SpdmRequest + 1); EncapResponseSize = SpdmRequestSize - sizeof(SPDM_DELIVER_ENCAPSULATED_RESPONSE_REQUEST); ASSERT (*ResponseSize > sizeof(SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE)); ZeroMem (Response, *ResponseSize); SpdmResponse = Response; SpdmResponse->Header.SPDMVersion = SPDM_MESSAGE_VERSION_11; SpdmResponse->Header.RequestResponseCode = SPDM_ENCAPSULATED_RESPONSE_ACK; SpdmResponse->Header.Param1 = 0; SpdmResponse->Header.Param2 = SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE_PAYLOAD_TYPE_PRESENT; EncapRequestSize = *ResponseSize - sizeof(SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE); EncapRequest = SpdmResponse + 1; if (EncapResponseSize < sizeof(SPDM_MESSAGE_HEADER)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_INVALID_REQUEST, 0, ResponseSize, Response); return RETURN_SUCCESS; } Status = SpdmProcessEncapsulatedResponse (Context, EncapResponseSize, EncapResponse, &EncapRequestSize, EncapRequest); if (RETURN_ERROR(Status)) { SpdmGenerateErrorResponse (SpdmContext, SPDM_ERROR_CODE_INVALID_RESPONSE_CODE, 0, ResponseSize, Response); SpdmContext->ResponseState = SpdmResponseStateNormal; return RETURN_SUCCESS; } *ResponseSize = sizeof(SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE) + EncapRequestSize; SpdmResponse->Header.Param1 = SpdmContext->EncapContext.RequestId; if (EncapRequestSize == 0) { SpdmResponse->Header.Param2 = SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE_PAYLOAD_TYPE_ABSENT; if (SpdmContext->EncapContext.ReqSlotNum != 0) { SpdmResponse->Header.Param2 = SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE_PAYLOAD_TYPE_REQ_SLOT_NUMBER; *ResponseSize = sizeof(SPDM_ENCAPSULATED_RESPONSE_ACK_RESPONSE) + 1; *(UINT8 *)(SpdmResponse + 1) = SpdmContext->EncapContext.ReqSlotNum; } SpdmContext->ResponseState = SpdmResponseStateNormal; } return RETURN_SUCCESS; } /** This function handles the encap error response. @param SpdmContext A pointer to the SPDM context. 
@param ManagedBuffer The managed buffer to be shrinked. @param ShrinkBufferSize The size in bytes of the size of the buffer to be shrinked. @param ErrorCode Indicate the error code. @retval RETURN_DEVICE_ERROR A device error occurs when communicates with the device. **/ RETURN_STATUS EFIAPI SpdmHandleEncapErrorResponseMain ( IN SPDM_DEVICE_CONTEXT *SpdmContext, IN OUT VOID *MBuffer, IN UINTN ShrinkBufferSize, IN UINT8 ErrorCode ) { // // According to "Timing Specification for SPDM messages", RESPONSE_NOT_READY is only for responder. // RESPONSE_NOT_READY should not be sent by requester. No need to check it. // // // No need to shrink MessageMutB and MessageMutC, because any error will terminate the ENCAP MUT AUTH. // The sequence is fixed in CHALLENG_AUTH or KEY_EXCHANGE_RSP, the responder cannot issue encap request again. // If the requester restarts the mutual auth via CHALLENG or KEY_EXCHANGE, the encap will also restart. // Do it here just to align with requester. // ShrinkManagedBuffer(MBuffer, ShrinkBufferSize); return RETURN_DEVICE_ERROR; }
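/* The encapsulated-request handling above is table-driven, so supporting
 * another request type only takes one more entry pairing an opcode with its
 * two handlers. A hypothetical sketch - the GET_MEASUREMENTS handler names
 * below are assumptions, not functions defined in this library: */
SPDM_ENCAP_RESPONSE_STRUCT mEncapResponsestruct[] = {
  {SPDM_GET_DIGESTS,      SpdmGetEncapReqestGetDigest,       SpdmProcessEncapResponseDigest},
  {SPDM_GET_CERTIFICATE,  SpdmGetEncapReqestGetCertificate,  SpdmProcessEncapResponseCertificate},
  {SPDM_CHALLENGE,        SpdmGetEncapReqestChallenge,       SpdmProcessEncapResponseChallengeAuth},
  {SPDM_KEY_UPDATE,       SpdmGetEncapReqestKeyUpdate,       SpdmProcessEncapResponseKeyUpdate},
  /* hypothetical addition: */
  {SPDM_GET_MEASUREMENTS, SpdmGetEncapReqestGetMeasurements, SpdmProcessEncapResponseMeasurements},
};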
382151.c
//------------------------------------------------------------------------------
// GB_Adot3B: hard-coded dot3 method for a semiring
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB_AxB_defs__plus_first_fp32.h"
#ifndef GBCOMPACT

//------------------------------------------------------------------------------
// C<M>=A'*B: masked dot product method (phase 2) where C is sparse or hyper
//------------------------------------------------------------------------------

GrB_Info GB (_Adot3B__plus_first_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    const GB_task_struct *restrict TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_AxB_dot3_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
839507.c
/********************************************************************************** ** ** Copyright (C) 1994 Narvik University College ** Contact: GMlib Online Portal at http://episteme.hin.no ** ** This file is part of the Geometric Modeling Library, GMlib. ** ** GMlib is free software: you can redistribute it and/or modify ** it under the terms of the GNU Lesser General Public License as published by ** the Free Software Foundation, either version 3 of the License, or ** (at your option) any later version. ** ** GMlib is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU Lesser General Public License for more details. ** ** You should have received a copy of the GNU Lesser General Public License ** along with GMlib. If not, see <http://www.gnu.org/licenses/>. ** **********************************************************************************/ namespace GMlib { /*! PHermiteSurface<T>::PHermiteSurface( const Array< PCurve<T>* >& c1, const Array<PCurve<T>* >& c2 ) * * Creates a generic Hermite surface from the curves provided in c1 and c2. * It's expected two or four curves in each array. If two curves are given it will create a Coon's patch surface. * * The curves in c1 describes the u-direction, while the curves in c2 describes the v-direction. * The two first curves describes the boundaries, and the next two describes the derivatives along the respective boundary. * * If c1[0] is f1, c1[1] is f2, c2[0] is f3 and c2[1] is f4, and the corners is denoted p00, p10, p01 and p11, then * p00 is the corner at f1 and f3, p10 is the corner at f1 and f4, p01 is the corner at f2 and f3 and p11 is the corner at f3 and f4. * * The direction of f1, f2, f3 and f4 are: * f1: p00 to p10 * f2: p00 to p01 * f3: p01 to p11 * f4: p10 to p11 * * \param[in] c1 Curves describing the boundary in u-direction * \param[in] c2 Curves describing the boundary in v-direction */ //***************************************** // Constructors and destructor ** //***************************************** template <typename T> PHermiteSurface<T>::PHermiteSurface( const Array< PCurve<T,3>* >& c1, const Array<PCurve<T,3>* >& c2 ) { init(); _c1 = c1; _c2 = c2; for( int i = 0; i < _c1.getSize(); i++ ) insertPatch( _c1[i] ); for( int i = 0; i < _c2.getSize(); i++ ) insertPatch( _c2[i] ); _b.setDim( 2, 2 ); _b[0][0] = (_c1[0]->evaluateParent( _c1[0]->getParStart(), 0 ))[0]; _b[1][0] = (_c1[0]->evaluateParent( _c1[0]->getParEnd(), 0 ))[0]; _b[0][1] = (_c2[0]->evaluateParent( _c2[0]->getParEnd(), 0 ))[0]; _b[1][1] = (_c1[1]->evaluateParent( _c1[1]->getParEnd(), 0 ))[0]; } /*! PHermiteSurface<T>::~PHermiteSurface() * * Destructor. 
*/ template <typename T> PHermiteSurface<T>::~PHermiteSurface() { for( int i = 0; i < _c1.getSize(); i++ ) SceneObject::remove( _c1[i] ); for( int i = 0; i < _c2.getSize(); i++ ) SceneObject::remove( _c2[i] ); } //***************************************** // Local functons ** //***************************************** template <typename T> inline void PHermiteSurface<T>::edit( SceneObject* /*obj*/ ) { _b[0][0] = (_c1[0]->evaluateParent( _c1[0]->getParStart(), 0 ))[0]; _b[1][0] = (_c1[0]->evaluateParent( _c1[0]->getParEnd(), 0 ))[0]; _b[0][1] = (_c2[0]->evaluateParent( _c2[0]->getParEnd(), 0 ))[0]; _b[1][1] = (_c1[1]->evaluateParent( _c1[1]->getParEnd(), 0 ))[0]; PSurf<T,3>::replot(); } //************************************************** // Overrided (public) virtual functons from PSurf ** //************************************************** template <typename T> bool PHermiteSurface<T>::isClosedU() const { return false; } template <typename T> bool PHermiteSurface<T>::isClosedV() const { return false; } //***************************************************** // Overrided (protected) virtual functons from PSurf ** //***************************************************** template <typename T> void PHermiteSurface<T>::eval(T u, T v, int d1, int d2, bool /*lu*/, bool /*lv*/) const { // set result set dim this->_p.setDim( d1+1, d2+1 ); // vars Point<float,3> p(0.0f); // interpolate u in v direction for( int i = 0; i < _c1.getSize(); i++ ) p += getH( _c2.getSize()/2, i, v ) * (_c1(i)->evaluateParent(u, 1))(0); // interpolate v in u direction for( int i = 0; i < _c2.getSize(); i++ ) p += getH( _c1.getSize()/2, i, u ) * (_c2(i)->evaluateParent(v, 1))(0); // bi-linear interpolation p -= _b(0)(0) * (1.0f - u) * (1.0f - v) + _b(0)(1) * (1.0f - u) * v + _b(1)(0) * u * (1.0f - v) + _b(1)(1) * u * v; this->_p[0][0] = p; if( this->_dm == GM_DERIVATION_EXPLICIT ) { if( d1 > 0 ) { // Su p = Point<float,3>(0.0f); // interpolate u in v direction for( int i = 0; i < _c1.getSize(); i++ ) p += getH( _c2.getSize()/2, i, v ) * (_c1(i)->evaluateParent(u, 1))(1); // interpolate v in u direction for( int i = 0; i < _c2.getSize(); i++ ) p += getHder( _c1.getSize()/2, i, u ) * (_c2(i)->evaluateParent(v, 1))(0); // bi-linear interpolation p -= _b(0)(0) * ( v - 1.0f ) + _b(0)(1) * ( -v ) + _b(1)(0) * (1.0f - v) + _b(1)(1) * v; this->_p[1][0] = p; } if( d2 > 0 ) { // Sv p = Point<float,3>(0.0f); // interpolate u in v direction for( int i = 0; i < _c1.getSize(); i++ ) p += getHder( _c2.getSize()/2, i, v ) * (_c1(i)->evaluateParent(u, 1))(0); // interpolate v in u direction for( int i = 0; i < _c2.getSize(); i++ ) p += getH( _c1.getSize()/2, i, u ) * (_c2(i)->evaluateParent(v, 1))(1); // bi-linear interpolation p -= _b(0)(0) * (u - 1.0f) + _b(0)(1) * (1.0f - u) + _b(1)(0) * ( -u ) + _b(1)(1) * u; this->_p[0][1] = p; } if( d1 > 0 && d2 > 0 ) { p = Point<float,3>(0.0f); // interpolate u in v direction for( int i = 0; i < _c1.getSize(); i++ ) p += getHder( _c2.getSize()/2, i, v ) * (_c1(i)->evaluateParent(u, 1))(1); // interpolate v in u direction for( int i = 0; i < _c2.getSize(); i++ ) p += getHder( _c1.getSize()/2, i, u ) * (_c2(i)->evaluateParent(v, 1))(1); // bi-linear interpolation p -= _b(0)(0) * (1.0f - u) * (1.0f - v) + _b(0)(1) * (1.0f - u) * v + _b(1)(0) * u * (1.0f - v) + _b(1)(1) * u * v; this->_p[1][1] = p; } } } template <typename T> T PHermiteSurface<T>::getStartPU() const { return T(0); } template <typename T> T PHermiteSurface<T>::getEndPU() const { return T(1); } template <typename T> T 
PHermiteSurface<T>::getStartPV() const { return T(0); } template <typename T> T PHermiteSurface<T>::getEndPV() const { return T(1); } /*! T PHermiteSurface<T>::getH( int d, int k, T t ) * * Gives the hermite interpolation factor * * \param[in] d Number of curves describing the given boundary * \param[in] k Curve number k * \param[in] t Parametric value [0-1] * \return The hermite interpolation factor [0-1]. */ template <typename T> inline T PHermiteSurface<T>::getH( int d, int k, T t ) const { if(d==2) { switch (k) { case 1: return (2*t-3)*t*t+1; case 2: return (3-2*t)*t*t; case 3: return (t*t-2*t+1)*t; case 4: return (t-1)*t*t; } } // if(d==1) if(k==1) return t; else return 1.0-t; } /*! T PHermiteSurface<T>::getHder( int d, int k, T t ) * * Gives the hermite interpolation factor for derivatives * * \param[in] d Number of curves describing the given boundary * \param[in] k Curve number k * \param[in] t Parametric value [0-1] * \return The hermite interpolation factor [0-1]. */ template <typename T> inline T PHermiteSurface<T>::getHder( int d, int k, T t ) const { if(d==2) { switch (k) { case 1: return (t-1)*t*6; case 2: return (1-t)*t*6; case 3: return 3*t*t-4*t+1; case 4: return (3*t-2)*t; } } // if(d==1) if(k==1) return 1.0; else return -1.0; } template <typename T> void PHermiteSurface<T>::insertPatch( PCurve<T,3>* patch ) { patch->replot( 10 ); patch->setVisible( true ); patch->setCollapsed( false ); SceneObject::insert( patch ); } /*! bool PHermiteSurface<T>::isValidCoonsPatch() * * Validates the surfaces as a Coon's patch * Returns whether the surface is classified as a Coon's patch. * * \return Returns true if the surfaces is a Coon's patch */ template <typename T> bool PHermiteSurface<T>::isValidCoonsPatch() { // Check number of curves if( _c1.getSize() < 2 || _c2.getSize() < 2 ) return false; // Check that the corner points are joined together (b00, b10, b01 and b11) // _c1 -> c1/c2, _c2 -> f1/f2 Point<T,3> c1_s, c1_e, c2_s, c2_e; Point<T,3> f1_s, f1_e, f2_s, f2_e; c1_s = _c1[0]->evaluateParent( _c1[0]->getParStart(), 0 )[0]; c1_e = _c1[0]->evaluateParent( _c1[0]->getParEnd(), 0 )[0]; c2_s = _c1[1]->evaluateParent( _c1[1]->getParStart(), 0 )[0]; c2_e = _c1[1]->evaluateParent( _c1[1]->getParEnd(), 0 )[0]; f1_s = _c2[0]->evaluateParent( _c2[0]->getParStart(), 0 )[0]; f1_e = _c2[0]->evaluateParent( _c2[0]->getParEnd(), 0 )[0]; f2_s = _c2[1]->evaluateParent( _c2[1]->getParStart(), 0 )[0]; f2_e = _c2[1]->evaluateParent( _c2[1]->getParEnd(), 0 )[0]; // Junction c1 x c2 if( std::fabs( (c1_s - f1_s).getLength() ) > 1e-5 ) return false; // Junction c1 x c4 if( std::fabs( (c1_e - f2_s).getLength() ) > 1e-5 ) return false; // Junction c2 x c3 if( std::fabs( (f1_e - c2_s).getLength() ) > 1e-5 ) return false; // Junction c3 x c4 if( std::fabs( (c2_e - f2_e).getLength() ) > 1e-5 ) return false; // return true if all criterias is met return true; } /*! 
bool PHermiteSurface<T>::isValidHermiteSurface() * * Validates the surfaces as a Hermite surface * Returns whether the surface is classified as a Hermite surface * * \return Returns true if the surfaces is a hermite surface */ template <typename T> bool PHermiteSurface<T>::isValidHermiteSurface() { // Needs to fullfill the requirements for a Coons patch if( !isValidCoonsPatch() ) return false; // Check Size if( _c1.getSize() != 4 || _c2.getSize() != 4 ) return false; // _c1 -> c1/c2/c3/c4, _c2 -> f1/f2/f3/f4 // c3/c4 and f3/f4 represent derivatives Point<T,3> c1_s, c1_e, c2_s, c2_e, c3_s, c3_e, c4_s, c4_e; Point<T,3> f1_s, f1_e, f2_s, f2_e, f3_s, f3_e, f4_s, f4_e; c1_s = _c1[0]->evaluateParent( _c1[0]->getParStart(), 0 )[0]; c1_e = _c1[0]->evaluateParent( _c1[0]->getParEnd(), 0 )[0]; c2_s = _c1[1]->evaluateParent( _c1[1]->getParStart(), 0 )[0]; c2_e = _c1[1]->evaluateParent( _c1[1]->getParEnd(), 0 )[0]; c3_s = _c1[2]->evaluateParent( _c1[2]->getParStart(), 0 )[0]; c3_e = _c1[2]->evaluateParent( _c1[2]->getParEnd(), 0 )[0]; c4_s = _c1[3]->evaluateParent( _c1[3]->getParStart(), 0 )[0]; c4_e = _c1[3]->evaluateParent( _c1[3]->getParEnd(), 0 )[0]; f1_s = _c2[0]->evaluateParent( _c2[0]->getParStart(), 0 )[0]; f1_e = _c2[0]->evaluateParent( _c2[0]->getParEnd(), 0 )[0]; f2_s = _c2[1]->evaluateParent( _c2[1]->getParStart(), 0 )[0]; f2_e = _c2[1]->evaluateParent( _c2[1]->getParEnd(), 0 )[0]; f3_s = _c2[2]->evaluateParent( _c2[2]->getParStart(), 0 )[0]; f3_e = _c2[2]->evaluateParent( _c2[2]->getParEnd(), 0 )[0]; f4_s = _c2[3]->evaluateParent( _c2[3]->getParStart(), 0 )[0]; f4_e = _c2[3]->evaluateParent( _c2[3]->getParEnd(), 0 )[0]; // b00 -> c1_s f3'_s && f1_s c3'_s if( std::fabs( (c1_s - f3_s).getLength() ) > 1e-5 ) return false; if( std::fabs( (f1_s - c3_s).getLength() ) > 1e-5 ) return false; // b10 -> c1_e f4'_s && f2_s c3'_e if( std::fabs( (c1_e - f4_s).getLength() ) > 1e-5 ) return false; if( std::fabs( (f2_s - c3_e).getLength() ) > 1e-5 ) return false; // b01 -> f1_e c4'_s && c2_s f3'_e if( std::fabs( (f1_e - c4_s).getLength() ) > 1e-5 ) return false; if( std::fabs( (c2_s - f3_e).getLength() ) > 1e-5 ) return false; // b11 -> f2_e c3'_e && c2_e f4'_e if( std::fabs( (f2_e - c3_e).getLength() ) > 1e-5 ) return false; if( std::fabs( (c2_e - f4_e).getLength() ) > 1e-5 ) return false; // return true if all criterias is met return true; } //***************************************** // Local (protected) functons ** //***************************************** template <typename T> void PHermiteSurface<T>::init() { this->_dm = GM_DERIVATION_EXPLICIT; } } // END namespace GMlib
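/* For reference, the d==2 branch of getH() above returns the standard cubic
 * Hermite basis polynomials, and getHder() their derivatives:
 *
 *   k=1:  H1(t) = 2t^3 - 3t^2 + 1      H1'(t) = 6t^2 - 6t
 *   k=2:  H2(t) = 3t^2 - 2t^3          H2'(t) = 6t - 6t^2
 *   k=3:  H3(t) = t^3 - 2t^2 + t       H3'(t) = 3t^2 - 4t + 1
 *   k=4:  H4(t) = t^3 - t^2            H4'(t) = 3t^2 - 2t
 *
 * H1/H2 blend the two boundary curves (H1(0) = 1, H2(1) = 1), while H3/H4
 * weight the derivative curves (H3'(0) = 1, H4'(1) = 1). For d==1 the code
 * falls back to linear interpolation, which gives the Coons patch case. */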
284132.c
#include <stdio.h>

int main(){
    char c = 'A';
    do {
        printf("%c ", c);
        c++;
    } while (c <= 'Z');  /* c != 'Z' stopped the loop at 'Y'; <= prints the full A-Z */
    return 0;
}
463082.c
#include "capwap.h" #include "dbg.h" #include "log.h" int cw_check_missing_mand(struct cw_MsgData *msgdata, mavl_t keys ) { mlistelem_t * elem; char *mandkey, *result; mlist_t missing; int count; missing = mlist_create_conststr(); if (missing==NULL){ cw_log(LOG_ERR, "Can't allocate memory for check of missing mand elems: %s", strerror(errno)); return 0; } mlist_foreach(elem, msgdata->mand_keys){ mandkey = mlistelem_get_str(elem); result = mavl_get_str(keys,mandkey); if (result == NULL){ mlist_append_ptr(missing,mandkey); } } mlist_foreach(elem,missing){ cw_dbg(DBG_RFC," Missing mandatory message element: %s", mlistelem_get_str(elem)); } count = missing->count; mlist_destroy(missing); return count; }
930572.c
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* ------------------------------------------------------------------------- * Includes * ------------------------------------------------------------------------- */ #include <linux/msm_dma_iommu_mapping.h> #include <soc/qcom/subsystem_restart.h> #include <linux/device.h> #include <linux/platform_device.h> #include "npu_hw_access.h" #include "npu_common.h" #include "npu_hw.h" /* ------------------------------------------------------------------------- * Functions - Register * ------------------------------------------------------------------------- */ uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off) { uint32_t ret = 0; ret = readl(npu_dev->core_io.base + off); return ret; } void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val) { writel_relaxed(val, npu_dev->core_io.base + off); __iowmb(); } uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off) { uint32_t ret = 0; ret = readl(npu_dev->bwmon_io.base + off); return ret; } void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val) { writel_relaxed(val, npu_dev->bwmon_io.base + off); __iowmb(); } uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off) { uint32_t ret = 0; if (npu_dev->qfprom_io.base) ret = readl(npu_dev->qfprom_io.base + off); return ret; } /* ------------------------------------------------------------------------- * Functions - Memory * ------------------------------------------------------------------------- */ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src, uint32_t size) { size_t dst_off = (size_t)dst; uint32_t *src_ptr32 = (uint32_t *)src; uint8_t *src_ptr8 = 0; uint32_t i = 0; uint32_t num = 0; num = size/4; for (i = 0; i < num; i++) { writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off); dst_off += 4; } if (size%4 != 0) { src_ptr8 = (uint8_t *)((size_t)src + (num*4)); num = size%4; for (i = 0; i < num; i++) { writeb_relaxed(src_ptr8[i], npu_dev->tcm_io.base + dst_off); dst_off += 1; } } } int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst, uint32_t size) { size_t src_off = (size_t)src; uint32_t *out32 = (uint32_t *)dst; uint8_t *out8 = 0; uint32_t i = 0; uint32_t num = 0; num = size/4; for (i = 0; i < num; i++) { out32[i] = readl_relaxed(npu_dev->tcm_io.base + src_off); src_off += 4; } if (size%4 != 0) { out8 = (uint8_t *)((size_t)dst + (num*4)); num = size%4; for (i = 0; i < num; i++) { out8[i] = readb_relaxed(npu_dev->tcm_io.base + src_off); src_off += 1; } } return 0; } void *npu_ipc_addr(void) { return (void *)(IPC_MEM_OFFSET_FROM_SSTCM); } /* ------------------------------------------------------------------------- * Functions - Interrupt * ------------------------------------------------------------------------- */ void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num) { struct npu_host_ctx *host_ctx = &npu_dev->host_ctx; uint32_t wdg_irq_sts = 0, error_irq_sts = 0; /* Clear irq state */ 
REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0); wdg_irq_sts = REGR(npu_dev, NPU_MASTERn_WDOG_IRQ_STATUS(0)); if (wdg_irq_sts != 0) { pr_err("wdg irq %x\n", wdg_irq_sts); host_ctx->wdg_irq_sts |= wdg_irq_sts; host_ctx->fw_error = true; } error_irq_sts = REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_STATUS(0)); error_irq_sts &= REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0)); if (error_irq_sts != 0) { REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), error_irq_sts); pr_err("error irq %x\n", error_irq_sts); host_ctx->err_irq_sts |= error_irq_sts; host_ctx->fw_error = true; } } int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev) { /* Bit 4 is setting IRQ_SOURCE_SELECT to local * and we're triggering a pulse to NPU_MASTER0_IPC_IN_IRQ0 */ npu_core_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_IN_CTRL(0), 0x1 << NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S | 0x1); return 0; } int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev) { npu_core_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_OUT_CTRL(1), 0x8); return 0; } /* ------------------------------------------------------------------------- * Functions - ION Memory * ------------------------------------------------------------------------- */ static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client *client, int buf_hdl, uint32_t size) { struct npu_ion_buf *ret_val = NULL, *tmp; struct list_head *pos = NULL; mutex_lock(&client->list_lock); list_for_each(pos, &(client->mapped_buffer_list)) { tmp = list_entry(pos, struct npu_ion_buf, list); if (tmp->fd == buf_hdl) { ret_val = tmp; break; } } if (ret_val) { /* mapped already, treat as invalid request */ pr_err("ion buf has been mapped\n"); ret_val = NULL; } else { ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL); if (ret_val) { ret_val->fd = buf_hdl; ret_val->size = size; ret_val->iova = 0; list_add(&(ret_val->list), &(client->mapped_buffer_list)); } } mutex_unlock(&client->list_lock); return ret_val; } static struct npu_ion_buf *npu_get_npu_ion_buffer(struct npu_client *client, int buf_hdl) { struct list_head *pos = NULL; struct npu_ion_buf *ret_val = NULL, *tmp; mutex_lock(&client->list_lock); list_for_each(pos, &(client->mapped_buffer_list)) { tmp = list_entry(pos, struct npu_ion_buf, list); if (tmp->fd == buf_hdl) { ret_val = tmp; break; } } mutex_unlock(&client->list_lock); return ret_val; } static void npu_free_npu_ion_buffer(struct npu_client *client, int buf_hdl) { struct list_head *pos = NULL; struct npu_ion_buf *npu_ion_buf = NULL; mutex_lock(&client->list_lock); list_for_each(pos, &(client->mapped_buffer_list)) { npu_ion_buf = list_entry(pos, struct npu_ion_buf, list); if (npu_ion_buf->fd == buf_hdl) { list_del(&npu_ion_buf->list); kfree(npu_ion_buf); break; } } mutex_unlock(&client->list_lock); } int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size, uint64_t *addr) { int ret = 0; struct npu_device *npu_dev = client->npu_dev; struct npu_ion_buf *ion_buf = NULL; struct npu_smmu_ctx *smmu_ctx = &npu_dev->smmu_ctx; if (buf_hdl == 0) return -EINVAL; ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size); if (!ion_buf) { pr_err("%s fail to alloc npu_ion_buffer\n", __func__); ret = -ENOMEM; return ret; } smmu_ctx->attach_cnt++; ion_buf->dma_buf = dma_buf_get(ion_buf->fd); if (IS_ERR_OR_NULL(ion_buf->dma_buf)) { pr_err("dma_buf_get failed %d\n", ion_buf->fd); ret = -ENOMEM; ion_buf->dma_buf = NULL; goto map_end; } ion_buf->attachment = dma_buf_attach(ion_buf->dma_buf, &(npu_dev->pdev->dev)); if (IS_ERR(ion_buf->attachment)) { ret = -ENOMEM; ion_buf->attachment = NULL; goto 
map_end; } ion_buf->attachment->dma_map_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT; ion_buf->table = dma_buf_map_attachment(ion_buf->attachment, DMA_BIDIRECTIONAL); if (IS_ERR(ion_buf->table)) { pr_err("npu dma_buf_map_attachment failed\n"); ret = -ENOMEM; ion_buf->table = NULL; goto map_end; } ion_buf->iova = ion_buf->table->sgl->dma_address; ion_buf->size = ion_buf->dma_buf->size; *addr = ion_buf->iova; pr_debug("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova, ion_buf->size); map_end: if (ret) npu_mem_unmap(client, buf_hdl, 0); return ret; } void npu_mem_invalidate(struct npu_client *client, int buf_hdl) { struct npu_device *npu_dev = client->npu_dev; struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(client, buf_hdl); if (!ion_buf) pr_err("%s cant find ion buf\n", __func__); else dma_sync_sg_for_cpu(&(npu_dev->pdev->dev), ion_buf->table->sgl, ion_buf->table->nents, DMA_BIDIRECTIONAL); } bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr) { struct npu_ion_buf *ion_buf = 0; struct list_head *pos = NULL; bool valid = false; mutex_lock(&client->list_lock); list_for_each(pos, &(client->mapped_buffer_list)) { ion_buf = list_entry(pos, struct npu_ion_buf, list); if (ion_buf->iova == addr) { valid = true; break; } } mutex_unlock(&client->list_lock); return valid; } void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr) { struct npu_device *npu_dev = client->npu_dev; struct npu_ion_buf *ion_buf = 0; /* clear entry and retrieve the corresponding buffer */ ion_buf = npu_get_npu_ion_buffer(client, buf_hdl); if (!ion_buf) { pr_err("%s could not find buffer\n", __func__); return; } if (ion_buf->iova != addr) pr_warn("unmap address %llu doesn't match %llu\n", addr, ion_buf->iova); if (ion_buf->table) dma_buf_unmap_attachment(ion_buf->attachment, ion_buf->table, DMA_BIDIRECTIONAL); if (ion_buf->dma_buf && ion_buf->attachment) dma_buf_detach(ion_buf->dma_buf, ion_buf->attachment); if (ion_buf->dma_buf) dma_buf_put(ion_buf->dma_buf); npu_dev->smmu_ctx.attach_cnt--; pr_debug("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova, ion_buf->size); npu_free_npu_ion_buffer(client, buf_hdl); } /* ------------------------------------------------------------------------- * Functions - Features * ------------------------------------------------------------------------- */ uint8_t npu_hw_clk_gating_enabled(void) { return 1; } uint8_t npu_hw_log_enabled(void) { return 1; } /* ------------------------------------------------------------------------- * Functions - Subsystem/PIL * ------------------------------------------------------------------------- */ void *subsystem_get_local(char *sub_system) { return subsystem_get(sub_system); } void subsystem_put_local(void *sub_system_handle) { return subsystem_put(sub_system_handle); } /* ------------------------------------------------------------------------- * Functions - Log * ------------------------------------------------------------------------- */ void npu_process_log_message(struct npu_device *npu_dev, uint32_t *message, uint32_t size) { struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx; /* mutex log lock */ mutex_lock(&debugfs->log_lock); if ((debugfs->log_num_bytes_buffered + size) > debugfs->log_buf_size) { /* No more space, invalidate it all and start over */ debugfs->log_read_index = 0; debugfs->log_write_index = size; debugfs->log_num_bytes_buffered = size; memcpy(debugfs->log_buf, message, size); } else { if ((debugfs->log_write_index + size) > debugfs->log_buf_size) { /* Wrap around case */ uint8_t *src_addr = 
(uint8_t *)message; uint8_t *dst_addr = 0; uint32_t remaining_to_end = debugfs->log_buf_size - debugfs->log_write_index + 1; dst_addr = debugfs->log_buf + debugfs->log_write_index; memcpy(dst_addr, src_addr, remaining_to_end); src_addr = &(src_addr[remaining_to_end]); dst_addr = debugfs->log_buf; memcpy(dst_addr, src_addr, size-remaining_to_end); debugfs->log_write_index = size-remaining_to_end; } else { memcpy((debugfs->log_buf + debugfs->log_write_index), message, size); debugfs->log_write_index += size; if (debugfs->log_write_index == debugfs->log_buf_size) debugfs->log_write_index = 0; } debugfs->log_num_bytes_buffered += size; } /* mutex log unlock */ mutex_unlock(&debugfs->log_lock); }
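/* A self-contained sketch of the overwrite-on-full log buffering policy used
 * by npu_process_log_message() above: a message that no longer fits resets the
 * whole buffer, otherwise it is copied with at most one wrap-around. The type
 * and function names are illustrative, not driver API, and n is assumed to be
 * at most the buffer size (as the driver assumes for its fixed log buffer).
 * Note the tail length below is computed as size minus write index; the driver
 * adds 1 to that quantity, which is worth double-checking against its reader. */
#include <stdint.h>
#include <string.h>

struct log_ring {
	uint8_t  buf[256];
	uint32_t rd, wr, used;  /* the reader side (rd) is omitted from this sketch */
};

static void log_ring_append(struct log_ring *r, const uint8_t *msg, uint32_t n)
{
	if (r->used + n > sizeof(r->buf)) {	/* no space: invalidate and start over */
		r->rd = 0;
		r->wr = n;
		r->used = n;
		memcpy(r->buf, msg, n);
		return;
	}
	if (r->wr + n > sizeof(r->buf)) {	/* wrap-around case */
		uint32_t tail = sizeof(r->buf) - r->wr;
		memcpy(r->buf + r->wr, msg, tail);
		memcpy(r->buf, msg + tail, n - tail);
		r->wr = n - tail;
	} else {
		memcpy(r->buf + r->wr, msg, n);
		r->wr += n;
		if (r->wr == sizeof(r->buf))
			r->wr = 0;
	}
	r->used += n;
}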
604596.c
/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 1.3.31 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ #define SWIGRUBY /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) # if (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* exporting methods */ #if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. 
* ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) # if (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* exporting methods */ #if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ ((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* ----------------------------------------------------------------------------- * swigrun.swg * * This file contains generic CAPI SWIG runtime support for pointer * type checking. * ----------------------------------------------------------------------------- */ /* This should only be incremented when either the layout of swig_type_info changes, or for whatever reason, the runtime changes incompatibly */ #define SWIG_RUNTIME_VERSION "3" /* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */ #ifdef SWIG_TYPE_TABLE # define SWIG_QUOTE_STRING(x) #x # define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x) # define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE) #else # define SWIG_TYPE_TABLE_NAME #endif /* You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for creating a static or dynamic library from the swig runtime code. In 99.9% of the cases, swig just needs to declare them as 'static'. But only do this if is strictly necessary, ie, if you have problems with your compiler or so. 
*/ #ifndef SWIGRUNTIME # define SWIGRUNTIME SWIGINTERN #endif #ifndef SWIGRUNTIMEINLINE # define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE #endif /* Generic buffer size */ #ifndef SWIG_BUFFER_SIZE # define SWIG_BUFFER_SIZE 1024 #endif /* Flags for pointer conversions */ #define SWIG_POINTER_DISOWN 0x1 /* Flags for new pointer objects */ #define SWIG_POINTER_OWN 0x1 /* Flags/methods for returning states. The swig conversion methods, as ConvertPtr, return and integer that tells if the conversion was successful or not. And if not, an error code can be returned (see swigerrors.swg for the codes). Use the following macros/flags to set or process the returning states. In old swig versions, you usually write code as: if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) { // success code } else { //fail code } Now you can be more explicit as: int res = SWIG_ConvertPtr(obj,vptr,ty.flags); if (SWIG_IsOK(res)) { // success code } else { // fail code } that seems to be the same, but now you can also do Type *ptr; int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags); if (SWIG_IsOK(res)) { // success code if (SWIG_IsNewObj(res) { ... delete *ptr; } else { ... } } else { // fail code } I.e., now SWIG_ConvertPtr can return new objects and you can identify the case and take care of the deallocation. Of course that requires also to SWIG_ConvertPtr to return new result values, as int SWIG_ConvertPtr(obj, ptr,...) { if (<obj is ok>) { if (<need new object>) { *ptr = <ptr to new allocated object>; return SWIG_NEWOBJ; } else { *ptr = <ptr to old object>; return SWIG_OLDOBJ; } } else { return SWIG_BADOBJ; } } Of course, returning the plain '0(success)/-1(fail)' still works, but you can be more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the swig errors code. Finally, if the SWIG_CASTRANK_MODE is enabled, the result code allows to return the 'cast rank', for example, if you have this int food(double) int fooi(int); and you call food(1) // cast rank '1' (1 -> 1.0) fooi(1) // cast rank '0' just use the SWIG_AddCast()/SWIG_CheckState() */ #define SWIG_OK (0) #define SWIG_ERROR (-1) #define SWIG_IsOK(r) (r >= 0) #define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError) /* The CastRankLimit says how many bits are used for the cast rank */ #define SWIG_CASTRANKLIMIT (1 << 8) /* The NewMask denotes the object was created (using new/malloc) */ #define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1) /* The TmpMask is for in/out typemaps that use temporal objects */ #define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1) /* Simple returning values */ #define SWIG_BADOBJ (SWIG_ERROR) #define SWIG_OLDOBJ (SWIG_OK) #define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK) #define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK) /* Check, add and del mask methods */ #define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r) #define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r) #define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK)) #define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r) #define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_TMPOBJMASK) : r) #define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK)) /* Cast-Rank Mode */ #if defined(SWIG_CASTRANK_MODE) # ifndef SWIG_TypeRank # define SWIG_TypeRank unsigned long # endif # ifndef SWIG_MAXCASTRANK /* Default cast allowed */ # define SWIG_MAXCASTRANK (2) # endif # define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1) # define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK) SWIGINTERNINLINE int SWIG_AddCast(int r) { return SWIG_IsOK(r) ? 
((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r; } SWIGINTERNINLINE int SWIG_CheckState(int r) { return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0; } #else /* no cast-rank mode */ # define SWIG_AddCast # define SWIG_CheckState(r) (SWIG_IsOK(r) ? 1 : 0) #endif #include <string.h> #ifdef __cplusplus extern "C" { #endif typedef void *(*swig_converter_func)(void *); typedef struct swig_type_info *(*swig_dycast_func)(void **); /* Structure to store inforomation on one type */ typedef struct swig_type_info { const char *name; /* mangled name of this type */ const char *str; /* human readable name of this type */ swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ struct swig_cast_info *cast; /* linked list of types that can cast into this type */ void *clientdata; /* language specific type data */ int owndata; /* flag if the structure owns the clientdata */ } swig_type_info; /* Structure to store a type and conversion function used for casting */ typedef struct swig_cast_info { swig_type_info *type; /* pointer to type that is equivalent to this type */ swig_converter_func converter; /* function to cast the void pointers */ struct swig_cast_info *next; /* pointer to next cast in linked list */ struct swig_cast_info *prev; /* pointer to the previous cast */ } swig_cast_info; /* Structure used to store module information * Each module generates one structure like this, and the runtime collects * all of these structures and stores them in a circularly linked list.*/ typedef struct swig_module_info { swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */ size_t size; /* Number of types in this module */ struct swig_module_info *next; /* Pointer to next element in circularly linked list */ swig_type_info **type_initial; /* Array of initially generated type structures */ swig_cast_info **cast_initial; /* Array of initially generated casting structures */ void *clientdata; /* Language specific module data */ } swig_module_info; /* Compare two type names skipping the space characters, therefore "char*" == "char *" and "Class<int>" == "Class<int >", etc. Return 0 when the two name types are equivalent, as in strncmp, but skipping ' '. */ SWIGRUNTIME int SWIG_TypeNameComp(const char *f1, const char *l1, const char *f2, const char *l2) { for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) { while ((*f1 == ' ') && (f1 != l1)) ++f1; while ((*f2 == ' ') && (f2 != l2)) ++f2; if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1; } return (l1 - f1) - (l2 - f2); } /* Check type equivalence in a name list like <name1>|<name2>|... Return 0 if not equal, 1 if equal */ SWIGRUNTIME int SWIG_TypeEquiv(const char *nb, const char *tb) { int equiv = 0; const char* te = tb + strlen(tb); const char* ne = nb; while (!equiv && *ne) { for (nb = ne; *ne; ++ne) { if (*ne == '|') break; } equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 1 : 0; if (*ne) ++ne; } return equiv; } /* Check type equivalence in a name list like <name1>|<name2>|... Return 0 if equal, -1 if nb < tb, 1 if nb > tb */ SWIGRUNTIME int SWIG_TypeCompare(const char *nb, const char *tb) { int equiv = 0; const char* te = tb + strlen(tb); const char* ne = nb; while (!equiv && *ne) { for (nb = ne; *ne; ++ne) { if (*ne == '|') break; } equiv = (SWIG_TypeNameComp(nb, ne, tb, te) == 0) ? 
1 : 0; if (*ne) ++ne; } return equiv; } /* think of this as a c++ template<> or a scheme macro */ #define SWIG_TypeCheck_Template(comparison, ty) \ if (ty) { \ swig_cast_info *iter = ty->cast; \ while (iter) { \ if (comparison) { \ if (iter == ty->cast) return iter; \ /* Move iter to the top of the linked list */ \ iter->prev->next = iter->next; \ if (iter->next) \ iter->next->prev = iter->prev; \ iter->next = ty->cast; \ iter->prev = 0; \ if (ty->cast) ty->cast->prev = iter; \ ty->cast = iter; \ return iter; \ } \ iter = iter->next; \ } \ } \ return 0 /* Check the typename */ SWIGRUNTIME swig_cast_info * SWIG_TypeCheck(const char *c, swig_type_info *ty) { SWIG_TypeCheck_Template(strcmp(iter->type->name, c) == 0, ty); } /* Same as previous function, except strcmp is replaced with a pointer comparison */ SWIGRUNTIME swig_cast_info * SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *into) { SWIG_TypeCheck_Template(iter->type == from, into); } /* Cast a pointer up an inheritance hierarchy */ SWIGRUNTIMEINLINE void * SWIG_TypeCast(swig_cast_info *ty, void *ptr) { return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr); } /* Dynamic pointer casting. Down an inheritance hierarchy */ SWIGRUNTIME swig_type_info * SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) { swig_type_info *lastty = ty; if (!ty || !ty->dcast) return ty; while (ty && (ty->dcast)) { ty = (*ty->dcast)(ptr); if (ty) lastty = ty; } return lastty; } /* Return the name associated with this type */ SWIGRUNTIMEINLINE const char * SWIG_TypeName(const swig_type_info *ty) { return ty->name; } /* Return the pretty name associated with this type, that is an unmangled type name in a form presentable to the user. */ SWIGRUNTIME const char * SWIG_TypePrettyName(const swig_type_info *type) { /* The "str" field contains the equivalent pretty names of the type, separated by vertical-bar characters. We choose to print the last name, as it is often (?) the most specific. */ if (!type) return NULL; if (type->str != NULL) { const char *last_name = type->str; const char *s; for (s = type->str; *s; s++) if (*s == '|') last_name = s+1; return last_name; } else return type->name; } /* Set the clientdata field for a type */ SWIGRUNTIME void SWIG_TypeClientData(swig_type_info *ti, void *clientdata) { swig_cast_info *cast = ti->cast; /* if (ti->clientdata == clientdata) return; */ ti->clientdata = clientdata; while (cast) { if (!cast->converter) { swig_type_info *tc = cast->type; if (!tc->clientdata) { SWIG_TypeClientData(tc, clientdata); } } cast = cast->next; } } SWIGRUNTIME void SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) { SWIG_TypeClientData(ti, clientdata); ti->owndata = 1; } /* Search for a swig_type_info structure only by mangled name Search is a O(log #types) We start searching at module start, and finish searching when start == end. Note: if start == end at the beginning of the function, we go all the way around the circular list. 
*/ SWIGRUNTIME swig_type_info * SWIG_MangledTypeQueryModule(swig_module_info *start, swig_module_info *end, const char *name) { swig_module_info *iter = start; do { if (iter->size) { register size_t l = 0; register size_t r = iter->size - 1; do { /* since l+r >= 0, we can (>> 1) instead (/ 2) */ register size_t i = (l + r) >> 1; const char *iname = iter->types[i]->name; if (iname) { register int compare = strcmp(name, iname); if (compare == 0) { return iter->types[i]; } else if (compare < 0) { if (i) { r = i - 1; } else { break; } } else if (compare > 0) { l = i + 1; } } else { break; /* should never happen */ } } while (l <= r); } iter = iter->next; } while (iter != end); return 0; } /* Search for a swig_type_info structure for either a mangled name or a human readable name. It first searches the mangled names of the types, which is a O(log #types) If a type is not found it then searches the human readable names, which is O(#types). We start searching at module start, and finish searching when start == end. Note: if start == end at the beginning of the function, we go all the way around the circular list. */ SWIGRUNTIME swig_type_info * SWIG_TypeQueryModule(swig_module_info *start, swig_module_info *end, const char *name) { /* STEP 1: Search the name field using binary search */ swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name); if (ret) { return ret; } else { /* STEP 2: If the type hasn't been found, do a complete search of the str field (the human readable name) */ swig_module_info *iter = start; do { register size_t i = 0; for (; i < iter->size; ++i) { if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name))) return iter->types[i]; } iter = iter->next; } while (iter != end); } /* neither found a match */ return 0; } /* Pack binary data into a string */ SWIGRUNTIME char * SWIG_PackData(char *c, void *ptr, size_t sz) { static const char hex[17] = "0123456789abcdef"; register const unsigned char *u = (unsigned char *) ptr; register const unsigned char *eu = u + sz; for (; u != eu; ++u) { register unsigned char uu = *u; *(c++) = hex[(uu & 0xf0) >> 4]; *(c++) = hex[uu & 0xf]; } return c; } /* Unpack binary data from a string */ SWIGRUNTIME const char * SWIG_UnpackData(const char *c, void *ptr, size_t sz) { register unsigned char *u = (unsigned char *) ptr; register const unsigned char *eu = u + sz; for (; u != eu; ++u) { register char d = *(c++); register unsigned char uu; if ((d >= '0') && (d <= '9')) uu = ((d - '0') << 4); else if ((d >= 'a') && (d <= 'f')) uu = ((d - ('a'-10)) << 4); else return (char *) 0; d = *(c++); if ((d >= '0') && (d <= '9')) uu |= (d - '0'); else if ((d >= 'a') && (d <= 'f')) uu |= (d - ('a'-10)); else return (char *) 0; *u = uu; } return c; } /* Pack 'void *' into a string buffer. */ SWIGRUNTIME char * SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) { char *r = buff; if ((2*sizeof(void *) + 2) > bsz) return 0; *(r++) = '_'; r = SWIG_PackData(r,&ptr,sizeof(void *)); if (strlen(name) + 1 > (bsz - (r - buff))) return 0; strcpy(r,name); return buff; } SWIGRUNTIME const char * SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) { if (*c != '_') { if (strcmp(c,"NULL") == 0) { *ptr = (void *) 0; return name; } else { return 0; } } return SWIG_UnpackData(++c,ptr,sizeof(void *)); } SWIGRUNTIME char * SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) { char *r = buff; size_t lname = (name ? 
strlen(name) : 0); if ((2*sz + 2 + lname) > bsz) return 0; *(r++) = '_'; r = SWIG_PackData(r,ptr,sz); if (lname) { strncpy(r,name,lname+1); } else { *r = 0; } return buff; } SWIGRUNTIME const char * SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) { if (*c != '_') { if (strcmp(c,"NULL") == 0) { memset(ptr,0,sz); return name; } else { return 0; } } return SWIG_UnpackData(++c,ptr,sz); } #ifdef __cplusplus } #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 #include <ruby.h> /* Ruby 1.7 defines NUM2LL(), LL2NUM() and ULL2NUM() macros */ #ifndef NUM2LL #define NUM2LL(x) NUM2LONG((x)) #endif #ifndef LL2NUM #define LL2NUM(x) INT2NUM((long) (x)) #endif #ifndef ULL2NUM #define ULL2NUM(x) UINT2NUM((unsigned long) (x)) #endif /* Ruby 1.7 doesn't (yet) define NUM2ULL() */ #ifndef NUM2ULL #ifdef HAVE_LONG_LONG #define NUM2ULL(x) rb_num2ull((x)) #else #define NUM2ULL(x) NUM2ULONG(x) #endif #endif /* RSTRING_LEN, etc are new in Ruby 1.9, but ->ptr and ->len no longer work */ /* Define these for older versions so we can just write code the new way */ #ifndef RSTRING_LEN # define RSTRING_LEN(x) RSTRING(x)->len #endif #ifndef RSTRING_PTR # define RSTRING_PTR(x) RSTRING(x)->ptr #endif #ifndef RARRAY_LEN # define RARRAY_LEN(x) RARRAY(x)->len #endif #ifndef RARRAY_PTR # define RARRAY_PTR(x) RARRAY(x)->ptr #endif /* * Need to be very careful about how these macros are defined, especially * when compiling C++ code or C code with an ANSI C compiler. * * VALUEFUNC(f) is a macro used to typecast a C function that implements * a Ruby method so that it can be passed as an argument to API functions * like rb_define_method() and rb_define_singleton_method(). * * VOIDFUNC(f) is a macro used to typecast a C function that implements * either the "mark" or "free" stuff for a Ruby Data object, so that it * can be passed as an argument to API functions like Data_Wrap_Struct() * and Data_Make_Struct(). */ #ifdef __cplusplus # ifndef RUBY_METHOD_FUNC /* These definitions should work for Ruby 1.4.6 */ # define PROTECTFUNC(f) ((VALUE (*)()) f) # define VALUEFUNC(f) ((VALUE (*)()) f) # define VOIDFUNC(f) ((void (*)()) f) # else # ifndef ANYARGS /* These definitions should work for Ruby 1.6 */ # define PROTECTFUNC(f) ((VALUE (*)()) f) # define VALUEFUNC(f) ((VALUE (*)()) f) # define VOIDFUNC(f) ((RUBY_DATA_FUNC) f) # else /* These definitions should work for Ruby 1.7+ */ # define PROTECTFUNC(f) ((VALUE (*)(VALUE)) f) # define VALUEFUNC(f) ((VALUE (*)(ANYARGS)) f) # define VOIDFUNC(f) ((RUBY_DATA_FUNC) f) # endif # endif #else # define VALUEFUNC(f) (f) # define VOIDFUNC(f) (f) #endif /* Don't use for expressions have side effect */ #ifndef RB_STRING_VALUE #define RB_STRING_VALUE(s) (TYPE(s) == T_STRING ? 
(s) : (*(volatile VALUE *)&(s) = rb_str_to_str(s))) #endif #ifndef StringValue #define StringValue(s) RB_STRING_VALUE(s) #endif #ifndef StringValuePtr #define StringValuePtr(s) RSTRING_PTR(RB_STRING_VALUE(s)) #endif #ifndef StringValueLen #define StringValueLen(s) RSTRING_LEN(RB_STRING_VALUE(s)) #endif #ifndef SafeStringValue #define SafeStringValue(v) do {\ StringValue(v);\ rb_check_safe_str(v);\ } while (0) #endif #ifndef HAVE_RB_DEFINE_ALLOC_FUNC #define rb_define_alloc_func(klass, func) rb_define_singleton_method((klass), "new", VALUEFUNC((func)), -1) #define rb_undef_alloc_func(klass) rb_undef_method(CLASS_OF((klass)), "new") #endif /* ----------------------------------------------------------------------------- * error manipulation * ----------------------------------------------------------------------------- */ /* Define some additional error types */ #define SWIG_ObjectPreviouslyDeletedError -100 /* Define custom exceptions for errors that do not map to existing Ruby exceptions. Note this only works for C++ since a global cannot be initialized by a funtion in C. For C, fallback to rb_eRuntimeError.*/ SWIGINTERN VALUE getNullReferenceError(void) { static int init = 0; static VALUE rb_eNullReferenceError ; if (!init) { init = 1; rb_eNullReferenceError = rb_define_class("NullReferenceError", rb_eRuntimeError); } return rb_eNullReferenceError; } SWIGINTERN VALUE getObjectPreviouslyDeletedError(void) { static int init = 0; static VALUE rb_eObjectPreviouslyDeleted ; if (!init) { init = 1; rb_eObjectPreviouslyDeleted = rb_define_class("ObjectPreviouslyDeleted", rb_eRuntimeError); } return rb_eObjectPreviouslyDeleted; } SWIGINTERN VALUE SWIG_Ruby_ErrorType(int SWIG_code) { VALUE type; switch (SWIG_code) { case SWIG_MemoryError: type = rb_eNoMemError; break; case SWIG_IOError: type = rb_eIOError; break; case SWIG_RuntimeError: type = rb_eRuntimeError; break; case SWIG_IndexError: type = rb_eIndexError; break; case SWIG_TypeError: type = rb_eTypeError; break; case SWIG_DivisionByZero: type = rb_eZeroDivError; break; case SWIG_OverflowError: type = rb_eRangeError; break; case SWIG_SyntaxError: type = rb_eSyntaxError; break; case SWIG_ValueError: type = rb_eArgError; break; case SWIG_SystemError: type = rb_eFatal; break; case SWIG_AttributeError: type = rb_eRuntimeError; break; case SWIG_NullReferenceError: type = getNullReferenceError(); break; case SWIG_ObjectPreviouslyDeletedError: type = getObjectPreviouslyDeletedError(); break; case SWIG_UnknownError: type = rb_eRuntimeError; break; default: type = rb_eRuntimeError; } return type; } /* ----------------------------------------------------------------------------- * See the LICENSE file for information on copyright, usage and redistribution * of SWIG, and the README file for authors - http://www.swig.org/release.html. * * rubytracking.swg * * This file contains support for tracking mappings from * Ruby objects to C++ objects. This functionality is needed * to implement mark functions for Ruby's mark and sweep * garbage collector. * ----------------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #endif /* Global Ruby hash table to store Trackings from C/C++ structs to Ruby Objects. */ static VALUE swig_ruby_trackings; /* Global variable that stores a reference to the ruby hash table delete function. 
*/
static ID swig_ruby_hash_delete = 0;

/* Setup a Ruby hash table to store Trackings */
SWIGRUNTIME void SWIG_RubyInitializeTrackings(void) {
  /* Create a ruby hash table to store Trackings from C++
     objects to Ruby objects. Also make sure to tell
     the garbage collector about the hash table. */
  swig_ruby_trackings = rb_hash_new();
  rb_gc_register_address(&swig_ruby_trackings);

  /* Now store a reference to the hash table delete function
     so that we only have to look it up once.*/
  swig_ruby_hash_delete = rb_intern("delete");
}

/* Get a Ruby number to reference a pointer */
SWIGRUNTIME VALUE SWIG_RubyPtrToReference(void* ptr) {
  /* We cast the pointer to an unsigned long
     and then store a reference to it using
     a Ruby number object. */

  /* Convert the pointer to a Ruby number */
  unsigned long value = (unsigned long) ptr;
  return LONG2NUM(value);
}

/* Get a Ruby number to reference an object */
SWIGRUNTIME VALUE SWIG_RubyObjectToReference(VALUE object) {
  /* We cast the object to an unsigned long
     and then store a reference to it using
     a Ruby number object. */

  /* Convert the Object to a Ruby number */
  unsigned long value = (unsigned long) object;
  return LONG2NUM(value);
}

/* Get a Ruby object from a previously stored reference */
SWIGRUNTIME VALUE SWIG_RubyReferenceToObject(VALUE reference) {
  /* The provided Ruby number object is a reference
     to the Ruby object we want.*/

  /* First convert the Ruby number to a C number */
  unsigned long value = NUM2LONG(reference);
  return (VALUE) value;
}

/* Add a Tracking from a C/C++ struct to a Ruby object */
SWIGRUNTIME void SWIG_RubyAddTracking(void* ptr, VALUE object) {
  /* In a Ruby hash table we store the pointer and
     the associated Ruby object.  The trick here is
     that we cannot store the Ruby object directly - if
     we do then it cannot be garbage collected.  So
     instead we typecast it as an unsigned long and
     convert it to a Ruby number object.*/

  /* Get a reference to the pointer as a Ruby number */
  VALUE key = SWIG_RubyPtrToReference(ptr);

  /* Get a reference to the Ruby object as a Ruby number */
  VALUE value = SWIG_RubyObjectToReference(object);

  /* Store the mapping to the global hash table. */
  rb_hash_aset(swig_ruby_trackings, key, value);
}

/* Get the Ruby object that owns the specified C/C++ struct */
SWIGRUNTIME VALUE SWIG_RubyInstanceFor(void* ptr) {
  /* Get a reference to the pointer as a Ruby number */
  VALUE key = SWIG_RubyPtrToReference(ptr);

  /* Now lookup the value stored in the global hash table */
  VALUE value = rb_hash_aref(swig_ruby_trackings, key);

  if (value == Qnil) {
    /* No object exists - return nil. */
    return Qnil;
  }
  else {
    /* Convert this value to a Ruby object */
    return SWIG_RubyReferenceToObject(value);
  }
}

/* Remove a Tracking from a C/C++ struct to a Ruby object.  It
   is very important to remove objects once they are destroyed
   since the same memory address may be reused later to create
   a new object. */
SWIGRUNTIME void SWIG_RubyRemoveTracking(void* ptr) {
  /* Get a reference to the pointer as a Ruby number */
  VALUE key = SWIG_RubyPtrToReference(ptr);

  /* Delete the object from the hash table; to do this we call
     Ruby's Hash#delete method via the ID we looked up above. */
  rb_funcall(swig_ruby_trackings, swig_ruby_hash_delete, 1, key);
}

/* This is a helper method that unlinks a Ruby object from its
   underlying C++ object.
This is needed if the lifetime of the Ruby object is longer than the C++ object */ SWIGRUNTIME void SWIG_RubyUnlinkObjects(void* ptr) { VALUE object = SWIG_RubyInstanceFor(ptr); if (object != Qnil) { DATA_PTR(object) = 0; } } #ifdef __cplusplus } #endif /* ----------------------------------------------------------------------------- * Ruby API portion that goes into the runtime * ----------------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #endif SWIGINTERN VALUE SWIG_Ruby_AppendOutput(VALUE target, VALUE o) { if (NIL_P(target)) { target = o; } else { if (TYPE(target) != T_ARRAY) { VALUE o2 = target; target = rb_ary_new(); rb_ary_push(target, o2); } rb_ary_push(target, o); } return target; } #ifdef __cplusplus } #endif /* ----------------------------------------------------------------------------- * See the LICENSE file for information on copyright, usage and redistribution * of SWIG, and the README file for authors - http://www.swig.org/release.html. * * rubyrun.swg * * This file contains the runtime support for Ruby modules * and includes code for managing global variables and pointer * type checking. * ----------------------------------------------------------------------------- */ /* For backward compatibility only */ #define SWIG_POINTER_EXCEPTION 0 /* for raw pointers */ #define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Ruby_ConvertPtrAndOwn(obj, pptr, type, flags, 0) #define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Ruby_ConvertPtrAndOwn(obj, pptr, type, flags, own) #define SWIG_NewPointerObj(ptr, type, flags) SWIG_Ruby_NewPointerObj(ptr, type, flags) #define SWIG_AcquirePtr(ptr, own) SWIG_Ruby_AcquirePtr(ptr, own) #define swig_owntype ruby_owntype /* for raw packed data */ #define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Ruby_ConvertPacked(obj, ptr, sz, ty, flags) #define SWIG_NewPackedObj(ptr, sz, type) SWIG_Ruby_NewPackedObj(ptr, sz, type) /* for class or struct pointers */ #define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags) #define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags) /* for C or C++ function pointers */ #define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_ConvertPtr(obj, pptr, type, 0) #define SWIG_NewFunctionPtrObj(ptr, type) SWIG_NewPointerObj(ptr, type, 0) /* for C++ member pointers, ie, member methods */ #define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Ruby_ConvertPacked(obj, ptr, sz, ty) #define SWIG_NewMemberObj(ptr, sz, type) SWIG_Ruby_NewPackedObj(ptr, sz, type) /* Runtime API */ #define SWIG_GetModule(clientdata) SWIG_Ruby_GetModule() #define SWIG_SetModule(clientdata, pointer) SWIG_Ruby_SetModule(pointer) /* Error manipulation */ #define SWIG_ErrorType(code) SWIG_Ruby_ErrorType(code) #define SWIG_Error(code, msg) rb_raise(SWIG_Ruby_ErrorType(code), msg) #define SWIG_fail goto fail /* Ruby-specific SWIG API */ #define SWIG_InitRuntime() SWIG_Ruby_InitRuntime() #define SWIG_define_class(ty) SWIG_Ruby_define_class(ty) #define SWIG_NewClassInstance(value, ty) SWIG_Ruby_NewClassInstance(value, ty) #define SWIG_MangleStr(value) SWIG_Ruby_MangleStr(value) #define SWIG_CheckConvert(value, ty) SWIG_Ruby_CheckConvert(value, ty) /* ----------------------------------------------------------------------------- * pointers/data manipulation * ----------------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #if 0 } /* cc-mode */ #endif #endif typedef struct { VALUE klass; VALUE mImpl; void 
(*mark)(void *); void (*destroy)(void *); int trackObjects; } swig_class; static VALUE _mSWIG = Qnil; static VALUE _cSWIG_Pointer = Qnil; static VALUE swig_runtime_data_type_pointer = Qnil; SWIGRUNTIME VALUE getExceptionClass(void) { static int init = 0; static VALUE rubyExceptionClass ; if (!init) { init = 1; rubyExceptionClass = rb_const_get(_mSWIG, rb_intern("Exception")); } return rubyExceptionClass; } /* This code checks to see if the Ruby object being raised as part of an exception inherits from the Ruby class Exception. If so, the object is simply returned. If not, then a new Ruby exception object is created and that will be returned to Ruby.*/ SWIGRUNTIME VALUE SWIG_Ruby_ExceptionType(swig_type_info *desc, VALUE obj) { VALUE exceptionClass = getExceptionClass(); if (rb_obj_is_kind_of(obj, exceptionClass)) { return obj; } else { return rb_exc_new3(rb_eRuntimeError, rb_obj_as_string(obj)); } } /* Initialize Ruby runtime support */ SWIGRUNTIME void SWIG_Ruby_InitRuntime(void) { if (_mSWIG == Qnil) { _mSWIG = rb_define_module("SWIG"); } } /* Define Ruby class for C type */ SWIGRUNTIME void SWIG_Ruby_define_class(swig_type_info *type) { VALUE klass; char *klass_name = (char *) malloc(4 + strlen(type->name) + 1); sprintf(klass_name, "TYPE%s", type->name); if (NIL_P(_cSWIG_Pointer)) { _cSWIG_Pointer = rb_define_class_under(_mSWIG, "Pointer", rb_cObject); rb_undef_method(CLASS_OF(_cSWIG_Pointer), "new"); } klass = rb_define_class_under(_mSWIG, klass_name, _cSWIG_Pointer); free((void *) klass_name); } /* Create a new pointer object */ SWIGRUNTIME VALUE SWIG_Ruby_NewPointerObj(void *ptr, swig_type_info *type, int flags) { int own = flags & SWIG_POINTER_OWN; char *klass_name; swig_class *sklass; VALUE klass; VALUE obj; if (!ptr) return Qnil; if (type->clientdata) { sklass = (swig_class *) type->clientdata; /* Are we tracking this class and have we already returned this Ruby object? */ if (sklass->trackObjects) { obj = SWIG_RubyInstanceFor(ptr); /* Check the object's type and make sure it has the correct type. It might not in cases where methods do things like downcast methods. */ if (obj != Qnil) { VALUE value = rb_iv_get(obj, "__swigtype__"); char* type_name = RSTRING_PTR(value); if (strcmp(type->name, type_name) == 0) { return obj; } } } /* Create a new Ruby object */ obj = Data_Wrap_Struct(sklass->klass, VOIDFUNC(sklass->mark), (own ? VOIDFUNC(sklass->destroy) : 0), ptr); /* If tracking is on for this class then track this object. 
*/ if (sklass->trackObjects) { SWIG_RubyAddTracking(ptr, obj); } } else { klass_name = (char *) malloc(4 + strlen(type->name) + 1); sprintf(klass_name, "TYPE%s", type->name); klass = rb_const_get(_mSWIG, rb_intern(klass_name)); free((void *) klass_name); obj = Data_Wrap_Struct(klass, 0, 0, ptr); } rb_iv_set(obj, "__swigtype__", rb_str_new2(type->name)); return obj; } /* Create a new class instance (always owned) */ SWIGRUNTIME VALUE SWIG_Ruby_NewClassInstance(VALUE klass, swig_type_info *type) { VALUE obj; swig_class *sklass = (swig_class *) type->clientdata; obj = Data_Wrap_Struct(klass, VOIDFUNC(sklass->mark), VOIDFUNC(sklass->destroy), 0); rb_iv_set(obj, "__swigtype__", rb_str_new2(type->name)); return obj; } /* Get type mangle from class name */ SWIGRUNTIMEINLINE char * SWIG_Ruby_MangleStr(VALUE obj) { VALUE stype = rb_iv_get(obj, "__swigtype__"); return StringValuePtr(stype); } /* Acquire a pointer value */ typedef void (*ruby_owntype)(void*); SWIGRUNTIME ruby_owntype SWIG_Ruby_AcquirePtr(VALUE obj, ruby_owntype own) { if (obj) { ruby_owntype oldown = RDATA(obj)->dfree; RDATA(obj)->dfree = own; return oldown; } else { return 0; } } /* Convert a pointer value */ SWIGRUNTIME int SWIG_Ruby_ConvertPtrAndOwn(VALUE obj, void **ptr, swig_type_info *ty, int flags, ruby_owntype *own) { char *c; swig_cast_info *tc; void *vptr = 0; /* Grab the pointer */ if (NIL_P(obj)) { *ptr = 0; return SWIG_OK; } else { if (TYPE(obj) != T_DATA) { return SWIG_ERROR; } Data_Get_Struct(obj, void, vptr); } if (own) *own = RDATA(obj)->dfree; /* Check to see if the input object is giving up ownership of the underlying C struct or C++ object. If so then we need to reset the destructor since the Ruby object no longer owns the underlying C++ object.*/ if (flags & SWIG_POINTER_DISOWN) { /* Is tracking on for this class? */ int track = 0; if (ty && ty->clientdata) { swig_class *sklass = (swig_class *) ty->clientdata; track = sklass->trackObjects; } if (track) { /* We are tracking objects for this class. Thus we change the destructor * to SWIG_RubyRemoveTracking. This allows us to * remove the mapping from the C++ to Ruby object * when the Ruby object is garbage collected. If we don't * do this, then it is possible we will return a reference * to a Ruby object that no longer exists thereby crashing Ruby. 
*/ RDATA(obj)->dfree = SWIG_RubyRemoveTracking; } else { RDATA(obj)->dfree = 0; } } /* Do type-checking if type info was provided */ if (ty) { if (ty->clientdata) { if (rb_obj_is_kind_of(obj, ((swig_class *) (ty->clientdata))->klass)) { if (vptr == 0) { /* The object has already been deleted */ return SWIG_ObjectPreviouslyDeletedError; } *ptr = vptr; return SWIG_OK; } } if ((c = SWIG_MangleStr(obj)) == NULL) { return SWIG_ERROR; } tc = SWIG_TypeCheck(c, ty); if (!tc) { return SWIG_ERROR; } *ptr = SWIG_TypeCast(tc, vptr); } else { *ptr = vptr; } return SWIG_OK; } /* Check convert */ SWIGRUNTIMEINLINE int SWIG_Ruby_CheckConvert(VALUE obj, swig_type_info *ty) { char *c = SWIG_MangleStr(obj); if (!c) return 0; return SWIG_TypeCheck(c,ty) != 0; } SWIGRUNTIME VALUE SWIG_Ruby_NewPackedObj(void *ptr, int sz, swig_type_info *type) { char result[1024]; char *r = result; if ((2*sz + 1 + strlen(type->name)) > 1000) return 0; *(r++) = '_'; r = SWIG_PackData(r, ptr, sz); strcpy(r, type->name); return rb_str_new2(result); } /* Convert a packed value value */ SWIGRUNTIME int SWIG_Ruby_ConvertPacked(VALUE obj, void *ptr, int sz, swig_type_info *ty) { swig_cast_info *tc; const char *c; if (TYPE(obj) != T_STRING) goto type_error; c = StringValuePtr(obj); /* Pointer values must start with leading underscore */ if (*c != '_') goto type_error; c++; c = SWIG_UnpackData(c, ptr, sz); if (ty) { tc = SWIG_TypeCheck(c, ty); if (!tc) goto type_error; } return SWIG_OK; type_error: return SWIG_ERROR; } SWIGRUNTIME swig_module_info * SWIG_Ruby_GetModule(void) { VALUE pointer; swig_module_info *ret = 0; VALUE verbose = rb_gv_get("VERBOSE"); /* temporarily disable warnings, since the pointer check causes warnings with 'ruby -w' */ rb_gv_set("VERBOSE", Qfalse); /* first check if pointer already created */ pointer = rb_gv_get("$swig_runtime_data_type_pointer" SWIG_RUNTIME_VERSION SWIG_TYPE_TABLE_NAME); if (pointer != Qnil) { Data_Get_Struct(pointer, swig_module_info, ret); } /* reinstate warnings */ rb_gv_set("VERBOSE", verbose); return ret; } SWIGRUNTIME void SWIG_Ruby_SetModule(swig_module_info *pointer) { /* register a new class */ VALUE cl = rb_define_class("swig_runtime_data", rb_cObject); /* create and store the structure pointer to a global variable */ swig_runtime_data_type_pointer = Data_Wrap_Struct(cl, 0, 0, pointer); rb_define_readonly_variable("$swig_runtime_data_type_pointer" SWIG_RUNTIME_VERSION SWIG_TYPE_TABLE_NAME, &swig_runtime_data_type_pointer); } #ifdef __cplusplus #if 0 { /* cc-mode */ #endif } #endif #define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0) #define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else /* -------- TYPES TABLE (BEGIN) -------- */ #define SWIGTYPE_p_char swig_types[0] static swig_type_info *swig_types[2]; static swig_module_info swig_module = {swig_types, 1, 0, 0, 0, 0}; #define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) #define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) /* -------- TYPES TABLE (END) -------- */ #define SWIG_init Init_RingtoneManager #define SWIG_name "Rho::RingtoneManager" static VALUE mRingtoneManager; #define SWIGVERSION 0x010331 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) extern VALUE rho_ringtone_manager_get_all(); #define get_all_ringtones rho_ringtone_manager_get_all extern void 
rho_ringtone_manager_stop(); #define stop rho_ringtone_manager_stop extern void rho_ringtone_manager_play(char* file_name); #define play rho_ringtone_manager_play SWIGINTERN swig_type_info* SWIG_pchar_descriptor(void) { static int init = 0; static swig_type_info* info = 0; if (!init) { info = SWIG_TypeQuery("_p_char"); init = 1; } return info; } SWIGINTERN int SWIG_AsCharPtrAndSize(VALUE obj, char** cptr, size_t* psize, int *alloc) { if (TYPE(obj) == T_STRING) { char *cstr = STR2CSTR(obj); size_t size = RSTRING_LEN(obj) + 1; if (cptr) { if (alloc) { if (*alloc == SWIG_NEWOBJ) { *cptr = (char *)memcpy((char *)malloc((size)*sizeof(char)), cstr, sizeof(char)*(size)); } else { *cptr = cstr; *alloc = SWIG_OLDOBJ; } } } if (psize) *psize = size; return SWIG_OK; } else { swig_type_info* pchar_descriptor = SWIG_pchar_descriptor(); if (pchar_descriptor) { void* vptr = 0; if (SWIG_ConvertPtr(obj, &vptr, pchar_descriptor, 0) == SWIG_OK) { if (cptr) *cptr = (char *)vptr; if (psize) *psize = vptr ? (strlen((char*)vptr) + 1) : 0; if (alloc) *alloc = SWIG_OLDOBJ; return SWIG_OK; } } } return SWIG_TypeError; } SWIGINTERN VALUE _wrap_get_all_ringtones(int argc, VALUE *argv, VALUE self) { VALUE result; VALUE vresult = Qnil; if ((argc < 0) || (argc > 0)) { rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail; } result = (VALUE)get_all_ringtones(); vresult = result; return vresult; fail: return Qnil; } SWIGINTERN VALUE _wrap_stop(int argc, VALUE *argv, VALUE self) { if ((argc < 0) || (argc > 0)) { rb_raise(rb_eArgError, "wrong # of arguments(%d for 0)",argc); SWIG_fail; } stop(); return Qnil; fail: return Qnil; } SWIGINTERN VALUE _wrap_play(int argc, VALUE *argv, VALUE self) { char *arg1 = (char *) 0 ; int res1 ; char *buf1 = 0 ; int alloc1 = 0 ; if ((argc < 1) || (argc > 1)) { rb_raise(rb_eArgError, "wrong # of arguments(%d for 1)",argc); SWIG_fail; } res1 = SWIG_AsCharPtrAndSize(argv[0], &buf1, NULL, &alloc1); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "play" "', argument " "1"" of type '" "char *""'"); } arg1 = (char *)(buf1); play(arg1); if (alloc1 == SWIG_NEWOBJ) free((char*)buf1); return Qnil; fail: if (alloc1 == SWIG_NEWOBJ) free((char*)buf1); return Qnil; } /* -------- TYPE CONVERSION AND EQUIVALENCE RULES (BEGIN) -------- */ static swig_type_info _swigt__p_char = {"_p_char", "char *", 0, 0, (void*)0, 0}; static swig_type_info *swig_type_initial[] = { &_swigt__p_char, }; static swig_cast_info _swigc__p_char[] = { {&_swigt__p_char, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info *swig_cast_initial[] = { _swigc__p_char, }; /* -------- TYPE CONVERSION AND EQUIVALENCE RULES (END) -------- */ /* ----------------------------------------------------------------------------- * Type initialization: * This problem is tough by the requirement that no dynamic * memory is used. Also, since swig_type_info structures store pointers to * swig_cast_info structures and swig_cast_info structures store pointers back * to swig_type_info structures, we need some lookup code at initialization. * The idea is that swig generates all the structures that are needed. * The runtime then collects these partially filled structures. * The SWIG_InitializeModule function takes these initial arrays out of * swig_module, and does all the lookup, filling in the swig_module.types * array with the correct data and linking the correct swig_cast_info * structures together. * * The generated swig_type_info structures are assigned staticly to an initial * array. 
We just loop through that array, and handle each type individually. * First we lookup if this type has been already loaded, and if so, use the * loaded structure instead of the generated one. Then we have to fill in the * cast linked list. The cast data is initially stored in something like a * two-dimensional array. Each row corresponds to a type (there are the same * number of rows as there are in the swig_type_initial array). Each entry in * a column is one of the swig_cast_info structures for that type. * The cast_initial array is actually an array of arrays, because each row has * a variable number of columns. So to actually build the cast linked list, * we find the array of casts associated with the type, and loop through it * adding the casts to the list. The one last trick we need to do is making * sure the type pointer in the swig_cast_info struct is correct. * * First off, we lookup the cast->type name to see if it is already loaded. * There are three cases to handle: * 1) If the cast->type has already been loaded AND the type we are adding * casting info to has not been loaded (it is in this module), THEN we * replace the cast->type pointer with the type pointer that has already * been loaded. * 2) If BOTH types (the one we are adding casting info to, and the * cast->type) are loaded, THEN the cast info has already been loaded by * the previous module so we just ignore it. * 3) Finally, if cast->type has not already been loaded, then we add that * swig_cast_info to the linked list (because the cast->type) pointer will * be correct. * ----------------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #if 0 } /* c-mode */ #endif #endif #if 0 #define SWIGRUNTIME_DEBUG #endif SWIGRUNTIME void SWIG_InitializeModule(void *clientdata) { size_t i; swig_module_info *module_head, *iter; int found; clientdata = clientdata; /* check to see if the circular list has been setup, if not, set it up */ if (swig_module.next==0) { /* Initialize the swig_module */ swig_module.type_initial = swig_type_initial; swig_module.cast_initial = swig_cast_initial; swig_module.next = &swig_module; } /* Try and load any already created modules */ module_head = SWIG_GetModule(clientdata); if (!module_head) { /* This is the first module loaded for this interpreter */ /* so set the swig module into the interpreter */ SWIG_SetModule(clientdata, &swig_module); module_head = &swig_module; } else { /* the interpreter has loaded a SWIG module, but has it loaded this one? 
*/ found=0; iter=module_head; do { if (iter==&swig_module) { found=1; break; } iter=iter->next; } while (iter!= module_head); /* if the is found in the list, then all is done and we may leave */ if (found) return; /* otherwise we must add out module into the list */ swig_module.next = module_head->next; module_head->next = &swig_module; } /* Now work on filling in swig_module.types */ #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: size %d\n", swig_module.size); #endif for (i = 0; i < swig_module.size; ++i) { swig_type_info *type = 0; swig_type_info *ret; swig_cast_info *cast; #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); #endif /* if there is another module already loaded */ if (swig_module.next != &swig_module) { type = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, swig_module.type_initial[i]->name); } if (type) { /* Overwrite clientdata field */ #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: found type %s\n", type->name); #endif if (swig_module.type_initial[i]->clientdata) { type->clientdata = swig_module.type_initial[i]->clientdata; #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: found and overwrite type %s \n", type->name); #endif } } else { type = swig_module.type_initial[i]; } /* Insert casting types */ cast = swig_module.cast_initial[i]; while (cast->type) { /* Don't need to add information already in the list */ ret = 0; #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: look cast %s\n", cast->type->name); #endif if (swig_module.next != &swig_module) { ret = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, cast->type->name); #ifdef SWIGRUNTIME_DEBUG if (ret) printf("SWIG_InitializeModule: found cast %s\n", ret->name); #endif } if (ret) { if (type == swig_module.type_initial[i]) { #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: skip old type %s\n", ret->name); #endif cast->type = ret; ret = 0; } else { /* Check for casting already in the list */ swig_cast_info *ocast = SWIG_TypeCheck(ret->name, type); #ifdef SWIGRUNTIME_DEBUG if (ocast) printf("SWIG_InitializeModule: skip old cast %s\n", ret->name); #endif if (!ocast) ret = 0; } } if (!ret) { #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: adding cast %s\n", cast->type->name); #endif if (type->cast) { type->cast->prev = cast; cast->next = type->cast; } type->cast = cast; } cast++; } /* Set entry in modules->types array equal to the type */ swig_module.types[i] = type; } swig_module.types[i] = 0; #ifdef SWIGRUNTIME_DEBUG printf("**** SWIG_InitializeModule: Cast List ******\n"); for (i = 0; i < swig_module.size; ++i) { int j = 0; swig_cast_info *cast = swig_module.cast_initial[i]; printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); while (cast->type) { printf("SWIG_InitializeModule: cast type %s\n", cast->type->name); cast++; ++j; } printf("---- Total casts: %d\n",j); } printf("**** SWIG_InitializeModule: Cast List ******\n"); #endif } /* This function will propagate the clientdata field of type to * any new swig_type_info structures that have been added into the list * of equivalent types. It is like calling * SWIG_TypeClientData(type, clientdata) a second time. 
*/ SWIGRUNTIME void SWIG_PropagateClientData(void) { size_t i; swig_cast_info *equiv; static int init_run = 0; if (init_run) return; init_run = 1; for (i = 0; i < swig_module.size; i++) { if (swig_module.types[i]->clientdata) { equiv = swig_module.types[i]->cast; while (equiv) { if (!equiv->converter) { if (equiv->type && !equiv->type->clientdata) SWIG_TypeClientData(equiv->type, swig_module.types[i]->clientdata); } equiv = equiv->next; } } } } #ifdef __cplusplus #if 0 { /* c-mode */ #endif } #endif #ifdef __cplusplus extern "C" #endif SWIGEXPORT void Init_RingtoneManager(void) { size_t i; SWIG_InitRuntime(); mRingtoneManager = rb_define_module("Rho"); mRingtoneManager = rb_define_module_under(mRingtoneManager, "RingtoneManager"); SWIG_InitializeModule(0); for (i = 0; i < swig_module.size; i++) { SWIG_define_class(swig_module.types[i]); } SWIG_RubyInitializeTrackings(); rb_define_module_function(mRingtoneManager, "get_all_ringtones", _wrap_get_all_ringtones, -1); rb_define_module_function(mRingtoneManager, "stop", _wrap_stop, -1); rb_define_module_function(mRingtoneManager, "play", _wrap_play, -1); }
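/* A standalone sketch of the result-state scheme documented near the top of
   this file: a non-negative code means success, with orthogonal mask bits for
   "new object" / "temporary object" and the low byte reserved for the cast
   rank.  The DEMO_* names below are illustrative stand-ins (not SWIG's), so
   the demo compiles without the SWIG headers. */
#include <stdio.h>

#define DEMO_OK            (0)
#define DEMO_ERROR         (-1)
#define DEMO_CASTRANKLIMIT (1 << 8)
#define DEMO_NEWOBJMASK    (DEMO_CASTRANKLIMIT << 1)
#define DEMO_IsOK(r)       ((r) >= 0)
#define DEMO_IsNewObj(r)   (DEMO_IsOK(r) && ((r) & DEMO_NEWOBJMASK))

int main(void)
{
  int plain  = DEMO_OK;                    /* plain success, caller does not own */
  int newobj = DEMO_OK | DEMO_NEWOBJMASK;  /* success, caller must free the object */
  printf("plain:  ok=%d new=%d\n", DEMO_IsOK(plain),  !!DEMO_IsNewObj(plain));
  printf("newobj: ok=%d new=%d\n", DEMO_IsOK(newobj), !!DEMO_IsNewObj(newobj));
  return 0;
}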
917234.c
/* c/zx-xs-aux.c - WARNING: This file was auto generated by xsd2sg.pl. DO NOT EDIT! * $Id$ */ /* Code generation design Copyright (c) 2006 Sampo Kellomaki ([email protected]), * All Rights Reserved. NO WARRANTY. See file COPYING for terms and conditions * of use. Some aspects of code generation were driven by schema * descriptions that were used as input and may be subject to their own copyright. * Code generation uses a template, whose copyright statement follows. */ /** aux-templ.c - Auxiliary functions template: cloning, freeing, walking data ** Copyright (c) 2010 Sampo Kellomaki ([email protected]), All Rights Reserved. ** Copyright (c) 2006 Symlabs ([email protected]), All Rights Reserved. ** Author: Sampo Kellomaki ([email protected]) ** This is confidential unpublished proprietary source code of the author. ** NO WARRANTY, not even implied warranties. Contains trade secrets. ** Distribution prohibited unless authorized in writing. ** Licensed under Apache License 2.0, see file COPYING. ** Id: aux-templ.c,v 1.12 2008-10-04 23:42:14 sampo Exp $ ** ** 30.5.2006, created, Sampo Kellomaki ([email protected]) ** 6.8.2006, factored from enc-templ.c to separate file --Sampo ** ** N.B: wo=wire order (needed for exc-c14n), so=schema order **/ #include <memory.h> #include "errmac.h" #include "zx.h" #include "c/zx-const.h" #include "c/zx-data.h" #include "c/zx-xs-data.h" /* EOF -- c/zx-xs-aux.c */
322905.c
#define _GNU_SOURCE   /* for clone() and CLONE_VM */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sched.h>

/* shared structure between two threads which will be optimized later */
struct shared_data_align {
	unsigned int num_proc1;
	unsigned int num_proc2;
};

/*
 * Shared structure between two threads remains unchanged (non optimized)
 * This is required in order to collect some samples for the L2_LINES_IN event.
 */
struct shared_data_nonalign {
	unsigned int num_proc1;
	unsigned int num_proc2;
};

/* Declare global data */
struct shared_data_nonalign common_aln;

/* Stack for the thread created with clone(); the child stack grows down */
static char buff[8192];

/*
 * The routine below is called by the cloned thread, to increment the num_proc2
 * element of the common and common_aln structures in a loop.
 */
int func1(void *arg)
{
	struct shared_data_align *com = (struct shared_data_align *) arg;
	int i, j;

	/* Increment the value of num_proc2 in a loop */
	for (j = 0; j < 200; j++)
		for (i = 0; i < 100000; i++) {
			com->num_proc2++;
		}

	/* Increment the value of num_proc2 in a loop */
	for (j = 0; j < 200; j++)
		for (i = 0; i < 100000; i++) {
			common_aln.num_proc2++;
		}
	return 0;
}

int main()
{
	/*
	 * In the example program below, the parent process creates a clone
	 * thread sharing its memory space. The parent thread running on one CPU
	 * increments the num_proc1 element of the common and common_aln. The cloned
	 * thread running on another CPU increments the value of num_proc2 element of
	 * the common and common_aln structure.
	 */

	/* Declare local shared data */
	struct shared_data_align common;
	pid_t pid;
	int i, j;

	/* Now clone a thread sharing memory space with the parent process */
	if ((pid = clone(func1, buff + 8188, CLONE_VM, &common)) < 0) {
		perror("clone");
		exit(1);
	}

	/* Increment the value of num_proc1 in a loop */
	for (j = 0; j < 200; j++)
		for (i = 0; i < 100000; i++) {
			common.num_proc1++;
		}

	/* Increment the value of num_proc1 in a loop */
	for (j = 0; j < 200; j++)
		for (i = 0; i < 100000; i++) {
			common_aln.num_proc1++;
		}
	return 0;
}
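/* A plausible follow-up to the example above (an illustration, not part of
 * the original file): the "optimized" layout hinted at by the comment on
 * shared_data_align pads the two counters onto separate cache lines, so that
 * writes by one thread no longer invalidate the line holding the other
 * thread's counter (false sharing).  A 64-byte cache line is assumed. */
struct shared_data_align_padded {
	unsigned int num_proc1;
	char pad[64 - sizeof(unsigned int)];   /* assumed 64-byte cache line */
	unsigned int num_proc2;
} __attribute__((aligned(64)));                /* GCC/Clang-specific attribute */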
243332.c
/* $OpenBSD: strlcat.c,v 1.13 2005/08/08 08:05:37 espie Exp $ */ /* * Copyright (c) 1998 Todd C. Miller <[email protected]> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #ifndef HAVE_STRLCPY #include <sys/types.h> #include <string.h> #include "strlcpycat.h" /* * Appends src to string dst of size siz (unlike strncat, siz is the * full size of dst, not space left). At most siz-1 characters * will be copied. Always NUL terminates (unless siz <= strlen(dst)). * Returns strlen(src) + MIN(siz, strlen(initial dst)). * If retval >= siz, truncation occurred. */ size_t strlcat(char *dst, const char *src, size_t siz) { char *d = dst; const char *s = src; size_t n = siz; size_t dlen; /* Find the end of dst and adjust bytes left but don't go past end */ while (n-- != 0 && *d != '\0') d++; dlen = d - dst; n = siz - dlen; if (n == 0) return(dlen + strlen(s)); while (*s != '\0') { if (n != 1) { *d++ = *s; n--; } s++; } *d = '\0'; return(dlen + (s - src)); /* count does not include NUL */ } #endif
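/* A minimal caller sketch (hypothetical, assuming strlcat is available either
   natively or via the compat definition above) showing the truncation check
   the comment above describes: a return value >= siz means the concatenated
   string did not fit. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[8] = "/usr";
	size_t r = strlcat(path, "/local/bin", sizeof(path));
	if (r >= sizeof(path))
		printf("truncated: needed %zu bytes, kept \"%s\"\n", r + 1, path);
	return 0;
}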
876803.c
/* * Copyright 2020-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc-topology-background-monitoring-private.h" #include "mongoc-client-private.h" #include "mongoc-log-private.h" #include "mongoc-server-monitor-private.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc-ssl-private.h" #endif #include "mongoc-stream-private.h" #include "mongoc-topology-description-apm-private.h" #include "mongoc-topology-private.h" #include "mongoc-trace-private.h" #include "mongoc-util-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "monitor" static BSON_THREAD_FUN (srv_polling_run, topology_void) { mongoc_topology_t *topology; topology = topology_void; bson_mutex_lock (&topology->mutex); while (true) { int64_t now_ms; int64_t scan_due_ms; int64_t sleep_duration_ms; if (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { bson_mutex_unlock (&topology->mutex); break; } /* This will check if a scan is due. */ if (!mongoc_topology_should_rescan_srv (topology)) { TRACE ("%s\n", "topology ineligible for SRV polling, stopping"); bson_mutex_unlock (&topology->mutex); break; } mongoc_topology_rescan_srv (topology); /* Unlock and sleep until next scan is due, or until shutdown signalled. */ now_ms = bson_get_monotonic_time () / 1000; scan_due_ms = topology->srv_polling_last_scan_ms + topology->srv_polling_rescan_interval_ms; sleep_duration_ms = scan_due_ms - now_ms; if (sleep_duration_ms > 0) { TRACE ("srv polling thread sleeping for %" PRId64 "ms", sleep_duration_ms); } /* Check for shutdown again here. mongoc_topology_rescan_srv unlocks the * topology mutex for the scan. The topology may have shut down in that * time. */ if (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { bson_mutex_unlock (&topology->mutex); break; } /* If shutting down, stop. */ mongoc_cond_timedwait ( &topology->srv_polling_cond, &topology->mutex, sleep_duration_ms); } BSON_THREAD_RETURN; } /* Create a server monitor if necessary. * * Called by monitor threads and application threads when reconciling the * topology description. Caller must have topology mutex locked. */ static void _background_monitor_reconcile_server_monitor (mongoc_topology_t *topology, mongoc_server_description_t *sd) { mongoc_set_t *server_monitors; mongoc_server_monitor_t *server_monitor; MONGOC_DEBUG_ASSERT (COMMON_PREFIX (mutex_is_locked) (&topology->mutex)); server_monitors = topology->server_monitors; server_monitor = mongoc_set_get (server_monitors, sd->id); if (!server_monitor) { /* Add a new server monitor. */ server_monitor = mongoc_server_monitor_new (topology, sd); mongoc_server_monitor_run (server_monitor); mongoc_set_add (server_monitors, sd->id, server_monitor); } /* Check if an RTT monitor is needed. 
*/ if (!bson_empty (&sd->topology_version)) { mongoc_set_t *rtt_monitors; mongoc_server_monitor_t *rtt_monitor; rtt_monitors = topology->rtt_monitors; rtt_monitor = mongoc_set_get (rtt_monitors, sd->id); if (!rtt_monitor) { rtt_monitor = mongoc_server_monitor_new (topology, sd); mongoc_server_monitor_run_as_rtt (rtt_monitor); mongoc_set_add (rtt_monitors, sd->id, rtt_monitor); } } return; } /* Start background monitoring. * * Called by an application thread popping a client from a pool. Safe to * call repeatedly. * Caller must have topology mutex locked. */ void _mongoc_topology_background_monitoring_start (mongoc_topology_t *topology) { BSON_ASSERT (!topology->single_threaded); MONGOC_DEBUG_ASSERT (COMMON_PREFIX (mutex_is_locked) (&topology->mutex)); if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { return; } TRACE ("%s", "background monitoring starting"); BSON_ASSERT (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_OFF); topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_BG_RUNNING; _mongoc_handshake_freeze (); _mongoc_topology_description_monitor_opening (&topology->description); /* Reconcile to create the first server monitors. */ _mongoc_topology_background_monitoring_reconcile (topology); /* Start SRV polling thread. */ if (mongoc_topology_should_rescan_srv (topology)) { topology->is_srv_polling = true; COMMON_PREFIX (thread_create) (&topology->srv_polling_thread, srv_polling_run, topology); } } /* Remove server monitors that are no longer in the set of server descriptions. * * Called by monitor threads and application threads when reconciling the * topology description. Caller must have topology mutex locked. */ static void _remove_orphaned_server_monitors (mongoc_set_t *server_monitors, mongoc_set_t *server_descriptions) { uint32_t *server_monitor_ids_to_remove; uint32_t n_server_monitor_ids_to_remove = 0; int i; /* Signal shutdown to server monitors no longer in the topology description. */ server_monitor_ids_to_remove = bson_malloc0 (sizeof (uint32_t) * server_monitors->items_len); for (i = 0; i < server_monitors->items_len; i++) { mongoc_server_monitor_t *server_monitor; uint32_t id; server_monitor = mongoc_set_get_item_and_id (server_monitors, i, &id); if (!mongoc_set_get (server_descriptions, id)) { if (mongoc_server_monitor_request_shutdown (server_monitor)) { mongoc_server_monitor_wait_for_shutdown (server_monitor); mongoc_server_monitor_destroy (server_monitor); server_monitor_ids_to_remove[n_server_monitor_ids_to_remove] = id; n_server_monitor_ids_to_remove++; } } } /* Remove freed server monitors that have completed shutdown. */ for (i = 0; i < n_server_monitor_ids_to_remove; i++) { mongoc_set_rm (server_monitors, server_monitor_ids_to_remove[i]); } bson_free (server_monitor_ids_to_remove); } /* Reconcile the topology description with the set of server monitors. * * Called when the topology description is updated (via handshake, monitoring, * or invalidation). May be called by server monitor thread or an application * thread. * Caller must have topology mutex locked. * Locks server monitor mutexes. May join / remove server monitors that have * completed shutdown. 
*/ void _mongoc_topology_background_monitoring_reconcile (mongoc_topology_t *topology) { mongoc_topology_description_t *td; mongoc_set_t *server_descriptions; int i; MONGOC_DEBUG_ASSERT (COMMON_PREFIX (mutex_is_locked) (&topology->mutex)); td = &topology->description; server_descriptions = td->servers; BSON_ASSERT (!topology->single_threaded); if (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { return; } /* Add newly discovered server monitors, and update existing ones. */ for (i = 0; i < server_descriptions->items_len; i++) { mongoc_server_description_t *sd; sd = mongoc_set_get_item (server_descriptions, i); _background_monitor_reconcile_server_monitor (topology, sd); } _remove_orphaned_server_monitors (topology->server_monitors, server_descriptions); _remove_orphaned_server_monitors (topology->rtt_monitors, server_descriptions); } /* Request all server monitors to scan. * * Called from application threads (during server selection or "not primary" * errors). Caller must have topology mutex locked. Locks server monitor mutexes * to deliver scan_requested. */ void _mongoc_topology_background_monitoring_request_scan ( mongoc_topology_t *topology) { mongoc_set_t *server_monitors; int i; BSON_ASSERT (!topology->single_threaded); if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN) { return; } server_monitors = topology->server_monitors; for (i = 0; i < server_monitors->items_len; i++) { mongoc_server_monitor_t *server_monitor; uint32_t id; server_monitor = mongoc_set_get_item_and_id (server_monitors, i, &id); mongoc_server_monitor_request_scan (server_monitor); } } /* Stop, join, and destroy all server monitors. * * Called by application threads when destroying a client pool. * Caller must have topology mutex locked. * Locks server monitor mutexes to deliver shutdown. Releases topology mutex to * join server monitor threads. Leaves topology mutex locked on exit. This * function is thread-safe. But in practice, it is only ever called by one * application thread (because mongoc_client_pool_destroy is not thread-safe). */ void _mongoc_topology_background_monitoring_stop (mongoc_topology_t *topology) { mongoc_server_monitor_t *server_monitor; int i; MONGOC_DEBUG_ASSERT (COMMON_PREFIX (mutex_is_locked) (&topology->mutex)); BSON_ASSERT (!topology->single_threaded); if (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { return; } topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN; TRACE ("%s", "background monitoring stopping"); /* Signal SRV polling to shut down (if it is started). */ if (topology->is_srv_polling) { mongoc_cond_signal (&topology->srv_polling_cond); } /* Signal all server monitors to shut down. */ for (i = 0; i < topology->server_monitors->items_len; i++) { server_monitor = mongoc_set_get_item (topology->server_monitors, i); mongoc_server_monitor_request_shutdown (server_monitor); } /* Signal all RTT monitors to shut down. */ for (i = 0; i < topology->rtt_monitors->items_len; i++) { server_monitor = mongoc_set_get_item (topology->rtt_monitors, i); mongoc_server_monitor_request_shutdown (server_monitor); } /* Some mongoc_server_monitor_t may be waiting for topology mutex. Unlock so * they can proceed to terminate. It is safe to unlock topology mutex. Since * scanner_state has transitioned to shutting down, no thread can modify * server_monitors. */ bson_mutex_unlock (&topology->mutex); for (i = 0; i < topology->server_monitors->items_len; i++) { /* Wait for the thread to shutdown. 
*/ server_monitor = mongoc_set_get_item (topology->server_monitors, i); mongoc_server_monitor_wait_for_shutdown (server_monitor); mongoc_server_monitor_destroy (server_monitor); } for (i = 0; i < topology->rtt_monitors->items_len; i++) { /* Wait for the thread to shutdown. */ server_monitor = mongoc_set_get_item (topology->rtt_monitors, i); mongoc_server_monitor_wait_for_shutdown (server_monitor); mongoc_server_monitor_destroy (server_monitor); } /* Wait for SRV polling thread. */ if (topology->is_srv_polling) { COMMON_PREFIX (thread_join) (topology->srv_polling_thread); } bson_mutex_lock (&topology->mutex); mongoc_set_destroy (topology->server_monitors); mongoc_set_destroy (topology->rtt_monitors); topology->server_monitors = mongoc_set_new (1, NULL, NULL); topology->rtt_monitors = mongoc_set_new (1, NULL, NULL); topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_OFF; mongoc_cond_broadcast (&topology->cond_client); } /* Cancel an in-progress streaming hello for a specific server (if * applicable). * * Called from application threads on network errors. * Caller must have topology mutex locked. */ void _mongoc_topology_background_monitoring_cancel_check ( mongoc_topology_t *topology, uint32_t server_id) { mongoc_server_monitor_t *server_monitor; server_monitor = mongoc_set_get (topology->server_monitors, server_id); if (!server_monitor) { /* Already removed. */ return; } mongoc_server_monitor_request_cancel (server_monitor); }
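/* srv_polling_run above uses a common pattern: compute when the next scan is
   due, then block on a condition variable with that deadline so a shutdown
   signal can interrupt the sleep early.  The sketch below shows the same
   pattern with plain pthreads rather than the mongoc/bson wrappers; all
   names here are illustrative. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  poll_cond = PTHREAD_COND_INITIALIZER;
static bool poll_running = true;  /* cleared (under the lock) to request shutdown */

static void *poll_loop(void *arg)
{
   long interval_sec = *(long *) arg;

   pthread_mutex_lock (&poll_lock);
   while (poll_running) {
      /* do_scan() would go here, analogous to mongoc_topology_rescan_srv. */

      struct timespec due;
      clock_gettime (CLOCK_REALTIME, &due);
      due.tv_sec += interval_sec;

      /* Sleep until the next scan is due, but wake immediately if the
         shutdown path signals poll_cond (spurious wakeups loop back). */
      while (poll_running &&
             pthread_cond_timedwait (&poll_cond, &poll_lock, &due) != ETIMEDOUT)
         ;
   }
   pthread_mutex_unlock (&poll_lock);
   return NULL;
}
/* The shutdown path mirrors _mongoc_topology_background_monitoring_stop:
   lock, clear poll_running, signal poll_cond, unlock, then join the thread. */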
264474.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /* Example 17 Interface: Structured interface (Struct) Compile with: make ex17 Sample run: mpirun -np 16 ex17 -n 10 To see options: ex17 -help Description: This code solves an "NDIM-D Laplacian" using CG. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "HYPRE_struct_ls.h" #include "ex.h" #define NDIM 4 #define NSTENC (2*NDIM+1) int main (int argc, char *argv[]) { int d, i, j; int myid, num_procs; int n, N, nvol, div, rem; int p[NDIM], ilower[NDIM], iupper[NDIM]; int solver_id; HYPRE_StructGrid grid; HYPRE_StructStencil stencil; HYPRE_StructMatrix A; HYPRE_StructVector b; HYPRE_StructVector x; HYPRE_StructSolver solver; int num_iterations; double final_res_norm; /* Initialize MPI */ MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &myid); MPI_Comm_size(MPI_COMM_WORLD, &num_procs); /* Initialize HYPRE */ HYPRE_Init(); /* Print GPU info */ /* HYPRE_PrintDeviceInfo(); */ /* Set defaults */ n = 10; solver_id = 0; /* Parse command line */ { int arg_index = 0; int print_usage = 0; while (arg_index < argc) { if ( strcmp(argv[arg_index], "-n") == 0 ) { arg_index++; n = atoi(argv[arg_index++]); } else if ( strcmp(argv[arg_index], "-solver") == 0 ) { arg_index++; solver_id = atoi(argv[arg_index++]); } else if ( strcmp(argv[arg_index], "-help") == 0 ) { print_usage = 1; break; } else { arg_index++; } } if ((print_usage) && (myid == 0)) { printf("\n"); printf("Usage: %s [<options>]\n", argv[0]); printf("\n"); printf(" -n <n> : problem size per processor (default: 10)\n"); printf(" -solver <ID> : solver ID\n"); printf(" 0 - CG (default)\n"); printf("\n"); } if (print_usage) { MPI_Finalize(); return (0); } } nvol = pow(n, NDIM); /* Figure out the processor grid (N x N x N x N). The local problem size for the interior nodes is indicated by n (n x n x n x n). p indicates the position in the processor grid. */ N = pow(num_procs, 1.0/NDIM) + 1.0e-6; div = pow(N, NDIM); rem = myid; if (num_procs != div) { printf("Num procs is not a perfect NDIM-th root!\n"); MPI_Finalize(); exit(1); } for (d = NDIM-1; d >= 0; d--) { div /= N; p[d] = rem / div; rem %= div; } /* Figure out the extents of each processor's piece of the grid. */ for (d = 0; d < NDIM; d++) { ilower[d] = p[d]*n; iupper[d] = ilower[d] + n-1; } /* 1. Set up a grid */ { /* Create an empty NDIM-D grid object */ HYPRE_StructGridCreate(MPI_COMM_WORLD, NDIM, &grid); /* Add a new box to the grid */ HYPRE_StructGridSetExtents(grid, ilower, iupper); /* This is a collective call finalizing the grid assembly. The grid is now ``ready to be used'' */ HYPRE_StructGridAssemble(grid); } /* 2. Define the discretization stencil */ { /* Create an empty NDIM-D, NSTENC-pt stencil object */ HYPRE_StructStencilCreate(NDIM, NSTENC, &stencil); /* Define the geometry of the stencil */ { int entry; int offset[NDIM]; entry = 0; for (d = 0; d < NDIM; d++) { offset[d] = 0; } HYPRE_StructStencilSetElement(stencil, entry++, offset); for (d = 0; d < NDIM; d++) { offset[d] = -1; HYPRE_StructStencilSetElement(stencil, entry++, offset); offset[d] = 1; HYPRE_StructStencilSetElement(stencil, entry++, offset); offset[d] = 0; } } } /* 3.
Set up a Struct Matrix */ { int nentries = NSTENC; int nvalues = nentries*nvol; double *values; int stencil_indices[NSTENC]; /* Create an empty matrix object */ HYPRE_StructMatrixCreate(MPI_COMM_WORLD, grid, stencil, &A); /* Indicate that the matrix coefficients are ready to be set */ HYPRE_StructMatrixInitialize(A); values = (double*) calloc(nvalues, sizeof(double)); for (j = 0; j < nentries; j++) { stencil_indices[j] = j; } /* Set the standard stencil at each grid point; fix boundaries later */ for (i = 0; i < nvalues; i += nentries) { values[i] = NSTENC; /* Use absolute row sum */ for (j = 1; j < nentries; j++) { values[i+j] = -1.0; } } HYPRE_StructMatrixSetBoxValues(A, ilower, iupper, nentries, stencil_indices, values); free(values); } /* 4. Incorporate zero boundary conditions: go along each edge of the domain and set the stencil entry that reaches to the boundary to zero.*/ { int bc_ilower[NDIM]; int bc_iupper[NDIM]; int nentries = 1; int nvalues = nentries*nvol/n; /* number of stencil entries times the length of one side of my grid box */ double *values; int stencil_indices[1]; values = (double*) calloc(nvalues, sizeof(double)); for (j = 0; j < nvalues; j++) { values[j] = 0.0; } for (d = 0; d < NDIM; d++) { bc_ilower[d] = ilower[d]; bc_iupper[d] = iupper[d]; } stencil_indices[0] = 1; for (d = 0; d < NDIM; d++) { /* lower boundary in dimension d */ if (p[d] == 0) { bc_iupper[d] = ilower[d]; HYPRE_StructMatrixSetBoxValues(A, bc_ilower, bc_iupper, nentries, stencil_indices, values); bc_iupper[d] = iupper[d]; } stencil_indices[0]++; /* upper boundary in dimension d */ if (p[d] == N-1) { bc_ilower[d] = iupper[d]; HYPRE_StructMatrixSetBoxValues(A, bc_ilower, bc_iupper, nentries, stencil_indices, values); bc_ilower[d] = ilower[d]; } stencil_indices[0]++; } free(values); } /* This is a collective call finalizing the matrix assembly. The matrix is now ``ready to be used'' */ HYPRE_StructMatrixAssemble(A); /* 5. Set up Struct Vectors for b and x */ { int nvalues = nvol; double *values; values = (double*) calloc(nvalues, sizeof(double)); /* Create an empty vector object */ HYPRE_StructVectorCreate(MPI_COMM_WORLD, grid, &b); HYPRE_StructVectorCreate(MPI_COMM_WORLD, grid, &x); /* Indicate that the vector coefficients are ready to be set */ HYPRE_StructVectorInitialize(b); HYPRE_StructVectorInitialize(x); /* Set the values */ for (i = 0; i < nvalues; i ++) { values[i] = 1.0; } HYPRE_StructVectorSetBoxValues(b, ilower, iupper, values); for (i = 0; i < nvalues; i ++) { values[i] = 0.0; } HYPRE_StructVectorSetBoxValues(x, ilower, iupper, values); free(values); /* This is a collective call finalizing the vector assembly. The vector is now ``ready to be used'' */ HYPRE_StructVectorAssemble(b); HYPRE_StructVectorAssemble(x); } #if 0 HYPRE_StructMatrixPrint("ex17.out.A", A, 0); HYPRE_StructVectorPrint("ex17.out.b", b, 0); HYPRE_StructVectorPrint("ex17.out.x0", x, 0); #endif /* 6. Set up and use a struct solver (Solver options can be found in the Reference Manual.) 
*/ if (solver_id == 0) { HYPRE_StructPCGCreate(MPI_COMM_WORLD, &solver); HYPRE_StructPCGSetMaxIter(solver, 100); HYPRE_StructPCGSetTol(solver, 1.0e-06); HYPRE_StructPCGSetTwoNorm(solver, 1); HYPRE_StructPCGSetRelChange(solver, 0); HYPRE_StructPCGSetPrintLevel(solver, 2); /* print each CG iteration */ HYPRE_StructPCGSetLogging(solver, 1); /* No preconditioner */ HYPRE_StructPCGSetup(solver, A, b, x); HYPRE_StructPCGSolve(solver, A, b, x); /* Get some info on the run */ HYPRE_StructPCGGetNumIterations(solver, &num_iterations); HYPRE_StructPCGGetFinalRelativeResidualNorm(solver, &final_res_norm); /* Clean up */ HYPRE_StructPCGDestroy(solver); } if (myid == 0) { printf("\n"); printf("Iterations = %d\n", num_iterations); printf("Final Relative Residual Norm = %g\n", final_res_norm); printf("\n"); } /* Free memory */ HYPRE_StructGridDestroy(grid); HYPRE_StructStencilDestroy(stencil); HYPRE_StructMatrixDestroy(A); HYPRE_StructVectorDestroy(b); HYPRE_StructVectorDestroy(x); /* Finalize HYPRE */ HYPRE_Finalize(); /* Finalize MPI */ MPI_Finalize(); return (0); }
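/* A standalone sketch (its own program, hypothetical) of the rank ->
 * processor-grid decomposition performed in main() above: myid is read as
 * an NDIM-digit base-N number, with p[NDIM-1] the most significant digit. */
#include <stdio.h>

int main(void)
{
   int NDIM = 4, N = 2;            /* 2 procs per dimension -> 16 ranks */
   int myid = 11;                  /* example rank */
   int p[4], d, div = 1, rem = myid;

   for (d = 0; d < NDIM; d++) { div *= N; }  /* div = N^NDIM = 16 */
   for (d = NDIM-1; d >= 0; d--)
   {
      div /= N;
      p[d] = rem / div;
      rem %= div;
   }
   /* 11 = p[3]*8 + p[2]*4 + p[1]*2 + p[0] = 8 + 0 + 2 + 1 -> p = {1,1,0,1} */
   printf("p = {%d,%d,%d,%d}\n", p[0], p[1], p[2], p[3]);
   return 0;
}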
622158.c
#include <string.h> /* memchr must compare bytes as unsigned char (C99 7.21.5.1); going through a plain char pointer gives wrong results for bytes >= 0x80 on platforms where char is signed. */ void *memchr(const void *s, int c, size_t n) { const unsigned char *p = s; unsigned char ch = (unsigned char) c; while (n--) { if (*p == ch) { return (void *) p; } p++; } return NULL; }
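/* A minimal usage sketch for memchr (hypothetical standalone demo; compile
 * it as its own translation unit, since this file provides memchr itself). */
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char buf[] = "key=value";
    /* memchr returns a pointer to the first '=' within the first n bytes,
     * or NULL if it is absent. */
    const char *eq = memchr(buf, '=', sizeof buf - 1);
    if (eq != NULL) {
        printf("key length = %zu\n", (size_t)(eq - buf)); /* prints 3 */
    }
    return 0;
}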
594775.c
/* * Copyright 2002-2018 The Opentls Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.opentls.org/source/license.html */ #include <opentls/crypto.h> #include <opentls/err.h> #include "ec_local.h" BIGNUM *EC_POINT_point2bn(const EC_GROUP *group, const EC_POINT *point, point_conversion_form_t form, BIGNUM *ret, BN_CTX *ctx) { size_t buf_len = 0; unsigned char *buf; buf_len = EC_POINT_point2buf(group, point, form, &buf, ctx); if (buf_len == 0) return NULL; ret = BN_bin2bn(buf, buf_len, ret); OPENtls_free(buf); return ret; } EC_POINT *EC_POINT_bn2point(const EC_GROUP *group, const BIGNUM *bn, EC_POINT *point, BN_CTX *ctx) { size_t buf_len = 0; unsigned char *buf; EC_POINT *ret; if ((buf_len = BN_num_bytes(bn)) == 0) buf_len = 1; if ((buf = OPENtls_malloc(buf_len)) == NULL) { ECerr(EC_F_EC_POINT_BN2POINT, ERR_R_MALLOC_FAILURE); return NULL; } if (!BN_bn2binpad(bn, buf, buf_len)) { OPENtls_free(buf); return NULL; } if (point == NULL) { if ((ret = EC_POINT_new(group)) == NULL) { OPENtls_free(buf); return NULL; } } else ret = point; if (!EC_POINT_oct2point(group, ret, buf, buf_len, ctx)) { if (ret != point) EC_POINT_clear_free(ret); OPENtls_free(buf); return NULL; } OPENtls_free(buf); return ret; } static const char *HEX_DIGITS = "0123456789ABCDEF"; /* the return value must be freed (using OPENtls_free()) */ char *EC_POINT_point2hex(const EC_GROUP *group, const EC_POINT *point, point_conversion_form_t form, BN_CTX *ctx) { char *ret, *p; size_t buf_len = 0, i; unsigned char *buf = NULL, *pbuf; buf_len = EC_POINT_point2buf(group, point, form, &buf, ctx); if (buf_len == 0) return NULL; ret = OPENtls_malloc(buf_len * 2 + 2); if (ret == NULL) { OPENtls_free(buf); return NULL; } p = ret; pbuf = buf; for (i = buf_len; i > 0; i--) { int v = (int)*(pbuf++); *(p++) = HEX_DIGITS[v >> 4]; *(p++) = HEX_DIGITS[v & 0x0F]; } *p = '\0'; OPENtls_free(buf); return ret; } EC_POINT *EC_POINT_hex2point(const EC_GROUP *group, const char *buf, EC_POINT *point, BN_CTX *ctx) { EC_POINT *ret = NULL; BIGNUM *tmp_bn = NULL; if (!BN_hex2bn(&tmp_bn, buf)) return NULL; ret = EC_POINT_bn2point(group, tmp_bn, point, ctx); BN_clear_free(tmp_bn); return ret; }
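/* A round-trip sketch for the hex helpers above: encode a named curve's
 * generator with EC_POINT_point2hex and decode it back with
 * EC_POINT_hex2point. Assumes this tree's <opentls/ec.h> and
 * <opentls/obj_mac.h> headers (the standard OpenSSL names otherwise);
 * demo only, not part of the original file. */
#include <opentls/ec.h>
#include <opentls/obj_mac.h>

static int hex_roundtrip_demo(void)
{
    int ok = 0;
    EC_GROUP *group = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
    char *hex = NULL;
    EC_POINT *copy = NULL;

    if (group == NULL)
        goto done;
    hex = EC_POINT_point2hex(group, EC_GROUP_get0_generator(group),
                             POINT_CONVERSION_COMPRESSED, NULL);
    if (hex == NULL)
        goto done;
    copy = EC_POINT_hex2point(group, hex, NULL, NULL);
    /* EC_POINT_cmp returns 0 when the two points are equal */
    ok = copy != NULL
        && EC_POINT_cmp(group, copy, EC_GROUP_get0_generator(group), NULL) == 0;
 done:
    EC_POINT_free(copy);
    OPENtls_free(hex);
    EC_GROUP_free(group);
    return ok;
}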
354183.c
/* Copyright (C) 1997-2014 Sam Lantinga <[email protected]> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely. */ /******************************************************************************** * * * Running moose :) Coded by Mike Gorchak. * * * ********************************************************************************/ #include <stdlib.h> #include <stdio.h> #ifdef __EMSCRIPTEN__ #include <emscripten/emscripten.h> #endif #include "SDL.h" #define MOOSEPIC_W 64 #define MOOSEPIC_H 88 #define MOOSEFRAME_SIZE (MOOSEPIC_W * MOOSEPIC_H) #define MOOSEFRAMES_COUNT 10 SDL_Color MooseColors[84] = { {49, 49, 49, 255}, {66, 24, 0, 255}, {66, 33, 0, 255}, {66, 66, 66, 255}, {66, 115, 49, 255}, {74, 33, 0, 255}, {74, 41, 16, 255}, {82, 33, 8, 255}, {82, 41, 8, 255}, {82, 49, 16, 255}, {82, 82, 82, 255}, {90, 41, 8, 255}, {90, 41, 16, 255}, {90, 57, 24, 255}, {99, 49, 16, 255}, {99, 66, 24, 255}, {99, 66, 33, 255}, {99, 74, 33, 255}, {107, 57, 24, 255}, {107, 82, 41, 255}, {115, 57, 33, 255}, {115, 66, 33, 255}, {115, 66, 41, 255}, {115, 74, 0, 255}, {115, 90, 49, 255}, {115, 115, 115, 255}, {123, 82, 0, 255}, {123, 99, 57, 255}, {132, 66, 41, 255}, {132, 74, 41, 255}, {132, 90, 8, 255}, {132, 99, 33, 255}, {132, 99, 66, 255}, {132, 107, 66, 255}, {140, 74, 49, 255}, {140, 99, 16, 255}, {140, 107, 74, 255}, {140, 115, 74, 255}, {148, 107, 24, 255}, {148, 115, 82, 255}, {148, 123, 74, 255}, {148, 123, 90, 255}, {156, 115, 33, 255}, {156, 115, 90, 255}, {156, 123, 82, 255}, {156, 132, 82, 255}, {156, 132, 99, 255}, {156, 156, 156, 255}, {165, 123, 49, 255}, {165, 123, 90, 255}, {165, 132, 82, 255}, {165, 132, 90, 255}, {165, 132, 99, 255}, {165, 140, 90, 255}, {173, 132, 57, 255}, {173, 132, 99, 255}, {173, 140, 107, 255}, {173, 140, 115, 255}, {173, 148, 99, 255}, {173, 173, 173, 255}, {181, 140, 74, 255}, {181, 148, 115, 255}, {181, 148, 123, 255}, {181, 156, 107, 255}, {189, 148, 123, 255}, {189, 156, 82, 255}, {189, 156, 123, 255}, {189, 156, 132, 255}, {189, 189, 189, 255}, {198, 156, 123, 255}, {198, 165, 132, 255}, {206, 165, 99, 255}, {206, 165, 132, 255}, {206, 173, 140, 255}, {206, 206, 206, 255}, {214, 173, 115, 255}, {214, 173, 140, 255}, {222, 181, 148, 255}, {222, 189, 132, 255}, {222, 189, 156, 255}, {222, 222, 222, 255}, {231, 198, 165, 255}, {231, 231, 231, 255}, {239, 206, 173, 255} }; Uint8 MooseFrames[MOOSEFRAMES_COUNT][MOOSEFRAME_SIZE]; SDL_Renderer *renderer; int frame; SDL_Texture *MooseTexture; SDL_bool done = SDL_FALSE; void quit(int rc) { SDL_Quit(); exit(rc); } void UpdateTexture(SDL_Texture *texture, int frame) { SDL_Color *color; Uint8 *src; Uint32 *dst; int row, col; void *pixels; int pitch; if (SDL_LockTexture(texture, NULL, &pixels, &pitch) < 0) { SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't lock texture: %s\n", SDL_GetError()); quit(5); } src = MooseFrames[frame]; for (row = 0; row < MOOSEPIC_H; ++row) { dst = (Uint32*)((Uint8*)pixels + row * pitch); for (col = 0; col < MOOSEPIC_W; ++col) { color = &MooseColors[*src++]; *dst++ = (0xFF000000|(color->r<<16)|(color->g<<8)|color->b); } } SDL_UnlockTexture(texture); } void loop() { SDL_Event event; while (SDL_PollEvent(&event)) { switch (event.type) { case SDL_KEYDOWN: if (event.key.keysym.sym == SDLK_ESCAPE) { done = SDL_TRUE; } break; case 
SDL_QUIT: done = SDL_TRUE; break; } } frame = (frame + 1) % MOOSEFRAMES_COUNT; UpdateTexture(MooseTexture, frame); SDL_RenderClear(renderer); SDL_RenderCopy(renderer, MooseTexture, NULL, NULL); SDL_RenderPresent(renderer); } int main(int argc, char **argv) { SDL_Window *window; SDL_RWops *handle; /* Enable standard application logging */ SDL_LogSetPriority(SDL_LOG_CATEGORY_APPLICATION, SDL_LOG_PRIORITY_INFO); if (SDL_Init(SDL_INIT_VIDEO) < 0) { SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't initialize SDL: %s\n", SDL_GetError()); return 1; } /* load the moose images */ handle = SDL_RWFromFile("moose.dat", "rb"); if (handle == NULL) { SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Can't find the file moose.dat!\n"); quit(2); } SDL_RWread(handle, MooseFrames, MOOSEFRAME_SIZE, MOOSEFRAMES_COUNT); SDL_RWclose(handle); /* Create the window and renderer */ window = SDL_CreateWindow("Happy Moose", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, MOOSEPIC_W*4, MOOSEPIC_H*4, SDL_WINDOW_RESIZABLE); if (!window) { SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't create window: %s\n", SDL_GetError()); quit(3); } renderer = SDL_CreateRenderer(window, -1, 0); if (!renderer) { SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't create renderer: %s\n", SDL_GetError()); quit(4); } MooseTexture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_ARGB8888, SDL_TEXTUREACCESS_STREAMING, MOOSEPIC_W, MOOSEPIC_H); if (!MooseTexture) { SDL_LogError(SDL_LOG_CATEGORY_APPLICATION, "Couldn't create texture: %s\n", SDL_GetError()); quit(5); } /* Loop, waiting for QUIT or the escape key */ frame = 0; #ifdef __EMSCRIPTEN__ emscripten_set_main_loop(loop, 0, 1); #else while (!done) { loop(); } #endif SDL_DestroyRenderer(renderer); quit(0); return 0; } /* vi: set ts=4 sw=4 expandtab: */
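/* The inner loop of UpdateTexture() packs each palette entry into an
 * ARGB8888 pixel with opaque alpha in the top byte. A hypothetical helper
 * that makes the byte layout explicit (not part of the original program): */
static Uint32
PackARGB8888(Uint8 a, Uint8 r, Uint8 g, Uint8 b)
{
    return ((Uint32)a << 24) | ((Uint32)r << 16) | ((Uint32)g << 8) | (Uint32)b;
}
/* e.g. *dst++ = PackARGB8888(0xFF, color->r, color->g, color->b); */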
415804.c
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "helper.h" #include <zircon/assert.h> #include <zircon/types.h> #include <zxtest/zxtest.h> // Sanity check that looks for bugs in C macro implementation of ASSERT_*/EXPECT_*. This forces // the text replacement and allows the compiler to find errors. Otherwise it is left to the user // to find errors once the macro is first used. Also we validate that the asserts return // and the expects don't. // Tests will fail because we are verifying they actually work as intended, though the // pass/fail behavior is decided based on Verify functions. TEST(ZxTestCAssertionsTest, Fail) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "FAIL(...) macro did not abort test execution."); FAIL("Something bad happened"); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertTrueAndFalse) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "EXPECT/ASSERT_TRUE/FALSE returned on success."); EXPECT_TRUE(true, "EXPECT_TRUE failed."); EXPECT_FALSE(false, "EXPECT_FALSE failed."); ASSERT_TRUE(true, "ASSERT_TRUE failed."); ASSERT_FALSE(false, "ASSERT_FALSE failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertTrueAndFalseFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT/ASSERT_TRUE/FALSE returned on success."); EXPECT_TRUE(false, "EXPECT_TRUE succeeded."); EXPECT_FALSE(true, "EXPECT_FALSE succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertFalseFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_FALSE failed to abort test execution."); ASSERT_FALSE(true, "ASSERT_FALSE succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertTrueFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_TRUE failed to abort test execution."); ASSERT_TRUE(false, "ASSERT_TRUE succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertEQSuccess) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_EQ aborted test on success."); int a = 1; int b = 2; // Happy cases. EXPECT_EQ(1, 1, "EXPECT_EQ identity failed."); ASSERT_EQ(1, 1, "ASSERT_EQ identity failed."); EXPECT_EQ(a, a, "EXPECT_EQ identity failed."); ASSERT_EQ(b, b, "ASSERT_EQ identity failed."); // No failures TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertEQFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_EQ aborted execution."); int a = 1; int b = 2; EXPECT_EQ(1, 2, "EXPECT_EQ inequality detection succeeded."); EXPECT_EQ(a, b, "EXPECT_EQ inequality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertEQFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_EQ did not abort test execution."); ASSERT_EQ(1, 2, "ASSERT_EQ inequality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertNESuccess) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "EXPECT_NE aborted test execution."); int a = 1; int b = 2; // Happy cases.
EXPECT_NE(1, 2, "EXPECT_NE inequality detection succeeded."); EXPECT_NE(a, b, "EXPECT_NE inequality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertNEFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_NE aborted test execution."); int a = 1; EXPECT_NE(1, 1, "EXPECT_NE equality detection suceeded."); EXPECT_NE(a, a, "EXPECT_NE equality detection suceeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertNEFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_NE did not abort test execution."); int a = 1; int b = 1; ASSERT_NE(a, b, "ASSERT_NE equality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertLT) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT_LT did not abort test execution."); int a = 1; int b = 2; // Happy cases. ASSERT_LT(1, 2, "ASSERT_LT failed."); EXPECT_LT(a, b, "EXPECT_LT failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertLTFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "ASSERT_LT did not abort test execution."); int a = 1; int b = 2; EXPECT_LT(2, 1, "EXPECT_LT failed."); EXPECT_LT(b, a, "EXPECT_LT failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertLTFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_LT did not abort test execution."); int a = 1; int b = 2; ASSERT_LT(b, a, "EXPECT_LT failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertLE) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_LE aborted test execution on success."); int a = 1; int b = 2; // Happy cases. ASSERT_LE(1, 2, "ASSERT_LE failed."); ASSERT_LE(1, 1, "ASSERT_LE failed."); EXPECT_LE(a, b, "EXPECT_LE failed."); EXPECT_LE(a, a, "EXPECT_LE failed."); // No failures TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertLEFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_LE aborted test execution."); int a = 1; int b = 2; EXPECT_LE(2, 1, "EXPECT_LE failed."); EXPECT_LE(b, a, "EXPECT_LE failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertLEFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_LE did not abort test execution."); int a = 1; int b = 2; ASSERT_LE(b, a, "EXPECT_LE failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertGT) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "EXPECT_GT aborted test execution on success."); int a = 1; int b = 2; EXPECT_GT(2, 1, "EXPECT_GT failed."); EXPECT_GT(b, a, "EXPECT_GT failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertGTFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_GT aborted test execution."); int a = 1; int b = 2; EXPECT_GT(a, b, "EXPECT_GT succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertGTFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "EXPECT_GT did aborted test execution."); int a = 1; int b = 2; ASSERT_GT(a, b, "ASSERT_GT succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertGE) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_GE aborted test execution on success."); int a = 1; int b = 2; ASSERT_GE(2, 1, "ASSERT_GE failed."); ASSERT_GE(1, 1, "ASSERT_GE failed."); EXPECT_GE(b, a, "EXPECT_GE failed."); EXPECT_GE(a, a, "EXPECT_GE failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertGEFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "ASSERT/EXPECT_GE aborted test execution on success."); int a = 1; int b = 2; EXPECT_GE(1, 2, "EXPECT_GE failed."); 
EXPECT_GE(a, b, "EXPECT_GE failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertGEFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT/EXPECT_GE aborted test execution on success."); int a = 1; int b = 2; ASSERT_GE(a, b, "EXPECT_GE failed."); ZX_ASSERT_MSG(_ZXTEST_ABORT_IF_ERROR, "Assert was did not abort test."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertStrEq) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_STR_EQ aborted test execution on success."); const char* str1 = "a"; const char* str2 = "a"; EXPECT_STR_EQ(str1, str2, "ASSERT_STR_EQ failed to identify equal strings."); EXPECT_STR_EQ(str1, str1, "ASSERT_STR_EQ failed to identify equal strings."); ASSERT_STR_EQ(str1, str2, "ASSERT_STR_EQ failed to identify equal strings."); ASSERT_STR_EQ(str1, str1, "ASSERT_STR_EQ failed to identify equal strings."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertStrNe) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_STR_EQ aborted test execution on success."); const char* str1 = "a"; const char* str2 = "b"; EXPECT_STR_NE(str1, str2, "EXPECT_STR_NE failed to identify different strings."); ASSERT_STR_NE(str1, str2, "ASSERT_STR_NE failed to identify different strings."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertStrEqFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_STR_EQ aborted test execution."); const char* str1 = "a"; const char* str2 = "b"; EXPECT_STR_EQ(str1, str2, "ASSERT_STR_EQ failed to identify equal strings."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertStrEqFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT/EXPECT_STR_EQ aborted test execution on success."); const char* str1 = "a"; const char* str2 = "b"; ASSERT_STR_EQ(str1, str2, "ASSERT_STR_EQ failed to identify equal strings."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNotNull) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_NOT_NULL aborted test execution on success."); char a; EXPECT_NOT_NULL(&a, "ASSERT_NOT_NULL failed to identify NULL."); ASSERT_NOT_NULL(&a, "ASSERT_NOT_NULL failed to identify NULL."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNotNullFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_NOT_NULL aborted test execution."); char* a = NULL; EXPECT_NOT_NULL(a, "EXPECT_NOT_NULL identified NULL."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNotNullFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_NOT_NULL did not abort test execution."); char* a = NULL; ASSERT_NOT_NULL(a, "ASSERT_NOT_NULL identified NULL."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNull) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_NULL aborted test execution on success."); char* a = NULL; ASSERT_NULL(a, "ASSERT_NULL did not identify NULL."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNullFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_NULL aborted test execution."); char b; char* a = &b; EXPECT_NULL(a, "EXPECT_NOT_NULL identified NULL."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNullFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_NULL did not abort test execution."); char b; char* a = &b; ASSERT_NULL(a, "ASSERT_NOT_NULL identified NULL."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertOk) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_OK aborted test execution on success."); 
zx_status_t status = ZX_OK; EXPECT_OK(status, "EXPECT_OK failed to identify ZX_OK."); ASSERT_OK(status, "ASSERT_OK failed to identify ZX_OK."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertOkFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); zx_status_t status = ZX_ERR_BAD_STATE; EXPECT_OK(status, "EXPECT_OK failed to identify error."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertOkFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); zx_status_t status = ZX_ERR_BAD_STATE; ASSERT_OK(status, "ASSERT_OK failed to identify error."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertOkWithOverloadedReturnTypeFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); EXPECT_OK(4, "EXPECT_OK failed to identify error."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertOkWithOverloadedReturnTypeFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_OK aborted test execution."); ASSERT_OK(4, "ASSERT_OK failed to identify error."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNotOk) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_NOT_OK aborted test execution on success."); zx_status_t status = ZX_ERR_BAD_STATE; EXPECT_NOT_OK(status, "EXPECT_NOT_OK failed to identify ZX_NOT_OK."); ASSERT_NOT_OK(status, "ASSERT_NOT_OK failed to identify ZX_NOT_OK."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNotOkFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_NOT_OK aborted test execution."); zx_status_t status = ZX_OK; EXPECT_NOT_OK(status, "EXPECT_NOT_OK failed to identify error."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNotOkFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_NOT_OK aborted test execution."); zx_status_t status = ZX_OK; ASSERT_NOT_OK(status, "ASSERT_NOT_OK failed to identify error."); TEST_CHECKPOINT(); } struct mytype { int a; int b; }; TEST(ZxTestCAssertionTest, AssertBytesEq) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_BYTES_EQ aborted test execution on success."); struct mytype a, b; a.a = 0; a.b = 1; b.a = 0; b.b = 1; ASSERT_BYTES_EQ(&a, &a, sizeof(struct mytype), "ASSERT_BYTES_EQ identity failed."); EXPECT_BYTES_EQ(&a, &a, sizeof(struct mytype), "EXPECT_BYTES_EQ identity failed."); ASSERT_BYTES_EQ(&a, &b, sizeof(struct mytype), "ASSERT_BYTES_EQ identity failed."); EXPECT_BYTES_EQ(&a, &b, sizeof(struct mytype), "EXPECT_BYTES_EQ identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesEqFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); struct mytype a, b; a.a = 0; a.b = 1; b.a = 0; b.b = 2; EXPECT_BYTES_EQ(&a, &b, sizeof(struct mytype), "EXPECT_BYTES_EQ identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesEqFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); struct mytype a, b; a.a = 0; a.b = 1; b.a = 0; b.b = 2; ASSERT_BYTES_EQ(&a, &b, sizeof(struct mytype), "ASSERT_BYTES_EQ identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesNe) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_BYTES_NE aborted test execution on success."); struct mytype a, b; a.a = 0; a.b = 1; b.a = 0; b.b = 2; ASSERT_BYTES_NE(&a, &b, sizeof(struct mytype), "ASSERT_BYTES_NE identity failed."); EXPECT_BYTES_NE(&a, &b, sizeof(struct 
mytype), "EXPECT_BYTES_NE identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesNeFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); struct mytype a, b; a.a = 0; a.b = 1; b.a = 0; b.b = 1; EXPECT_BYTES_NE(&a, &b, sizeof(struct mytype), "ASSERT_BYTES_NE identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesNeFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "EXPECT_OK aborted test execution."); struct mytype a, b; a.a = 0; a.b = 1; b.a = 0; b.b = 1; ASSERT_BYTES_NE(&a, &b, sizeof(struct mytype), "ASSERT_BYTES_NE identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesEqArray) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT_BYTES_EQ failed to compare array contents."); int a[] = {1, 2, 3, 4, 5}; int b[] = {1, 2, 3, 4, 5}; ASSERT_BYTES_EQ(a, a, sizeof(int) * 5, "ASSERT_BYTES_EQ identity failed."); ASSERT_BYTES_EQ(a, b, sizeof(int) * 5, "ASSERT_BYTES_EQ identity failed."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertBytesEqArrayFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_BYTES_EQ did not abort test execution."); int a[] = {1, 2, 3, 4, 5}; int b[] = {1, 2, 3, 4, 6}; ASSERT_BYTES_EQ(a, b, sizeof(int) * 5, "ASSERT_BYTES_EQ identified different arrays."); TEST_CHECKPOINT(); } static int called = 0; static int getter_called = 0; static int* Increase(void) { ++called; return &called; } static int* Get(void) { getter_called++; return &called; } TEST(ZxTestCAssertionTest, AssertSingleCall) { called = 0; getter_called = 0; EXPECT_EQ(*Get(), *Increase()); ZX_ASSERT_MSG(called == 1, "ASSERT_* evaluating multiple times."); ZX_ASSERT_MSG(getter_called == 1, "ASSERT_* evaluating multiple times."); } TEST(ZxTestCAssertionTest, AssertBytesSingleCall) { called = 0; getter_called = 0; EXPECT_BYTES_EQ(Get(), Increase(), sizeof(int)); ZX_ASSERT_MSG(called == 1, "ASSERT_BYTES_* evaluating multiple times."); ZX_ASSERT_MSG(getter_called == 1, "ASSERT_* evaluating multiple times."); } static void HelperFnFatal(bool fail) { ASSERT_FALSE(fail, "Expected to fail."); } TEST(ZxTestCAssertionTest, AssertNoFatalFailureWithFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to abort test execution on helper fatal failure."); ASSERT_NO_FATAL_FAILURES(HelperFnFatal(true), "HelperFnFatal had a failure. This is expected."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNoFatalFailureWithoutFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Aborted test execution on helper with no failures."); ASSERT_NO_FATAL_FAILURES(HelperFnFatal(false), "HelperFnFatal had a failure. This is not expected."); TEST_CHECKPOINT(); } static void HelperFn(bool fail) { EXPECT_FALSE(fail, "Expected to fail."); } TEST(ZxTestCAssertionTest, AssertNoFatalFailureWithFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Aborted test execution on helper failure."); ASSERT_NO_FATAL_FAILURES(HelperFn(true), "HelperFn had a failure. 
This is expected."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertTrueCoerceTypeToBoolFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to identify false."); int a = 0; ASSERT_TRUE(a, "0 coerced to false."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertTrueCoerceTypeToBool) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to identify true."); int a = 1; ASSERT_TRUE(a, "1 not coerced to true."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertFalseCoerceTypeToBool) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to identify false."); int a = 0; ASSERT_FALSE(a, "0 not coerced to false."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertFalseCoerceTypeToBoolFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to identify true."); int a = 1; ASSERT_FALSE(a, "1 coerced to true."); TEST_CHECKPOINT(); } static int SomeFn(void) { return 0; } TEST(ZxTestCAssertionTest, FunctionPointerNotNull) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to identify false."); int (*some_fn)(void) = &SomeFn; ASSERT_NOT_NULL(some_fn); EXPECT_NOT_NULL(some_fn); ASSERT_EQ(some_fn, &SomeFn); ASSERT_NE(some_fn, NULL); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, FunctionPointerNull) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to identify nullptr."); int (*some_fn)(void) = NULL; ASSERT_NULL(some_fn); EXPECT_NULL(some_fn); ASSERT_NE(some_fn, &SomeFn); ASSERT_EQ(some_fn, NULL); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, PromoteLiteralIntegersOnComp) { int32_t a = -1; int64_t b = 2; int16_t c = -1; int64_t d = 1; uint32_t e = 1; uint64_t f = 2; uint64_t g = 3; uint16_t h = 1; // Signed to wider ints. ASSERT_EQ(a, b); ASSERT_GE(b, a); ASSERT_LE(a, b); ASSERT_GT(b, c); ASSERT_LT(b, a); ASSERT_GT(b, d); // Signed comparison with literals. ASSERT_EQ(-1, a); ASSERT_EQ(1, d); ASSERT_LT(c, 3); ASSERT_GT(b, 1); ASSERT_GE(b, 2); // Unsigned to wider ints. ASSERT_EQ(e, h); ASSERT_GE(g, f); ASSERT_LE(f, g); ASSERT_GT(g, e); ASSERT_LT(h, f); // Unsigned comparison with literals. 
ASSERT_EQ(1, e); ASSERT_LT(f, 4); ASSERT_LE(f, 2); ASSERT_GT(g, 2); ASSERT_GE(g, 3); } TEST(ZxTestCAssertionTest, PrintfLikeDescs) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to identify true."); int a = 1; EXPECT_FALSE(a, "Message "); EXPECT_FALSE(a, "One %d ", a); EXPECT_FALSE(a, "More than one %d %d.", a, a); EXPECT_FALSE(a, "More than one %d %d %d %d %d.", a, a, a, a, a); EXPECT_FALSE(a, "More than one %d %d %d %d %d %d %d %d %d %d %d %d %d %d.", a, a, a, a, a, a, a, a, a, a, a, a, a, a); TEST_CHECKPOINT(); } static int HasExpects(void) { EXPECT_EQ(1, 2); return 0; } TEST(ZxTestCAssertionTest, NonVoidHelperTestNonFatalFailures) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to propagate assertion error."); ASSERT_NO_FATAL_FAILURES(HasExpects()); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNoFailures) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to detect non fatal failure"); ASSERT_NO_FAILURES(HasExpects()); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AddFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to detect non fatal failure"); ADD_FAILURE("Something went wrong."); ASSERT_NO_FATAL_FAILURES(); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AddFatalFailure) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to detect fatal failure"); ADD_FATAL_FAILURE("Something went wrong."); ASSERT_NO_FATAL_FAILURES(); TEST_CHECKPOINT(); } static void AssertFail(void) { ASSERT_TRUE(false); return; } TEST(ZxTestCAssertionTest, CurrentTestHasFailuresDetectsNonFatalFailures) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to detect failure"); EXPECT_TRUE(false); ASSERT_TRUE(CURRENT_TEST_HAS_FAILURES()); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, CurrentTestHasFailuresDetectsFatalFailures) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to detect failure"); AssertFail(); ASSERT_TRUE(CURRENT_TEST_HAS_FAILURES()); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, CurrentTestHasFatalFailuresIgnoresNonFatalFailures) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to detect failure"); EXPECT_TRUE(false); ASSERT_FALSE(CURRENT_TEST_HAS_FATAL_FAILURES()); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, CurrentTestHasFatalFailuresDetectsFatalFailures) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "Failed to detect failure"); AssertFail(); ASSERT_TRUE(CURRENT_TEST_HAS_FATAL_FAILURES()); TEST_CHECKPOINT(); } #ifdef __Fuchsia__ static void Crash(void) { ZX_ASSERT(false); } static void Success(void) { ZX_ASSERT(true); } TEST(ZxTestCAssertionTest, AssertDeathWithCrashingStatement) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to detect crash"); ASSERT_DEATH(&Crash, "Crash was not raised."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertDeathWithSuccessfulStatement) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to detect crash"); ASSERT_DEATH(&Success, "Crash was not raised."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNoDeathWithSuccessfulStatement) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to detect crash"); ASSERT_NO_DEATH(&Success, "Crash was raised."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionTest, AssertNoDeathWithCrashingStatement) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "Failed to detect crash"); ASSERT_NO_DEATH(&Crash, "Crash was raised."); TEST_CHECKPOINT(); } #endif TEST(ZxTestCAssertionTest, AssertBytesEqVla) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "Failed to check buffer eq."); 
volatile int len = 2; char a[len]; const char* b = (const char*)a; memset(a, 0, len); ASSERT_BYTES_EQ(a, b, len); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertStatusSuccess) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "ASSERT/EXPECT_STATUS aborted test on success."); zx_status_t a = ZX_ERR_BAD_STATE; zx_status_t b = ZX_ERR_BAD_STATE; // Happy cases. EXPECT_STATUS(a, ZX_ERR_BAD_STATE, "EXPECT_STATUS identity failed."); EXPECT_STATUS(ZX_ERR_BAD_STATE, a, "EXPECT_STATUS identity failed."); ASSERT_STATUS(ZX_OK, ZX_OK, "ASSERT_STATUS identity failed."); EXPECT_STATUS(a, a, "EXPECT_STATUS identity failed."); ASSERT_STATUS(b, b, "ASSERT_STATUS identity failed."); ASSERT_STATUS(a, b, "ASSERT_STATUS identity failed."); // No failures TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertStatusFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_STATUS aborted execution."); zx_status_t a = ZX_ERR_INVALID_ARGS; zx_status_t b = ZX_ERR_BAD_STATE; EXPECT_STATUS(ZX_OK, ZX_ERR_INVALID_ARGS, "EXPECT_STATUS inequality detection succeeded."); EXPECT_STATUS(a, b, "EXPECT_STATUS inequality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertStatusFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_STATUS did not abort test execution."); ASSERT_STATUS(ZX_OK, ZX_ERR_BAD_STATE, "ASSERT_STATUS inequality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertNotStatusSuccess) { TEST_EXPECTATION(CHECKPOINT_REACHED, NO_ERRORS, "EXPECT_NOT_STATUS aborted test execution."); zx_status_t a = ZX_ERR_BAD_STATE; zx_status_t b = ZX_ERR_INVALID_ARGS; // Happy cases. EXPECT_NOT_STATUS(ZX_OK, ZX_ERR_BAD_STATE, "EXPECT_NOT_STATUS inequality detection succeeded."); EXPECT_NOT_STATUS(a, b, "EXPECT_NOT_STATUS inequality detection succeeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertNotStatusFailure) { TEST_EXPECTATION(CHECKPOINT_REACHED, HAS_ERRORS, "EXPECT_NOT_STATUS aborted test execution."); zx_status_t a = ZX_OK; EXPECT_NOT_STATUS(ZX_ERR_BAD_STATE, ZX_ERR_BAD_STATE, "EXPECT_NOT_STATUS equality detection suceeded."); EXPECT_NOT_STATUS(a, a, "EXPECT_NOT_STATUS equality detection suceeded."); TEST_CHECKPOINT(); } TEST(ZxTestCAssertionsTest, AssertNotStatusFailureFatal) { TEST_EXPECTATION(CHECKPOINT_NOT_REACHED, HAS_ERRORS, "ASSERT_NOT_STATUS did not abort test execution."); zx_status_t a = ZX_OK; zx_status_t b = ZX_OK; ASSERT_NOT_STATUS(a, b, "ASSERT_NOT_STATUS equality detection succeeded."); TEST_CHECKPOINT(); }
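/* The AssertSingleCall/AssertBytesSingleCall tests above pin down that each
 * assertion argument is evaluated exactly once. A sketch of the usual way to
 * get that property (capture into locals before comparing); hypothetical
 * macro using the GNU C __typeof__ extension, not zxtest's real
 * implementation: */
#define SKETCH_EXPECT_EQ(lhs, rhs)                              \
    do {                                                        \
        __typeof__(lhs) _sketch_l = (lhs); /* evaluated once */ \
        __typeof__(rhs) _sketch_r = (rhs); /* evaluated once */ \
        if (!(_sketch_l == _sketch_r)) {                        \
            /* record a non-fatal failure here */               \
        }                                                       \
    } while (0)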
133497.c
#include <stdlib.h> #include <stdbool.h> #include <errno.h> #include <string.h> #include <applibs/log.h> #include <time.h> #include <applibs/rtc.h> #include <signal.h> #include <applibs/networking.h> #include <curl/curl.h> #include <curl/easy.h> #include <applibs/storage.h> #include "arducam_driver/ArduCAM.h" #include "delay.h" const char* storageURL = "https://<your-storage-account>.blob.core.windows.net"; const char* pathFileName = "/<your-container>/"; char fileName[64]; const char* SASToken = "<your-sas>"; #if (defined(CFG_MODE_JPEG) && defined(CFG_MODE_BITMAP)) || (!defined(CFG_MODE_JPEG) && !defined(CFG_MODE_BITMAP)) #error "define CFG_MODE_JPEG or CFG_MODE_BITMAP" #endif #if defined(CFG_MODE_JPEG) #define FILE_EXTENSION ".jpg" #elif defined(CFG_MODE_BITMAP) #define BMPIMAGEOFFSET 66 const uint8_t bmp_header[BMPIMAGEOFFSET] = { 0x42, 0x4D, // MagicNumber = 'B', 'M' 0x42, 0x58, 0x02, 0x00, // FileSize = 320x240x2 + 66 //0x42, 0x96, 0x00, 0x00, // FileSize = 160x120x2 + 66 0x00, 0x00, 0x00, 0x00, // Reserved 0x42, 0x00, 0x00, 0x00, // Pixel Offset in memory = 66 0x28, 0x00, 0x00, 0x00, // BitmapInfoHeaderSize = 40 0x40, 0x01, 0x00, 0x00, // W = 320 //0xA0, 0x00, 0x00, 0x00, // W = 320 0xF0, 0x00, 0x00, 0x00, // H = 240 //0x78, 0x00, 0x00, 0x00, // H = 240 0x01, 0x00, // Plane 0x10, 0x00, // 16bit RG 0x03, 0x00, 0x00, 0x00, // Compression = BI_BITFIELDS(3) 0x00, 0x58, 0x02, 0x00, // ImageSize = 320x240x2 //0x00, 0x96, 0x00, 0x00, // ImageSize = 160x120x2 0x00, 0x00, 0x00, 0x00, // XPelsPerMeter 0x00, 0x00, 0x00, 0x00, // YPelsPerMeter 0x00, 0x00, 0x00, 0x00, // biClrUsed 0x00, 0x00, 0x00, 0x00, // biClrImportant 0x00, 0xF8, 0x00, 0x00, // Red mask 0xE0, 0x07, 0x00, 0x00, // Green mask 0x1F, 0x00, 0x00, 0x00 // Blue mask }; #define FILE_EXTENSION ".bmp" #endif // Generate a random GUID and return it in outputGUID void generateGUID(char* outputGUID) { srand((unsigned)clock()); #define GUID_SIZE 40 char GUID[GUID_SIZE]; int t = 0; char* szTemp = "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx"; char* szHex = "0123456789abcdef-"; size_t nLen = strlen(szTemp); for (t = 0; t < nLen + 1; t++) { int r = rand() % 16; char c = ' '; switch (szTemp[t]) { case 'x': { c = szHex[r]; } break; case 'y': { c = szHex[r & 0x03 | 0x08]; } break; case '-': { c = '-'; } break; case '4': { c = '4'; } break; } GUID[t] = (t < nLen) ? 
c : 0x00; } // Move the GUID into the output string strncpy(outputGUID, GUID, GUID_SIZE); } struct image_buffer { uint8_t* p_data; uint32_t size; }; static void LogCurlError(const char* message, int curlErrCode) { Log_Debug(message); Log_Debug(" (curl err=%d, '%s')\n", curlErrCode, curl_easy_strerror(curlErrCode)); } static size_t read_callback(char* buffer, size_t size, size_t nitems, void* userdata) { struct image_buffer *p_image_buffer = (struct image_buffer*)userdata; size_t total_available_size = size * nitems; size_t copy_size = 0; if (p_image_buffer->size > total_available_size) { copy_size = total_available_size; p_image_buffer->size -= total_available_size; } else { copy_size = p_image_buffer->size; p_image_buffer->size = 0; } for (size_t i = 0; i < copy_size; i++) { buffer[i] = *p_image_buffer->p_data++; } return copy_size; } static void UploadFileToAzureBlob(uint8_t *p_data, uint32_t size) { static struct image_buffer userdata; userdata.p_data = p_data; userdata.size = size; CURL* curlHandle = NULL; CURLcode res = CURLE_OK; struct curl_slist* list = NULL; char* rootca = NULL; if ((res = curl_global_init(CURL_GLOBAL_ALL)) != CURLE_OK) { LogCurlError("curl_global_init", res); goto exitLabel; } // Generate a new GUID to use as the filename generateGUID(fileName); // Construct the url that includes the base url + file path + file name + file extension + SAS Token char* sasurl = calloc(strlen(storageURL) + strlen(pathFileName) + sizeof(fileName) + strlen(FILE_EXTENSION) + strlen(SASToken) + sizeof('\0'), sizeof(char)); (void)strcat(strcat(strcat(strcat(strcat(sasurl, storageURL), pathFileName), fileName), FILE_EXTENSION), SASToken); // Initialize the curl handle if ((curlHandle = curl_easy_init()) == NULL) { Log_Debug("curl_easy_init() failed\r\n"); goto cleanupLabel; } // Set the URL if ((res = curl_easy_setopt(curlHandle, CURLOPT_URL, sasurl)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_URL", res); goto cleanupLabel; } // Set the default value: strict certificate ON if ((res = curl_easy_setopt(curlHandle, CURLOPT_SSL_VERIFYPEER, 1L)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_SSL_VERIFYPEER", res); goto cleanupLabel; } // Set the blob type header list = curl_slist_append(list, "x-ms-blob-type:BlockBlob"); if ((res = curl_easy_setopt(curlHandle, CURLOPT_HTTPHEADER, list)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_HTTPHEADER", res); goto cleanupLabel; } rootca = Storage_GetAbsolutePathInImagePackage("certs/BaltimoreCyberTrustRoot.pem"); if (rootca == NULL) { Log_Debug("The root ca path could not be resolved: errno=%d (%s)\r\n", errno, strerror(errno)); goto cleanupLabel; } // Set the root ca option if ((res = curl_easy_setopt(curlHandle, CURLOPT_CAINFO, rootca)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_CAINFO", res); goto cleanupLabel; } // Set the upload option (curl_easy_setopt expects a long here) if ((res = curl_easy_setopt(curlHandle, CURLOPT_UPLOAD, 1L)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_UPLOAD", res); goto cleanupLabel; } // Pass the size of the file (as a long, per the libcurl varargs contract) if ((res = curl_easy_setopt(curlHandle, CURLOPT_INFILESIZE, (long)size)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_INFILESIZE", res); goto cleanupLabel; } // Set the read callback if ((res = curl_easy_setopt(curlHandle, CURLOPT_READFUNCTION, read_callback)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_READFUNCTION", res); goto cleanupLabel; } // Pass a pointer to the data to upload if ((res = curl_easy_setopt(curlHandle, CURLOPT_READDATA, &userdata)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_READDATA", res); goto cleanupLabel;
} // Set output level to verbose. if ((res = curl_easy_setopt(curlHandle, CURLOPT_VERBOSE, 1L)) != CURLE_OK) { LogCurlError("curl_easy_setopt CURLOPT_VERBOSE", res); goto cleanupLabel; } // Perform the operation if ((res = curl_easy_perform(curlHandle)) != CURLE_OK) { LogCurlError("curl_easy_perform", res); } cleanupLabel: // Free up memory we allocated free(sasurl); free(rootca); // Clean up sample's cURL resources. curl_easy_cleanup(curlHandle); // Clean up cURL library's resources. curl_global_cleanup(); exitLabel: return; } int main(int argc, char* argv[]) { Log_Debug("Example to capture a JPEG image from ArduCAM mini 2MP Plus and send to Azure Blob\r\n"); // init hardware and probe camera arducam_ll_init(); arducam_reset(); if (arducam_test() == 0) { Log_Debug("ArduCAM mini 2MP Plus is found\r\n"); } else { Log_Debug("ArduCAM mini 2MP Plus is not found\r\n"); return -1; } // config Camera #if defined(CFG_MODE_JPEG) arducam_set_format(JPEG); #elif defined (CFG_MODE_BITMAP) arducam_set_format(BMP); #endif arducam_InitCAM(); #if defined(CFG_MODE_JPEG) // arducam_OV2640_set_JPEG_size(OV2640_1600x1200); arducam_OV2640_set_JPEG_size(OV2640_320x240); #endif delay_ms(1000); arducam_clear_fifo_flag(); arducam_flush_fifo(); // Trigger a capture and wait for data ready in DRAM arducam_start_capture(); while (!arducam_check_fifo_done()); uint32_t img_len = arducam_read_fifo_length(); if (img_len > MAX_FIFO_SIZE) { Log_Debug("ERROR: FIFO overflow\r\n"); return -1; } Log_Debug("len = %u\r\n", img_len); uint8_t* p_imgBuffer = malloc(img_len); arducam_CS_LOW(); arducam_set_fifo_burst(); arducam_read_fifo_burst(p_imgBuffer, img_len); arducam_CS_HIGH(); arducam_clear_fifo_flag(); #if defined(CFG_MODE_JPEG) // OV2640 pad 0x00 bytes at the end of JPG image while (p_imgBuffer[img_len - 1] != 0xD9) { --img_len; } #elif defined(CFG_MODE_BITMAP) // OV2640 pad 8 bytes '0x00' at the end of raw RGB image img_len -= 8; #endif uint8_t *p_file; uint32_t file_size; #if defined(CFG_MODE_JPEG) p_file = p_imgBuffer; file_size = img_len; #elif defined(CFG_MODE_BITMAP) // https://docs.microsoft.com/en-us/previous-versions/dd183376(v=vs.85) file_size = BMPIMAGEOFFSET + img_len; p_file = calloc(file_size, 1); memcpy(&p_file[0], &bmp_header[0], BMPIMAGEOFFSET); uint8_t midbuf = 0; for (uint32_t i = 0; i < img_len; i += 2) { midbuf = p_imgBuffer[i]; p_imgBuffer[i] = p_imgBuffer[i + 1]; p_imgBuffer[i + 1] = midbuf; } memcpy(&p_file[BMPIMAGEOFFSET], p_imgBuffer, img_len); free(p_imgBuffer); #endif bool isNetworkingReady = false; while ((Networking_IsNetworkingReady(&isNetworkingReady) < 0) || !isNetworkingReady) { Log_Debug("\nNot doing upload because network is not up, try again\r\n"); } // Call the routine to send the file to our storage account UploadFileToAzureBlob(p_file, file_size); free(p_file); Log_Debug("App Exit\r\n"); return 0; }
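/* read_callback() above copies the staged image a byte at a time; an
 * equivalent sketch using memcpy (reuses struct image_buffer from this
 * file; <string.h> is already included). Demo only: */
static size_t read_callback_memcpy(char* buffer, size_t size, size_t nitems, void* userdata)
{
    struct image_buffer* p = (struct image_buffer*)userdata;
    size_t avail = size * nitems;
    size_t copy_size = (p->size < avail) ? p->size : avail;

    memcpy(buffer, p->p_data, copy_size);
    p->p_data += copy_size;
    p->size -= copy_size;
    return copy_size; /* returning 0 signals end-of-body to libcurl */
}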
908378.c
/* * intel_idle.c - native hardware idle loop for modern Intel processors * * Copyright (c) 2010, Intel Corporation. * Len Brown <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ /* * intel_idle is a cpuidle driver that loads on specific Intel processors * in lieu of the legacy ACPI processor_idle driver. The intent is to * make Linux more efficient on these processors, as intel_idle knows * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. */ /* * Design Assumptions * * All CPUs have same idle states as boot CPU * * Chipset BM_STS (bus master status) bit is a NOP * for preventing entry into deep C-states */ /* * Known limitations * * The driver currently initializes for_each_online_cpu() upon modprobe. * It is unaware of subsequent processors hot-added to the system. * This means that if you boot with maxcpus=n and later online * processors above n, those processors will use C1 only. * * ACPI has a .suspend hack to turn off deep c-states during suspend * to avoid complications with the lapic timer workaround. * Have not seen issues with suspend, but may need same workaround here. * * There is currently no kernel-based automatic probing/loading mechanism * if the driver is built as a module. */ /* un-comment DEBUG to enable pr_debug() statements */ #define DEBUG #include <linux/kernel.h> #include <linux/cpuidle.h> #include <linux/clockchips.h> #include <linux/hrtimer.h> /* ktime_get_real() */ #include <trace/events/power.h> #include <linux/sched.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/module.h> #include <asm/cpu_device_id.h> #include <asm/mwait.h> #include <asm/msr.h> #define INTEL_IDLE_VERSION "0.4" #define PREFIX "intel_idle: " static struct cpuidle_driver intel_idle_driver = { .name = "intel_idle", .owner = THIS_MODULE, }; /* intel_idle.max_cstate=0 disables driver */ static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; static unsigned int mwait_substates; #define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ struct idle_cpu { struct cpuidle_state *state_table; /* * Hardware C-state auto-demotion may not always be optimal. * Indicate which enable bits to clear here. */ unsigned long auto_demotion_disable_flags; }; static const struct idle_cpu *icpu; static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index); static int intel_idle_cpu_init(int cpu); static struct cpuidle_state *cpuidle_state_table; /* * Set this flag for states where the HW flushes the TLB for us * and so we don't need cross-calls to keep it consistent. * If this flag is set, SW flushes the TLB, so even if the * HW doesn't do the flushing, this flag is safe to use.
*/ #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 /* * States are indexed by the cstate number, * which is also the index into the MWAIT hint array. * Thus C0 is a dummy. */ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C0 */ }, { /* MWAIT C1 */ .name = "C1-NHM", .desc = "MWAIT 0x00", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 3, .target_residency = 6, .enter = &intel_idle }, { /* MWAIT C2 */ .name = "C3-NHM", .desc = "MWAIT 0x10", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, .target_residency = 80, .enter = &intel_idle }, { /* MWAIT C3 */ .name = "C6-NHM", .desc = "MWAIT 0x20", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .target_residency = 800, .enter = &intel_idle }, }; static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C0 */ }, { /* MWAIT C1 */ .name = "C1-SNB", .desc = "MWAIT 0x00", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 1, .enter = &intel_idle }, { /* MWAIT C2 */ .name = "C3-SNB", .desc = "MWAIT 0x10", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 211, .enter = &intel_idle }, { /* MWAIT C3 */ .name = "C6-SNB", .desc = "MWAIT 0x20", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 104, .target_residency = 345, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "C7-SNB", .desc = "MWAIT 0x30", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 109, .target_residency = 345, .enter = &intel_idle }, }; static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C0 */ }, { /* MWAIT C1 */ .name = "C1-IVB", .desc = "MWAIT 0x00", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 1, .enter = &intel_idle }, { /* MWAIT C2 */ .name = "C3-IVB", .desc = "MWAIT 0x10", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 59, .target_residency = 156, .enter = &intel_idle }, { /* MWAIT C3 */ .name = "C6-IVB", .desc = "MWAIT 0x20", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 80, .target_residency = 300, .enter = &intel_idle }, { /* MWAIT C4 */ .name = "C7-IVB", .desc = "MWAIT 0x30", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 87, .target_residency = 300, .enter = &intel_idle }, }; static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C0 */ }, { /* MWAIT C1 */ .name = "C1-ATM", .desc = "MWAIT 0x00", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 1, .target_residency = 4, .enter = &intel_idle }, { /* MWAIT C2 */ .name = "C2-ATM", .desc = "MWAIT 0x10", .flags = CPUIDLE_FLAG_TIME_VALID, .exit_latency = 20, .target_residency = 80, .enter = &intel_idle }, { /* MWAIT C3 */ }, { /* MWAIT C4 */ .name = "C4-ATM", .desc = "MWAIT 0x30", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .target_residency = 400, .enter = &intel_idle }, { /* MWAIT C5 */ }, { /* MWAIT C6 */ .name = "C6-ATM", .desc = "MWAIT 0x52", .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 140, .target_residency = 560, .enter = &intel_idle }, }; static long get_driver_data(int cstate) { int driver_data; switch (cstate) { case 1: /* MWAIT C1 */ driver_data = 0x00; break; case 2: /* MWAIT C2 */ driver_data = 0x10; break; case 3: /* MWAIT C3 */ driver_data = 0x20; break; case 4: /* MWAIT C4 */ driver_data = 0x30; break; case 5: /* MWAIT C5 */ driver_data = 0x40; 
break; case 6: /* MWAIT C6 */ driver_data = 0x52; break; default: driver_data = 0x00; } return driver_data; } /** * intel_idle * @dev: cpuidle_device * @drv: cpuidle driver * @index: index of cpuidle state * * Must be called under local_irq_disable(). */ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { unsigned long ecx = 1; /* break on interrupt flag */ struct cpuidle_state *state = &drv->states[index]; struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); unsigned int cstate; ktime_t kt_before, kt_after; s64 usec_delta; int cpu = smp_processor_id(); cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; /* * leave_mm() to avoid costly and often unnecessary wakeups * for flushing the user TLB's associated with the active mm. */ if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) leave_mm(cpu); if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); kt_before = ktime_get_real(); stop_critical_timings(); if (!need_resched()) { __monitor((void *)&current_thread_info()->flags, 0, 0); smp_mb(); if (!need_resched()) __mwait(eax, ecx); } start_critical_timings(); kt_after = ktime_get_real(); usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); local_irq_enable(); if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); /* Update cpuidle counters */ dev->last_residency = (int)usec_delta; return index; } static void __setup_broadcast_timer(void *arg) { unsigned long reason = (unsigned long)arg; int cpu = smp_processor_id(); reason = reason ? CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; clockevents_notify(reason, &cpu); } static int cpu_hotplug_notify(struct notifier_block *n, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; struct cpuidle_device *dev; switch (action & 0xf) { case CPU_ONLINE: if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) smp_call_function_single(hotcpu, __setup_broadcast_timer, (void *)true, 1); /* * Some systems can hotplug a cpu at runtime after * the kernel has booted, we have to initialize the * driver in this case */ dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); if (!dev->registered) intel_idle_cpu_init(hotcpu); break; } return NOTIFY_OK; } static struct notifier_block cpu_hotplug_notifier = { .notifier_call = cpu_hotplug_notify, }; static void auto_demotion_disable(void *dummy) { unsigned long long msr_bits; rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); msr_bits &= ~(icpu->auto_demotion_disable_flags); wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); } static const struct idle_cpu idle_cpu_nehalem = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, }; static const struct idle_cpu idle_cpu_atom = { .state_table = atom_cstates, }; static const struct idle_cpu idle_cpu_lincroft = { .state_table = atom_cstates, .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, }; static const struct idle_cpu idle_cpu_snb = { .state_table = snb_cstates, }; static const struct idle_cpu idle_cpu_ivb = { .state_table = ivb_cstates, }; #define ICPU(model, cpu) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x1a, idle_cpu_nehalem), ICPU(0x1e, idle_cpu_nehalem), ICPU(0x1f, idle_cpu_nehalem), ICPU(0x25, idle_cpu_nehalem), ICPU(0x2c, idle_cpu_nehalem), 
ICPU(0x2e, idle_cpu_nehalem), ICPU(0x1c, idle_cpu_atom), ICPU(0x26, idle_cpu_lincroft), ICPU(0x2f, idle_cpu_nehalem), ICPU(0x2a, idle_cpu_snb), ICPU(0x2d, idle_cpu_snb), ICPU(0x3a, idle_cpu_ivb), ICPU(0x3e, idle_cpu_ivb), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); /* * intel_idle_probe() */ static int intel_idle_probe(void) { unsigned int eax, ebx, ecx; const struct x86_cpu_id *id; if (max_cstate == 0) { pr_debug(PREFIX "disabled\n"); return -EPERM; } id = x86_match_cpu(intel_idle_ids); if (!id) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86 == 6) pr_debug(PREFIX "does not run on family %d model %d\n", boot_cpu_data.x86, boot_cpu_data.x86_model); return -ENODEV; } if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) return -ENODEV; cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || !mwait_substates) return -ENODEV; pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); icpu = (const struct idle_cpu *)id->driver_data; cpuidle_state_table = icpu->state_table; if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; else on_each_cpu(__setup_broadcast_timer, (void *)true, 1); pr_debug(PREFIX "v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", lapic_timer_reliable_states); return 0; } /* * intel_idle_cpuidle_devices_uninit() * unregister, free cpuidle_devices */ static void intel_idle_cpuidle_devices_uninit(void) { int i; struct cpuidle_device *dev; for_each_online_cpu(i) { dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); cpuidle_unregister_device(dev); } free_percpu(intel_idle_cpuidle_devices); return; } /* * intel_idle_cpuidle_driver_init() * allocate, initialize cpuidle_states */ static int intel_idle_cpuidle_driver_init(void) { int cstate; struct cpuidle_driver *drv = &intel_idle_driver; drv->state_count = 1; for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { int num_substates; if (cstate > max_cstate) { printk(PREFIX "max_cstate %d reached\n", max_cstate); break; } /* does the state exist in CPUID.MWAIT? */ num_substates = (mwait_substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; if (num_substates == 0) continue; /* is the state not enabled? */ if (cpuidle_state_table[cstate].enter == NULL) { /* does the driver not know about the state? */ if (*cpuidle_state_table[cstate].name == '\0') pr_debug(PREFIX "unaware of model 0x%x" " MWAIT %d please" " contact [email protected]", boot_cpu_data.x86_model, cstate); continue; } if ((cstate > 2) && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) mark_tsc_unstable("TSC halts in idle" " states deeper than C2"); drv->states[drv->state_count] = /* structure copy */ cpuidle_state_table[cstate]; drv->state_count += 1; } if (icpu->auto_demotion_disable_flags) on_each_cpu(auto_demotion_disable, NULL, 1); return 0; } /* * intel_idle_cpu_init() * allocate, initialize, register cpuidle_devices * @cpu: cpu/core to initialize */ static int intel_idle_cpu_init(int cpu) { int cstate; struct cpuidle_device *dev; dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); dev->state_count = 1; for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { int num_substates; if (cstate > max_cstate) { printk(PREFIX "max_cstate %d reached\n", max_cstate); break; } /* does the state exist in CPUID.MWAIT? 
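 * (CPUID leaf 5 reports the number of sub-states for each C-state in
 * 4-bit fields of EDX, hence the "cstate * 4" shift below; a count of
 * zero means the C-state is not implemented)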
*/ num_substates = (mwait_substates >> ((cstate) * 4)) & MWAIT_SUBSTATE_MASK; if (num_substates == 0) continue; /* is the state not enabled? */ if (cpuidle_state_table[cstate].enter == NULL) continue; dev->states_usage[dev->state_count].driver_data = (void *)get_driver_data(cstate); dev->state_count += 1; } dev->cpu = cpu; if (cpuidle_register_device(dev)) { pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); intel_idle_cpuidle_devices_uninit(); return -EIO; } if (icpu->auto_demotion_disable_flags) smp_call_function_single(cpu, auto_demotion_disable, NULL, 1); return 0; } static int __init intel_idle_init(void) { int retval, i; /* Do not load intel_idle at all for now if idle= is passed */ if (boot_option_idle_override != IDLE_NO_OVERRIDE) return -ENODEV; retval = intel_idle_probe(); if (retval) return retval; intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { struct cpuidle_driver *drv = cpuidle_get_driver(); printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", drv ? drv->name : "none"); return retval; } intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); if (intel_idle_cpuidle_devices == NULL) return -ENOMEM; for_each_online_cpu(i) { retval = intel_idle_cpu_init(i); if (retval) { cpuidle_unregister_driver(&intel_idle_driver); return retval; } } register_cpu_notifier(&cpu_hotplug_notifier); return 0; } static void __exit intel_idle_exit(void) { intel_idle_cpuidle_devices_uninit(); cpuidle_unregister_driver(&intel_idle_driver); if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) on_each_cpu(__setup_broadcast_timer, (void *)false, 1); unregister_cpu_notifier(&cpu_hotplug_notifier); return; } module_init(intel_idle_init); module_exit(intel_idle_exit); module_param(max_cstate, int, 0444); MODULE_AUTHOR("Len Brown <[email protected]>"); MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); MODULE_LICENSE("GPL");
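/*
 * Illustrative sketch (not part of the driver, compiled out): how the
 * MWAIT hint kept in driver_data encodes the target C-state.
 * get_driver_data() above returns e.g. 0x52 for C6, and intel_idle()
 * recovers the C-state as ((eax >> MWAIT_SUBSTATE_SIZE) &
 * MWAIT_CSTATE_MASK) + 1. The helper name below is hypothetical; the
 * MWAIT_* constants are the ones from <asm/mwait.h>.
 */
#if 0
static unsigned long example_mwait_hint(unsigned int cstate,
					unsigned int substate)
{
	/* C6, sub-state 2: (6 - 1) << 4 | 2 == 0x52, as in get_driver_data() */
	return (((cstate - 1) & MWAIT_CSTATE_MASK) << MWAIT_SUBSTATE_SIZE) |
	       (substate & MWAIT_SUBSTATE_MASK);
}
#endif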
24505.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2011 STRATO. All rights reserved. */ #include <linux/mm.h> #include <linux/rbtree.h> #include <trace/events/btrfs.h> #include "ctree.h" #include "disk-io.h" #include "backref.h" #include "ulist.h" #include "transaction.h" #include "delayed-ref.h" #include "locking.h" #include "misc.h" #include "tree-mod-log.h" /* Just an arbitrary number so we can be sure this happened */ #define BACKREF_FOUND_SHARED 6 struct extent_inode_elem { u64 inum; u64 offset; struct extent_inode_elem *next; }; static int check_extent_in_eb(const struct btrfs_key *key, const struct extent_buffer *eb, const struct btrfs_file_extent_item *fi, u64 extent_item_pos, struct extent_inode_elem **eie, bool ignore_offset) { u64 offset = 0; struct extent_inode_elem *e; if (!ignore_offset && !btrfs_file_extent_compression(eb, fi) && !btrfs_file_extent_encryption(eb, fi) && !btrfs_file_extent_other_encoding(eb, fi)) { u64 data_offset; u64 data_len; data_offset = btrfs_file_extent_offset(eb, fi); data_len = btrfs_file_extent_num_bytes(eb, fi); if (extent_item_pos < data_offset || extent_item_pos >= data_offset + data_len) return 1; offset = extent_item_pos - data_offset; } e = kmalloc(sizeof(*e), GFP_NOFS); if (!e) return -ENOMEM; e->next = *eie; e->inum = key->objectid; e->offset = key->offset + offset; *eie = e; return 0; } static void free_inode_elem_list(struct extent_inode_elem *eie) { struct extent_inode_elem *eie_next; for (; eie; eie = eie_next) { eie_next = eie->next; kfree(eie); } } static int find_extent_in_eb(const struct extent_buffer *eb, u64 wanted_disk_byte, u64 extent_item_pos, struct extent_inode_elem **eie, bool ignore_offset) { u64 disk_byte; struct btrfs_key key; struct btrfs_file_extent_item *fi; int slot; int nritems; int extent_type; int ret; /* * from the shared data ref, we only have the leaf but we need * the key. thus, we must look into all items and see that we * find one (some) with a reference to our extent item. */ nritems = btrfs_header_nritems(eb); for (slot = 0; slot < nritems; ++slot) { btrfs_item_key_to_cpu(eb, &key, slot); if (key.type != BTRFS_EXTENT_DATA_KEY) continue; fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); extent_type = btrfs_file_extent_type(eb, fi); if (extent_type == BTRFS_FILE_EXTENT_INLINE) continue; /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */ disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); if (disk_byte != wanted_disk_byte) continue; ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset); if (ret < 0) return ret; } return 0; } struct preftree { struct rb_root_cached root; unsigned int count; }; #define PREFTREE_INIT { .root = RB_ROOT_CACHED, .count = 0 } struct preftrees { struct preftree direct; /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */ struct preftree indirect; /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */ struct preftree indirect_missing_keys; }; /* * Checks for a shared extent during backref search. * * The share_count tracks prelim_refs (direct and indirect) having a * ref->count >0: * - incremented when a ref->count transitions to >0 * - decremented when a ref->count transitions to <1 */ struct share_check { u64 root_objectid; u64 inum; int share_count; }; static inline int extent_is_shared(struct share_check *sc) { return (sc && sc->share_count > 1) ? 
BACKREF_FOUND_SHARED : 0; } static struct kmem_cache *btrfs_prelim_ref_cache; int __init btrfs_prelim_ref_init(void) { btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref", sizeof(struct prelim_ref), 0, SLAB_MEM_SPREAD, NULL); if (!btrfs_prelim_ref_cache) return -ENOMEM; return 0; } void __cold btrfs_prelim_ref_exit(void) { kmem_cache_destroy(btrfs_prelim_ref_cache); } static void free_pref(struct prelim_ref *ref) { kmem_cache_free(btrfs_prelim_ref_cache, ref); } /* * Return 0 when both refs are for the same block (and can be merged). * A -1 return indicates ref1 is a 'lower' block than ref2, while 1 * indicates a 'higher' block. */ static int prelim_ref_compare(struct prelim_ref *ref1, struct prelim_ref *ref2) { if (ref1->level < ref2->level) return -1; if (ref1->level > ref2->level) return 1; if (ref1->root_id < ref2->root_id) return -1; if (ref1->root_id > ref2->root_id) return 1; if (ref1->key_for_search.type < ref2->key_for_search.type) return -1; if (ref1->key_for_search.type > ref2->key_for_search.type) return 1; if (ref1->key_for_search.objectid < ref2->key_for_search.objectid) return -1; if (ref1->key_for_search.objectid > ref2->key_for_search.objectid) return 1; if (ref1->key_for_search.offset < ref2->key_for_search.offset) return -1; if (ref1->key_for_search.offset > ref2->key_for_search.offset) return 1; if (ref1->parent < ref2->parent) return -1; if (ref1->parent > ref2->parent) return 1; return 0; } static void update_share_count(struct share_check *sc, int oldcount, int newcount) { if ((!sc) || (oldcount == 0 && newcount < 1)) return; if (oldcount > 0 && newcount < 1) sc->share_count--; else if (oldcount < 1 && newcount > 0) sc->share_count++; } /* * Add @newref to the @root rbtree, merging identical refs. * * Callers should assume that newref has been freed after calling. */ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info, struct preftree *preftree, struct prelim_ref *newref, struct share_check *sc) { struct rb_root_cached *root; struct rb_node **p; struct rb_node *parent = NULL; struct prelim_ref *ref; int result; bool leftmost = true; root = &preftree->root; p = &root->rb_root.rb_node; while (*p) { parent = *p; ref = rb_entry(parent, struct prelim_ref, rbnode); result = prelim_ref_compare(ref, newref); if (result < 0) { p = &(*p)->rb_left; } else if (result > 0) { p = &(*p)->rb_right; leftmost = false; } else { /* Identical refs, merge them and free @newref */ struct extent_inode_elem *eie = ref->inode_list; while (eie && eie->next) eie = eie->next; if (!eie) ref->inode_list = newref->inode_list; else eie->next = newref->inode_list; trace_btrfs_prelim_ref_merge(fs_info, ref, newref, preftree->count); /* * A delayed ref can have newref->count < 0. * The ref->count is updated to follow any * BTRFS_[ADD|DROP]_DELAYED_REF actions. */ update_share_count(sc, ref->count, ref->count + newref->count); ref->count += newref->count; free_pref(newref); return; } } update_share_count(sc, 0, newref->count); preftree->count++; trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count); rb_link_node(&newref->rbnode, parent, p); rb_insert_color_cached(&newref->rbnode, root, leftmost); } /* * Release the entire tree. We don't care about internal consistency so * just free everything and then reset the tree root. 
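 * (rbtree_postorder_for_each_entry_safe() visits children before their
 * parent, so every node can be freed without rebalancing the tree first)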
*/ static void prelim_release(struct preftree *preftree) { struct prelim_ref *ref, *next_ref; rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root.rb_root, rbnode) free_pref(ref); preftree->root = RB_ROOT_CACHED; preftree->count = 0; } /* * the rules for all callers of this function are: * - obtaining the parent is the goal * - if you add a key, you must know that it is a correct key * - if you cannot add the parent or a correct key, then we will look into the * block later to set a correct key * * delayed refs * ============ * backref type | shared | indirect | shared | indirect * information | tree | tree | data | data * --------------------+--------+----------+--------+---------- * parent logical | y | - | - | - * key to resolve | - | y | y | y * tree block logical | - | - | - | - * root for resolving | y | y | y | y * * - column 1: we've the parent -> done * - column 2, 3, 4: we use the key to find the parent * * on disk refs (inline or keyed) * ============================== * backref type | shared | indirect | shared | indirect * information | tree | tree | data | data * --------------------+--------+----------+--------+---------- * parent logical | y | - | y | - * key to resolve | - | - | - | y * tree block logical | y | y | y | y * root for resolving | - | y | y | y * * - column 1, 3: we've the parent -> done * - column 2: we take the first key from the block to find the parent * (see add_missing_keys) * - column 4: we use the key to find the parent * * additional information that's available but not required to find the parent * block might help in merging entries to gain some speed. */ static int add_prelim_ref(const struct btrfs_fs_info *fs_info, struct preftree *preftree, u64 root_id, const struct btrfs_key *key, int level, u64 parent, u64 wanted_disk_byte, int count, struct share_check *sc, gfp_t gfp_mask) { struct prelim_ref *ref; if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID) return 0; ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask); if (!ref) return -ENOMEM; ref->root_id = root_id; if (key) ref->key_for_search = *key; else memset(&ref->key_for_search, 0, sizeof(ref->key_for_search)); ref->inode_list = NULL; ref->level = level; ref->count = count; ref->parent = parent; ref->wanted_disk_byte = wanted_disk_byte; prelim_ref_insert(fs_info, preftree, ref, sc); return extent_is_shared(sc); } /* direct refs use root == 0, key == NULL */ static int add_direct_ref(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, int level, u64 parent, u64 wanted_disk_byte, int count, struct share_check *sc, gfp_t gfp_mask) { return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level, parent, wanted_disk_byte, count, sc, gfp_mask); } /* indirect refs use parent == 0 */ static int add_indirect_ref(const struct btrfs_fs_info *fs_info, struct preftrees *preftrees, u64 root_id, const struct btrfs_key *key, int level, u64 wanted_disk_byte, int count, struct share_check *sc, gfp_t gfp_mask) { struct preftree *tree = &preftrees->indirect; if (!key) tree = &preftrees->indirect_missing_keys; return add_prelim_ref(fs_info, tree, root_id, key, level, 0, wanted_disk_byte, count, sc, gfp_mask); } static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr) { struct rb_node **p = &preftrees->direct.root.rb_root.rb_node; struct rb_node *parent = NULL; struct prelim_ref *ref = NULL; struct prelim_ref target = {}; int result; target.parent = bytenr; while (*p) { parent = *p; ref = rb_entry(parent, struct prelim_ref, rbnode); result = 
prelim_ref_compare(ref, &target); if (result < 0) p = &(*p)->rb_left; else if (result > 0) p = &(*p)->rb_right; else return 1; } return 0; } static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, struct ulist *parents, struct preftrees *preftrees, struct prelim_ref *ref, int level, u64 time_seq, const u64 *extent_item_pos, bool ignore_offset) { int ret = 0; int slot; struct extent_buffer *eb; struct btrfs_key key; struct btrfs_key *key_for_search = &ref->key_for_search; struct btrfs_file_extent_item *fi; struct extent_inode_elem *eie = NULL, *old = NULL; u64 disk_byte; u64 wanted_disk_byte = ref->wanted_disk_byte; u64 count = 0; u64 data_offset; if (level != 0) { eb = path->nodes[level]; ret = ulist_add(parents, eb->start, 0, GFP_NOFS); if (ret < 0) return ret; return 0; } /* * 1. We normally enter this function with the path already pointing to * the first item to check. But sometimes, we may enter it with * slot == nritems. * 2. We are searching for normal backref but bytenr of this leaf * matches shared data backref * 3. The leaf owner is not equal to the root we are searching * * For these cases, go to the next leaf before we continue. */ eb = path->nodes[0]; if (path->slots[0] >= btrfs_header_nritems(eb) || is_shared_data_backref(preftrees, eb->start) || ref->root_id != btrfs_header_owner(eb)) { if (time_seq == BTRFS_SEQ_LAST) ret = btrfs_next_leaf(root, path); else ret = btrfs_next_old_leaf(root, path, time_seq); } while (!ret && count < ref->count) { eb = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(eb, &key, slot); if (key.objectid != key_for_search->objectid || key.type != BTRFS_EXTENT_DATA_KEY) break; /* * We are searching for normal backref but bytenr of this leaf * matches shared data backref, OR * the leaf owner is not equal to the root we are searching for */ if (slot == 0 && (is_shared_data_backref(preftrees, eb->start) || ref->root_id != btrfs_header_owner(eb))) { if (time_seq == BTRFS_SEQ_LAST) ret = btrfs_next_leaf(root, path); else ret = btrfs_next_old_leaf(root, path, time_seq); continue; } fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); data_offset = btrfs_file_extent_offset(eb, fi); if (disk_byte == wanted_disk_byte) { eie = NULL; old = NULL; if (ref->key_for_search.offset == key.offset - data_offset) count++; else goto next; if (extent_item_pos) { ret = check_extent_in_eb(&key, eb, fi, *extent_item_pos, &eie, ignore_offset); if (ret < 0) break; } if (ret > 0) goto next; ret = ulist_add_merge_ptr(parents, eb->start, eie, (void **)&old, GFP_NOFS); if (ret < 0) break; if (!ret && extent_item_pos) { while (old->next) old = old->next; old->next = eie; } eie = NULL; } next: if (time_seq == BTRFS_SEQ_LAST) ret = btrfs_next_item(root, path); else ret = btrfs_next_old_item(root, path, time_seq); } if (ret > 0) ret = 0; else if (ret < 0) free_inode_elem_list(eie); return ret; } /* * resolve an indirect backref in the form (root_id, key, level) * to a logical address */ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, struct preftrees *preftrees, struct prelim_ref *ref, struct ulist *parents, const u64 *extent_item_pos, bool ignore_offset) { struct btrfs_root *root; struct extent_buffer *eb; int ret = 0; int root_level; int level = ref->level; struct btrfs_key search_key = ref->key_for_search; /* * If we're search_commit_root we could possibly be holding locks on * other tree nodes. 
This happens when qgroups does backref walks when * adding new delayed refs. To deal with this we need to look in cache * for the root, and if we don't find it then we need to search the * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage * here. */ if (path->search_commit_root) root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id); else root = btrfs_get_fs_root(fs_info, ref->root_id, false); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_free; } if (!path->search_commit_root && test_bit(BTRFS_ROOT_DELETING, &root->state)) { ret = -ENOENT; goto out; } if (btrfs_is_testing(fs_info)) { ret = -ENOENT; goto out; } if (path->search_commit_root) root_level = btrfs_header_level(root->commit_root); else if (time_seq == BTRFS_SEQ_LAST) root_level = btrfs_header_level(root->node); else root_level = btrfs_old_root_level(root, time_seq); if (root_level + 1 == level) goto out; /* * We can often find data backrefs with an offset that is too large * (>= LLONG_MAX, maximum allowed file offset) due to underflows when * subtracting a file's offset with the data offset of its * corresponding extent data item. This can happen for example in the * clone ioctl. * * So if we detect such case we set the search key's offset to zero to * make sure we will find the matching file extent item at * add_all_parents(), otherwise we will miss it because the offset * taken form the backref is much larger then the offset of the file * extent item. This can make us scan a very large number of file * extent items, but at least it will not make us miss any. * * This is an ugly workaround for a behaviour that should have never * existed, but it does and a fix for the clone ioctl would touch a lot * of places, cause backwards incompatibility and would not fix the * problem for extents cloned with older kernels. */ if (search_key.type == BTRFS_EXTENT_DATA_KEY && search_key.offset >= LLONG_MAX) search_key.offset = 0; path->lowest_level = level; if (time_seq == BTRFS_SEQ_LAST) ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); else ret = btrfs_search_old_slot(root, &search_key, path, time_seq); btrfs_debug(fs_info, "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)", ref->root_id, level, ref->count, ret, ref->key_for_search.objectid, ref->key_for_search.type, ref->key_for_search.offset); if (ret < 0) goto out; eb = path->nodes[level]; while (!eb) { if (WARN_ON(!level)) { ret = 1; goto out; } level--; eb = path->nodes[level]; } ret = add_all_parents(root, path, parents, preftrees, ref, level, time_seq, extent_item_pos, ignore_offset); out: btrfs_put_root(root); out_free: path->lowest_level = 0; btrfs_release_path(path); return ret; } static struct extent_inode_elem * unode_aux_to_inode_list(struct ulist_node *node) { if (!node) return NULL; return (struct extent_inode_elem *)(uintptr_t)node->aux; } /* * We maintain three separate rbtrees: one for direct refs, one for * indirect refs which have a key, and one for indirect refs which do not * have a key. Each tree does merge on insertion. * * Once all of the references are located, we iterate over the tree of * indirect refs with missing keys. An appropriate key is located and * the ref is moved onto the tree for indirect refs. After all missing * keys are thus located, we iterate over the indirect ref tree, resolve * each reference, and then insert the resolved reference onto the * direct tree (merging there too). 
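 *
 * For example (illustrative values): an indirect ref for root 5 with key
 * (257 EXTENT_DATA 0) at level 0 is resolved by searching root 5 for
 * that key; the bytenr of the leaf found becomes ref->parent, and the
 * ref is re-inserted into the direct tree, merging with any existing
 * direct ref to the same parent.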
* * New backrefs (i.e., for parent nodes) are added to the appropriate * rbtree as they are encountered. The new backrefs are subsequently * resolved as above. */ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 time_seq, struct preftrees *preftrees, const u64 *extent_item_pos, struct share_check *sc, bool ignore_offset) { int err; int ret = 0; struct ulist *parents; struct ulist_node *node; struct ulist_iterator uiter; struct rb_node *rnode; parents = ulist_alloc(GFP_NOFS); if (!parents) return -ENOMEM; /* * We could trade memory usage for performance here by iterating * the tree, allocating new refs for each insertion, and then * freeing the entire indirect tree when we're done. In some test * cases, the tree can grow quite large (~200k objects). */ while ((rnode = rb_first_cached(&preftrees->indirect.root))) { struct prelim_ref *ref; ref = rb_entry(rnode, struct prelim_ref, rbnode); if (WARN(ref->parent, "BUG: direct ref found in indirect tree")) { ret = -EINVAL; goto out; } rb_erase_cached(&ref->rbnode, &preftrees->indirect.root); preftrees->indirect.count--; if (ref->count == 0) { free_pref(ref); continue; } if (sc && sc->root_objectid && ref->root_id != sc->root_objectid) { free_pref(ref); ret = BACKREF_FOUND_SHARED; goto out; } err = resolve_indirect_ref(fs_info, path, time_seq, preftrees, ref, parents, extent_item_pos, ignore_offset); /* * we can only tolerate ENOENT,otherwise,we should catch error * and return directly. */ if (err == -ENOENT) { prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); continue; } else if (err) { free_pref(ref); ret = err; goto out; } /* we put the first parent into the ref at hand */ ULIST_ITER_INIT(&uiter); node = ulist_next(parents, &uiter); ref->parent = node ? node->val : 0; ref->inode_list = unode_aux_to_inode_list(node); /* Add a prelim_ref(s) for any other parent(s). */ while ((node = ulist_next(parents, &uiter))) { struct prelim_ref *new_ref; new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache, GFP_NOFS); if (!new_ref) { free_pref(ref); ret = -ENOMEM; goto out; } memcpy(new_ref, ref, sizeof(*ref)); new_ref->parent = node->val; new_ref->inode_list = unode_aux_to_inode_list(node); prelim_ref_insert(fs_info, &preftrees->direct, new_ref, NULL); } /* * Now it's a direct ref, put it in the direct tree. We must * do this last because the ref could be merged/freed here. */ prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL); ulist_reinit(parents); cond_resched(); } out: ulist_free(parents); return ret; } /* * read tree blocks and add keys where required. 
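 * (refs parked in indirect_missing_keys carry only a disk bytenr; the
 * block is read and its first key taken, after which the ref can be
 * resolved like any other indirect ref)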
*/ static int add_missing_keys(struct btrfs_fs_info *fs_info, struct preftrees *preftrees, bool lock) { struct prelim_ref *ref; struct extent_buffer *eb; struct preftree *tree = &preftrees->indirect_missing_keys; struct rb_node *node; while ((node = rb_first_cached(&tree->root))) { ref = rb_entry(node, struct prelim_ref, rbnode); rb_erase_cached(node, &tree->root); BUG_ON(ref->parent); /* should not be a direct ref */ BUG_ON(ref->key_for_search.type); BUG_ON(!ref->wanted_disk_byte); eb = read_tree_block(fs_info, ref->wanted_disk_byte, ref->root_id, 0, ref->level - 1, NULL); if (IS_ERR(eb)) { free_pref(ref); return PTR_ERR(eb); } if (!extent_buffer_uptodate(eb)) { free_pref(ref); free_extent_buffer(eb); return -EIO; } if (lock) btrfs_tree_read_lock(eb); if (btrfs_header_level(eb) == 0) btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0); else btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0); if (lock) btrfs_tree_read_unlock(eb); free_extent_buffer(eb); prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL); cond_resched(); } return 0; } /* * add all currently queued delayed refs from this head whose seq nr is * smaller or equal that seq to the list */ static int add_delayed_refs(const struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_head *head, u64 seq, struct preftrees *preftrees, struct share_check *sc) { struct btrfs_delayed_ref_node *node; struct btrfs_delayed_extent_op *extent_op = head->extent_op; struct btrfs_key key; struct btrfs_key tmp_op_key; struct rb_node *n; int count; int ret = 0; if (extent_op && extent_op->update_key) btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key); spin_lock(&head->lock); for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) { node = rb_entry(n, struct btrfs_delayed_ref_node, ref_node); if (node->seq > seq) continue; switch (node->action) { case BTRFS_ADD_DELAYED_EXTENT: case BTRFS_UPDATE_DELAYED_HEAD: WARN_ON(1); continue; case BTRFS_ADD_DELAYED_REF: count = node->ref_mod; break; case BTRFS_DROP_DELAYED_REF: count = node->ref_mod * -1; break; default: BUG(); } switch (node->type) { case BTRFS_TREE_BLOCK_REF_KEY: { /* NORMAL INDIRECT METADATA backref */ struct btrfs_delayed_tree_ref *ref; ref = btrfs_delayed_node_to_tree_ref(node); ret = add_indirect_ref(fs_info, preftrees, ref->root, &tmp_op_key, ref->level + 1, node->bytenr, count, sc, GFP_ATOMIC); break; } case BTRFS_SHARED_BLOCK_REF_KEY: { /* SHARED DIRECT METADATA backref */ struct btrfs_delayed_tree_ref *ref; ref = btrfs_delayed_node_to_tree_ref(node); ret = add_direct_ref(fs_info, preftrees, ref->level + 1, ref->parent, node->bytenr, count, sc, GFP_ATOMIC); break; } case BTRFS_EXTENT_DATA_REF_KEY: { /* NORMAL INDIRECT DATA backref */ struct btrfs_delayed_data_ref *ref; ref = btrfs_delayed_node_to_data_ref(node); key.objectid = ref->objectid; key.type = BTRFS_EXTENT_DATA_KEY; key.offset = ref->offset; /* * Found a inum that doesn't match our known inum, we * know it's shared. */ if (sc && sc->inum && ref->objectid != sc->inum) { ret = BACKREF_FOUND_SHARED; goto out; } ret = add_indirect_ref(fs_info, preftrees, ref->root, &key, 0, node->bytenr, count, sc, GFP_ATOMIC); break; } case BTRFS_SHARED_DATA_REF_KEY: { /* SHARED DIRECT FULL backref */ struct btrfs_delayed_data_ref *ref; ref = btrfs_delayed_node_to_data_ref(node); ret = add_direct_ref(fs_info, preftrees, 0, ref->parent, node->bytenr, count, sc, GFP_ATOMIC); break; } default: WARN_ON(1); } /* * We must ignore BACKREF_FOUND_SHARED until all delayed * refs have been checked. 
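 * (a later BTRFS_DROP_DELAYED_REF can merge into an earlier add and
 * bring the share count back down, so deciding mid-walk would be
 * premature)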
*/ if (ret && (ret != BACKREF_FOUND_SHARED)) break; } if (!ret) ret = extent_is_shared(sc); out: spin_unlock(&head->lock); return ret; } /* * add all inline backrefs for bytenr to the list * * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED. */ static int add_inline_refs(const struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 bytenr, int *info_level, struct preftrees *preftrees, struct share_check *sc) { int ret = 0; int slot; struct extent_buffer *leaf; struct btrfs_key key; struct btrfs_key found_key; unsigned long ptr; unsigned long end; struct btrfs_extent_item *ei; u64 flags; u64 item_size; /* * enumerate all inline refs */ leaf = path->nodes[0]; slot = path->slots[0]; item_size = btrfs_item_size(leaf, slot); BUG_ON(item_size < sizeof(*ei)); ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); flags = btrfs_extent_flags(leaf, ei); btrfs_item_key_to_cpu(leaf, &found_key, slot); ptr = (unsigned long)(ei + 1); end = (unsigned long)ei + item_size; if (found_key.type == BTRFS_EXTENT_ITEM_KEY && flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { struct btrfs_tree_block_info *info; info = (struct btrfs_tree_block_info *)ptr; *info_level = btrfs_tree_block_level(leaf, info); ptr += sizeof(struct btrfs_tree_block_info); BUG_ON(ptr > end); } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) { *info_level = found_key.offset; } else { BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA)); } while (ptr < end) { struct btrfs_extent_inline_ref *iref; u64 offset; int type; iref = (struct btrfs_extent_inline_ref *)ptr; type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY); if (type == BTRFS_REF_TYPE_INVALID) return -EUCLEAN; offset = btrfs_extent_inline_ref_offset(leaf, iref); switch (type) { case BTRFS_SHARED_BLOCK_REF_KEY: ret = add_direct_ref(fs_info, preftrees, *info_level + 1, offset, bytenr, 1, NULL, GFP_NOFS); break; case BTRFS_SHARED_DATA_REF_KEY: { struct btrfs_shared_data_ref *sdref; int count; sdref = (struct btrfs_shared_data_ref *)(iref + 1); count = btrfs_shared_data_ref_count(leaf, sdref); ret = add_direct_ref(fs_info, preftrees, 0, offset, bytenr, count, sc, GFP_NOFS); break; } case BTRFS_TREE_BLOCK_REF_KEY: ret = add_indirect_ref(fs_info, preftrees, offset, NULL, *info_level + 1, bytenr, 1, NULL, GFP_NOFS); break; case BTRFS_EXTENT_DATA_REF_KEY: { struct btrfs_extent_data_ref *dref; int count; u64 root; dref = (struct btrfs_extent_data_ref *)(&iref->offset); count = btrfs_extent_data_ref_count(leaf, dref); key.objectid = btrfs_extent_data_ref_objectid(leaf, dref); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); if (sc && sc->inum && key.objectid != sc->inum) { ret = BACKREF_FOUND_SHARED; break; } root = btrfs_extent_data_ref_root(leaf, dref); ret = add_indirect_ref(fs_info, preftrees, root, &key, 0, bytenr, count, sc, GFP_NOFS); break; } default: WARN_ON(1); } if (ret) return ret; ptr += btrfs_extent_inline_ref_size(type); } return 0; } /* * add all non-inline backrefs for bytenr to the list * * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED. 
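 *
 * Keyed backrefs are separate items following the EXTENT_ITEM in the
 * extent tree, with key types ranging from BTRFS_TREE_BLOCK_REF_KEY to
 * BTRFS_SHARED_DATA_REF_KEY; the key.type range check in the loop below
 * relies on that ordering.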
*/ static int add_keyed_refs(struct btrfs_root *extent_root, struct btrfs_path *path, u64 bytenr, int info_level, struct preftrees *preftrees, struct share_check *sc) { struct btrfs_fs_info *fs_info = extent_root->fs_info; int ret; int slot; struct extent_buffer *leaf; struct btrfs_key key; while (1) { ret = btrfs_next_item(extent_root, path); if (ret < 0) break; if (ret) { ret = 0; break; } slot = path->slots[0]; leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != bytenr) break; if (key.type < BTRFS_TREE_BLOCK_REF_KEY) continue; if (key.type > BTRFS_SHARED_DATA_REF_KEY) break; switch (key.type) { case BTRFS_SHARED_BLOCK_REF_KEY: /* SHARED DIRECT METADATA backref */ ret = add_direct_ref(fs_info, preftrees, info_level + 1, key.offset, bytenr, 1, NULL, GFP_NOFS); break; case BTRFS_SHARED_DATA_REF_KEY: { /* SHARED DIRECT FULL backref */ struct btrfs_shared_data_ref *sdref; int count; sdref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref); count = btrfs_shared_data_ref_count(leaf, sdref); ret = add_direct_ref(fs_info, preftrees, 0, key.offset, bytenr, count, sc, GFP_NOFS); break; } case BTRFS_TREE_BLOCK_REF_KEY: /* NORMAL INDIRECT METADATA backref */ ret = add_indirect_ref(fs_info, preftrees, key.offset, NULL, info_level + 1, bytenr, 1, NULL, GFP_NOFS); break; case BTRFS_EXTENT_DATA_REF_KEY: { /* NORMAL INDIRECT DATA backref */ struct btrfs_extent_data_ref *dref; int count; u64 root; dref = btrfs_item_ptr(leaf, slot, struct btrfs_extent_data_ref); count = btrfs_extent_data_ref_count(leaf, dref); key.objectid = btrfs_extent_data_ref_objectid(leaf, dref); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = btrfs_extent_data_ref_offset(leaf, dref); if (sc && sc->inum && key.objectid != sc->inum) { ret = BACKREF_FOUND_SHARED; break; } root = btrfs_extent_data_ref_root(leaf, dref); ret = add_indirect_ref(fs_info, preftrees, root, &key, 0, bytenr, count, sc, GFP_NOFS); break; } default: WARN_ON(1); } if (ret) return ret; } return ret; } /* * this adds all existing backrefs (inline backrefs, backrefs and delayed * refs) for the given bytenr to the refs list, merges duplicates and resolves * indirect refs to their parent bytenr. * When roots are found, they're added to the roots list * * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and * behave much like trans == NULL case, the difference only lies in it will not * commit root. * The special case is for qgroup to search roots in commit_transaction(). * * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a * shared extent is detected. * * Otherwise this returns 0 for success and <0 for an error. * * If ignore_offset is set to false, only extent refs whose offsets match * extent_item_pos are returned. If true, every extent ref is returned * and extent_item_pos is ignored. 
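 *
 * The walk proceeds in stages: delayed refs are collected first (when a
 * transaction is given), then on-disk inline and keyed refs; missing
 * keys are filled in from the tree blocks themselves, indirect refs are
 * resolved to parent bytenrs, and the merged direct tree is finally
 * flattened into @refs (and @roots, when given).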
* * FIXME some caching might speed things up */ static int find_parent_nodes(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 time_seq, struct ulist *refs, struct ulist *roots, const u64 *extent_item_pos, struct share_check *sc, bool ignore_offset) { struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr); struct btrfs_key key; struct btrfs_path *path; struct btrfs_delayed_ref_root *delayed_refs = NULL; struct btrfs_delayed_ref_head *head; int info_level = 0; int ret; struct prelim_ref *ref; struct rb_node *node; struct extent_inode_elem *eie = NULL; struct preftrees preftrees = { .direct = PREFTREE_INIT, .indirect = PREFTREE_INIT, .indirect_missing_keys = PREFTREE_INIT }; key.objectid = bytenr; key.offset = (u64)-1; if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (!trans) { path->search_commit_root = 1; path->skip_locking = 1; } if (time_seq == BTRFS_SEQ_LAST) path->skip_locking = 1; again: head = NULL; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) goto out; if (ret == 0) { /* This shouldn't happen, indicates a bug or fs corruption. */ ASSERT(ret != 0); ret = -EUCLEAN; goto out; } if (trans && likely(trans->type != __TRANS_DUMMY) && time_seq != BTRFS_SEQ_LAST) { /* * We have a specific time_seq we care about and trans which * means we have the path lock, we need to grab the ref head and * lock it so we have a consistent view of the refs at the given * time. */ delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); if (head) { if (!mutex_trylock(&head->mutex)) { refcount_inc(&head->refs); spin_unlock(&delayed_refs->lock); btrfs_release_path(path); /* * Mutex was contended, block until it's * released and try again */ mutex_lock(&head->mutex); mutex_unlock(&head->mutex); btrfs_put_delayed_ref_head(head); goto again; } spin_unlock(&delayed_refs->lock); ret = add_delayed_refs(fs_info, head, time_seq, &preftrees, sc); mutex_unlock(&head->mutex); if (ret) goto out; } else { spin_unlock(&delayed_refs->lock); } } if (path->slots[0]) { struct extent_buffer *leaf; int slot; path->slots[0]--; leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid == bytenr && (key.type == BTRFS_EXTENT_ITEM_KEY || key.type == BTRFS_METADATA_ITEM_KEY)) { ret = add_inline_refs(fs_info, path, bytenr, &info_level, &preftrees, sc); if (ret) goto out; ret = add_keyed_refs(root, path, bytenr, info_level, &preftrees, sc); if (ret) goto out; } } btrfs_release_path(path); ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0); if (ret) goto out; WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root)); ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees, extent_item_pos, sc, ignore_offset); if (ret) goto out; WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root)); /* * This walks the tree of merged and resolved refs. Tree blocks are * read in as needed. Unique entries are added to the ulist, and * the list of found roots is updated. * * We release the entire tree in one go before returning. */ node = rb_first_cached(&preftrees.direct.root); while (node) { ref = rb_entry(node, struct prelim_ref, rbnode); node = rb_next(&ref->rbnode); /* * ref->count < 0 can happen here if there are delayed * refs with a node->action of BTRFS_DROP_DELAYED_REF. 
* prelim_ref_insert() relies on this when merging * identical refs to keep the overall count correct. * prelim_ref_insert() will merge only those refs * which compare identically. Any refs having * e.g. different offsets would not be merged, * and would retain their original ref->count < 0. */ if (roots && ref->count && ref->root_id && ref->parent == 0) { if (sc && sc->root_objectid && ref->root_id != sc->root_objectid) { ret = BACKREF_FOUND_SHARED; goto out; } /* no parent == root of tree */ ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS); if (ret < 0) goto out; } if (ref->count && ref->parent) { if (extent_item_pos && !ref->inode_list && ref->level == 0) { struct extent_buffer *eb; eb = read_tree_block(fs_info, ref->parent, 0, 0, ref->level, NULL); if (IS_ERR(eb)) { ret = PTR_ERR(eb); goto out; } if (!extent_buffer_uptodate(eb)) { free_extent_buffer(eb); ret = -EIO; goto out; } if (!path->skip_locking) btrfs_tree_read_lock(eb); ret = find_extent_in_eb(eb, bytenr, *extent_item_pos, &eie, ignore_offset); if (!path->skip_locking) btrfs_tree_read_unlock(eb); free_extent_buffer(eb); if (ret < 0) goto out; ref->inode_list = eie; } ret = ulist_add_merge_ptr(refs, ref->parent, ref->inode_list, (void **)&eie, GFP_NOFS); if (ret < 0) goto out; if (!ret && extent_item_pos) { /* * We've recorded that parent, so we must extend * its inode list here. * * However if there was corruption we may not * have found an eie, return an error in this * case. */ ASSERT(eie); if (!eie) { ret = -EUCLEAN; goto out; } while (eie->next) eie = eie->next; eie->next = ref->inode_list; } eie = NULL; } cond_resched(); } out: btrfs_free_path(path); prelim_release(&preftrees.direct); prelim_release(&preftrees.indirect); prelim_release(&preftrees.indirect_missing_keys); if (ret < 0) free_inode_elem_list(eie); return ret; } static void free_leaf_list(struct ulist *blocks) { struct ulist_node *node = NULL; struct extent_inode_elem *eie; struct ulist_iterator uiter; ULIST_ITER_INIT(&uiter); while ((node = ulist_next(blocks, &uiter))) { if (!node->aux) continue; eie = unode_aux_to_inode_list(node); free_inode_elem_list(eie); node->aux = 0; } ulist_free(blocks); } /* * Finds all leafs with a reference to the specified combination of bytenr and * offset. key_list_head will point to a list of corresponding keys (caller must * free each list element). The leafs will be stored in the leafs ulist, which * must be freed with ulist_free. * * returns 0 on success, <0 on error */ int btrfs_find_all_leafs(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 time_seq, struct ulist **leafs, const u64 *extent_item_pos, bool ignore_offset) { int ret; *leafs = ulist_alloc(GFP_NOFS); if (!*leafs) return -ENOMEM; ret = find_parent_nodes(trans, fs_info, bytenr, time_seq, *leafs, NULL, extent_item_pos, NULL, ignore_offset); if (ret < 0 && ret != -ENOENT) { free_leaf_list(*leafs); return ret; } return 0; } /* * walk all backrefs for a given extent to find all roots that reference this * extent. Walking a backref means finding all extents that reference this * extent and in turn walk the backrefs of those, too. Naturally this is a * recursive process, but here it is implemented in an iterative fashion: We * find all referencing extents for the extent in question and put them on a * list. In turn, we find all referencing extents for those, further appending * to the list. The way we iterate the list allows adding more elements after * the current while iterating. The process stops when we reach the end of the * list. 
Found roots are added to the roots list. * * returns 0 on success, < 0 on error. */ static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 time_seq, struct ulist **roots, bool ignore_offset) { struct ulist *tmp; struct ulist_node *node = NULL; struct ulist_iterator uiter; int ret; tmp = ulist_alloc(GFP_NOFS); if (!tmp) return -ENOMEM; *roots = ulist_alloc(GFP_NOFS); if (!*roots) { ulist_free(tmp); return -ENOMEM; } ULIST_ITER_INIT(&uiter); while (1) { ret = find_parent_nodes(trans, fs_info, bytenr, time_seq, tmp, *roots, NULL, NULL, ignore_offset); if (ret < 0 && ret != -ENOENT) { ulist_free(tmp); ulist_free(*roots); *roots = NULL; return ret; } node = ulist_next(tmp, &uiter); if (!node) break; bytenr = node->val; cond_resched(); } ulist_free(tmp); return 0; } int btrfs_find_all_roots(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 bytenr, u64 time_seq, struct ulist **roots, bool skip_commit_root_sem) { int ret; if (!trans && !skip_commit_root_sem) down_read(&fs_info->commit_root_sem); ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr, time_seq, roots, false); if (!trans && !skip_commit_root_sem) up_read(&fs_info->commit_root_sem); return ret; } /** * Check if an extent is shared or not * * @root: root inode belongs to * @inum: inode number of the inode whose extent we are checking * @bytenr: logical bytenr of the extent we are checking * @roots: list of roots this extent is shared among * @tmp: temporary list used for iteration * * btrfs_check_shared uses the backref walking code but will short * circuit as soon as it finds a root or inode that doesn't match the * one passed in. This provides a significant performance benefit for * callers (such as fiemap) which want to know whether the extent is * shared but do not need a ref count. * * This attempts to attach to the running transaction in order to account for * delayed refs, but continues on even when no running transaction exists. * * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error. 
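 *
 * Illustrative call (caller-side names assumed):
 *
 *	ret = btrfs_check_shared(root, btrfs_ino(inode), disk_bytenr,
 *				 roots, tmp);
 *
 * where @roots and @tmp are caller-provided ulists; both are released
 * again before this function returns.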
*/ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr, struct ulist *roots, struct ulist *tmp) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_trans_handle *trans; struct ulist_iterator uiter; struct ulist_node *node; struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem); int ret = 0; struct share_check shared = { .root_objectid = root->root_key.objectid, .inum = inum, .share_count = 0, }; ulist_init(roots); ulist_init(tmp); trans = btrfs_join_transaction_nostart(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) { ret = PTR_ERR(trans); goto out; } trans = NULL; down_read(&fs_info->commit_root_sem); } else { btrfs_get_tree_mod_seq(fs_info, &elem); } ULIST_ITER_INIT(&uiter); while (1) { ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp, roots, NULL, &shared, false); if (ret == BACKREF_FOUND_SHARED) { /* this is the only condition under which we return 1 */ ret = 1; break; } if (ret < 0 && ret != -ENOENT) break; ret = 0; node = ulist_next(tmp, &uiter); if (!node) break; bytenr = node->val; shared.share_count = 0; cond_resched(); } if (trans) { btrfs_put_tree_mod_seq(fs_info, &elem); btrfs_end_transaction(trans); } else { up_read(&fs_info->commit_root_sem); } out: ulist_release(roots); ulist_release(tmp); return ret; } int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, u64 start_off, struct btrfs_path *path, struct btrfs_inode_extref **ret_extref, u64 *found_off) { int ret, slot; struct btrfs_key key; struct btrfs_key found_key; struct btrfs_inode_extref *extref; const struct extent_buffer *leaf; unsigned long ptr; key.objectid = inode_objectid; key.type = BTRFS_INODE_EXTREF_KEY; key.offset = start_off; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) return ret; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; if (slot >= btrfs_header_nritems(leaf)) { /* * If the item at offset is not found, * btrfs_search_slot will point us to the slot * where it should be inserted. In our case * that will be the slot directly before the * next INODE_REF_KEY_V2 item. In the case * that we're pointing to the last slot in a * leaf, we must move one leaf over. */ ret = btrfs_next_leaf(root, path); if (ret) { if (ret >= 1) ret = -ENOENT; break; } continue; } btrfs_item_key_to_cpu(leaf, &found_key, slot); /* * Check that we're still looking at an extended ref key for * this particular objectid. If we have different * objectid or type then there are no more to be found * in the tree and we can exit. */ ret = -ENOENT; if (found_key.objectid != inode_objectid) break; if (found_key.type != BTRFS_INODE_EXTREF_KEY) break; ret = 0; ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); extref = (struct btrfs_inode_extref *)ptr; *ret_extref = extref; if (found_off) *found_off = found_key.offset; break; } return ret; } /* * this iterates to turn a name (from iref/extref) into a full filesystem path. * Elements of the path are separated by '/' and the path is guaranteed to be * 0-terminated. the path is only given within the current file system. * Therefore, it never starts with a '/'. the caller is responsible to provide * "size" bytes in "dest". the dest buffer will be filled backwards. finally, * the start point of the resulting string is returned. this pointer is within * dest, normally. * in case the path buffer would overflow, the pointer is decremented further * as if output was written to the buffer, though no more output is actually * generated. 
that way, the caller can determine how much space would be * required for the path to fit into the buffer. in that case, the returned * value will be smaller than dest. callers must check this! */ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, u32 name_len, unsigned long name_off, struct extent_buffer *eb_in, u64 parent, char *dest, u32 size) { int slot; u64 next_inum; int ret; s64 bytes_left = ((s64)size) - 1; struct extent_buffer *eb = eb_in; struct btrfs_key found_key; struct btrfs_inode_ref *iref; if (bytes_left >= 0) dest[bytes_left] = '\0'; while (1) { bytes_left -= name_len; if (bytes_left >= 0) read_extent_buffer(eb, dest + bytes_left, name_off, name_len); if (eb != eb_in) { if (!path->skip_locking) btrfs_tree_read_unlock(eb); free_extent_buffer(eb); } ret = btrfs_find_item(fs_root, path, parent, 0, BTRFS_INODE_REF_KEY, &found_key); if (ret > 0) ret = -ENOENT; if (ret) break; next_inum = found_key.offset; /* regular exit ahead */ if (parent == next_inum) break; slot = path->slots[0]; eb = path->nodes[0]; /* make sure we can use eb after releasing the path */ if (eb != eb_in) { path->nodes[0] = NULL; path->locks[0] = 0; } btrfs_release_path(path); iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); name_len = btrfs_inode_ref_name_len(eb, iref); name_off = (unsigned long)(iref + 1); parent = next_inum; --bytes_left; if (bytes_left >= 0) dest[bytes_left] = '/'; } btrfs_release_path(path); if (ret) return ERR_PTR(ret); return dest + bytes_left; } /* * this makes the path point to (logical EXTENT_ITEM *) * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for * tree blocks and <0 on error. */ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, struct btrfs_path *path, struct btrfs_key *found_key, u64 *flags_ret) { struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical); int ret; u64 flags; u64 size = 0; u32 item_size; const struct extent_buffer *eb; struct btrfs_extent_item *ei; struct btrfs_key key; if (btrfs_fs_incompat(fs_info, SKINNY_METADATA)) key.type = BTRFS_METADATA_ITEM_KEY; else key.type = BTRFS_EXTENT_ITEM_KEY; key.objectid = logical; key.offset = (u64)-1; ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) return ret; ret = btrfs_previous_extent_item(extent_root, path, 0); if (ret) { if (ret > 0) ret = -ENOENT; return ret; } btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]); if (found_key->type == BTRFS_METADATA_ITEM_KEY) size = fs_info->nodesize; else if (found_key->type == BTRFS_EXTENT_ITEM_KEY) size = found_key->offset; if (found_key->objectid > logical || found_key->objectid + size <= logical) { btrfs_debug(fs_info, "logical %llu is not within any extent", logical); return -ENOENT; } eb = path->nodes[0]; item_size = btrfs_item_size(eb, path->slots[0]); BUG_ON(item_size < sizeof(*ei)); ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); flags = btrfs_extent_flags(eb, ei); btrfs_debug(fs_info, "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u", logical, logical - found_key->objectid, found_key->objectid, found_key->offset, flags, item_size); WARN_ON(!flags_ret); if (flags_ret) { if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK; else if (flags & BTRFS_EXTENT_FLAG_DATA) *flags_ret = BTRFS_EXTENT_FLAG_DATA; else BUG(); return 0; } return -EIO; } /* * helper function to iterate extent inline refs. 
ptr must point to a 0 value * for the first call and may be modified. it is used to track state. * if more refs exist, 0 is returned and the next call to * get_extent_inline_ref must pass the modified ptr parameter to get the * next ref. after the last ref was processed, 1 is returned. * returns <0 on error */ static int get_extent_inline_ref(unsigned long *ptr, const struct extent_buffer *eb, const struct btrfs_key *key, const struct btrfs_extent_item *ei, u32 item_size, struct btrfs_extent_inline_ref **out_eiref, int *out_type) { unsigned long end; u64 flags; struct btrfs_tree_block_info *info; if (!*ptr) { /* first call */ flags = btrfs_extent_flags(eb, ei); if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { if (key->type == BTRFS_METADATA_ITEM_KEY) { /* a skinny metadata extent */ *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1); } else { WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY); info = (struct btrfs_tree_block_info *)(ei + 1); *out_eiref = (struct btrfs_extent_inline_ref *)(info + 1); } } else { *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1); } *ptr = (unsigned long)*out_eiref; if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size) return -ENOENT; } end = (unsigned long)ei + item_size; *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr); *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref, BTRFS_REF_TYPE_ANY); if (*out_type == BTRFS_REF_TYPE_INVALID) return -EUCLEAN; *ptr += btrfs_extent_inline_ref_size(*out_type); WARN_ON(*ptr > end); if (*ptr == end) return 1; /* last */ return 0; } /* * reads the tree block backref for an extent. tree level and root are returned * through out_level and out_root. ptr must point to a 0 value for the first * call and may be modified (see get_extent_inline_ref comment). * returns 0 if data was provided, 1 if there was no more data to provide or * <0 on error. */ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, struct btrfs_key *key, struct btrfs_extent_item *ei, u32 item_size, u64 *out_root, u8 *out_level) { int ret; int type; struct btrfs_extent_inline_ref *eiref; if (*ptr == (unsigned long)-1) return 1; while (1) { ret = get_extent_inline_ref(ptr, eb, key, ei, item_size, &eiref, &type); if (ret < 0) return ret; if (type == BTRFS_TREE_BLOCK_REF_KEY || type == BTRFS_SHARED_BLOCK_REF_KEY) break; if (ret == 1) return 1; } /* we can treat both ref types equally here */ *out_root = btrfs_extent_inline_ref_offset(eb, eiref); if (key->type == BTRFS_EXTENT_ITEM_KEY) { struct btrfs_tree_block_info *info; info = (struct btrfs_tree_block_info *)(ei + 1); *out_level = btrfs_tree_block_level(eb, info); } else { ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); *out_level = (u8)key->offset; } if (ret == 1) *ptr = (unsigned long)-1; return 0; } static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, struct extent_inode_elem *inode_list, u64 root, u64 extent_item_objectid, iterate_extent_inodes_t *iterate, void *ctx) { struct extent_inode_elem *eie; int ret = 0; for (eie = inode_list; eie; eie = eie->next) { btrfs_debug(fs_info, "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu", extent_item_objectid, eie->inum, eie->offset, root); ret = iterate(eie->inum, eie->offset, root, ctx); if (ret) { btrfs_debug(fs_info, "stopping iteration for %llu due to ret=%d", extent_item_objectid, ret); break; } } return ret; } /* * calls iterate() for every inode that references the extent identified by * the given parameters. * when the iterator function returns a non-zero value, iteration stops. 
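 *
 * Illustrative only: a callback has the iterate_extent_inodes_t shape
 *
 *	int my_iter(u64 inum, u64 offset, u64 root, void *ctx);
 *
 * and returning nonzero from it ends the iteration early.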
*/ int iterate_extent_inodes(struct btrfs_fs_info *fs_info, u64 extent_item_objectid, u64 extent_item_pos, int search_commit_root, iterate_extent_inodes_t *iterate, void *ctx, bool ignore_offset) { int ret; struct btrfs_trans_handle *trans = NULL; struct ulist *refs = NULL; struct ulist *roots = NULL; struct ulist_node *ref_node = NULL; struct ulist_node *root_node = NULL; struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem); struct ulist_iterator ref_uiter; struct ulist_iterator root_uiter; btrfs_debug(fs_info, "resolving all inodes for extent %llu", extent_item_objectid); if (!search_commit_root) { trans = btrfs_attach_transaction(fs_info->tree_root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) return PTR_ERR(trans); trans = NULL; } } if (trans) btrfs_get_tree_mod_seq(fs_info, &seq_elem); else down_read(&fs_info->commit_root_sem); ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, seq_elem.seq, &refs, &extent_item_pos, ignore_offset); if (ret) goto out; ULIST_ITER_INIT(&ref_uiter); while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) { ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val, seq_elem.seq, &roots, ignore_offset); if (ret) break; ULIST_ITER_INIT(&root_uiter); while (!ret && (root_node = ulist_next(roots, &root_uiter))) { btrfs_debug(fs_info, "root %llu references leaf %llu, data list %#llx", root_node->val, ref_node->val, ref_node->aux); ret = iterate_leaf_refs(fs_info, (struct extent_inode_elem *) (uintptr_t)ref_node->aux, root_node->val, extent_item_objectid, iterate, ctx); } ulist_free(roots); } free_leaf_list(refs); out: if (trans) { btrfs_put_tree_mod_seq(fs_info, &seq_elem); btrfs_end_transaction(trans); } else { up_read(&fs_info->commit_root_sem); } return ret; } int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, struct btrfs_path *path, iterate_extent_inodes_t *iterate, void *ctx, bool ignore_offset) { int ret; u64 extent_item_pos; u64 flags = 0; struct btrfs_key found_key; int search_commit_root = path->search_commit_root; ret = extent_from_logical(fs_info, logical, path, &found_key, &flags); btrfs_release_path(path); if (ret < 0) return ret; if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) return -EINVAL; extent_item_pos = logical - found_key.objectid; ret = iterate_extent_inodes(fs_info, found_key.objectid, extent_item_pos, search_commit_root, iterate, ctx, ignore_offset); return ret; } typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off, struct extent_buffer *eb, void *ctx); static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) { int ret = 0; int slot; u32 cur; u32 len; u32 name_len; u64 parent = 0; int found = 0; struct extent_buffer *eb; struct btrfs_inode_ref *iref; struct btrfs_key found_key; while (!ret) { ret = btrfs_find_item(fs_root, path, inum, parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY, &found_key); if (ret < 0) break; if (ret) { ret = found ? 0 : -ENOENT; break; } ++found; parent = found_key.offset; slot = path->slots[0]; eb = btrfs_clone_extent_buffer(path->nodes[0]); if (!eb) { ret = -ENOMEM; break; } btrfs_release_path(path); iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) { name_len = btrfs_inode_ref_name_len(eb, iref); /* path must be released before calling iterate()! 
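 * (iterate() may reuse the same btrfs_path; the leaf was cloned with
 * btrfs_clone_extent_buffer() above so its ref data stays readable
 * after the path is dropped)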
*/ btrfs_debug(fs_root->fs_info, "following ref at offset %u for inode %llu in tree %llu", cur, found_key.objectid, fs_root->root_key.objectid); ret = iterate(parent, name_len, (unsigned long)(iref + 1), eb, ctx); if (ret) break; len = sizeof(*iref) + name_len; iref = (struct btrfs_inode_ref *)((char *)iref + len); } free_extent_buffer(eb); } btrfs_release_path(path); return ret; } static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) { int ret; int slot; u64 offset = 0; u64 parent; int found = 0; struct extent_buffer *eb; struct btrfs_inode_extref *extref; u32 item_size; u32 cur_offset; unsigned long ptr; while (1) { ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref, &offset); if (ret < 0) break; if (ret) { ret = found ? 0 : -ENOENT; break; } ++found; slot = path->slots[0]; eb = btrfs_clone_extent_buffer(path->nodes[0]); if (!eb) { ret = -ENOMEM; break; } btrfs_release_path(path); item_size = btrfs_item_size(eb, slot); ptr = btrfs_item_ptr_offset(eb, slot); cur_offset = 0; while (cur_offset < item_size) { u32 name_len; extref = (struct btrfs_inode_extref *)(ptr + cur_offset); parent = btrfs_inode_extref_parent(eb, extref); name_len = btrfs_inode_extref_name_len(eb, extref); ret = iterate(parent, name_len, (unsigned long)&extref->name, eb, ctx); if (ret) break; cur_offset += btrfs_inode_extref_name_len(eb, extref); cur_offset += sizeof(*extref); } free_extent_buffer(eb); offset++; } btrfs_release_path(path); return ret; } static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) { int ret; int found_refs = 0; ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx); if (!ret) ++found_refs; else if (ret != -ENOENT) return ret; ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx); if (ret == -ENOENT && found_refs) return 0; return ret; } /* * returns 0 if the path could be dumped (probably truncated) * returns <0 in case of an error */ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off, struct extent_buffer *eb, void *ctx) { struct inode_fs_paths *ipath = ctx; char *fspath; char *fspath_min; int i = ipath->fspath->elem_cnt; const int s_ptr = sizeof(char *); u32 bytes_left; bytes_left = ipath->fspath->bytes_left > s_ptr ? ipath->fspath->bytes_left - s_ptr : 0; fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr; fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len, name_off, eb, inum, fspath_min, bytes_left); if (IS_ERR(fspath)) return PTR_ERR(fspath); if (fspath > fspath_min) { ipath->fspath->val[i] = (u64)(unsigned long)fspath; ++ipath->fspath->elem_cnt; ipath->fspath->bytes_left = fspath - fspath_min; } else { ++ipath->fspath->elem_missed; ipath->fspath->bytes_missing += fspath_min - fspath; ipath->fspath->bytes_left = 0; } return 0; } /* * this dumps all file system paths to the inode into the ipath struct, provided * is has been created large enough. each path is zero-terminated and accessed * from ipath->fspath->val[i]. * when it returns, there are ipath->fspath->elem_cnt number of paths available * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise, * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would * have been needed to return all paths. 
*/ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath) { return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path, inode_to_path, ipath); } struct btrfs_data_container *init_data_container(u32 total_bytes) { struct btrfs_data_container *data; size_t alloc_bytes; alloc_bytes = max_t(size_t, total_bytes, sizeof(*data)); data = kvmalloc(alloc_bytes, GFP_KERNEL); if (!data) return ERR_PTR(-ENOMEM); if (total_bytes >= sizeof(*data)) { data->bytes_left = total_bytes - sizeof(*data); data->bytes_missing = 0; } else { data->bytes_missing = sizeof(*data) - total_bytes; data->bytes_left = 0; } data->elem_cnt = 0; data->elem_missed = 0; return data; } /* * allocates space to return multiple file system paths for an inode. * total_bytes to allocate are passed, note that space usable for actual path * information will be total_bytes - sizeof(struct inode_fs_paths). * the returned pointer must be freed with free_ipath() in the end. */ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, struct btrfs_path *path) { struct inode_fs_paths *ifp; struct btrfs_data_container *fspath; fspath = init_data_container(total_bytes); if (IS_ERR(fspath)) return ERR_CAST(fspath); ifp = kmalloc(sizeof(*ifp), GFP_KERNEL); if (!ifp) { kvfree(fspath); return ERR_PTR(-ENOMEM); } ifp->btrfs_path = path; ifp->fspath = fspath; ifp->fs_root = fs_root; return ifp; } void free_ipath(struct inode_fs_paths *ipath) { if (!ipath) return; kvfree(ipath->fspath); kfree(ipath); } struct btrfs_backref_iter *btrfs_backref_iter_alloc( struct btrfs_fs_info *fs_info, gfp_t gfp_flag) { struct btrfs_backref_iter *ret; ret = kzalloc(sizeof(*ret), gfp_flag); if (!ret) return NULL; ret->path = btrfs_alloc_path(); if (!ret->path) { kfree(ret); return NULL; } /* Current backref iterator only supports iteration in commit root */ ret->path->search_commit_root = 1; ret->path->skip_locking = 1; ret->fs_info = fs_info; return ret; } int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr) { struct btrfs_fs_info *fs_info = iter->fs_info; struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); struct btrfs_path *path = iter->path; struct btrfs_extent_item *ei; struct btrfs_key key; int ret; key.objectid = bytenr; key.type = BTRFS_METADATA_ITEM_KEY; key.offset = (u64)-1; iter->bytenr = bytenr; ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0); if (ret < 0) return ret; if (ret == 0) { ret = -EUCLEAN; goto release; } if (path->slots[0] == 0) { WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); ret = -EUCLEAN; goto release; } path->slots[0]--; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if ((key.type != BTRFS_EXTENT_ITEM_KEY && key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) { ret = -ENOENT; goto release; } memcpy(&iter->cur_key, &key, sizeof(key)); iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(path->nodes[0], path->slots[0])); ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item); /* * Only support iteration on tree backref yet. * * This is an extra precaution for non skinny-metadata, where * EXTENT_ITEM is also used for tree blocks, that we can only use * extent flags to determine if it's a tree block. 
 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_root *extent_root;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
	ret = btrfs_next_item(extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
							      path->slots[0]);
	return 0;
}

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
cache->nr_nodes++; node->level = level; node->bytenr = bytenr; return node; } struct btrfs_backref_edge *btrfs_backref_alloc_edge( struct btrfs_backref_cache *cache) { struct btrfs_backref_edge *edge; edge = kzalloc(sizeof(*edge), GFP_NOFS); if (edge) cache->nr_edges++; return edge; } /* * Drop the backref node from cache, also cleaning up all its * upper edges and any uncached nodes in the path. * * This cleanup happens bottom up, thus the node should either * be the lowest node in the cache or a detached node. */ void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache, struct btrfs_backref_node *node) { struct btrfs_backref_node *upper; struct btrfs_backref_edge *edge; if (!node) return; BUG_ON(!node->lowest && !node->detached); while (!list_empty(&node->upper)) { edge = list_entry(node->upper.next, struct btrfs_backref_edge, list[LOWER]); upper = edge->node[UPPER]; list_del(&edge->list[LOWER]); list_del(&edge->list[UPPER]); btrfs_backref_free_edge(cache, edge); /* * Add the node to leaf node list if no other child block * cached. */ if (list_empty(&upper->lower)) { list_add_tail(&upper->lower, &cache->leaves); upper->lowest = 1; } } btrfs_backref_drop_node(cache, node); } /* * Release all nodes/edges from current cache */ void btrfs_backref_release_cache(struct btrfs_backref_cache *cache) { struct btrfs_backref_node *node; int i; while (!list_empty(&cache->detached)) { node = list_entry(cache->detached.next, struct btrfs_backref_node, list); btrfs_backref_cleanup_node(cache, node); } while (!list_empty(&cache->leaves)) { node = list_entry(cache->leaves.next, struct btrfs_backref_node, lower); btrfs_backref_cleanup_node(cache, node); } cache->last_trans = 0; for (i = 0; i < BTRFS_MAX_LEVEL; i++) ASSERT(list_empty(&cache->pending[i])); ASSERT(list_empty(&cache->pending_edge)); ASSERT(list_empty(&cache->useless_node)); ASSERT(list_empty(&cache->changed)); ASSERT(list_empty(&cache->detached)); ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); ASSERT(!cache->nr_nodes); ASSERT(!cache->nr_edges); } /* * Handle direct tree backref * * Direct tree backref means, the backref item shows its parent bytenr * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined). * * @ref_key: The converted backref key. * For keyed backref, it's the item key. * For inlined backref, objectid is the bytenr, * type is btrfs_inline_ref_type, offset is * btrfs_inline_ref_offset. */ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache, struct btrfs_key *ref_key, struct btrfs_backref_node *cur) { struct btrfs_backref_edge *edge; struct btrfs_backref_node *upper; struct rb_node *rb_node; ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY); /* Only reloc root uses backref pointing to itself */ if (ref_key->objectid == ref_key->offset) { struct btrfs_root *root; cur->is_reloc_root = 1; /* Only reloc backref cache cares about a specific root */ if (cache->is_reloc) { root = find_reloc_root(cache->fs_info, cur->bytenr); if (!root) return -ENOENT; cur->root = root; } else { /* * For generic purpose backref cache, reloc root node * is useless. 
*/ list_add(&cur->list, &cache->useless_node); } return 0; } edge = btrfs_backref_alloc_edge(cache); if (!edge) return -ENOMEM; rb_node = rb_simple_search(&cache->rb_root, ref_key->offset); if (!rb_node) { /* Parent node not yet cached */ upper = btrfs_backref_alloc_node(cache, ref_key->offset, cur->level + 1); if (!upper) { btrfs_backref_free_edge(cache, edge); return -ENOMEM; } /* * Backrefs for the upper level block isn't cached, add the * block to pending list */ list_add_tail(&edge->list[UPPER], &cache->pending_edge); } else { /* Parent node already cached */ upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node); ASSERT(upper->checked); INIT_LIST_HEAD(&edge->list[UPPER]); } btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER); return 0; } /* * Handle indirect tree backref * * Indirect tree backref means, we only know which tree the node belongs to. * We still need to do a tree search to find out the parents. This is for * TREE_BLOCK_REF backref (keyed or inlined). * * @ref_key: The same as @ref_key in handle_direct_tree_backref() * @tree_key: The first key of this tree block. * @path: A clean (released) path, to avoid allocating path every time * the function get called. */ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache, struct btrfs_path *path, struct btrfs_key *ref_key, struct btrfs_key *tree_key, struct btrfs_backref_node *cur) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_backref_node *upper; struct btrfs_backref_node *lower; struct btrfs_backref_edge *edge; struct extent_buffer *eb; struct btrfs_root *root; struct rb_node *rb_node; int level; bool need_check = true; int ret; root = btrfs_get_fs_root(fs_info, ref_key->offset, false); if (IS_ERR(root)) return PTR_ERR(root); if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) cur->cowonly = 1; if (btrfs_root_level(&root->root_item) == cur->level) { /* Tree root */ ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr); /* * For reloc backref cache, we may ignore reloc root. But for * general purpose backref cache, we can't rely on * btrfs_should_ignore_reloc_root() as it may conflict with * current running relocation and lead to missing root. * * For general purpose backref cache, reloc root detection is * completely relying on direct backref (key->offset is parent * bytenr), thus only do such check for reloc cache. 
*/ if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { btrfs_put_root(root); list_add(&cur->list, &cache->useless_node); } else { cur->root = root; } return 0; } level = cur->level + 1; /* Search the tree to find parent blocks referring to the block */ path->search_commit_root = 1; path->skip_locking = 1; path->lowest_level = level; ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0); path->lowest_level = 0; if (ret < 0) { btrfs_put_root(root); return ret; } if (ret > 0 && path->slots[level] > 0) path->slots[level]--; eb = path->nodes[level]; if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) { btrfs_err(fs_info, "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)", cur->bytenr, level - 1, root->root_key.objectid, tree_key->objectid, tree_key->type, tree_key->offset); btrfs_put_root(root); ret = -ENOENT; goto out; } lower = cur; /* Add all nodes and edges in the path */ for (; level < BTRFS_MAX_LEVEL; level++) { if (!path->nodes[level]) { ASSERT(btrfs_root_bytenr(&root->root_item) == lower->bytenr); /* Same as previous should_ignore_reloc_root() call */ if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) { btrfs_put_root(root); list_add(&lower->list, &cache->useless_node); } else { lower->root = root; } break; } edge = btrfs_backref_alloc_edge(cache); if (!edge) { btrfs_put_root(root); ret = -ENOMEM; goto out; } eb = path->nodes[level]; rb_node = rb_simple_search(&cache->rb_root, eb->start); if (!rb_node) { upper = btrfs_backref_alloc_node(cache, eb->start, lower->level + 1); if (!upper) { btrfs_put_root(root); btrfs_backref_free_edge(cache, edge); ret = -ENOMEM; goto out; } upper->owner = btrfs_header_owner(eb); if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) upper->cowonly = 1; /* * If we know the block isn't shared we can avoid * checking its backrefs. */ if (btrfs_block_can_be_shared(root, eb)) upper->checked = 0; else upper->checked = 1; /* * Add the block to pending list if we need to check its * backrefs, we only do this once while walking up a * tree as we will catch anything else later on. */ if (!upper->checked && need_check) { need_check = false; list_add_tail(&edge->list[UPPER], &cache->pending_edge); } else { if (upper->checked) need_check = true; INIT_LIST_HEAD(&edge->list[UPPER]); } } else { upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node); ASSERT(upper->checked); INIT_LIST_HEAD(&edge->list[UPPER]); if (!upper->owner) upper->owner = btrfs_header_owner(eb); } btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER); if (rb_node) { btrfs_put_root(root); break; } lower = upper; upper = NULL; } out: btrfs_release_path(path); return ret; } /* * Add backref node @cur into @cache. * * NOTE: Even if the function returned 0, @cur is not yet cached as its upper * links aren't yet bi-directional. Needs to finish such links. * Use btrfs_backref_finish_upper_links() to finish such linkage. 
* * @path: Released path for indirect tree backref lookup * @iter: Released backref iter for extent tree search * @node_key: The first key of the tree block */ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache, struct btrfs_path *path, struct btrfs_backref_iter *iter, struct btrfs_key *node_key, struct btrfs_backref_node *cur) { struct btrfs_fs_info *fs_info = cache->fs_info; struct btrfs_backref_edge *edge; struct btrfs_backref_node *exist; int ret; ret = btrfs_backref_iter_start(iter, cur->bytenr); if (ret < 0) return ret; /* * We skip the first btrfs_tree_block_info, as we don't use the key * stored in it, but fetch it from the tree block */ if (btrfs_backref_has_tree_block_info(iter)) { ret = btrfs_backref_iter_next(iter); if (ret < 0) goto out; /* No extra backref? This means the tree block is corrupted */ if (ret > 0) { ret = -EUCLEAN; goto out; } } WARN_ON(cur->checked); if (!list_empty(&cur->upper)) { /* * The backref was added previously when processing backref of * type BTRFS_TREE_BLOCK_REF_KEY */ ASSERT(list_is_singular(&cur->upper)); edge = list_entry(cur->upper.next, struct btrfs_backref_edge, list[LOWER]); ASSERT(list_empty(&edge->list[UPPER])); exist = edge->node[UPPER]; /* * Add the upper level block to pending list if we need check * its backrefs */ if (!exist->checked) list_add_tail(&edge->list[UPPER], &cache->pending_edge); } else { exist = NULL; } for (; ret == 0; ret = btrfs_backref_iter_next(iter)) { struct extent_buffer *eb; struct btrfs_key key; int type; cond_resched(); eb = btrfs_backref_get_eb(iter); key.objectid = iter->bytenr; if (btrfs_backref_iter_is_inline_ref(iter)) { struct btrfs_extent_inline_ref *iref; /* Update key for inline backref */ iref = (struct btrfs_extent_inline_ref *) ((unsigned long)iter->cur_ptr); type = btrfs_get_extent_inline_ref_type(eb, iref, BTRFS_REF_TYPE_BLOCK); if (type == BTRFS_REF_TYPE_INVALID) { ret = -EUCLEAN; goto out; } key.type = type; key.offset = btrfs_extent_inline_ref_offset(eb, iref); } else { key.type = iter->cur_key.type; key.offset = iter->cur_key.offset; } /* * Parent node found and matches current inline ref, no need to * rebuild this node for this inline ref */ if (exist && ((key.type == BTRFS_TREE_BLOCK_REF_KEY && exist->owner == key.offset) || (key.type == BTRFS_SHARED_BLOCK_REF_KEY && exist->bytenr == key.offset))) { exist = NULL; continue; } /* SHARED_BLOCK_REF means key.offset is the parent bytenr */ if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) { ret = handle_direct_tree_backref(cache, &key, cur); if (ret < 0) goto out; continue; } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) { ret = -EINVAL; btrfs_print_v0_err(fs_info); btrfs_handle_fs_error(fs_info, ret, NULL); goto out; } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) { continue; } /* * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset * means the root objectid. We need to search the tree to get * its parent bytenr. 
*/ ret = handle_indirect_tree_backref(cache, path, &key, node_key, cur); if (ret < 0) goto out; } ret = 0; cur->checked = 1; WARN_ON(exist); out: btrfs_backref_iter_release(iter); return ret; } /* * Finish the upwards linkage created by btrfs_backref_add_tree_node() */ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache, struct btrfs_backref_node *start) { struct list_head *useless_node = &cache->useless_node; struct btrfs_backref_edge *edge; struct rb_node *rb_node; LIST_HEAD(pending_edge); ASSERT(start->checked); /* Insert this node to cache if it's not COW-only */ if (!start->cowonly) { rb_node = rb_simple_insert(&cache->rb_root, start->bytenr, &start->rb_node); if (rb_node) btrfs_backref_panic(cache->fs_info, start->bytenr, -EEXIST); list_add_tail(&start->lower, &cache->leaves); } /* * Use breadth first search to iterate all related edges. * * The starting points are all the edges of this node */ list_for_each_entry(edge, &start->upper, list[LOWER]) list_add_tail(&edge->list[UPPER], &pending_edge); while (!list_empty(&pending_edge)) { struct btrfs_backref_node *upper; struct btrfs_backref_node *lower; edge = list_first_entry(&pending_edge, struct btrfs_backref_edge, list[UPPER]); list_del_init(&edge->list[UPPER]); upper = edge->node[UPPER]; lower = edge->node[LOWER]; /* Parent is detached, no need to keep any edges */ if (upper->detached) { list_del(&edge->list[LOWER]); btrfs_backref_free_edge(cache, edge); /* Lower node is orphan, queue for cleanup */ if (list_empty(&lower->upper)) list_add(&lower->list, useless_node); continue; } /* * All new nodes added in current build_backref_tree() haven't * been linked to the cache rb tree. * So if we have upper->rb_node populated, this means a cache * hit. We only need to link the edge, as @upper and all its * parents have already been linked. */ if (!RB_EMPTY_NODE(&upper->rb_node)) { if (upper->lowest) { list_del_init(&upper->lower); upper->lowest = 0; } list_add_tail(&edge->list[UPPER], &upper->lower); continue; } /* Sanity check, we shouldn't have any unchecked nodes */ if (!upper->checked) { ASSERT(0); return -EUCLEAN; } /* Sanity check, COW-only node has non-COW-only parent */ if (start->cowonly != upper->cowonly) { ASSERT(0); return -EUCLEAN; } /* Only cache non-COW-only (subvolume trees) tree blocks */ if (!upper->cowonly) { rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr, &upper->rb_node); if (rb_node) { btrfs_backref_panic(cache->fs_info, upper->bytenr, -EEXIST); return -EUCLEAN; } } list_add_tail(&edge->list[UPPER], &upper->lower); /* * Also queue all the parent edges of this uncached node * to finish the upper linkage */ list_for_each_entry(edge, &upper->upper, list[LOWER]) list_add_tail(&edge->list[UPPER], &pending_edge); } return 0; } void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache, struct btrfs_backref_node *node) { struct btrfs_backref_node *lower; struct btrfs_backref_node *upper; struct btrfs_backref_edge *edge; while (!list_empty(&cache->useless_node)) { lower = list_first_entry(&cache->useless_node, struct btrfs_backref_node, list); list_del_init(&lower->list); } while (!list_empty(&cache->pending_edge)) { edge = list_first_entry(&cache->pending_edge, struct btrfs_backref_edge, list[UPPER]); list_del(&edge->list[UPPER]); list_del(&edge->list[LOWER]); lower = edge->node[LOWER]; upper = edge->node[UPPER]; btrfs_backref_free_edge(cache, edge); /* * Lower is no longer linked to any upper backref nodes and * isn't in the cache, we can free it ourselves. 
*/ if (list_empty(&lower->upper) && RB_EMPTY_NODE(&lower->rb_node)) list_add(&lower->list, &cache->useless_node); if (!RB_EMPTY_NODE(&upper->rb_node)) continue; /* Add this guy's upper edges to the list to process */ list_for_each_entry(edge, &upper->upper, list[LOWER]) list_add_tail(&edge->list[UPPER], &cache->pending_edge); if (list_empty(&upper->upper)) list_add(&upper->list, &cache->useless_node); } while (!list_empty(&cache->useless_node)) { lower = list_first_entry(&cache->useless_node, struct btrfs_backref_node, list); list_del_init(&lower->list); if (lower == node) node = NULL; btrfs_backref_drop_node(cache, lower); } btrfs_backref_cleanup_node(cache, node); ASSERT(list_empty(&cache->useless_node) && list_empty(&cache->pending_edge)); }
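/*
 * Editor's note: the sketch below is illustrative and not part of the kernel
 * sources above. It shows how the backref iterator is meant to be driven,
 * following the return conventions documented at btrfs_backref_iter_next()
 * (0 = next backref, >0 = no more backrefs, <0 = error) and the
 * "for (; ret == 0; ...)" pattern used by btrfs_backref_add_tree_node().
 * example_walk_tree_backrefs() is a hypothetical caller, and
 * btrfs_backref_iter_free() is assumed from the matching backref header.
 */
static int example_walk_tree_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	/* Position on the first inline or keyed backref of @bytenr */
	ret = btrfs_backref_iter_start(iter, bytenr);
	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		/*
		 * iter->cur_key now describes the current backref; inspect
		 * it here (e.g. TREE_BLOCK_REF vs SHARED_BLOCK_REF).
		 */
	}
	if (ret > 0)	/* walked past the last backref, not an error */
		ret = 0;

	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret;
}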
553575.c
/** * @file * DNS - host name to IP address resolver. * */ /** * This file implements a DNS host name to IP address resolver. * Port to lwIP from uIP * by Jim Pettinato April 2007 * uIP version Copyright (c) 2002-2003, Adam Dunkels. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * * DNS.C * * The lwIP DNS resolver functions are used to lookup a host name and * map it to a numerical IP address. It maintains a list of resolved * hostnames that can be queried with the dns_lookup() function. * New hostnames can be resolved using the dns_query() function. * * The lwIP version of the resolver also adds a non-blocking version of * gethostbyname() that will work with a raw API application. This function * checks for an IP address string first and converts it if it is valid. * gethostbyname() then does a dns_lookup() to see if the name is * already in the table. If so, the IP is returned. If not, a query is * issued and the function returns with a ERR_INPROGRESS status. The app * using the dns client must then go into a waiting state. * * Once a hostname has been resolved (or found to be non-existent), * the resolver code calls a specified callback function (which * must be implemented by the module that uses the resolver). */ /*----------------------------------------------------------------------------- * RFC 1035 - Domain names - implementation and specification * RFC 2181 - Clarifications to the DNS Specification *----------------------------------------------------------------------------*/ /** @todo: define good default values (rfc compliance) */ /** @todo: improve answer parsing, more checkings... */ /** @todo: check RFC1035 - 7.3. 
Processing responses */ /*----------------------------------------------------------------------------- * Includes *----------------------------------------------------------------------------*/ #include "lwip/opt.h" #if LWIP_DNS /* don't build if not configured for use in lwipopts.h */ #include "lwip/udp.h" #include "lwip/mem.h" #include "lwip/memp.h" #include "lwip/dns.h" #include <string.h> /** DNS server IP address */ #ifndef DNS_SERVER_ADDRESS #define DNS_SERVER_ADDRESS(ipaddr) (ip4_addr_set_u32(ipaddr, ipaddr_addr("208.67.222.222"))) /* resolver1.opendns.com */ #endif /** DNS server port address */ #ifndef DNS_SERVER_PORT #define DNS_SERVER_PORT 53 #endif /** DNS maximum number of retries when asking for a name, before "timeout". */ #ifndef DNS_MAX_RETRIES #define DNS_MAX_RETRIES 4 #endif /** DNS resource record max. TTL (one week as default) */ #ifndef DNS_MAX_TTL #define DNS_MAX_TTL 604800 #endif /* DNS protocol flags */ #define DNS_FLAG1_RESPONSE 0x80 #define DNS_FLAG1_OPCODE_STATUS 0x10 #define DNS_FLAG1_OPCODE_INVERSE 0x08 #define DNS_FLAG1_OPCODE_STANDARD 0x00 #define DNS_FLAG1_AUTHORATIVE 0x04 #define DNS_FLAG1_TRUNC 0x02 #define DNS_FLAG1_RD 0x01 #define DNS_FLAG2_RA 0x80 #define DNS_FLAG2_ERR_MASK 0x0f #define DNS_FLAG2_ERR_NONE 0x00 #define DNS_FLAG2_ERR_NAME 0x03 /* DNS protocol states */ #define DNS_STATE_UNUSED 0 #define DNS_STATE_NEW 1 #define DNS_STATE_ASKING 2 #define DNS_STATE_DONE 3 #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/bpstruct.h" #endif PACK_STRUCT_BEGIN /** DNS message header */ struct dns_hdr { PACK_STRUCT_FIELD(u16_t id); PACK_STRUCT_FIELD(u8_t flags1); PACK_STRUCT_FIELD(u8_t flags2); PACK_STRUCT_FIELD(u16_t numquestions); PACK_STRUCT_FIELD(u16_t numanswers); PACK_STRUCT_FIELD(u16_t numauthrr); PACK_STRUCT_FIELD(u16_t numextrarr); } PACK_STRUCT_STRUCT; PACK_STRUCT_END #ifdef PACK_STRUCT_USE_INCLUDES # include "arch/epstruct.h" #endif #define SIZEOF_DNS_HDR 12 /** DNS query message structure. No packing needed: only used locally on the stack. */ struct dns_query { /* DNS query record starts with either a domain name or a pointer to a name already present somewhere in the packet. */ u16_t type; u16_t cls; }; #define SIZEOF_DNS_QUERY 4 /** DNS answer message structure. No packing needed: only used locally on the stack. */ struct dns_answer { /* DNS answer record starts with either a domain name or a pointer to a name already present somewhere in the packet. */ u16_t type; u16_t cls; u32_t ttl; u16_t len; }; #define SIZEOF_DNS_ANSWER 10 /** DNS table entry */ struct dns_table_entry { u8_t state; u8_t numdns; u8_t tmr; u8_t retries; u8_t seqno; u8_t err; u32_t ttl; char name[DNS_MAX_NAME_LENGTH]; ip_addr_t ipaddr; /* pointer to callback on DNS query done */ dns_found_callback found; void *arg; }; #if DNS_LOCAL_HOSTLIST #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC /** Local host-list. For hostnames in this list, no * external name resolution is performed */ static struct local_hostlist_entry *local_hostlist_dynamic; #else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ /** Defining this allows the local_hostlist_static to be placed in a different * linker section (e.g. FLASH) */ #ifndef DNS_LOCAL_HOSTLIST_STORAGE_PRE #define DNS_LOCAL_HOSTLIST_STORAGE_PRE static #endif /* DNS_LOCAL_HOSTLIST_STORAGE_PRE */ /** Defining this allows the local_hostlist_static to be placed in a different * linker section (e.g. 
FLASH) */
#ifndef DNS_LOCAL_HOSTLIST_STORAGE_POST
#define DNS_LOCAL_HOSTLIST_STORAGE_POST
#endif /* DNS_LOCAL_HOSTLIST_STORAGE_POST */
DNS_LOCAL_HOSTLIST_STORAGE_PRE struct local_hostlist_entry local_hostlist_static[]
  DNS_LOCAL_HOSTLIST_STORAGE_POST = DNS_LOCAL_HOSTLIST_INIT;
#endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */
static void dns_init_local(void);
#endif /* DNS_LOCAL_HOSTLIST */

/* forward declarations */
static void dns_recv(void *s, struct udp_pcb *pcb, struct pbuf *p, ip_addr_t *addr, u16_t port);
static void dns_check_entries(void);

/*-----------------------------------------------------------------------------
 * Globals
 *----------------------------------------------------------------------------*/

/* DNS variables */
static struct udp_pcb        *dns_pcb;
static u8_t                   dns_seqno;
static struct dns_table_entry dns_table[DNS_TABLE_SIZE];
static ip_addr_t              dns_servers[DNS_MAX_SERVERS];

/** Contiguous buffer for processing responses */
static u8_t                   dns_payload_buffer[LWIP_MEM_ALIGN_BUFFER(DNS_MSG_SIZE)];
static u8_t*                  dns_payload;

/**
 * Initialize the resolver: set up the UDP pcb and configure the default server
 * (DNS_SERVER_ADDRESS).
 */
void
dns_init(void)
{
  ip_addr_t dnsserver;

  dns_payload = (u8_t *)LWIP_MEM_ALIGN(dns_payload_buffer);

  /* initialize default DNS server address */
  DNS_SERVER_ADDRESS(&dnsserver);

  LWIP_DEBUGF(DNS_DEBUG, ("dns_init: initializing\n"));

  /* if dns client not yet initialized... */
  if (dns_pcb == NULL) {
    dns_pcb = udp_new();

    if (dns_pcb != NULL) {
      /* initializing the DNS table is not needed (it is zero-initialized
       * since it is a global variable) */
      LWIP_ASSERT("For implicit initialization to work, DNS_STATE_UNUSED needs to be 0",
        DNS_STATE_UNUSED == 0);

      /* initialize DNS client */
      udp_bind(dns_pcb, IP_ADDR_ANY, 0);
      udp_recv(dns_pcb, dns_recv, NULL);

      /* initialize default DNS primary server */
      dns_setserver(0, &dnsserver);
    }
  }
#if DNS_LOCAL_HOSTLIST
  dns_init_local();
#endif
}

/**
 * Initialize one of the DNS servers.
 *
 * @param numdns the index of the DNS server to set (must be < DNS_MAX_SERVERS)
 * @param dnsserver IP address of the DNS server to set
 */
void
dns_setserver(u8_t numdns, ip_addr_t *dnsserver)
{
  /*
   * hpa: the lwip code has the dnsserver->addr != 0 test, but that would
   * seem to indicate that there is no way to cancel a previously given
   * DNS server...
   */
  if ((numdns < DNS_MAX_SERVERS) && (dns_pcb != NULL) &&
      (dnsserver != NULL) /* && !ip_addr_isany(dnsserver) */) {
    dns_servers[numdns] = (*dnsserver);
  }
}

/**
 * Obtain one of the currently configured DNS servers.
 *
 * @param numdns the index of the DNS server
 * @return IP address of the indexed DNS server or "ip_addr_any" if the DNS
 *         server has not been configured.
 */
ip_addr_t
dns_getserver(u8_t numdns)
{
  if (numdns < DNS_MAX_SERVERS) {
    return dns_servers[numdns];
  } else {
    return *IP_ADDR_ANY;
  }
}

/**
 * The DNS resolver client timer - handles retries and timeouts; it should
 * be called every DNS_TMR_INTERVAL milliseconds (every second by default).
*/ void dns_tmr(void) { if (dns_pcb != NULL) { LWIP_DEBUGF(DNS_DEBUG, ("dns_tmr: dns_check_entries\n")); dns_check_entries(); } } #if DNS_LOCAL_HOSTLIST static void dns_init_local() { #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) int i; struct local_hostlist_entry *entry; /* Dynamic: copy entries from DNS_LOCAL_HOSTLIST_INIT to list */ struct local_hostlist_entry local_hostlist_init[] = DNS_LOCAL_HOSTLIST_INIT; size_t namelen; for (i = 0; i < sizeof(local_hostlist_init) / sizeof(struct local_hostlist_entry); i++) { struct local_hostlist_entry *init_entry = &local_hostlist_init[i]; LWIP_ASSERT("invalid host name (NULL)", init_entry->name != NULL); namelen = strlen(init_entry->name); LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); LWIP_ASSERT("mem-error in dns_init_local", entry != NULL); if (entry != NULL) { entry->name = (char*)entry + sizeof(struct local_hostlist_entry); MEMCPY((char*)entry->name, init_entry->name, namelen); ((char*)entry->name)[namelen] = 0; entry->addr = init_entry->addr; entry->next = local_hostlist_dynamic; local_hostlist_dynamic = entry; } } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC && defined(DNS_LOCAL_HOSTLIST_INIT) */ } /** * Scans the local host-list for a hostname. * * @param hostname Hostname to look for in the local host-list * @return The first IP address for the hostname in the local host-list or * IPADDR_NONE if not found. */ static u32_t dns_lookup_local(const char *hostname) { #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC struct local_hostlist_entry *entry = local_hostlist_dynamic; while(entry != NULL) { if(strcmp(entry->name, hostname) == 0) { return ip4_addr_get_u32(&entry->addr); } entry = entry->next; } #else /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ int i; for (i = 0; i < sizeof(local_hostlist_static) / sizeof(struct local_hostlist_entry); i++) { if(strcmp(local_hostlist_static[i].name, hostname) == 0) { return ip4_addr_get_u32(&local_hostlist_static[i].addr); } } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC */ return IPADDR_NONE; } #if DNS_LOCAL_HOSTLIST_IS_DYNAMIC /** Remove all entries from the local host-list for a specific hostname * and/or IP addess * * @param hostname hostname for which entries shall be removed from the local * host-list * @param addr address for which entries shall be removed from the local host-list * @return the number of removed entries */ int dns_local_removehost(const char *hostname, const ip_addr_t *addr) { int removed = 0; struct local_hostlist_entry *entry = local_hostlist_dynamic; struct local_hostlist_entry *last_entry = NULL; while (entry != NULL) { if (((hostname == NULL) || !strcmp(entry->name, hostname)) && ((addr == NULL) || ip_addr_cmp(&entry->addr, addr))) { struct local_hostlist_entry *free_entry; if (last_entry != NULL) { last_entry->next = entry->next; } else { local_hostlist_dynamic = entry->next; } free_entry = entry; entry = entry->next; memp_free(MEMP_LOCALHOSTLIST, free_entry); removed++; } else { last_entry = entry; entry = entry->next; } } return removed; } /** * Add a hostname/IP address pair to the local host-list. * Duplicates are not checked. 
* * @param hostname hostname of the new entry * @param addr IP address of the new entry * @return ERR_OK if succeeded or ERR_MEM on memory error */ err_t dns_local_addhost(const char *hostname, const ip_addr_t *addr) { struct local_hostlist_entry *entry; size_t namelen; LWIP_ASSERT("invalid host name (NULL)", hostname != NULL); namelen = strlen(hostname); LWIP_ASSERT("namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN", namelen <= DNS_LOCAL_HOSTLIST_MAX_NAMELEN); entry = (struct local_hostlist_entry *)memp_malloc(MEMP_LOCALHOSTLIST); if (entry == NULL) { return ERR_MEM; } entry->name = (char*)entry + sizeof(struct local_hostlist_entry); MEMCPY((char*)entry->name, hostname, namelen); ((char*)entry->name)[namelen] = 0; ip_addr_copy(entry->addr, *addr); entry->next = local_hostlist_dynamic; local_hostlist_dynamic = entry; return ERR_OK; } #endif /* DNS_LOCAL_HOSTLIST_IS_DYNAMIC*/ #endif /* DNS_LOCAL_HOSTLIST */ /** * Look up a hostname in the array of known hostnames. * * @note This function only looks in the internal array of known * hostnames, it does not send out a query for the hostname if none * was found. The function dns_enqueue() can be used to send a query * for a hostname. * * @param name the hostname to look up * @return the hostname's IP address, as u32_t (instead of ip_addr_t to * better check for failure: != IPADDR_NONE) or IPADDR_NONE if the hostname * was not found in the cached dns_table. */ static u32_t dns_lookup(const char *name) { u8_t i; #if DNS_LOCAL_HOSTLIST || defined(DNS_LOOKUP_LOCAL_EXTERN) u32_t addr; #endif /* DNS_LOCAL_HOSTLIST || defined(DNS_LOOKUP_LOCAL_EXTERN) */ #if DNS_LOCAL_HOSTLIST if ((addr = dns_lookup_local(name)) != IPADDR_NONE) { return addr; } #endif /* DNS_LOCAL_HOSTLIST */ #ifdef DNS_LOOKUP_LOCAL_EXTERN if((addr = DNS_LOOKUP_LOCAL_EXTERN(name)) != IPADDR_NONE) { return addr; } #endif /* DNS_LOOKUP_LOCAL_EXTERN */ /* Walk through name list, return entry if found. If not, return NULL. */ for (i = 0; i < DNS_TABLE_SIZE; ++i) { if ((dns_table[i].state == DNS_STATE_DONE) && (strcmp(name, dns_table[i].name) == 0)) { LWIP_DEBUGF(DNS_DEBUG, ("dns_lookup: \"%s\": found = ", name)); ip_addr_debug_print(DNS_DEBUG, &(dns_table[i].ipaddr)); LWIP_DEBUGF(DNS_DEBUG, ("\n")); return ip4_addr_get_u32(&dns_table[i].ipaddr); } } return IPADDR_NONE; } #if DNS_DOES_NAME_CHECK /** * Compare the "dotted" name "query" with the encoded name "response" * to make sure an answer from the DNS server matches the current dns_table * entry (otherwise, answers might arrive late for hostname not on the list * any more). * * @param query hostname (not encoded) from the dns_table * @param response encoded hostname in the DNS response * @return 0: names equal; 1: names differ */ static u8_t dns_compare_name(unsigned char *query, unsigned char *response) { unsigned char n; do { n = *response++; /** @see RFC 1035 - 4.1.4. Message compression */ if ((n & 0xc0) == 0xc0) { /* Compressed name */ break; } else { /* Not compressed name */ while (n > 0) { if ((*query) != (*response)) { return 1; } ++response; ++query; --n; }; ++query; } } while (*response != 0); return 0; } #endif /* DNS_DOES_NAME_CHECK */ /** * Walk through a compact encoded DNS name and return the end of the name. * * @param query encoded DNS name in the DNS server response * @return end of the name */ static unsigned char * dns_parse_name(unsigned char *query) { unsigned char n; do { n = *query++; /** @see RFC 1035 - 4.1.4. 
Message compression */ if ((n & 0xc0) == 0xc0) { /* Compressed name */ break; } else { /* Not compressed name */ while (n > 0) { ++query; --n; }; } } while (*query != 0); return query + 1; } /** * Send a DNS query packet. * * @param numdns index of the DNS server in the dns_servers table * @param name hostname to query * @param id index of the hostname in dns_table, used as transaction ID in the * DNS query packet * @return ERR_OK if packet is sent; an err_t indicating the problem otherwise */ static err_t dns_send(u8_t numdns, const char* name, u8_t id) { err_t err; struct dns_hdr *hdr; struct dns_query qry; struct pbuf *p; char *query, *nptr; const char *pHostname; u8_t n; LWIP_DEBUGF(DNS_DEBUG, ("dns_send: dns_servers[%"U16_F"] \"%s\": request\n", (u16_t)(numdns), name)); LWIP_ASSERT("dns server out of array", numdns < DNS_MAX_SERVERS); LWIP_ASSERT("dns server has no IP address set", !ip_addr_isany(&dns_servers[numdns])); /* if here, we have either a new query or a retry on a previous query to process */ p = pbuf_alloc(PBUF_TRANSPORT, SIZEOF_DNS_HDR + DNS_MAX_NAME_LENGTH + SIZEOF_DNS_QUERY, PBUF_RAM); if (p != NULL) { LWIP_ASSERT("pbuf must be in one piece", p->next == NULL); /* fill dns header */ hdr = (struct dns_hdr*)p->payload; memset(hdr, 0, SIZEOF_DNS_HDR); hdr->id = htons(id); hdr->flags1 = DNS_FLAG1_RD; hdr->numquestions = PP_HTONS(1); query = (char*)hdr + SIZEOF_DNS_HDR; pHostname = name; --pHostname; /* convert hostname into suitable query format. */ do { ++pHostname; nptr = query; ++query; for(n = 0; *pHostname != '.' && *pHostname != 0; ++pHostname) { *query = *pHostname; ++query; ++n; } *nptr = n; } while(*pHostname != 0); *query++='\0'; /* fill dns query */ qry.type = PP_HTONS(DNS_RRTYPE_A); qry.cls = PP_HTONS(DNS_RRCLASS_IN); SMEMCPY(query, &qry, SIZEOF_DNS_QUERY); /* resize pbuf to the exact dns query */ pbuf_realloc(p, (u16_t)((query + SIZEOF_DNS_QUERY) - ((char*)(p->payload)))); /* connect to the server for faster receiving */ udp_connect(dns_pcb, &dns_servers[numdns], DNS_SERVER_PORT); /* send dns packet */ err = udp_sendto(dns_pcb, p, &dns_servers[numdns], DNS_SERVER_PORT); /* free pbuf */ pbuf_free(p); } else { err = ERR_MEM; } return err; } /** * dns_check_entry() - see if pEntry has not yet been queried and, if so, sends out a query. 
* Check an entry in the dns_table: * - send out query for new entries * - retry old pending entries on timeout (also with different servers) * - remove completed entries from the table if their TTL has expired * * @param i index of the dns_table entry to check */ static void dns_check_entry(u8_t i) { err_t err; struct dns_table_entry *pEntry = &dns_table[i]; LWIP_ASSERT("array index out of bounds", i < DNS_TABLE_SIZE); switch(pEntry->state) { case DNS_STATE_NEW: { /* initialize new entry */ pEntry->state = DNS_STATE_ASKING; pEntry->numdns = 0; pEntry->tmr = 1; pEntry->retries = 0; /* send DNS packet for this entry */ err = dns_send(pEntry->numdns, pEntry->name, i); if (err != ERR_OK) { LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, ("dns_send returned error: %s\n", lwip_strerr(err))); } break; } case DNS_STATE_ASKING: { if (--pEntry->tmr == 0) { if (++pEntry->retries == DNS_MAX_RETRIES) { if ((pEntry->numdns+1<DNS_MAX_SERVERS) && !ip_addr_isany(&dns_servers[pEntry->numdns+1])) { /* change of server */ pEntry->numdns++; pEntry->tmr = 1; pEntry->retries = 0; break; } else { LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": timeout\n", pEntry->name)); /* call specified callback function if provided */ if (pEntry->found) (*pEntry->found)(pEntry->name, NULL, pEntry->arg); /* flush this entry */ pEntry->state = DNS_STATE_UNUSED; pEntry->found = NULL; break; } } /* wait longer for the next retry */ pEntry->tmr = pEntry->retries; /* send DNS packet for this entry */ err = dns_send(pEntry->numdns, pEntry->name, i); if (err != ERR_OK) { LWIP_DEBUGF(DNS_DEBUG | LWIP_DBG_LEVEL_WARNING, ("dns_send returned error: %s\n", lwip_strerr(err))); } } break; } case DNS_STATE_DONE: { /* if the time to live is nul */ if (--pEntry->ttl == 0) { LWIP_DEBUGF(DNS_DEBUG, ("dns_check_entry: \"%s\": flush\n", pEntry->name)); /* flush this entry */ pEntry->state = DNS_STATE_UNUSED; pEntry->found = NULL; } break; } case DNS_STATE_UNUSED: /* nothing to do */ break; default: LWIP_ASSERT("unknown dns_table entry state:", 0); break; } } /** * Call dns_check_entry for each entry in dns_table - check all entries. */ static void dns_check_entries(void) { u8_t i; for (i = 0; i < DNS_TABLE_SIZE; ++i) { dns_check_entry(i); } } /** * Receive input function for DNS response packets arriving for the dns UDP pcb. * * @params see udp.h */ static void dns_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, ip_addr_t *addr, u16_t port) { u16_t i; char *pHostname; struct dns_hdr *hdr; struct dns_answer ans; struct dns_table_entry *pEntry; u16_t nquestions, nanswers; LWIP_UNUSED_ARG(arg); LWIP_UNUSED_ARG(pcb); LWIP_UNUSED_ARG(addr); LWIP_UNUSED_ARG(port); /* is the dns message too big ? */ if (p->tot_len > DNS_MSG_SIZE) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too big\n")); /* free pbuf and return */ goto memerr; } /* is the dns message big enough ? */ if (p->tot_len < (SIZEOF_DNS_HDR + SIZEOF_DNS_QUERY + SIZEOF_DNS_ANSWER)) { LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: pbuf too small\n")); /* free pbuf and return */ goto memerr; } /* copy dns payload inside static buffer for processing */ if (pbuf_copy_partial(p, dns_payload, p->tot_len, 0) == p->tot_len) { /* The ID in the DNS header should be our entry into the name table. */ hdr = (struct dns_hdr*)dns_payload; i = htons(hdr->id); if (i < DNS_TABLE_SIZE) { pEntry = &dns_table[i]; if(pEntry->state == DNS_STATE_ASKING) { /* This entry is now completed. */ pEntry->state = DNS_STATE_DONE; pEntry->err = hdr->flags2 & DNS_FLAG2_ERR_MASK; /* We only care about the question(s) and the answers. 
           The authrr and the extrarr are simply discarded. */
        nquestions = htons(hdr->numquestions);
        nanswers   = htons(hdr->numanswers);

        /* Check for error. If so, call callback to inform. */
        if (((hdr->flags1 & DNS_FLAG1_RESPONSE) == 0) || (pEntry->err != 0) || (nquestions != 1)) {
          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in flags\n", pEntry->name));
          /* call callback to indicate error, clean up memory and return */
          goto responseerr;
        }

#if DNS_DOES_NAME_CHECK
        /* Check if the name in the "question" part matches the name in the entry. */
        if (dns_compare_name((unsigned char *)(pEntry->name),
                             (unsigned char *)dns_payload + SIZEOF_DNS_HDR) != 0) {
          LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response does not match query\n", pEntry->name));
          /* call callback to indicate error, clean up memory and return */
          goto responseerr;
        }
#endif /* DNS_DOES_NAME_CHECK */

        /* Skip the name in the "question" part */
        pHostname = (char *) dns_parse_name((unsigned char *)dns_payload + SIZEOF_DNS_HDR) + SIZEOF_DNS_QUERY;

        while (nanswers > 0) {
          /* skip answer resource record's host name */
          pHostname = (char *) dns_parse_name((unsigned char *)pHostname);

          /* Check for IP address type and Internet class. Others are discarded. */
          SMEMCPY(&ans, pHostname, SIZEOF_DNS_ANSWER);
          if((ans.type == PP_HTONS(DNS_RRTYPE_A)) && (ans.cls == PP_HTONS(DNS_RRCLASS_IN)) &&
             (ans.len == PP_HTONS(sizeof(ip_addr_t))) ) {
            /* read the answer resource record's TTL, and cap it if needed */
            pEntry->ttl = ntohl(ans.ttl);
            if (pEntry->ttl > DNS_MAX_TTL) {
              pEntry->ttl = DNS_MAX_TTL;
            }
            /* read the IP address after answer resource record's header */
            SMEMCPY(&(pEntry->ipaddr), (pHostname+SIZEOF_DNS_ANSWER), sizeof(ip_addr_t));
            LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": response = ", pEntry->name));
            ip_addr_debug_print(DNS_DEBUG, (&(pEntry->ipaddr)));
            LWIP_DEBUGF(DNS_DEBUG, ("\n"));
            /* call specified callback function if provided */
            if (pEntry->found) {
              (*pEntry->found)(pEntry->name, &pEntry->ipaddr, pEntry->arg);
            }
            /* deallocate memory and return */
            goto memerr;
          } else {
            pHostname = pHostname + SIZEOF_DNS_ANSWER + htons(ans.len);
          }
          --nanswers;
        }
        LWIP_DEBUGF(DNS_DEBUG, ("dns_recv: \"%s\": error in response\n", pEntry->name));
        /* call callback to indicate error, clean up memory and return */
        goto responseerr;
      }
    }
  }

  /* deallocate memory and return */
  goto memerr;

responseerr:
  /* ERROR: call specified callback function with NULL as addr to indicate an error */
  if (pEntry->found) {
    (*pEntry->found)(pEntry->name, NULL, pEntry->arg);
  }
  /* flush this entry */
  pEntry->state = DNS_STATE_UNUSED;
  pEntry->found = NULL;

memerr:
  /* free pbuf */
  pbuf_free(p);
  return;
}

/**
 * Queues a new hostname to resolve and sends out a DNS query for that hostname
 *
 * @param name the hostname that is to be queried
 * @param found a callback function to be called on success, failure or timeout
 * @param callback_arg argument to pass to the callback function
 * @return an err_t return code.
 */
static err_t
dns_enqueue(const char *name, dns_found_callback found, void *callback_arg)
{
  u8_t i;
  u8_t lseq, lseqi;
  struct dns_table_entry *pEntry = NULL;
  size_t namelen;

  /* search an unused entry, or the oldest one */
  lseq = lseqi = 0;
  for (i = 0; i < DNS_TABLE_SIZE; ++i) {
    pEntry = &dns_table[i];
    /* is it an unused entry?
     */
    if (pEntry->state == DNS_STATE_UNUSED)
      break;

    /* check if this is the oldest completed entry */
    if (pEntry->state == DNS_STATE_DONE) {
      if ((dns_seqno - pEntry->seqno) > lseq) {
        lseq = dns_seqno - pEntry->seqno;
        lseqi = i;
      }
    }
  }

  /* if we haven't found an unused entry, use the oldest completed one */
  if (i == DNS_TABLE_SIZE) {
    if ((lseqi >= DNS_TABLE_SIZE) || (dns_table[lseqi].state != DNS_STATE_DONE)) {
      /* no entry can be used now, table is full */
      LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": DNS entries table is full\n", name));
      return ERR_MEM;
    } else {
      /* use the oldest completed one */
      i = lseqi;
      pEntry = &dns_table[i];
    }
  }

  /* use this entry */
  LWIP_DEBUGF(DNS_DEBUG, ("dns_enqueue: \"%s\": use DNS entry %"U16_F"\n", name, (u16_t)(i)));

  /* fill the entry */
  pEntry->state = DNS_STATE_NEW;
  pEntry->seqno = dns_seqno++;
  pEntry->found = found;
  pEntry->arg   = callback_arg;
  namelen = LWIP_MIN(strlen(name), DNS_MAX_NAME_LENGTH-1);
  MEMCPY(pEntry->name, name, namelen);
  pEntry->name[namelen] = 0;

  /* force to send query without waiting timer */
  dns_check_entry(i);

  /* dns query is enqueued */
  return ERR_INPROGRESS;
}

/**
 * Resolve a hostname (string) into an IP address.
 * NON-BLOCKING callback version for use with raw API!!!
 *
 * Returns immediately with one of err_t return codes:
 * - ERR_OK if hostname is a valid IP address string or the host
 *   name is already in the local names table.
 * - ERR_INPROGRESS if a request has been enqueued to be sent to the DNS
 *   server for resolution (no errors are present).
 * - ERR_ARG: dns client not initialized or invalid hostname
 *
 * @param hostname the hostname that is to be queried
 * @param addr pointer to an ip_addr_t where to store the address if it is already
 *             cached in the dns_table (only valid if ERR_OK is returned!)
 * @param found a callback function to be called on success, failure or timeout (only if
 *              ERR_INPROGRESS is returned!)
 * @param callback_arg argument to pass to the callback function
 * @return an err_t return code.
 */
err_t
dns_gethostbyname(const char *hostname, ip_addr_t *addr, dns_found_callback found,
                  void *callback_arg)
{
  u32_t ipaddr;
  /* not initialized or no valid server yet, or invalid addr pointer
   * or invalid hostname or invalid hostname length */
  if ((dns_pcb == NULL) || (addr == NULL) ||
      (!hostname) || (!hostname[0]) ||
      (strlen(hostname) >= DNS_MAX_NAME_LENGTH)) {
    return ERR_ARG;
  }

#if LWIP_HAVE_LOOPIF
  if (strcmp(hostname, "localhost")==0) {
    ip_addr_set_loopback(addr);
    return ERR_OK;
  }
#endif /* LWIP_HAVE_LOOPIF */

  /* host name already in octet notation? set ip addr and return ERR_OK */
  ipaddr = ipaddr_addr(hostname);
  if (ipaddr == IPADDR_NONE) {
    /* already have this address cached? */
    ipaddr = dns_lookup(hostname);
  }
  if (ipaddr != IPADDR_NONE) {
    ip4_addr_set_u32(addr, ipaddr);
    return ERR_OK;
  }

  /* queue query with specified callback */
  return dns_enqueue(hostname, found, callback_arg);
}

#endif /* LWIP_DNS */
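/*
 * Editor's note: the sketch below is illustrative and not part of lwIP's
 * dns.c. It shows how the non-blocking dns_gethostbyname() above is meant
 * to be driven from a raw-API application, using only the return codes and
 * the callback invocation documented in this file. example_found() and
 * example_resolve() are hypothetical names; "www.example.org" is a placeholder.
 */
static void
example_found(const char *name, ip_addr_t *ipaddr, void *arg)
{
  LWIP_UNUSED_ARG(arg);
  if (ipaddr != NULL) {
    /* resolution succeeded: *ipaddr holds the address for 'name' */
  } else {
    /* resolution failed or timed out (the callback gets a NULL address,
       see dns_check_entry() above) */
  }
}

static void
example_resolve(void)
{
  ip_addr_t addr;
  err_t err = dns_gethostbyname("www.example.org", &addr, example_found, NULL);

  if (err == ERR_OK) {
    /* hostname was an IP string or already cached: 'addr' is valid right away */
  } else if (err == ERR_INPROGRESS) {
    /* query enqueued: example_found() will be called once an answer arrives,
       provided dns_tmr() keeps being called every DNS_TMR_INTERVAL ms */
  } else {
    /* ERR_ARG: resolver not initialized or hostname invalid */
  }
}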
574233.c
/* * Multi-precision integer library * * Copyright (C) 2006-2015, ARM Limited, All Rights Reserved * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This file is part of mbed TLS (https://tls.mbed.org) */ /* * The following sources were referenced in the design of this Multi-precision * Integer library: * * [1] Handbook of Applied Cryptography - 1997 * Menezes, van Oorschot and Vanstone * * [2] Multi-Precision Math * Tom St Denis * https://github.com/libtom/libtommath/blob/develop/tommath.pdf * * [3] GNU Multi-Precision Arithmetic Library * https://gmplib.org/manual/index.html * */ #if !defined(MBEDTLS_CONFIG_FILE) #include "mbedtls/config.h" #else #include MBEDTLS_CONFIG_FILE #endif #if defined(MBEDTLS_BIGNUM_C) #include "mbedtls/bignum.h" #include "mbedtls/bn_mul.h" #include <string.h> #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include <stdio.h> #include <stdlib.h> #define mbedtls_printf printf #define mbedtls_calloc calloc #define mbedtls_free free #endif /* Implementation that should never be optimized out by the compiler */ static void mbedtls_mpi_zeroize( mbedtls_mpi_uint *v, size_t n ) { volatile mbedtls_mpi_uint *p = v; while( n-- ) *p++ = 0; } /* Implementation that should never be optimized out by the compiler */ static void mbedtls_zeroize( void *v, size_t n ) { volatile unsigned char *p = v; while( n-- ) *p++ = 0; } #define ciL (sizeof(mbedtls_mpi_uint)) /* chars in limb */ #define biL (ciL << 3) /* bits in limb */ #define biH (ciL << 2) /* half limb size */ #define MPI_SIZE_T_MAX ( (size_t) -1 ) /* SIZE_T_MAX is not standard */ /* * Convert between bits/chars and number of limbs * Divide first in order to avoid potential overflows */ #define BITS_TO_LIMBS(i) ( (i) / biL + ( (i) % biL != 0 ) ) #define CHARS_TO_LIMBS(i) ( (i) / ciL + ( (i) % ciL != 0 ) ) /* * Initialize one MPI */ void mbedtls_mpi_init( mbedtls_mpi *X ) { if( X == NULL ) return; X->s = 1; X->n = 0; X->p = NULL; } /* * Unallocate one MPI */ void mbedtls_mpi_free( mbedtls_mpi *X ) { if( X == NULL ) return; if( X->p != NULL ) { mbedtls_mpi_zeroize( X->p, X->n ); mbedtls_free( X->p ); } X->s = 1; X->n = 0; X->p = NULL; } /* * Enlarge to the specified number of limbs */ int mbedtls_mpi_grow( mbedtls_mpi *X, size_t nblimbs ) { mbedtls_mpi_uint *p; if( nblimbs > MBEDTLS_MPI_MAX_LIMBS ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); if( X->n < nblimbs ) { if( ( p = (mbedtls_mpi_uint*)mbedtls_calloc( nblimbs, ciL ) ) == NULL ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); if( X->p != NULL ) { memcpy( p, X->p, X->n * ciL ); mbedtls_mpi_zeroize( X->p, X->n ); mbedtls_free( X->p ); } X->n = nblimbs; X->p = p; } return( 0 ); } /* * Resize down as much as possible, * while keeping at least the specified number of limbs */ int mbedtls_mpi_shrink( mbedtls_mpi *X, size_t nblimbs ) { mbedtls_mpi_uint *p; size_t i; /* Actually resize up in this case */ if( X->n <= nblimbs ) return( mbedtls_mpi_grow( X, nblimbs ) ); for( i = X->n - 1; i > 0; i-- ) if( X->p[i] != 0 ) break; i++; if( i < nblimbs ) 
i = nblimbs; if( ( p = (mbedtls_mpi_uint*)mbedtls_calloc( i, ciL ) ) == NULL ) return( MBEDTLS_ERR_MPI_ALLOC_FAILED ); if( X->p != NULL ) { memcpy( p, X->p, i * ciL ); mbedtls_mpi_zeroize( X->p, X->n ); mbedtls_free( X->p ); } X->n = i; X->p = p; return( 0 ); } /* * Copy the contents of Y into X */ int mbedtls_mpi_copy( mbedtls_mpi *X, const mbedtls_mpi *Y ) { int ret = 0; size_t i; if( X == Y ) return( 0 ); if( Y->p == NULL ) { mbedtls_mpi_free( X ); return( 0 ); } for( i = Y->n - 1; i > 0; i-- ) if( Y->p[i] != 0 ) break; i++; X->s = Y->s; if( X->n < i ) { MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, i ) ); } else { memset( X->p + i, 0, ( X->n - i ) * ciL ); } memcpy( X->p, Y->p, i * ciL ); cleanup: return( ret ); } /* * Swap the contents of X and Y */ void mbedtls_mpi_swap( mbedtls_mpi *X, mbedtls_mpi *Y ) { mbedtls_mpi T; memcpy( &T, X, sizeof( mbedtls_mpi ) ); memcpy( X, Y, sizeof( mbedtls_mpi ) ); memcpy( Y, &T, sizeof( mbedtls_mpi ) ); } /* * Conditionally assign X = Y, without leaking information * about whether the assignment was made or not. * (Leaking information about the respective sizes of X and Y is ok however.) */ int mbedtls_mpi_safe_cond_assign( mbedtls_mpi *X, const mbedtls_mpi *Y, unsigned char assign ) { int ret = 0; size_t i; /* make sure assign is 0 or 1 in a time-constant manner */ assign = (assign | (unsigned char)-assign) >> 7; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, Y->n ) ); X->s = X->s * ( 1 - assign ) + Y->s * assign; for( i = 0; i < Y->n; i++ ) X->p[i] = X->p[i] * ( 1 - assign ) + Y->p[i] * assign; for( ; i < X->n; i++ ) X->p[i] *= ( 1 - assign ); cleanup: return( ret ); } /* * Conditionally swap X and Y, without leaking information * about whether the swap was made or not. * Here it is not ok to simply swap the pointers, which would lead to * different memory access patterns when X and Y are used afterwards. */ int mbedtls_mpi_safe_cond_swap( mbedtls_mpi *X, mbedtls_mpi *Y, unsigned char swap ) { int ret, s; size_t i; mbedtls_mpi_uint tmp; if( X == Y ) return( 0 ); /* make sure swap is 0 or 1 in a time-constant manner */ swap = (swap | (unsigned char)-swap) >> 7; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, Y->n ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( Y, X->n ) ); s = X->s; X->s = X->s * ( 1 - swap ) + Y->s * swap; Y->s = Y->s * ( 1 - swap ) + s * swap; for( i = 0; i < X->n; i++ ) { tmp = X->p[i]; X->p[i] = X->p[i] * ( 1 - swap ) + Y->p[i] * swap; Y->p[i] = Y->p[i] * ( 1 - swap ) + tmp * swap; } cleanup: return( ret ); } /* * Set value from integer */ int mbedtls_mpi_lset( mbedtls_mpi *X, mbedtls_mpi_sint z ) { int ret; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, 1 ) ); memset( X->p, 0, X->n * ciL ); X->p[0] = ( z < 0 ) ? -z : z; X->s = ( z < 0 ) ?
-1 : 1; cleanup: return( ret ); } /* * Get a specific bit */ int mbedtls_mpi_get_bit( const mbedtls_mpi *X, size_t pos ) { if( X->n * biL <= pos ) return( 0 ); return( ( X->p[pos / biL] >> ( pos % biL ) ) & 0x01 ); } /* * Set a bit to a specific value of 0 or 1 */ int mbedtls_mpi_set_bit( mbedtls_mpi *X, size_t pos, unsigned char val ) { int ret = 0; size_t off = pos / biL; size_t idx = pos % biL; if( val != 0 && val != 1 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); if( X->n * biL <= pos ) { if( val == 0 ) return( 0 ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, off + 1 ) ); } X->p[off] &= ~( (mbedtls_mpi_uint) 0x01 << idx ); X->p[off] |= (mbedtls_mpi_uint) val << idx; cleanup: return( ret ); } /* * Return the number of less significant zero-bits */ size_t mbedtls_mpi_lsb( const mbedtls_mpi *X ) { size_t i, j, count = 0; for( i = 0; i < X->n; i++ ) for( j = 0; j < biL; j++, count++ ) if( ( ( X->p[i] >> j ) & 1 ) != 0 ) return( count ); return( 0 ); } /* * Count leading zero bits in a given integer */ static size_t mbedtls_clz( const mbedtls_mpi_uint x ) { size_t j; mbedtls_mpi_uint mask = (mbedtls_mpi_uint) 1 << (biL - 1); for( j = 0; j < biL; j++ ) { if( x & mask ) break; mask >>= 1; } return j; } /* * Return the number of bits */ size_t mbedtls_mpi_bitlen( const mbedtls_mpi *X ) { size_t i, j; if( X->n == 0 ) return( 0 ); for( i = X->n - 1; i > 0; i-- ) if( X->p[i] != 0 ) break; j = biL - mbedtls_clz( X->p[i] ); return( ( i * biL ) + j ); } /* * Return the total size in bytes */ size_t mbedtls_mpi_size( const mbedtls_mpi *X ) { return( ( mbedtls_mpi_bitlen( X ) + 7 ) >> 3 ); } /* * Convert an ASCII character to digit value */ static int mpi_get_digit( mbedtls_mpi_uint *d, int radix, char c ) { *d = 255; if( c >= 0x30 && c <= 0x39 ) *d = c - 0x30; if( c >= 0x41 && c <= 0x46 ) *d = c - 0x37; if( c >= 0x61 && c <= 0x66 ) *d = c - 0x57; if( *d >= (mbedtls_mpi_uint) radix ) return( MBEDTLS_ERR_MPI_INVALID_CHARACTER ); return( 0 ); } /* * Import from an ASCII string */ int mbedtls_mpi_read_string( mbedtls_mpi *X, int radix, const char *s ) { int ret; size_t i, j, slen, n; mbedtls_mpi_uint d; mbedtls_mpi T; if( radix < 2 || radix > 16 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); mbedtls_mpi_init( &T ); slen = strlen( s ); if( radix == 16 ) { if( slen > MPI_SIZE_T_MAX >> 2 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); n = BITS_TO_LIMBS( slen << 2 ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, n ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i = slen, j = 0; i > 0; i--, j++ ) { if( i == 1 && s[i - 1] == '-' ) { X->s = -1; break; } MBEDTLS_MPI_CHK( mpi_get_digit( &d, radix, s[i - 1] ) ); X->p[j / ( 2 * ciL )] |= d << ( ( j % ( 2 * ciL ) ) << 2 ); } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i = 0; i < slen; i++ ) { if( i == 0 && s[i] == '-' ) { X->s = -1; continue; } MBEDTLS_MPI_CHK( mpi_get_digit( &d, radix, s[i] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_int( &T, X, radix ) ); if( X->s == 1 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, &T, d ) ); } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( X, &T, d ) ); } } } cleanup: mbedtls_mpi_free( &T ); return( ret ); } /* * Helper to write the digits high-order first */ static int mpi_write_hlp( mbedtls_mpi *X, int radix, char **p ) { int ret; mbedtls_mpi_uint r; if( radix < 2 || radix > 16 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_int( &r, X, radix ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_int( X, NULL, X, radix ) ); if( mbedtls_mpi_cmp_int( X, 0 ) != 0 ) MBEDTLS_MPI_CHK( mpi_write_hlp( X, radix, p ) ); if( r < 10 ) 
*(*p)++ = (char)( r + 0x30 ); else *(*p)++ = (char)( r + 0x37 ); cleanup: return( ret ); } /* * Export into an ASCII string */ int mbedtls_mpi_write_string( const mbedtls_mpi *X, int radix, char *buf, size_t buflen, size_t *olen ) { int ret = 0; size_t n; char *p; mbedtls_mpi T; if( radix < 2 || radix > 16 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); n = mbedtls_mpi_bitlen( X ); if( radix >= 4 ) n >>= 1; if( radix >= 16 ) n >>= 1; /* * Round up the buffer length to an even value to ensure that there is * enough room for hexadecimal values that can be represented in an odd * number of digits. */ n += 3 + ( ( n + 1 ) & 1 ); if( buflen < n ) { *olen = n; return( MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL ); } p = buf; mbedtls_mpi_init( &T ); if( X->s == -1 ) *p++ = '-'; if( radix == 16 ) { int c; size_t i, j, k; for( i = X->n, k = 0; i > 0; i-- ) { for( j = ciL; j > 0; j-- ) { c = ( X->p[i - 1] >> ( ( j - 1 ) << 3) ) & 0xFF; if( c == 0 && k == 0 && ( i + j ) != 2 ) continue; *(p++) = "0123456789ABCDEF" [c / 16]; *(p++) = "0123456789ABCDEF" [c % 16]; k = 1; } } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &T, X ) ); if( T.s == -1 ) T.s = 1; MBEDTLS_MPI_CHK( mpi_write_hlp( &T, radix, &p ) ); } *p++ = '\0'; *olen = p - buf; cleanup: mbedtls_mpi_free( &T ); return( ret ); } #if defined(MBEDTLS_FS_IO) /* * Read X from an opened file */ int mbedtls_mpi_read_file( mbedtls_mpi *X, int radix, FILE *fin ) { mbedtls_mpi_uint d; size_t slen; char *p; /* * Buffer should have space for (short) label and decimal formatted MPI, * newline characters and '\0' */ char s[ MBEDTLS_MPI_RW_BUFFER_SIZE ]; memset( s, 0, sizeof( s ) ); if( fgets( s, sizeof( s ) - 1, fin ) == NULL ) return( MBEDTLS_ERR_MPI_FILE_IO_ERROR ); slen = strlen( s ); if( slen == sizeof( s ) - 2 ) return( MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL ); if( slen > 0 && s[slen - 1] == '\n' ) { slen--; s[slen] = '\0'; } if( slen > 0 && s[slen - 1] == '\r' ) { slen--; s[slen] = '\0'; } p = s + slen; while( p-- > s ) if( mpi_get_digit( &d, radix, *p ) != 0 ) break; return( mbedtls_mpi_read_string( X, radix, p + 1 ) ); } /* * Write X into an opened file (or stdout if fout == NULL) */ int mbedtls_mpi_write_file( const char *p, const mbedtls_mpi *X, int radix, FILE *fout ) { int ret; size_t n, slen, plen; /* * Buffer should have space for (short) label and decimal formatted MPI, * newline characters and '\0' */ char s[ MBEDTLS_MPI_RW_BUFFER_SIZE ]; memset( s, 0, sizeof( s ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_write_string( X, radix, s, sizeof( s ) - 2, &n ) ); if( p == NULL ) p = ""; plen = strlen( p ); slen = strlen( s ); s[slen++] = '\r'; s[slen++] = '\n'; if( fout != NULL ) { if( fwrite( p, 1, plen, fout ) != plen || fwrite( s, 1, slen, fout ) != slen ) return( MBEDTLS_ERR_MPI_FILE_IO_ERROR ); } else mbedtls_printf( "%s%s", p, s ); cleanup: return( ret ); } #endif /* MBEDTLS_FS_IO */ /* * Import X from unsigned binary data, big endian */ int mbedtls_mpi_read_binary( mbedtls_mpi *X, const unsigned char *buf, size_t buflen ) { int ret; size_t i, j; size_t const limbs = CHARS_TO_LIMBS( buflen ); /* Ensure that target MPI has exactly the necessary number of limbs */ if( X->n != limbs ) { mbedtls_mpi_free( X ); mbedtls_mpi_init( X ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, limbs ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( i = buflen, j = 0; i > 0; i--, j++ ) X->p[j / ciL] |= ((mbedtls_mpi_uint) buf[i - 1]) << ((j % ciL) << 3); cleanup: return( ret ); } /* * Export X into unsigned binary data, big endian */ int mbedtls_mpi_write_binary( const mbedtls_mpi *X, unsigned char 
*buf, size_t buflen ) { size_t i, j, n; n = mbedtls_mpi_size( X ); if( buflen < n ) return( MBEDTLS_ERR_MPI_BUFFER_TOO_SMALL ); memset( buf, 0, buflen ); for( i = buflen - 1, j = 0; n > 0; i--, j++, n-- ) buf[i] = (unsigned char)( X->p[j / ciL] >> ((j % ciL) << 3) ); return( 0 ); } /* * Left-shift: X <<= count */ int mbedtls_mpi_shift_l( mbedtls_mpi *X, size_t count ) { int ret; size_t i, v0, t1; mbedtls_mpi_uint r0 = 0, r1; v0 = count / (biL ); t1 = count & (biL - 1); i = mbedtls_mpi_bitlen( X ) + count; if( X->n * biL < i ) MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, BITS_TO_LIMBS( i ) ) ); ret = 0; /* * shift by count / limb_size */ if( v0 > 0 ) { for( i = X->n; i > v0; i-- ) X->p[i - 1] = X->p[i - v0 - 1]; for( ; i > 0; i-- ) X->p[i - 1] = 0; } /* * shift by count % limb_size */ if( t1 > 0 ) { for( i = v0; i < X->n; i++ ) { r1 = X->p[i] >> (biL - t1); X->p[i] <<= t1; X->p[i] |= r0; r0 = r1; } } cleanup: return( ret ); } /* * Right-shift: X >>= count */ int mbedtls_mpi_shift_r( mbedtls_mpi *X, size_t count ) { size_t i, v0, v1; mbedtls_mpi_uint r0 = 0, r1; v0 = count / biL; v1 = count & (biL - 1); if( v0 > X->n || ( v0 == X->n && v1 > 0 ) ) return mbedtls_mpi_lset( X, 0 ); /* * shift by count / limb_size */ if( v0 > 0 ) { for( i = 0; i < X->n - v0; i++ ) X->p[i] = X->p[i + v0]; for( ; i < X->n; i++ ) X->p[i] = 0; } /* * shift by count % limb_size */ if( v1 > 0 ) { for( i = X->n; i > 0; i-- ) { r1 = X->p[i - 1] << (biL - v1); X->p[i - 1] >>= v1; X->p[i - 1] |= r0; r0 = r1; } } return( 0 ); } /* * Compare unsigned values */ int mbedtls_mpi_cmp_abs( const mbedtls_mpi *X, const mbedtls_mpi *Y ) { size_t i, j; for( i = X->n; i > 0; i-- ) if( X->p[i - 1] != 0 ) break; for( j = Y->n; j > 0; j-- ) if( Y->p[j - 1] != 0 ) break; if( i == 0 && j == 0 ) return( 0 ); if( i > j ) return( 1 ); if( j > i ) return( -1 ); for( ; i > 0; i-- ) { if( X->p[i - 1] > Y->p[i - 1] ) return( 1 ); if( X->p[i - 1] < Y->p[i - 1] ) return( -1 ); } return( 0 ); } /* * Compare signed values */ int mbedtls_mpi_cmp_mpi( const mbedtls_mpi *X, const mbedtls_mpi *Y ) { size_t i, j; for( i = X->n; i > 0; i-- ) if( X->p[i - 1] != 0 ) break; for( j = Y->n; j > 0; j-- ) if( Y->p[j - 1] != 0 ) break; if( i == 0 && j == 0 ) return( 0 ); if( i > j ) return( X->s ); if( j > i ) return( -Y->s ); if( X->s > 0 && Y->s < 0 ) return( 1 ); if( Y->s > 0 && X->s < 0 ) return( -1 ); for( ; i > 0; i-- ) { if( X->p[i - 1] > Y->p[i - 1] ) return( X->s ); if( X->p[i - 1] < Y->p[i - 1] ) return( -X->s ); } return( 0 ); } /* * Compare signed values */ int mbedtls_mpi_cmp_int( const mbedtls_mpi *X, mbedtls_mpi_sint z ) { mbedtls_mpi Y; mbedtls_mpi_uint p[1]; *p = ( z < 0 ) ? -z : z; Y.s = ( z < 0 ) ? -1 : 1; Y.n = 1; Y.p = p; return( mbedtls_mpi_cmp_mpi( X, &Y ) ); } /* * Unsigned addition: X = |A| + |B| (HAC 14.7) */ int mbedtls_mpi_add_abs( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t i, j; mbedtls_mpi_uint *o, *p, c, tmp; if( X == B ) { const mbedtls_mpi *T = A; A = X; B = T; } if( X != A ) MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, A ) ); /* * X should always be positive as a result of unsigned additions. 
*/ X->s = 1; for( j = B->n; j > 0; j-- ) if( B->p[j - 1] != 0 ) break; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, j ) ); o = B->p; p = X->p; c = 0; /* * tmp is used because it might happen that p == o */ for( i = 0; i < j; i++, o++, p++ ) { tmp= *o; *p += c; c = ( *p < c ); *p += tmp; c += ( *p < tmp ); } while( c != 0 ) { if( i >= X->n ) { MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, i + 1 ) ); p = X->p + i; } *p += c; c = ( *p < c ); i++; p++; } cleanup: return( ret ); } /* * Helper for mbedtls_mpi subtraction */ static void mpi_sub_hlp( size_t n, mbedtls_mpi_uint *s, mbedtls_mpi_uint *d ) { size_t i; mbedtls_mpi_uint c, z; for( i = c = 0; i < n; i++, s++, d++ ) { z = ( *d < c ); *d -= c; c = ( *d < *s ) + z; *d -= *s; } while( c != 0 ) { z = ( *d < c ); *d -= c; c = z; d++; } } /* * Unsigned subtraction: X = |A| - |B| (HAC 14.9) */ int mbedtls_mpi_sub_abs( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { mbedtls_mpi TB; int ret; size_t n; if( mbedtls_mpi_cmp_abs( A, B ) < 0 ) return( MBEDTLS_ERR_MPI_NEGATIVE_VALUE ); mbedtls_mpi_init( &TB ); if( X == B ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, B ) ); B = &TB; } if( X != A ) MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, A ) ); /* * X should always be positive as a result of unsigned subtractions. */ X->s = 1; ret = 0; for( n = B->n; n > 0; n-- ) if( B->p[n - 1] != 0 ) break; mpi_sub_hlp( n, B->p, X->p ); cleanup: mbedtls_mpi_free( &TB ); return( ret ); } /* * Signed addition: X = A + B */ int mbedtls_mpi_add_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret, s = A->s; if( A->s * B->s < 0 ) { if( mbedtls_mpi_cmp_abs( A, B ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, A, B ) ); X->s = s; } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, B, A ) ); X->s = -s; } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_add_abs( X, A, B ) ); X->s = s; } cleanup: return( ret ); } /* * Signed subtraction: X = A - B */ int mbedtls_mpi_sub_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret, s = A->s; if( A->s * B->s > 0 ) { if( mbedtls_mpi_cmp_abs( A, B ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, A, B ) ); X->s = s; } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( X, B, A ) ); X->s = -s; } } else { MBEDTLS_MPI_CHK( mbedtls_mpi_add_abs( X, A, B ) ); X->s = s; } cleanup: return( ret ); } /* * Signed addition: X = A + b */ int mbedtls_mpi_add_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; p[0] = ( b < 0 ) ? -b : b; _B.s = ( b < 0 ) ? -1 : 1; _B.n = 1; _B.p = p; return( mbedtls_mpi_add_mpi( X, A, &_B ) ); } /* * Signed subtraction: X = A - b */ int mbedtls_mpi_sub_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; p[0] = ( b < 0 ) ? -b : b; _B.s = ( b < 0 ) ? -1 : 1; _B.n = 1; _B.p = p; return( mbedtls_mpi_sub_mpi( X, A, &_B ) ); } /* * Helper for mbedtls_mpi multiplication */ static #if defined(__APPLE__) && defined(__arm__) /* * Apple LLVM version 4.2 (clang-425.0.24) (based on LLVM 3.2svn) * appears to need this to prevent bad ARM code generation at -O3. 
*/ __attribute__ ((noinline)) #endif void mpi_mul_hlp( size_t i, mbedtls_mpi_uint *s, mbedtls_mpi_uint *d, mbedtls_mpi_uint b ) { mbedtls_mpi_uint c = 0, t = 0; #if defined(MULADDC_HUIT) for( ; i >= 8; i -= 8 ) { MULADDC_INIT MULADDC_HUIT MULADDC_STOP } for( ; i > 0; i-- ) { MULADDC_INIT MULADDC_CORE MULADDC_STOP } #else /* MULADDC_HUIT */ for( ; i >= 16; i -= 16 ) { MULADDC_INIT MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_STOP } for( ; i >= 8; i -= 8 ) { MULADDC_INIT MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_CORE MULADDC_STOP } for( ; i > 0; i-- ) { MULADDC_INIT MULADDC_CORE MULADDC_STOP } #endif /* MULADDC_HUIT */ t++; do { *d += c; c = ( *d < c ); d++; } while( c != 0 ); } /* * Baseline multiplication: X = A * B (HAC 14.12) */ int mbedtls_mpi_mul_mpi( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t i, j; mbedtls_mpi TA, TB; mbedtls_mpi_init( &TA ); mbedtls_mpi_init( &TB ); if( X == A ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TA, A ) ); A = &TA; } if( X == B ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, B ) ); B = &TB; } for( i = A->n; i > 0; i-- ) if( A->p[i - 1] != 0 ) break; for( j = B->n; j > 0; j-- ) if( B->p[j - 1] != 0 ) break; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, i + j ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( X, 0 ) ); for( ; j > 0; j-- ) mpi_mul_hlp( i, A->p, X->p + j - 1, B->p[j - 1] ); X->s = A->s * B->s; cleanup: mbedtls_mpi_free( &TB ); mbedtls_mpi_free( &TA ); return( ret ); } /* * Baseline multiplication: X = A * b */ int mbedtls_mpi_mul_int( mbedtls_mpi *X, const mbedtls_mpi *A, mbedtls_mpi_uint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; _B.s = 1; _B.n = 1; _B.p = p; p[0] = b; return( mbedtls_mpi_mul_mpi( X, A, &_B ) ); } /* * Unsigned integer divide - double mbedtls_mpi_uint dividend, u1/u0, and * mbedtls_mpi_uint divisor, d */ static mbedtls_mpi_uint mbedtls_int_div_int( mbedtls_mpi_uint u1, mbedtls_mpi_uint u0, mbedtls_mpi_uint d, mbedtls_mpi_uint *r ) { #if defined(MBEDTLS_HAVE_UDBL) mbedtls_t_udbl dividend, quotient; #else const mbedtls_mpi_uint radix = (mbedtls_mpi_uint) 1 << biH; const mbedtls_mpi_uint uint_halfword_mask = ( (mbedtls_mpi_uint) 1 << biH ) - 1; mbedtls_mpi_uint d0, d1, q0, q1, rAX, r0, quotient; mbedtls_mpi_uint u0_msw, u0_lsw; size_t s; #endif /* * Check for overflow */ if( 0 == d || u1 >= d ) { if (r != NULL) *r = ~0; return ( ~0 ); } #if defined(MBEDTLS_HAVE_UDBL) dividend = (mbedtls_t_udbl) u1 << biL; dividend |= (mbedtls_t_udbl) u0; quotient = dividend / d; if( quotient > ( (mbedtls_t_udbl) 1 << biL ) - 1 ) quotient = ( (mbedtls_t_udbl) 1 << biL ) - 1; if( r != NULL ) *r = (mbedtls_mpi_uint)( dividend - (quotient * d ) ); return (mbedtls_mpi_uint) quotient; #else /* * Algorithm D, Section 4.3.1 - The Art of Computer Programming * Vol. 
2 - Seminumerical Algorithms, Knuth */ /* * Normalize the divisor, d, and dividend, u0, u1 */ s = mbedtls_clz( d ); d = d << s; u1 = u1 << s; u1 |= ( u0 >> ( biL - s ) ) & ( -(mbedtls_mpi_sint)s >> ( biL - 1 ) ); u0 = u0 << s; d1 = d >> biH; d0 = d & uint_halfword_mask; u0_msw = u0 >> biH; u0_lsw = u0 & uint_halfword_mask; /* * Find the first quotient and remainder */ q1 = u1 / d1; r0 = u1 - d1 * q1; while( q1 >= radix || ( q1 * d0 > radix * r0 + u0_msw ) ) { q1 -= 1; r0 += d1; if ( r0 >= radix ) break; } rAX = ( u1 * radix ) + ( u0_msw - q1 * d ); q0 = rAX / d1; r0 = rAX - q0 * d1; while( q0 >= radix || ( q0 * d0 > radix * r0 + u0_lsw ) ) { q0 -= 1; r0 += d1; if ( r0 >= radix ) break; } if (r != NULL) *r = ( rAX * radix + u0_lsw - q0 * d ) >> s; quotient = q1 * radix + q0; return quotient; #endif } /* * Division by mbedtls_mpi: A = Q * B + R (HAC 14.20) */ int mbedtls_mpi_div_mpi( mbedtls_mpi *Q, mbedtls_mpi *R, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t i, n, t, k; mbedtls_mpi X, Y, Z, T1, T2; if( mbedtls_mpi_cmp_int( B, 0 ) == 0 ) return( MBEDTLS_ERR_MPI_DIVISION_BY_ZERO ); mbedtls_mpi_init( &X ); mbedtls_mpi_init( &Y ); mbedtls_mpi_init( &Z ); mbedtls_mpi_init( &T1 ); mbedtls_mpi_init( &T2 ); if( mbedtls_mpi_cmp_abs( A, B ) < 0 ) { if( Q != NULL ) MBEDTLS_MPI_CHK( mbedtls_mpi_lset( Q, 0 ) ); if( R != NULL ) MBEDTLS_MPI_CHK( mbedtls_mpi_copy( R, A ) ); return( 0 ); } MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &X, A ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &Y, B ) ); X.s = Y.s = 1; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &Z, A->n + 2 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &Z, 0 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &T1, 2 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &T2, 3 ) ); k = mbedtls_mpi_bitlen( &Y ) % biL; if( k < biL - 1 ) { k = biL - 1 - k; MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &X, k ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &Y, k ) ); } else k = 0; n = X.n - 1; t = Y.n - 1; MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &Y, biL * ( n - t ) ) ); while( mbedtls_mpi_cmp_mpi( &X, &Y ) >= 0 ) { Z.p[n - t]++; MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &X, &X, &Y ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &Y, biL * ( n - t ) ) ); for( i = n; i > t ; i-- ) { if( X.p[i] >= Y.p[t] ) Z.p[i - t - 1] = ~0; else { Z.p[i - t - 1] = mbedtls_int_div_int( X.p[i], X.p[i - 1], Y.p[t], NULL); } Z.p[i - t - 1]++; do { Z.p[i - t - 1]--; MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &T1, 0 ) ); T1.p[0] = ( t < 1 ) ? 0 : Y.p[t - 1]; T1.p[1] = Y.p[t]; MBEDTLS_MPI_CHK( mbedtls_mpi_mul_int( &T1, &T1, Z.p[i - t - 1] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &T2, 0 ) ); T2.p[0] = ( i < 2 ) ? 0 : X.p[i - 2]; T2.p[1] = ( i < 1 ) ? 
0 : X.p[i - 1]; T2.p[2] = X.p[i]; } while( mbedtls_mpi_cmp_mpi( &T1, &T2 ) > 0 ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_int( &T1, &Y, Z.p[i - t - 1] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &T1, biL * ( i - t - 1 ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &X, &X, &T1 ) ); if( mbedtls_mpi_cmp_int( &X, 0 ) < 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &T1, &Y ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &T1, biL * ( i - t - 1 ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &X, &X, &T1 ) ); Z.p[i - t - 1]--; } } if( Q != NULL ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( Q, &Z ) ); Q->s = A->s * B->s; } if( R != NULL ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &X, k ) ); X.s = A->s; MBEDTLS_MPI_CHK( mbedtls_mpi_copy( R, &X ) ); if( mbedtls_mpi_cmp_int( R, 0 ) == 0 ) R->s = 1; } cleanup: mbedtls_mpi_free( &X ); mbedtls_mpi_free( &Y ); mbedtls_mpi_free( &Z ); mbedtls_mpi_free( &T1 ); mbedtls_mpi_free( &T2 ); return( ret ); } /* * Division by int: A = Q * b + R */ int mbedtls_mpi_div_int( mbedtls_mpi *Q, mbedtls_mpi *R, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { mbedtls_mpi _B; mbedtls_mpi_uint p[1]; p[0] = ( b < 0 ) ? -b : b; _B.s = ( b < 0 ) ? -1 : 1; _B.n = 1; _B.p = p; return( mbedtls_mpi_div_mpi( Q, R, A, &_B ) ); } /* * Modulo: R = A mod B */ int mbedtls_mpi_mod_mpi( mbedtls_mpi *R, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; if( mbedtls_mpi_cmp_int( B, 0 ) < 0 ) return( MBEDTLS_ERR_MPI_NEGATIVE_VALUE ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_mpi( NULL, R, A, B ) ); while( mbedtls_mpi_cmp_int( R, 0 ) < 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( R, R, B ) ); while( mbedtls_mpi_cmp_mpi( R, B ) >= 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( R, R, B ) ); cleanup: return( ret ); } /* * Modulo: r = A mod b */ int mbedtls_mpi_mod_int( mbedtls_mpi_uint *r, const mbedtls_mpi *A, mbedtls_mpi_sint b ) { size_t i; mbedtls_mpi_uint x, y, z; if( b == 0 ) return( MBEDTLS_ERR_MPI_DIVISION_BY_ZERO ); if( b < 0 ) return( MBEDTLS_ERR_MPI_NEGATIVE_VALUE ); /* * handle trivial cases */ if( b == 1 ) { *r = 0; return( 0 ); } if( b == 2 ) { *r = A->p[0] & 1; return( 0 ); } /* * general case */ for( i = A->n, y = 0; i > 0; i-- ) { x = A->p[i - 1]; y = ( y << biH ) | ( x >> biH ); z = y / b; y -= z * b; x <<= biH; y = ( y << biH ) | ( x >> biH ); z = y / b; y -= z * b; } /* * If A is negative, then the current y represents a negative value. * Flipping it to the positive side. */ if( A->s < 0 && y != 0 ) y = b - y; *r = y; return( 0 ); } /* * Fast Montgomery initialization (thanks to Tom St Denis) */ static void mpi_montg_init( mbedtls_mpi_uint *mm, const mbedtls_mpi *N ) { mbedtls_mpi_uint x, m0 = N->p[0]; unsigned int i; x = m0; x += ( ( m0 + 2 ) & 4 ) << 1; for( i = biL; i >= 8; i /= 2 ) x *= ( 2 - ( m0 * x ) ); *mm = ~x + 1; } /* * Montgomery multiplication: A = A * B * R^-1 mod N (HAC 14.36) */ static int mpi_montmul( mbedtls_mpi *A, const mbedtls_mpi *B, const mbedtls_mpi *N, mbedtls_mpi_uint mm, const mbedtls_mpi *T ) { size_t i, n, m; mbedtls_mpi_uint u0, u1, *d; if( T->n < N->n + 1 || T->p == NULL ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); memset( T->p, 0, T->n * ciL ); d = T->p; n = N->n; m = ( B->n < n ) ? 
B->n : n; for( i = 0; i < n; i++ ) { /* * T = (T + u0*B + u1*N) / 2^biL */ u0 = A->p[i]; u1 = ( d[0] + u0 * B->p[0] ) * mm; mpi_mul_hlp( m, B->p, d, u0 ); mpi_mul_hlp( n, N->p, d, u1 ); *d++ = u0; d[n + 1] = 0; } memcpy( A->p, d, ( n + 1 ) * ciL ); if( mbedtls_mpi_cmp_abs( A, N ) >= 0 ) mpi_sub_hlp( n, N->p, A->p ); else /* prevent timing attacks */ mpi_sub_hlp( n, A->p, T->p ); return( 0 ); } /* * Montgomery reduction: A = A * R^-1 mod N */ static int mpi_montred( mbedtls_mpi *A, const mbedtls_mpi *N, mbedtls_mpi_uint mm, const mbedtls_mpi *T ) { mbedtls_mpi_uint z = 1; mbedtls_mpi U; U.n = U.s = (int) z; U.p = &z; return( mpi_montmul( A, &U, N, mm, T ) ); } /* * Sliding-window exponentiation: X = A^E mod N (HAC 14.85) */ int mbedtls_mpi_exp_mod( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *E, const mbedtls_mpi *N, mbedtls_mpi *_RR ) { int ret; size_t wbits, wsize, one = 1; size_t i, j, nblimbs; size_t bufsize, nbits; mbedtls_mpi_uint ei, mm, state; mbedtls_mpi RR, T, W[ 2 << MBEDTLS_MPI_WINDOW_SIZE ], Apos; int neg; if( mbedtls_mpi_cmp_int( N, 0 ) <= 0 || ( N->p[0] & 1 ) == 0 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); if( mbedtls_mpi_cmp_int( E, 0 ) < 0 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); /* * Init temps and window size */ mpi_montg_init( &mm, N ); mbedtls_mpi_init( &RR ); mbedtls_mpi_init( &T ); mbedtls_mpi_init( &Apos ); memset( W, 0, sizeof( W ) ); i = mbedtls_mpi_bitlen( E ); wsize = ( i > 671 ) ? 6 : ( i > 239 ) ? 5 : ( i > 79 ) ? 4 : ( i > 23 ) ? 3 : 1; if( wsize > MBEDTLS_MPI_WINDOW_SIZE ) wsize = MBEDTLS_MPI_WINDOW_SIZE; j = N->n + 1; MBEDTLS_MPI_CHK( mbedtls_mpi_grow( X, j ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &W[1], j ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &T, j * 2 ) ); /* * Compensate for negative A (and correct at the end) */ neg = ( A->s == -1 ); if( neg ) { MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &Apos, A ) ); Apos.s = 1; A = &Apos; } /* * If 1st call, pre-compute R^2 mod N */ if( _RR == NULL || _RR->p == NULL ) { MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &RR, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &RR, N->n * 2 * biL ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &RR, &RR, N ) ); if( _RR != NULL ) memcpy( _RR, &RR, sizeof( mbedtls_mpi ) ); } else memcpy( &RR, _RR, sizeof( mbedtls_mpi ) ); /* * W[1] = A * R^2 * R^-1 mod N = A * R mod N */ if( mbedtls_mpi_cmp_mpi( A, N ) >= 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &W[1], A, N ) ); else MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &W[1], A ) ); MBEDTLS_MPI_CHK( mpi_montmul( &W[1], &RR, N, mm, &T ) ); /* * X = R^2 * R^-1 mod N = R mod N */ MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, &RR ) ); MBEDTLS_MPI_CHK( mpi_montred( X, N, mm, &T ) ); if( wsize > 1 ) { /* * W[1 << (wsize - 1)] = W[1] ^ (wsize - 1) */ j = one << ( wsize - 1 ); MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &W[j], N->n + 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &W[j], &W[1] ) ); for( i = 0; i < wsize - 1; i++ ) MBEDTLS_MPI_CHK( mpi_montmul( &W[j], &W[j], N, mm, &T ) ); /* * W[i] = W[i - 1] * W[1] */ for( i = j + 1; i < ( one << wsize ); i++ ) { MBEDTLS_MPI_CHK( mbedtls_mpi_grow( &W[i], N->n + 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &W[i], &W[i - 1] ) ); MBEDTLS_MPI_CHK( mpi_montmul( &W[i], &W[1], N, mm, &T ) ); } } nblimbs = E->n; bufsize = 0; nbits = 0; wbits = 0; state = 0; while( 1 ) { if( bufsize == 0 ) { if( nblimbs == 0 ) break; nblimbs--; bufsize = sizeof( mbedtls_mpi_uint ) << 3; } bufsize--; ei = (E->p[nblimbs] >> bufsize) & 1; /* * skip leading 0s */ if( ei == 0 && state == 0 ) continue; if( ei == 0 && state == 1 ) { /* * out of window, square X */ 
MBEDTLS_MPI_CHK( mpi_montmul( X, X, N, mm, &T ) ); continue; } /* * add ei to current window */ state = 2; nbits++; wbits |= ( ei << ( wsize - nbits ) ); if( nbits == wsize ) { /* * X = X^wsize R^-1 mod N */ for( i = 0; i < wsize; i++ ) MBEDTLS_MPI_CHK( mpi_montmul( X, X, N, mm, &T ) ); /* * X = X * W[wbits] R^-1 mod N */ MBEDTLS_MPI_CHK( mpi_montmul( X, &W[wbits], N, mm, &T ) ); state--; nbits = 0; wbits = 0; } } /* * process the remaining bits */ for( i = 0; i < nbits; i++ ) { MBEDTLS_MPI_CHK( mpi_montmul( X, X, N, mm, &T ) ); wbits <<= 1; if( ( wbits & ( one << wsize ) ) != 0 ) MBEDTLS_MPI_CHK( mpi_montmul( X, &W[1], N, mm, &T ) ); } /* * X = A^E * R * R^-1 mod N = A^E mod N */ MBEDTLS_MPI_CHK( mpi_montred( X, N, mm, &T ) ); if( neg && E->n != 0 && ( E->p[0] & 1 ) != 0 ) { X->s = -1; MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( X, N, X ) ); } cleanup: for( i = ( one << ( wsize - 1 ) ); i < ( one << wsize ); i++ ) mbedtls_mpi_free( &W[i] ); mbedtls_mpi_free( &W[1] ); mbedtls_mpi_free( &T ); mbedtls_mpi_free( &Apos ); if( _RR == NULL || _RR->p == NULL ) mbedtls_mpi_free( &RR ); return( ret ); } /* * Greatest common divisor: G = gcd(A, B) (HAC 14.54) */ int mbedtls_mpi_gcd( mbedtls_mpi *G, const mbedtls_mpi *A, const mbedtls_mpi *B ) { int ret; size_t lz, lzt; mbedtls_mpi TG, TA, TB; mbedtls_mpi_init( &TG ); mbedtls_mpi_init( &TA ); mbedtls_mpi_init( &TB ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TA, A ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, B ) ); lz = mbedtls_mpi_lsb( &TA ); lzt = mbedtls_mpi_lsb( &TB ); if( lzt < lz ) lz = lzt; MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TA, lz ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TB, lz ) ); TA.s = TB.s = 1; while( mbedtls_mpi_cmp_int( &TA, 0 ) != 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TA, mbedtls_mpi_lsb( &TA ) ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TB, mbedtls_mpi_lsb( &TB ) ) ); if( mbedtls_mpi_cmp_mpi( &TA, &TB ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( &TA, &TA, &TB ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TA, 1 ) ); } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_abs( &TB, &TB, &TA ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TB, 1 ) ); } } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_l( &TB, lz ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( G, &TB ) ); cleanup: mbedtls_mpi_free( &TG ); mbedtls_mpi_free( &TA ); mbedtls_mpi_free( &TB ); return( ret ); } /* * Fill X with size bytes of random. * * Use a temporary bytes representation to make sure the result is the same * regardless of the platform endianness (useful when f_rng is actually * deterministic, eg for tests). 
*/ int mbedtls_mpi_fill_random( mbedtls_mpi *X, size_t size, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret; unsigned char buf[MBEDTLS_MPI_MAX_SIZE]; if( size > MBEDTLS_MPI_MAX_SIZE ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); MBEDTLS_MPI_CHK( f_rng( p_rng, buf, size ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_binary( X, buf, size ) ); cleanup: mbedtls_zeroize( buf, sizeof( buf ) ); return( ret ); } /* * Modular inverse: X = A^-1 mod N (HAC 14.61 / 14.64) */ int mbedtls_mpi_inv_mod( mbedtls_mpi *X, const mbedtls_mpi *A, const mbedtls_mpi *N ) { int ret; mbedtls_mpi G, TA, TU, U1, U2, TB, TV, V1, V2; if( mbedtls_mpi_cmp_int( N, 1 ) <= 0 ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); mbedtls_mpi_init( &TA ); mbedtls_mpi_init( &TU ); mbedtls_mpi_init( &U1 ); mbedtls_mpi_init( &U2 ); mbedtls_mpi_init( &G ); mbedtls_mpi_init( &TB ); mbedtls_mpi_init( &TV ); mbedtls_mpi_init( &V1 ); mbedtls_mpi_init( &V2 ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &G, A, N ) ); if( mbedtls_mpi_cmp_int( &G, 1 ) != 0 ) { ret = MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; goto cleanup; } MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &TA, A, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TU, &TA ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TB, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &TV, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &U1, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &U2, 0 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &V1, 0 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &V2, 1 ) ); do { while( ( TU.p[0] & 1 ) == 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TU, 1 ) ); if( ( U1.p[0] & 1 ) != 0 || ( U2.p[0] & 1 ) != 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &U1, &U1, &TB ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &U2, &U2, &TA ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &U1, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &U2, 1 ) ); } while( ( TV.p[0] & 1 ) == 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &TV, 1 ) ); if( ( V1.p[0] & 1 ) != 0 || ( V2.p[0] & 1 ) != 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &V1, &V1, &TB ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V2, &V2, &TA ) ); } MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &V1, 1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &V2, 1 ) ); } if( mbedtls_mpi_cmp_mpi( &TU, &TV ) >= 0 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &TU, &TU, &TV ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &U1, &U1, &V1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &U2, &U2, &V2 ) ); } else { MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &TV, &TV, &TU ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V1, &V1, &U1 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V2, &V2, &U2 ) ); } } while( mbedtls_mpi_cmp_int( &TU, 0 ) != 0 ); while( mbedtls_mpi_cmp_int( &V1, 0 ) < 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_mpi( &V1, &V1, N ) ); while( mbedtls_mpi_cmp_mpi( &V1, N ) >= 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_sub_mpi( &V1, &V1, N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( X, &V1 ) ); cleanup: mbedtls_mpi_free( &TA ); mbedtls_mpi_free( &TU ); mbedtls_mpi_free( &U1 ); mbedtls_mpi_free( &U2 ); mbedtls_mpi_free( &G ); mbedtls_mpi_free( &TB ); mbedtls_mpi_free( &TV ); mbedtls_mpi_free( &V1 ); mbedtls_mpi_free( &V2 ); return( ret ); } #if defined(MBEDTLS_GENPRIME) static const int small_prime[] = { 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 
431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, -103 }; /* * Small divisors test (X must be positive) * * Return values: * 0: no small factor (possible prime, more tests needed) * 1: certain prime * MBEDTLS_ERR_MPI_NOT_ACCEPTABLE: certain non-prime * other negative: error */ static int mpi_check_small_factors( const mbedtls_mpi *X ) { int ret = 0; size_t i; mbedtls_mpi_uint r; if( ( X->p[0] & 1 ) == 0 ) return( MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ); for( i = 0; small_prime[i] > 0; i++ ) { if( mbedtls_mpi_cmp_int( X, small_prime[i] ) <= 0 ) return( 1 ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_int( &r, X, small_prime[i] ) ); if( r == 0 ) return( MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ); } cleanup: return( ret ); } /* * Miller-Rabin pseudo-primality test (HAC 4.24) */ static int mpi_miller_rabin( const mbedtls_mpi *X, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret, count; size_t i, j, k, n, s; mbedtls_mpi W, R, T, A, RR; mbedtls_mpi_init( &W ); mbedtls_mpi_init( &R ); mbedtls_mpi_init( &T ); mbedtls_mpi_init( &A ); mbedtls_mpi_init( &RR ); /* * W = |X| - 1 * R = W >> lsb( W ) */ MBEDTLS_MPI_CHK( mbedtls_mpi_sub_int( &W, X, 1 ) ); s = mbedtls_mpi_lsb( &W ); MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &R, &W ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &R, s ) ); i = mbedtls_mpi_bitlen( X ); /* * HAC, table 4.4 */ n = ( ( i >= 1300 ) ? 2 : ( i >= 850 ) ? 3 : ( i >= 650 ) ? 4 : ( i >= 350 ) ? 8 : ( i >= 250 ) ? 12 : ( i >= 150 ) ? 
18 : 27 ); for( i = 0; i < n; i++ ) { /* * pick a random A, 1 < A < |X| - 1 */ MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &A, X->n * ciL, f_rng, p_rng ) ); if( mbedtls_mpi_cmp_mpi( &A, &W ) >= 0 ) { j = mbedtls_mpi_bitlen( &A ) - mbedtls_mpi_bitlen( &W ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &A, j + 1 ) ); } A.p[0] |= 3; count = 0; do { MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( &A, X->n * ciL, f_rng, p_rng ) ); j = mbedtls_mpi_bitlen( &A ); k = mbedtls_mpi_bitlen( &W ); if (j > k) { MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &A, j - k ) ); } if (count++ > 30) { /* go through cleanup so the temporaries are freed */ ret = MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; goto cleanup; } } while ( mbedtls_mpi_cmp_mpi( &A, &W ) >= 0 || mbedtls_mpi_cmp_int( &A, 1 ) <= 0 ); /* * A = A^R mod |X| */ MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &A, &A, &R, X, &RR ) ); if( mbedtls_mpi_cmp_mpi( &A, &W ) == 0 || mbedtls_mpi_cmp_int( &A, 1 ) == 0 ) continue; j = 1; while( j < s && mbedtls_mpi_cmp_mpi( &A, &W ) != 0 ) { /* * A = A * A mod |X| */ MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &T, &A, &A ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mod_mpi( &A, &T, X ) ); if( mbedtls_mpi_cmp_int( &A, 1 ) == 0 ) break; j++; } /* * not prime if A != |X| - 1 or A == 1 */ if( mbedtls_mpi_cmp_mpi( &A, &W ) != 0 || mbedtls_mpi_cmp_int( &A, 1 ) == 0 ) { ret = MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; break; } } cleanup: mbedtls_mpi_free( &W ); mbedtls_mpi_free( &R ); mbedtls_mpi_free( &T ); mbedtls_mpi_free( &A ); mbedtls_mpi_free( &RR ); return( ret ); } /* * Pseudo-primality test: small factors, then Miller-Rabin */ int mbedtls_mpi_is_prime( const mbedtls_mpi *X, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { int ret; mbedtls_mpi XX; XX.s = 1; XX.n = X->n; XX.p = X->p; if( mbedtls_mpi_cmp_int( &XX, 0 ) == 0 || mbedtls_mpi_cmp_int( &XX, 1 ) == 0 ) return( MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ); if( mbedtls_mpi_cmp_int( &XX, 2 ) == 0 ) return( 0 ); if( ( ret = mpi_check_small_factors( &XX ) ) != 0 ) { if( ret == 1 ) return( 0 ); return( ret ); } return( mpi_miller_rabin( &XX, f_rng, p_rng ) ); } /* * Prime number generation * * If dh_flag is 0 and nbits is at least 1024, then the procedure * follows the RSA probably-prime generation method of FIPS 186-4. * NB. FIPS 186-4 only allows the specific bit lengths of 1024 and 1536. */ int mbedtls_mpi_gen_prime( mbedtls_mpi *X, size_t nbits, int dh_flag, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ) { #ifdef MBEDTLS_HAVE_INT64 // ceil(2^63.5) #define CEIL_MAXUINT_DIV_SQRT2 0xb504f333f9de6485ULL #else // ceil(2^31.5) #define CEIL_MAXUINT_DIV_SQRT2 0xb504f334U #endif int ret = MBEDTLS_ERR_MPI_NOT_ACCEPTABLE; size_t k, n; mbedtls_mpi_uint r; mbedtls_mpi Y; if( nbits < 3 || nbits > MBEDTLS_MPI_MAX_BITS ) return( MBEDTLS_ERR_MPI_BAD_INPUT_DATA ); mbedtls_mpi_init( &Y ); n = BITS_TO_LIMBS( nbits ); while( 1 ) { MBEDTLS_MPI_CHK( mbedtls_mpi_fill_random( X, n * ciL, f_rng, p_rng ) ); /* make sure generated number is at least (nbits-1)+0.5 bits (FIPS 186-4 §B.3.3 steps 4.4, 5.5) */ if( X->p[n-1] < CEIL_MAXUINT_DIV_SQRT2 ) continue; k = n * biL; if( k > nbits ) MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( X, k - nbits ) ); X->p[0] |= 1; if( dh_flag == 0 ) { ret = mbedtls_mpi_is_prime( X, f_rng, p_rng ); if( ret != MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ) goto cleanup; } else { /* * A necessary condition for Y and X = 2Y + 1 to be prime * is X = 2 mod 3 (which is equivalent to Y = 2 mod 3).
* Make sure it is satisfied, while keeping X = 3 mod 4 */ X->p[0] |= 2; MBEDTLS_MPI_CHK( mbedtls_mpi_mod_int( &r, X, 3 ) ); if( r == 0 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 8 ) ); else if( r == 1 ) MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 4 ) ); /* Set Y = (X-1) / 2, which is X / 2 because X is odd */ MBEDTLS_MPI_CHK( mbedtls_mpi_copy( &Y, X ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_shift_r( &Y, 1 ) ); while( 1 ) { /* * First, check small factors for X and Y * before doing Miller-Rabin on any of them */ if( ( ret = mpi_check_small_factors( X ) ) == 0 && ( ret = mpi_check_small_factors( &Y ) ) == 0 && ( ret = mpi_miller_rabin( X, f_rng, p_rng ) ) == 0 && ( ret = mpi_miller_rabin( &Y, f_rng, p_rng ) ) == 0 ) goto cleanup; if( ret != MBEDTLS_ERR_MPI_NOT_ACCEPTABLE ) goto cleanup; /* * Next candidates. We want to preserve Y = (X-1) / 2 and * Y = 1 mod 2 and Y = 2 mod 3 (eq X = 3 mod 4 and X = 2 mod 3) * so up Y by 6 and X by 12. */ MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( X, X, 12 ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_add_int( &Y, &Y, 6 ) ); } } } cleanup: mbedtls_mpi_free( &Y ); return( ret ); } #endif /* MBEDTLS_GENPRIME */ #if defined(MBEDTLS_SELF_TEST) #define GCD_PAIR_COUNT 3 static const int gcd_pairs[GCD_PAIR_COUNT][3] = { { 693, 609, 21 }, { 1764, 868, 28 }, { 768454923, 542167814, 1 } }; /* * Checkup routine */ int mbedtls_mpi_self_test( int verbose ) { int ret, i; mbedtls_mpi A, E, N, X, Y, U, V; mbedtls_mpi_init( &A ); mbedtls_mpi_init( &E ); mbedtls_mpi_init( &N ); mbedtls_mpi_init( &X ); mbedtls_mpi_init( &Y ); mbedtls_mpi_init( &U ); mbedtls_mpi_init( &V ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &A, 16, "EFE021C2645FD1DC586E69184AF4A31E" \ "D5F53E93B5F123FA41680867BA110131" \ "944FE7952E2517337780CB0DB80E61AA" \ "E7C8DDC6C5C6AADEB34EB38A2F40D5E6" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &E, 16, "B2E7EFD37075B9F03FF989C7C5051C20" \ "34D2A323810251127E7BF8625A4F49A5" \ "F3E27F4DA8BD59C47D6DAABA4C8127BD" \ "5B5C25763222FEFCCFC38B832366C29E" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &N, 16, "0066A198186C18C10B2F5ED9B522752A" \ "9830B69916E535C8F047518A889A43A5" \ "94B6BED27A168D31D4A52F88925AA8F5" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_mul_mpi( &X, &A, &N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "602AB7ECA597A3D6B56FF9829A5E8B85" \ "9E857EA95A03512E2BAE7391688D264A" \ "A5663B0341DB9CCFD2C4C5F421FEC814" \ "8001B72E848A38CAE1C65F78E56ABDEF" \ "E12D3C039B8A02D6BE593F0BBBDA56F1" \ "ECF677152EF804370C1A305CAF3B5BF1" \ "30879B56C61DE584A0F53A2447A51E" ) ); if( verbose != 0 ) mbedtls_printf( " MPI test #1 (mul_mpi): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); MBEDTLS_MPI_CHK( mbedtls_mpi_div_mpi( &X, &Y, &A, &N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "256567336059E52CAE22925474705F39A94" ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &V, 16, "6613F26162223DF488E9CD48CC132C7A" \ "0AC93C701B001B092E4E5B9F73BCD27B" \ "9EE50D0657C77F374E903CDFA4C642" ) ); if( verbose != 0 ) mbedtls_printf( " MPI test #2 (div_mpi): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 || mbedtls_mpi_cmp_mpi( &Y, &V ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &X, &A, &E, &N, NULL ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "36E139AEA55215609D2816998ED020BB" \ "BD96C37890F65171D948E9BC7CBAA4D9" \ "325D24D6A3C12710F10A09FA08AB87" 
) ); if( verbose != 0 ) mbedtls_printf( " MPI test #3 (exp_mod): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); MBEDTLS_MPI_CHK( mbedtls_mpi_inv_mod( &X, &A, &N ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &U, 16, "003A0AAEDD7E784FC07D8F9EC6E3BFD5" \ "C3DBA76456363A10869622EAC2DD84EC" \ "C5B8A74DAC4D09E03B5E0BE779F2DF61" ) ); if( verbose != 0 ) mbedtls_printf( " MPI test #4 (inv_mod): " ); if( mbedtls_mpi_cmp_mpi( &X, &U ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed\n" ); ret = 1; goto cleanup; } if( verbose != 0 ) mbedtls_printf( "passed\n" ); if( verbose != 0 ) mbedtls_printf( " MPI test #5 (simple gcd): " ); for( i = 0; i < GCD_PAIR_COUNT; i++ ) { MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &X, gcd_pairs[i][0] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &Y, gcd_pairs[i][1] ) ); MBEDTLS_MPI_CHK( mbedtls_mpi_gcd( &A, &X, &Y ) ); if( mbedtls_mpi_cmp_int( &A, gcd_pairs[i][2] ) != 0 ) { if( verbose != 0 ) mbedtls_printf( "failed at %d\n", i ); ret = 1; goto cleanup; } } if( verbose != 0 ) mbedtls_printf( "passed\n" ); cleanup: if( ret != 0 && verbose != 0 ) mbedtls_printf( "Unexpected error, return code = %08X\n", ret ); mbedtls_mpi_free( &A ); mbedtls_mpi_free( &E ); mbedtls_mpi_free( &N ); mbedtls_mpi_free( &X ); mbedtls_mpi_free( &Y ); mbedtls_mpi_free( &U ); mbedtls_mpi_free( &V ); if( verbose != 0 ) mbedtls_printf( "\n" ); return( ret ); } #endif /* MBEDTLS_SELF_TEST */ #endif /* MBEDTLS_BIGNUM_C */
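/*
 * Usage sketch (illustrative; this helper and its hex constants are not
 * part of mbed TLS). It exercises the public API defined above: read hex
 * operands, compute X = A^E mod N with mbedtls_mpi_exp_mod() (N must be
 * positive and odd, since the implementation relies on Montgomery
 * arithmetic), then write the result back out as a hex string.
 */
static int mpi_exp_mod_example( void )
{
    int ret;
    size_t olen;
    char buf[64];
    mbedtls_mpi A, E, N, X;

    mbedtls_mpi_init( &A ); mbedtls_mpi_init( &E );
    mbedtls_mpi_init( &N ); mbedtls_mpi_init( &X );

    MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &A, 16, "1234" ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &E, 16, "10001" ) );
    MBEDTLS_MPI_CHK( mbedtls_mpi_read_string( &N, 16, "CAFEBABF" ) ); /* odd, as exp_mod requires */

    MBEDTLS_MPI_CHK( mbedtls_mpi_exp_mod( &X, &A, &E, &N, NULL ) );

    MBEDTLS_MPI_CHK( mbedtls_mpi_write_string( &X, 16, buf, sizeof( buf ), &olen ) );
    mbedtls_printf( "A^E mod N = %s\n", buf );

cleanup:
    mbedtls_mpi_free( &A ); mbedtls_mpi_free( &E );
    mbedtls_mpi_free( &N ); mbedtls_mpi_free( &X );
    return( ret );
}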
800085.c
/* * Copyright (c) 2018-2019 Snowflake Computing, Inc. All rights reserved. */ #include <errno.h> #include <string.h> #include "chunk_downloader.h" #include "memory.h" #include "connection.h" #include "error.h" #include "client_int.h" static void* chunk_downloader_thread(void *downloader); static void STDCALL set_shutdown(SF_CHUNK_DOWNLOADER *chunk_downloader, sf_bool value); static void STDCALL set_error(SF_CHUNK_DOWNLOADER *chunk_downloader, sf_bool value); #define PTHREAD_LOCK_INIT_ERROR_MSG(e, em) \ switch(e) \ { \ case EAGAIN: (em) = "System lacked resources to create lock"; break; \ case ENOMEM: (em) = "Insufficient memory to create mutex"; break; \ case EPERM: (em) = "Caller doesn't have the privilege to perform the operation"; break; \ case EBUSY: (em) = "Mutex already initialized"; break; \ case EINVAL: (em) = "The value specified by attr is invalid"; break; \ default: (em) = "Unknown non-zero pthread init error" ; break; \ } #define PTHREAD_CREATE_ERROR_MSG(e, em) \ switch(e) \ { \ case EAGAIN: (em) = "System lacked resources to create another thread"; break; \ case EPERM: (em) = "Caller doesn't have the privilege to set the required scheduling parameters"; break; \ case EINVAL: (em) = "The value specified by attr is invalid"; break; \ default: (em) = "Unknown non-zero pthread init error" ; break; \ } #define PTHREAD_JOIN_ERROR_MSG(e, em) \ switch(e) \ { \ case EDEADLK: (em) = "A deadlock was detected (i.e. two threads tried to join with each other)"; break; \ case EPERM: (em) = "Not a joinable thread"; break; \ case ESRCH: (em) = "No thread with specified ID could be found"; break; \ default: (em) = "Unknown non-zero pthread join error" ; break; \ } sf_bool STDCALL get_shutdown_or_error(struct SF_CHUNK_DOWNLOADER *chunk_downloader) { sf_bool ret; _rwlock_rdlock(&chunk_downloader->attr_lock); ret = chunk_downloader->is_shutdown || chunk_downloader->has_error; _rwlock_rdunlock(&chunk_downloader->attr_lock); return ret; } sf_bool STDCALL get_shutdown(struct SF_CHUNK_DOWNLOADER *chunk_downloader) { sf_bool ret; _rwlock_rdlock(&chunk_downloader->attr_lock); ret = chunk_downloader->is_shutdown; _rwlock_rdunlock(&chunk_downloader->attr_lock); return ret; } static void STDCALL set_shutdown(struct SF_CHUNK_DOWNLOADER *chunk_downloader, sf_bool value) { _rwlock_wrlock(&chunk_downloader->attr_lock); chunk_downloader->is_shutdown = value; _rwlock_wrunlock(&chunk_downloader->attr_lock); } sf_bool STDCALL get_error(struct SF_CHUNK_DOWNLOADER *chunk_downloader) { sf_bool ret; _rwlock_rdlock(&chunk_downloader->attr_lock); ret = chunk_downloader->has_error; _rwlock_rdunlock(&chunk_downloader->attr_lock); return ret; } static void STDCALL set_error(struct SF_CHUNK_DOWNLOADER *chunk_downloader, sf_bool value) { _rwlock_wrlock(&chunk_downloader->attr_lock); chunk_downloader->has_error = value; _rwlock_wrunlock(&chunk_downloader->attr_lock); } sf_bool STDCALL init_locks(struct SF_CHUNK_DOWNLOADER *chunk_downloader) { sf_bool ret = SF_BOOLEAN_FALSE; SF_ERROR_STRUCT *error = chunk_downloader->sf_error; int pthread_ret; const char *error_msg; if ((pthread_ret = _critical_section_init(&chunk_downloader->queue_lock)) != 0) { PTHREAD_LOCK_INIT_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); goto cleanup; } if ((pthread_ret = _rwlock_init(&chunk_downloader->attr_lock)) != 0) { PTHREAD_LOCK_INIT_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); goto cleanup; } if ((pthread_ret = 
_cond_init(&chunk_downloader->producer_cond)) != 0) { PTHREAD_LOCK_INIT_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); goto cleanup; } if ((pthread_ret = _cond_init(&chunk_downloader->consumer_cond)) != 0) { PTHREAD_LOCK_INIT_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); goto cleanup; } // Success ret = SF_BOOLEAN_TRUE; return ret; cleanup: // We may destroy some uninitialized locks/conds, but we don't care. _critical_section_term(&chunk_downloader->queue_lock); _cond_term(&chunk_downloader->producer_cond); _cond_term(&chunk_downloader->consumer_cond); _rwlock_term(&chunk_downloader->attr_lock); return ret; } sf_bool STDCALL fill_queue(struct SF_CHUNK_DOWNLOADER *chunk_downloader, cJSON *chunks, int chunk_count) { int i; cJSON *chunk = NULL; // We want to detach each chunk object so that after we create the queue item, // we free the memory associated with the JSON blob for (i = 0; i < chunk_count; i++) { // Detach instead of getting so that we can free the chunk memory after we create our queue item if (json_detach_object_from_array(&chunk, chunks, 0)) { goto cleanup; } chunk_downloader->queue[i].url = NULL; chunk_downloader->queue[i].row_count = 0; chunk_downloader->queue[i].chunk = NULL; if (json_copy_string(&chunk_downloader->queue[i].url, chunk, "url")) { goto cleanup; } // We need to update queue_size here to reflect the fact that we've already allocated memory for URL. // This is because we use queue_size to free the created URLs if we run into an error so we must increase size // here in case we successfully copy the URL from the chunk, but fail to copy rowCount from the chunk chunk_downloader->queue_size++; if (json_copy_int(&chunk_downloader->queue[i].row_count, chunk, "rowCount")) { goto cleanup; } // Free detached chunk snowflake_cJSON_Delete(chunk); chunk = NULL; } return SF_BOOLEAN_TRUE; cleanup: // Free a chunk that was detached but not yet deleted (no-op if chunk is NULL) snowflake_cJSON_Delete(chunk); for (i = 0; i < chunk_downloader->queue_size; i++) { SF_FREE(chunk_downloader->queue[i].url); } return SF_BOOLEAN_FALSE; } sf_bool STDCALL create_chunk_headers(struct SF_CHUNK_DOWNLOADER *chunk_downloader, cJSON *json_headers) { sf_bool ret = SF_BOOLEAN_FALSE; size_t header_field_size; size_t i; cJSON *item = NULL; char *header_item = NULL; ARRAY_LIST *keys = json_get_object_keys(json_headers); char *key; for (i = 0; i < keys->used; i++) { key = (char *) sf_array_list_get(keys, i); // Since I know that these keys are correct from a case sensitive view, // I can use the faster case sensitive version item = snowflake_cJSON_GetObjectItemCaseSensitive(json_headers, key); if (!item || !key || !item->valuestring) { SET_SNOWFLAKE_ERROR(chunk_downloader->sf_error, SF_STATUS_ERROR_BAD_JSON, "Could not find critical chunk header item", ""); goto cleanup; } header_field_size = strlen(key) + strlen(item->valuestring) + 2; // Type conversion is safe since we know that header_field_size must be positive header_item = (char *) SF_CALLOC(1, header_field_size + 1); sb_sprintf(header_item, header_field_size + 1, "%s: %s", key, item->valuestring); chunk_downloader->chunk_headers->header = curl_slist_append(chunk_downloader->chunk_headers->header, header_item); SF_FREE(header_item); } ret = SF_BOOLEAN_TRUE; cleanup: sf_array_list_deallocate(keys); SF_FREE(header_item); return ret; } sf_bool STDCALL download_chunk(char *url, SF_HEADER *headers, cJSON **chunk, SF_ERROR_STRUCT *error, sf_bool insecure_mode) { sf_bool ret = SF_BOOLEAN_FALSE; CURL *curl = NULL; curl = curl_easy_init(); if (!curl
|| !http_perform(curl, GET_REQUEST_TYPE, url, headers, NULL, chunk, DEFAULT_SNOWFLAKE_REQUEST_TIMEOUT, SF_BOOLEAN_TRUE, error, insecure_mode, 0)) { // Error set in perform function goto cleanup; } ret = SF_BOOLEAN_TRUE; cleanup: curl_easy_cleanup(curl); return ret; } SF_CHUNK_DOWNLOADER *STDCALL chunk_downloader_init(const char *qrmk, cJSON *chunk_headers, cJSON *chunks, uint64 thread_count, uint64 fetch_slots, SF_ERROR_STRUCT *sf_error, sf_bool insecure_mode) { struct SF_CHUNK_DOWNLOADER *chunk_downloader = NULL; const char *error_msg = NULL; int chunk_count; int i; int pthread_ret; size_t qrmk_len = 1; // We need thread_count, fetch_slots, chunks, and either qrmk or chunk_headers if (thread_count <= 0 || fetch_slots <= 0 || !chunks || !snowflake_cJSON_IsArray(chunks) || strcmp(chunks->string, "chunks") != 0) { return NULL; } if ((chunk_downloader = (SF_CHUNK_DOWNLOADER *) SF_CALLOC(1, sizeof(SF_CHUNK_DOWNLOADER))) == NULL) { return NULL; } // Initialize default values chunk_downloader->threads = NULL; chunk_downloader->queue = NULL; chunk_downloader->qrmk = NULL; chunk_downloader->chunk_headers = sf_header_create(); chunk_downloader->thread_count = 0; chunk_downloader->queue_size = 0; chunk_downloader->producer_head = 0; chunk_downloader->consumer_head = 0; chunk_downloader->is_shutdown = SF_BOOLEAN_FALSE; chunk_downloader->has_error = SF_BOOLEAN_FALSE; chunk_downloader->sf_error = sf_error; chunk_downloader->insecure_mode = insecure_mode; // Initialize chunk_headers or qrmk if (chunk_headers) { if(!create_chunk_headers(chunk_downloader, chunk_headers)) { goto cleanup; } } else if (qrmk) { qrmk_len += strlen(qrmk); chunk_downloader->qrmk = (char *) SF_CALLOC(1, qrmk_len); sb_strncpy(chunk_downloader->qrmk, qrmk_len, qrmk, qrmk_len); } // Initialize mutexes and conditional variables if (!init_locks(chunk_downloader)) { goto cleanup; } // Initialize queue and thread memory chunk_count = snowflake_cJSON_GetArraySize(chunks); chunk_downloader->threads = (SF_THREAD_HANDLE *)SF_CALLOC((int)thread_count, sizeof(SF_THREAD_HANDLE)); chunk_downloader->queue = (SF_QUEUE_ITEM *) SF_CALLOC(chunk_count, sizeof(SF_QUEUE_ITEM)); if (!chunk_downloader->threads || !chunk_downloader->queue) { goto cleanup; } // Fill up queue if (!fill_queue(chunk_downloader, chunks, chunk_count)) { goto cleanup; } // Initialize threads for (i = 0; i < thread_count; i++) { // If non-zero exit code, terminate chunk downloader if ((pthread_ret = _thread_init( &chunk_downloader->threads[i], chunk_downloader_thread, (void *)chunk_downloader)) != 0) { chunk_downloader_term(chunk_downloader); PTHREAD_CREATE_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(sf_error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); return NULL; } chunk_downloader->thread_count++; } return chunk_downloader; cleanup: if (chunk_downloader) { SF_FREE(chunk_downloader->qrmk); sf_header_destroy(chunk_downloader->chunk_headers); SF_FREE(chunk_downloader->queue); SF_FREE(chunk_downloader->threads); } SF_FREE(chunk_downloader); return NULL; } sf_bool STDCALL chunk_downloader_term(struct SF_CHUNK_DOWNLOADER *chunk_downloader) { int pthread_ret; const char *error_msg; uint64 i; if (!chunk_downloader) { return SF_BOOLEAN_FALSE; } if ((pthread_ret = _critical_section_lock(&chunk_downloader->queue_lock))) { _rwlock_wrlock(&chunk_downloader->attr_lock); if (!chunk_downloader->has_error) { PTHREAD_LOCK_INIT_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(chunk_downloader->sf_error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); chunk_downloader->has_error = 
SF_BOOLEAN_TRUE; } _rwlock_wrunlock(&chunk_downloader->attr_lock); return SF_BOOLEAN_FALSE; } do { // Already shutting down, just return false if (get_shutdown(chunk_downloader)) { return SF_BOOLEAN_FALSE; } set_shutdown(chunk_downloader, SF_BOOLEAN_TRUE); if (_cond_broadcast(&chunk_downloader->consumer_cond) || _cond_broadcast(&chunk_downloader->producer_cond) || (_critical_section_unlock(&chunk_downloader->queue_lock))) { // Something went wrong with either notifying the producer/consumer or releasing the queue lock // Set an error and then try to continue with cleanup _rwlock_wrlock(&chunk_downloader->attr_lock); if (!chunk_downloader->has_error) { SET_SNOWFLAKE_ERROR(chunk_downloader->sf_error, SF_STATUS_ERROR_PTHREAD, "Error during condition broadcast", ""); chunk_downloader->has_error = SF_BOOLEAN_TRUE; } _rwlock_wrunlock(&chunk_downloader->attr_lock); } // Join all the threads for (i = 0; i < chunk_downloader->thread_count; i++) { if ((pthread_ret = _thread_join(chunk_downloader->threads[i])) != 0) { if (!get_error(chunk_downloader)) { PTHREAD_JOIN_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(chunk_downloader->sf_error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); } _rwlock_wrlock(&chunk_downloader->attr_lock); if (!chunk_downloader->has_error) { PTHREAD_JOIN_ERROR_MSG(pthread_ret, error_msg); SET_SNOWFLAKE_ERROR(chunk_downloader->sf_error, SF_STATUS_ERROR_PTHREAD, error_msg, ""); chunk_downloader->has_error = SF_BOOLEAN_TRUE; } _rwlock_wrunlock(&chunk_downloader->attr_lock); } } } while (0); // Free chunk downloader memory SF_FREE(chunk_downloader->threads); // Free all the memory of the items in the queue before freeing queue memory for (i = 0; i < chunk_downloader->queue_size; i++) { SF_FREE(chunk_downloader->queue[i].url); snowflake_cJSON_Delete(chunk_downloader->queue[i].chunk); } SF_FREE(chunk_downloader->queue); SF_FREE(chunk_downloader->qrmk); sf_header_destroy(chunk_downloader->chunk_headers); _critical_section_term(&chunk_downloader->queue_lock); _cond_term(&chunk_downloader->producer_cond); _cond_term(&chunk_downloader->consumer_cond); _rwlock_term(&chunk_downloader->attr_lock); SF_FREE(chunk_downloader); return SF_BOOLEAN_TRUE; } static void * chunk_downloader_thread(void *downloader) { struct SF_CHUNK_DOWNLOADER *chunk_downloader = (SF_CHUNK_DOWNLOADER *) downloader; cJSON *chunk = NULL; uint64 index; // Create an err struct per thread so we don't have to lock the chunk downloader's err SF_ERROR_STRUCT err; memset(&err, 0, sizeof(err)); clear_snowflake_error(&err); // Loop forever until shutdown while (1) { // Reset from previous loop chunk = NULL; _critical_section_lock(&chunk_downloader->queue_lock); // If we've downloaded chunks == # of threads, wait until the consumer consumes a chunk.
// Ensure that the producer_head is less than the queue_size to ensure that we still have items to process // If we're shutting down or an err has occurred, skip while ((chunk_downloader->producer_head - chunk_downloader->consumer_head) >= chunk_downloader->thread_count && chunk_downloader->producer_head < chunk_downloader->queue_size && !get_shutdown_or_error(chunk_downloader)) { _cond_wait(&chunk_downloader->producer_cond, &chunk_downloader->queue_lock); } // If we're shutting down, or we have reached the end of the results, then break if (get_shutdown_or_error(chunk_downloader) || chunk_downloader->producer_head >= chunk_downloader->queue_size) { break; } // Get queue item and set it locally index = chunk_downloader->producer_head++; // Unlock since we have our queue item, and don't need the lock while we're processing the queue _critical_section_unlock(&chunk_downloader->queue_lock); // Download chunk if (!download_chunk(chunk_downloader->queue[index].url, chunk_downloader->chunk_headers, &chunk, &err, chunk_downloader->insecure_mode)) { _rwlock_wrlock(&chunk_downloader->attr_lock); if (!chunk_downloader->has_error) { copy_snowflake_error(chunk_downloader->sf_error, &err); chunk_downloader->has_error = SF_BOOLEAN_TRUE; } _rwlock_wrunlock(&chunk_downloader->attr_lock); break; } // Gain back lock to set cJSON blob _critical_section_lock(&chunk_downloader->queue_lock); if (get_error(chunk_downloader)) { break; } // Set the chunk chunk_downloader->queue[index].chunk = chunk; // Notify the consumer that we have a chunk ready if (_cond_signal(&chunk_downloader->consumer_cond)) { _rwlock_wrlock(&chunk_downloader->attr_lock); if (!chunk_downloader->has_error) { SET_SNOWFLAKE_ERROR(chunk_downloader->sf_error, SF_STATUS_ERROR_PTHREAD, "Error sending consumer signal to notify of chunk downloaded", ""); chunk_downloader->has_error = SF_BOOLEAN_TRUE; } _rwlock_wrunlock(&chunk_downloader->attr_lock); break; } // Drop the lock _critical_section_unlock(&chunk_downloader->queue_lock); } _critical_section_unlock(&chunk_downloader->queue_lock); _thread_exit(); return NULL; }
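The thread function above is one half of a bounded producer/consumer scheme: downloader threads claim queue slots in order, publish finished chunks under the lock, and block on `producer_cond` whenever the window of unconsumed chunks reaches `thread_count`. The standalone sketch below reproduces that pattern with plain pthreads so the handshake can be read in isolation. It is not Snowflake client code; `demo_queue`, `SLOTS`, and the other names are invented for illustration (compile with `-pthread`).

#include <pthread.h>
#include <stdio.h>

#define TOTAL_ITEMS   12  /* number of "chunks" to produce */
#define SLOTS          3  /* max undelivered items, like thread_count above */
#define NUM_PRODUCERS  3

struct demo_queue {
    int items[TOTAL_ITEMS];
    int ready[TOTAL_ITEMS];           /* set once a slot has been published */
    int produce_head;                 /* next index a producer may claim */
    int consume_head;                 /* next index the consumer will read */
    pthread_mutex_t lock;
    pthread_cond_t producer_cond;     /* "a slot opened up" */
    pthread_cond_t consumer_cond;     /* "a chunk is ready" */
};

static void *producer(void *arg) {
    struct demo_queue *q = arg;
    for (;;) {
        pthread_mutex_lock(&q->lock);
        /* Wait while the window of unconsumed items is full. */
        while (q->produce_head < TOTAL_ITEMS &&
               q->produce_head - q->consume_head >= SLOTS)
            pthread_cond_wait(&q->producer_cond, &q->lock);
        if (q->produce_head >= TOTAL_ITEMS) {   /* nothing left to claim */
            pthread_mutex_unlock(&q->lock);
            return NULL;
        }
        int idx = q->produce_head++;
        pthread_mutex_unlock(&q->lock);

        int value = idx * idx;                  /* the "download", unlocked */

        pthread_mutex_lock(&q->lock);
        q->items[idx] = value;
        q->ready[idx] = 1;
        pthread_cond_signal(&q->consumer_cond);
        pthread_mutex_unlock(&q->lock);
    }
}

int main(void) {
    struct demo_queue q = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .producer_cond = PTHREAD_COND_INITIALIZER,
        .consumer_cond = PTHREAD_COND_INITIALIZER,
    };
    pthread_t threads[NUM_PRODUCERS];
    for (int i = 0; i < NUM_PRODUCERS; i++)
        pthread_create(&threads[i], NULL, producer, &q);

    /* Consume strictly in order, as the result-set reader does. */
    for (int i = 0; i < TOTAL_ITEMS; i++) {
        pthread_mutex_lock(&q.lock);
        while (!q.ready[i])
            pthread_cond_wait(&q.consumer_cond, &q.lock);
        int value = q.items[i];
        q.consume_head = i + 1;
        pthread_cond_broadcast(&q.producer_cond);
        pthread_mutex_unlock(&q.lock);
        printf("chunk %d -> %d\n", i, value);
    }
    for (int i = 0; i < NUM_PRODUCERS; i++)
        pthread_join(threads[i], NULL);
    return 0;
}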
734061.c
/* sys.c - Architecture-independent system-level functions for Mink.
 *
 * Copyright (c)2013-2016 Ross Bamford. See LICENSE for details.
 */
#include "sys.h"
#include "hal.h"
#include "utils.h"
#include "vsprintf.h"

void *memcpy(void *restrict s1, const void *restrict s2, size_t n) {
  char *dp = s1;
  const char *sp = s2;
  while (n--) {
    *dp++ = *sp++;
  }
  return s1;
}

void *memset(void *s, int c, size_t n) {
  unsigned char *p = s;
  while (n--) {
    *p++ = (unsigned char)c;
  }
  return s;
}

/* Word-wide variant of memset; n counts 16-bit words, not bytes. */
void *memsetw(void *s, int c, size_t n) {
  unsigned short *p = s;
  while (n--) {
    *p++ = (unsigned short)c;
  }
  return s;
}

noreturn void panic(const char *fmt, ...) {
  /* Static buffer: panic must not depend on a working allocator. */
  static char buf[1024];
  va_list args;
  int i;

  va_start(args, fmt);
  i = vsprintf(buf, fmt, args);
  va_end(args);
  buf[i] = '\0';

  console_setcolor(make_color(COLOR_LIGHT_RED, COLOR_BLACK));
  printk("\n\nOSHI...: %s\n", buf);
  print_stack_trace();
  printk("\n***\nSo long, and thanks for all the fish.");
  die();
}
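Both parameters of the memcpy above are declared `restrict`, so the simple forward loop is only defined for non-overlapping buffers. A freestanding kernel usually wants an overlap-safe `memmove` next to it; the sketch below is one minimal way to write it in the same style (memmove is not part of the Mink source shown here, and it assumes the same headers provide `size_t`).

/* Overlap-safe companion to memcpy: copy backwards when the destination
 * starts inside the source region, forwards otherwise. */
void *memmove(void *s1, const void *s2, size_t n) {
  unsigned char *dp = s1;
  const unsigned char *sp = s2;
  if (dp < sp) {
    while (n--) {
      *dp++ = *sp++;
    }
  } else {
    dp += n;
    sp += n;
    while (n--) {
      *--dp = *--sp;
    }
  }
  return s1;
}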
713727.c
/* crypto/bio/bio_err.c */
/* ====================================================================
 * Copyright (c) 1999-2011 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    [email protected].
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * ([email protected]).  This product includes software written by Tim
 * Hudson ([email protected]).
 */

/* NOTE: this file was auto generated by the mkerr.pl script: any changes
 * made to it will be overwritten when the script next updates this file,
 * only reason strings will be preserved.
 */

#include <stdio.h>
#include <openssl/err.h>
#include <openssl/bio.h>

/* BEGIN ERROR CODES */
#ifndef OPENSSL_NO_ERR

#define ERR_FUNC(func) ERR_PACK(ERR_LIB_BIO,func,0)
#define ERR_REASON(reason) ERR_PACK(ERR_LIB_BIO,0,reason)

#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <sys/stat.h>
#include <stonesoup/stonesoup_trace.h>

/* These tables were emitted by the ROSE source-to-source translator with
 * every ERR_PACK macro fully expanded into its bit arithmetic; they are
 * written here in terms of the ERR_FUNC/ERR_REASON macros defined above,
 * which expand to exactly the same unsigned long values. */
static ERR_STRING_DATA BIO_str_functs[] = {
    {ERR_FUNC(BIO_F_ACPT_STATE), "ACPT_STATE"},
    {ERR_FUNC(BIO_F_BIO_ACCEPT), "BIO_accept"},
    {ERR_FUNC(BIO_F_BIO_BER_GET_HEADER), "BIO_BER_GET_HEADER"},
    {ERR_FUNC(BIO_F_BIO_CALLBACK_CTRL), "BIO_callback_ctrl"},
    {ERR_FUNC(BIO_F_BIO_CTRL), "BIO_ctrl"},
    {ERR_FUNC(BIO_F_BIO_GETHOSTBYNAME), "BIO_gethostbyname"},
    {ERR_FUNC(BIO_F_BIO_GETS), "BIO_gets"},
    {ERR_FUNC(BIO_F_BIO_GET_ACCEPT_SOCKET), "BIO_get_accept_socket"},
    {ERR_FUNC(BIO_F_BIO_GET_HOST_IP), "BIO_get_host_ip"},
    {ERR_FUNC(BIO_F_BIO_GET_PORT), "BIO_get_port"},
    {ERR_FUNC(BIO_F_BIO_MAKE_PAIR), "BIO_MAKE_PAIR"},
    {ERR_FUNC(BIO_F_BIO_NEW), "BIO_new"},
    {ERR_FUNC(BIO_F_BIO_NEW_FILE), "BIO_new_file"},
    {ERR_FUNC(BIO_F_BIO_NEW_MEM_BUF), "BIO_new_mem_buf"},
    {ERR_FUNC(BIO_F_BIO_NREAD), "BIO_nread"},
    {ERR_FUNC(BIO_F_BIO_NREAD0), "BIO_nread0"},
    {ERR_FUNC(BIO_F_BIO_NWRITE), "BIO_nwrite"},
    {ERR_FUNC(BIO_F_BIO_NWRITE0), "BIO_nwrite0"},
    {ERR_FUNC(BIO_F_BIO_PUTS), "BIO_puts"},
    {ERR_FUNC(BIO_F_BIO_READ), "BIO_read"},
    {ERR_FUNC(BIO_F_BIO_SOCK_INIT), "BIO_sock_init"},
    {ERR_FUNC(BIO_F_BIO_WRITE), "BIO_write"},
    {ERR_FUNC(BIO_F_BUFFER_CTRL), "BUFFER_CTRL"},
    {ERR_FUNC(BIO_F_CONN_CTRL), "CONN_CTRL"},
    {ERR_FUNC(BIO_F_CONN_STATE), "CONN_STATE"},
    {ERR_FUNC(BIO_F_DGRAM_SCTP_READ), "DGRAM_SCTP_READ"},
    {ERR_FUNC(BIO_F_FILE_CTRL), "FILE_CTRL"},
    {ERR_FUNC(BIO_F_FILE_READ), "FILE_READ"},
    {ERR_FUNC(BIO_F_LINEBUFFER_CTRL), "LINEBUFFER_CTRL"},
    {ERR_FUNC(BIO_F_MEM_READ), "MEM_READ"},
    {ERR_FUNC(BIO_F_MEM_WRITE), "MEM_WRITE"},
    {ERR_FUNC(BIO_F_SSL_NEW), "SSL_new"},
    {ERR_FUNC(BIO_F_WSASTARTUP), "WSASTARTUP"},
    {0, NULL}
};

static ERR_STRING_DATA BIO_str_reasons[] = {
    {ERR_REASON(BIO_R_ACCEPT_ERROR), "accept error"},
    {ERR_REASON(BIO_R_BAD_FOPEN_MODE), "bad fopen mode"},
    {ERR_REASON(BIO_R_BAD_HOSTNAME_LOOKUP), "bad hostname lookup"},
    {ERR_REASON(BIO_R_BROKEN_PIPE), "broken pipe"},
    {ERR_REASON(BIO_R_CONNECT_ERROR), "connect error"},
    {ERR_REASON(BIO_R_EOF_ON_MEMORY_BIO), "EOF on memory BIO"},
    {ERR_REASON(BIO_R_ERROR_SETTING_NBIO), "error setting nbio"},
    {ERR_REASON(BIO_R_ERROR_SETTING_NBIO_ON_ACCEPTED_SOCKET), "error setting nbio on accepted socket"},
    {ERR_REASON(BIO_R_ERROR_SETTING_NBIO_ON_ACCEPT_SOCKET), "error setting nbio on accept socket"},
    {ERR_REASON(BIO_R_GETHOSTBYNAME_ADDR_IS_NOT_AF_INET), "gethostbyname addr is not af inet"},
    {ERR_REASON(BIO_R_INVALID_ARGUMENT), "invalid argument"},
    {ERR_REASON(BIO_R_INVALID_IP_ADDRESS), "invalid ip address"},
    {ERR_REASON(BIO_R_IN_USE), "in use"},
    {ERR_REASON(BIO_R_KEEPALIVE), "keepalive"},
    {ERR_REASON(BIO_R_NBIO_CONNECT_ERROR), "nbio connect error"},
    {ERR_REASON(BIO_R_NO_ACCEPT_PORT_SPECIFIED), "no accept port specified"},
    {ERR_REASON(BIO_R_NO_HOSTNAME_SPECIFIED), "no hostname specified"},
    {ERR_REASON(BIO_R_NO_PORT_DEFINED), "no port defined"},
    {ERR_REASON(BIO_R_NO_PORT_SPECIFIED), "no port specified"},
    {ERR_REASON(BIO_R_NO_SUCH_FILE), "no such file"},
    {ERR_REASON(BIO_R_NULL_PARAMETER), "null parameter"},
    {ERR_REASON(BIO_R_TAG_MISMATCH), "tag mismatch"},
    {ERR_REASON(BIO_R_UNABLE_TO_BIND_SOCKET), "unable to bind socket"},
    {ERR_REASON(BIO_R_UNABLE_TO_CREATE_SOCKET), "unable to create socket"},
    {ERR_REASON(BIO_R_UNABLE_TO_LISTEN_SOCKET), "unable to listen socket"},
    {ERR_REASON(BIO_R_UNINITIALIZED), "uninitialized"},
    {ERR_REASON(BIO_R_UNSUPPORTED_METHOD), "unsupported method"},
    {ERR_REASON(BIO_R_WRITE_TO_READ_ONLY_BIO), "write to read only BIO"},
    {ERR_REASON(BIO_R_WSASTARTUP), "WSAStartup"},
    {0, NULL}
};

#endif

int bistratal_blunt = 0;
void *stonesoup_printf_context = NULL;

void stonesoup_setup_printf_context() {
    struct stat st = {0};
    char *ss_tc_root = NULL;
    char *dirpath = NULL;
    int size_dirpath = 0;
    char *filepath = NULL;
    int size_filepath = 0;
    int retval = 0;
    ss_tc_root = getenv("SS_TC_ROOT");
    if (ss_tc_root != NULL) {
        size_dirpath = strlen(ss_tc_root) + strlen("testData") + 2;
        dirpath = (char *) malloc(size_dirpath * sizeof(char));
        if (dirpath != NULL) {
            sprintf(dirpath, "%s/%s", ss_tc_root, "testData");
            retval = 0;
            if (stat(dirpath, &st) == -1) {
                retval = mkdir(dirpath, 0700);
            }
            if (retval == 0) {
                size_filepath = strlen(dirpath) + strlen("logfile.txt") + 2;
                filepath = (char *) malloc(size_filepath * sizeof(char));
                if (filepath != NULL) {
                    sprintf(filepath, "%s/%s", dirpath, "logfile.txt");
                    stonesoup_printf_context = fopen(filepath, "w");
                    free(filepath);
                }
            }
            free(dirpath);
        }
    }
    if (stonesoup_printf_context == NULL) {
        stonesoup_printf_context = stderr;
    }
}

void stonesoup_printf(char *format, ...) {
    va_list argptr;
    va_start(argptr, format);
    vfprintf(stonesoup_printf_context, format, argptr);
    va_end(argptr);
    fflush(stonesoup_printf_context);
}

void stonesoup_close_printf_context() {
    if (stonesoup_printf_context != NULL && stonesoup_printf_context != stderr) {
        fclose(stonesoup_printf_context);
    }
}

struct stonesoup_data_struct {
    int (*func_member)(char *);
    char *str_member;
};

int stonesoup_modulus_function(char *modulus_param_str) {
    tracepoint(stonesoup_trace, trace_location,
               "/tmp/tmpT7K65c_ss_testcase/src-rose/crypto/bio/bio_err.c",
               "stonesoup_modulus_function");
    return modulus_param_str[0] % 2;
}

void stonesoup_set_function(char *set_param_str,
                            struct stonesoup_data_struct *set_param_data_struct) {
    tracepoint(stonesoup_trace, trace_location,
               "/tmp/tmpT7K65c_ss_testcase/src-rose/crypto/bio/bio_err.c",
               "stonesoup_set_function");
    tracepoint(stonesoup_trace, trace_point, "CROSSOVER-POINT: BEFORE");
    /* STONESOUP: CROSSOVER-POINT (Uninitialized Pointer) */
    /* Note: a string of length exactly 10 matches neither branch below,
     * leaving *set_param_data_struct uninitialized -- that is the injected
     * weakness under test. */
    if (strlen(set_param_str) > 10U) {
        set_param_data_struct->func_member = stonesoup_modulus_function;
        set_param_data_struct->str_member = set_param_str;
        tracepoint(stonesoup_trace, trace_point, "Initialized pointer");
    }
    if (strlen(set_param_str) < 10U) {
        set_param_data_struct->func_member = stonesoup_modulus_function;
        set_param_data_struct->str_member = "default";
        tracepoint(stonesoup_trace, trace_point, "Initialized pointer");
    }
    tracepoint(stonesoup_trace, trace_point, "CROSSOVER-POINT: AFTER");
}

void ERR_load_BIO_strings() {
    int stonesoup_val = 0;
    struct stonesoup_data_struct stonesoup_my_foo;
    char *refuters_melodram = 0;
    char *(**************************************************amatorially_sermoniser)[36] = 0;
    char *(*************************************************indamines_bacteriopsonic)[36] = 0;
    char *(************************************************pawnee_evadnee)[36] = 0;
    char *(***********************************************baffle_overexpansion)[36] = 0;
    char *(**********************************************lanista_hemogenic)[36] = 0;
    char *(*********************************************unavailingness_sarcler)[36] = 0;
    char *(********************************************ensuing_unglaciated)[36] = 0;
    char *(*******************************************procreatress_chapstick)[36] = 0;
    char *(******************************************handkerchief_karluk)[36] = 0;
    char *(*****************************************faroese_purificant)[36] = 0;
    char *(****************************************salmonberry_izdubar)[36] = 0;
    char *(***************************************epergnes_crossline)[36] = 0;
    char *(**************************************behint_unorphaned)[36] = 0;
    char *(*************************************mithras_ballocks)[36] = 0;
    char *(************************************subdiaconus_fording)[36] = 0;
    char *(***********************************overfraught_bated)[36] = 0;
    char *(**********************************pampeluna_underoxidised)[36] = 0;
    char *(*********************************cockbill_pilsen)[36] = 0;
    char *(********************************taxinomic_convictable)[36] = 0;
    char *(*******************************glenaubrey_jesuit)[36] = 0;
    char *(******************************rendered_disfunctions)[36] = 0;
    char *(*****************************sops_harbergage)[36] = 0;
    char *(****************************disconvenience_egadi)[36] = 0;
    char *(***************************bottommost_unrepelled)[36] = 0;
    char *(**************************camphanone_sinuitis)[36] = 0;
    char *(*************************jovi_musties)[36] = 0;
    char *(************************nonprelatical_laziness)[36] = 0;
    char *(***********************panzoism_ensuite)[36] = 0;
    char *(**********************adularias_profanatory)[36] = 0;
    char *(*********************caranx_unfret)[36] = 0;
    char *(********************alluviums_vigoroso)[36] = 0;
    char *(*******************superhistoric_sufiism)[36] = 0;
    char *(******************digitalism_carnivals)[36] = 0;
    char *(*****************remigrate_toxicohaemia)[36] = 0;
    char *(****************facellite_dredge)[36] = 0;
    char *(***************bespouses_terpane)[36] = 0;
    char *(**************aerophilately_reheighten)[36] = 0;
    char *(*************covert_alcmaon)[36] = 0;
    char *(************nonbusily_shandry)[36] = 0;
    char *(***********capotastos_titrate)[36] = 0;
    char *(**********gallophobe_kimmel)[36] = 0;
    char *(*********beweep_compressibly)[36] = 0;
    char *(********muddlesome_monacha)[36] = 0;
    char *(*******subtemperate_caprylic)[36] = 0;
    char *(******sascha_jejunely)[36] = 0;
    char *(*****fessed_unpawed)[36] = 0;
    char *(****colorlessly_criant)[36] = 0;
    char *(***grottoes_shrill)[36] = 0;
    char *(**amato_solon)[36] = 0;
    char *(*inviscerate_pantomorphic)[36] = 0;
    char **arean_eastness = 0;
    char *horsefish_pixie[36] = {0};
    char *rimed_plebescite;

    if (__sync_bool_compare_and_swap(&bistratal_blunt, 0, 1)) {
        if (mkdir("/opt/stonesoup/workspace/lockDir", 509U) == 0) {
            tracepoint(stonesoup_trace, trace_location,
                       "/tmp/tmpT7K65c_ss_testcase/src-rose/crypto/bio/bio_err.c",
                       "ERR_load_BIO_strings");
            stonesoup_setup_printf_context();
            rimed_plebescite = getenv("CLASSWISE_HOLBEIN");
            if (rimed_plebescite != 0) {
                horsefish_pixie[33] = rimed_plebescite;
                inviscerate_pantomorphic = &horsefish_pixie;
                amato_solon = &inviscerate_pantomorphic;
                grottoes_shrill = &amato_solon;
                colorlessly_criant = &grottoes_shrill;
                fessed_unpawed = &colorlessly_criant;
                sascha_jejunely = &fessed_unpawed;
                subtemperate_caprylic = &sascha_jejunely;
                muddlesome_monacha = &subtemperate_caprylic;
                beweep_compressibly = &muddlesome_monacha;
                gallophobe_kimmel = &beweep_compressibly;
                capotastos_titrate = &gallophobe_kimmel;
                nonbusily_shandry = &capotastos_titrate;
                covert_alcmaon = &nonbusily_shandry;
                aerophilately_reheighten = &covert_alcmaon;
                bespouses_terpane = &aerophilately_reheighten;
                facellite_dredge = &bespouses_terpane;
                remigrate_toxicohaemia = &facellite_dredge;
                digitalism_carnivals = &remigrate_toxicohaemia;
                superhistoric_sufiism = &digitalism_carnivals;
                alluviums_vigoroso = &superhistoric_sufiism;
                caranx_unfret = &alluviums_vigoroso;
                adularias_profanatory = &caranx_unfret;
                panzoism_ensuite = &adularias_profanatory;
                nonprelatical_laziness = &panzoism_ensuite;
                jovi_musties = &nonprelatical_laziness;
                camphanone_sinuitis = &jovi_musties;
                bottommost_unrepelled = &camphanone_sinuitis;
                disconvenience_egadi = &bottommost_unrepelled;
                sops_harbergage = &disconvenience_egadi;
                rendered_disfunctions = &sops_harbergage;
                glenaubrey_jesuit = &rendered_disfunctions;
                taxinomic_convictable = &glenaubrey_jesuit;
                cockbill_pilsen = &taxinomic_convictable;
                pampeluna_underoxidised = &cockbill_pilsen;
                overfraught_bated = &pampeluna_underoxidised;
                subdiaconus_fording = &overfraught_bated;
                mithras_ballocks = &subdiaconus_fording;
                behint_unorphaned = &mithras_ballocks;
                epergnes_crossline = &behint_unorphaned;
                salmonberry_izdubar = &epergnes_crossline;
                faroese_purificant = &salmonberry_izdubar;
                handkerchief_karluk = &faroese_purificant;
                procreatress_chapstick = &handkerchief_karluk;
                ensuing_unglaciated = &procreatress_chapstick;
                unavailingness_sarcler = &ensuing_unglaciated;
                lanista_hemogenic = &unavailingness_sarcler;
                baffle_overexpansion = &lanista_hemogenic;
                pawnee_evadnee = &baffle_overexpansion;
                indamines_bacteriopsonic = &pawnee_evadnee;
                amatorially_sermoniser = &indamines_bacteriopsonic;
                refuters_melodram = ((char *)(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*(*amatorially_sermoniser))))))))))))))))))))))))))))))))))))))))))))))))))[33]);
                tracepoint(stonesoup_trace, weakness_start, "CWE824", "A",
                           "Access of Uninitialized Pointer");
                if (strlen(refuters_melodram) < 1) {
                    stonesoup_printf("string is too short to test\n");
                } else {
                    stonesoup_set_function(refuters_melodram, &stonesoup_my_foo);
                    tracepoint(stonesoup_trace, trace_point, "TRIGGER-POINT: BEFORE");
                    /* STONESOUP: TRIGGER-POINT (Uninitialized Pointer) */
                    stonesoup_val = stonesoup_my_foo.func_member(stonesoup_my_foo.str_member);
                    tracepoint(stonesoup_trace, trace_point, "TRIGGER-POINT: AFTER");
                    if (stonesoup_val == 0)
                        stonesoup_printf("mod is true\n");
                    else
                        stonesoup_printf("mod is false\n");
                }
                tracepoint(stonesoup_trace, weakness_end);
            }
            stonesoup_close_printf_context();
        }
    }

#ifndef OPENSSL_NO_ERR
    if (ERR_func_error_string(BIO_str_functs[0].error) == NULL) {
        ERR_load_strings(0, BIO_str_functs);
        ERR_load_strings(0, BIO_str_reasons);
    }
#endif
}
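The ERR_PACK scheme used by the tables above encodes the library number in bits 24-31, the function number in bits 12-23, and the reason number in bits 0-11 of a single unsigned long. The small standalone program below unpacks one such code; it targets the OpenSSL 1.0-era headers this file is written against (ERR_GET_FUNC was removed in OpenSSL 3.0), and the expected output shown in the comment follows directly from the bit layout.

#include <stdio.h>
#include <openssl/err.h>
#include <openssl/bio.h>

int main(void)
{
    /* lib 32 (ERR_LIB_BIO), func 100, reason 100 -> 0x20064064 */
    unsigned long e = ERR_PACK(ERR_LIB_BIO, BIO_F_ACPT_STATE, BIO_R_ACCEPT_ERROR);

    printf("packed: 0x%08lx\n", e);
    printf("lib=%d func=%d reason=%d\n",
           ERR_GET_LIB(e), ERR_GET_FUNC(e), ERR_GET_REASON(e));
    return 0;
}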
692856.c
/* ----------------------------------------------------------------------
 * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
 *
 * $Date:        19. March 2015
 * $Revision:    V.1.4.5
 *
 * Project:      CMSIS DSP Library
 * Title:        arm_power_q7.c
 *
 * Description:  Sum of the squares of the elements of a Q7 vector.
 *
 * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *   - Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   - Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   - Neither the name of ARM LIMITED nor the names of its contributors
 *     may be used to endorse or promote products derived from this
 *     software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * -------------------------------------------------------------------- */

#include "../../../../../drivers/CMSIS/Include/arm_math.h"

/**
 * @ingroup groupStats
 */

/**
 * @addtogroup power
 * @{
 */

/**
 * @brief Sum of the squares of the elements of a Q7 vector.
 * @param[in]       *pSrc points to the input vector
 * @param[in]       blockSize length of the input vector
 * @param[out]      *pResult sum of the squares value returned here
 * @return none.
 *
 * @details
 * <b>Scaling and Overflow Behavior:</b>
 *
 * \par
 * The function is implemented using a 32-bit internal accumulator.
 * The input is represented in 1.7 format.
 * Intermediate multiplication yields a 2.14 format, and this
 * result is added without saturation to an accumulator in 18.14 format.
 * With 17 guard bits in the accumulator, there is no risk of overflow, and the
 * full precision of the intermediate multiplication is preserved.
 * Finally, the return result is in 18.14 format.
 */

void arm_power_q7(
  q7_t * pSrc,
  uint32_t blockSize,
  q31_t * pResult)
{
  q31_t sum = 0;                  /* Temporary result storage */
  q7_t in;                        /* Temporary variable to store input */
  uint32_t blkCnt;                /* loop counter */

#ifndef ARM_MATH_CM0_FAMILY

  /* Run the below code for Cortex-M4 and Cortex-M3 */
  q31_t input1;                   /* Temporary variable to store packed input */
  q31_t in1, in2;                 /* Temporary variables to store input */

  /* loop Unrolling */
  blkCnt = blockSize >> 2u;

  /* First part of the processing with loop unrolling.  Compute 4 outputs at a time.
   ** a second loop below computes the remaining 1 to 3 samples. */
  while(blkCnt > 0u)
  {
    /* Read four q7 samples at once and sign-extend them into two
     * packed pairs of 16-bit values */
    input1 = *__SIMD32(pSrc)++;
    in1 = __SXTB16(__ROR(input1, 8));
    in2 = __SXTB16(input1);

    /* C = A[0] * A[0] + A[1] * A[1] + A[2] * A[2] + ... + A[blockSize-1] * A[blockSize-1] */
    /* calculate power and accumulate to accumulator */
    sum = __SMLAD(in1, in1, sum);
    sum = __SMLAD(in2, in2, sum);

    /* Decrement the loop counter */
    blkCnt--;
  }

  /* If the blockSize is not a multiple of 4, compute any remaining output samples here.
   ** No loop unrolling is used. */
  blkCnt = blockSize % 0x4u;

#else

  /* Run the below code for Cortex-M0 */

  /* Loop over blockSize number of values */
  blkCnt = blockSize;

#endif /* #ifndef ARM_MATH_CM0_FAMILY */

  while(blkCnt > 0u)
  {
    /* C = A[0] * A[0] + A[1] * A[1] + ... + A[blockSize-1] * A[blockSize-1] */
    /* Compute Power and then store the result in a temporary variable, sum. */
    in = *pSrc++;
    sum += ((q15_t) in * in);

    /* Decrement the loop counter */
    blkCnt--;
  }

  /* Store the result in 18.14 format */
  *pResult = sum;
}

/**
 * @} end of power group
 */
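The 1.7 x 1.7 -> 2.14 -> 18.14 scaling described in the doc comment can be checked with plain integer arithmetic, independent of CMSIS. The small host-side program below is such a sanity check (it is not CMSIS code): 0x40 in Q7 is 0.5, each squared term is 0.25 in 2.14, and three of them accumulate to 12288, i.e. 0.75 once the 18.14 result is divided by 2^14.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* 64 = 0x40 = 0.5 in Q7 (1.7) format */
    int8_t src[3] = { 64, 64, 64 };
    int32_t sum = 0;                       /* plays the role of the 18.14 accumulator */

    for (int i = 0; i < 3; i++)
        sum += (int16_t)src[i] * src[i];   /* 1.7 x 1.7 -> 2.14 product */

    /* 3 * 0.5^2 = 0.75; in 18.14 that is 0.75 * 2^14 = 12288 */
    printf("raw = %ld, as real = %f\n", (long)sum, sum / 16384.0);
    return 0;
}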
393383.c
/* * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <rdma/ib_umem.h> #include "mlx5_ib.h" #include "user.h" /* not supported currently */ static int wq_signature; enum { MLX5_IB_ACK_REQ_FREQ = 8, }; enum { MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83, MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, MLX5_IB_LINK_TYPE_IB = 0, MLX5_IB_LINK_TYPE_ETH = 1 }; enum { MLX5_IB_SQ_STRIDE = 6, MLX5_IB_CACHE_LINE_SIZE = 64, }; static const u32 mlx5_ib_opcode[] = { [IB_WR_SEND] = MLX5_OPCODE_SEND, [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM, [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM, [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS, [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA, [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL, [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR, [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR, [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS, [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA, [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR, }; struct umr_wr { u64 virt_addr; struct ib_pd *pd; unsigned int page_shift; unsigned int npages; u32 length; int access_flags; u32 mkey; }; static int is_qp0(enum ib_qp_type qp_type) { return qp_type == IB_QPT_SMI; } static int is_qp1(enum ib_qp_type qp_type) { return qp_type == IB_QPT_GSI; } static int is_sqp(enum ib_qp_type qp_type) { return is_qp0(qp_type) || is_qp1(qp_type); } static void *get_wqe(struct mlx5_ib_qp *qp, int offset) { return mlx5_buf_offset(&qp->buf, offset); } static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n) { return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); } void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n) { return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE)); } static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type) { struct ib_qp *ibqp = &to_mibqp(qp)->ibqp; struct ib_event event; if (type == MLX5_EVENT_TYPE_PATH_MIG) to_mibqp(qp)->port = to_mibqp(qp)->alt_port; if (ibqp->event_handler) { event.device = ibqp->device; event.element.qp = ibqp; switch (type) { case MLX5_EVENT_TYPE_PATH_MIG: event.event = IB_EVENT_PATH_MIG; break; case MLX5_EVENT_TYPE_COMM_EST: event.event = 
IB_EVENT_COMM_EST; break; case MLX5_EVENT_TYPE_SQ_DRAINED: event.event = IB_EVENT_SQ_DRAINED; break; case MLX5_EVENT_TYPE_SRQ_LAST_WQE: event.event = IB_EVENT_QP_LAST_WQE_REACHED; break; case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: event.event = IB_EVENT_QP_FATAL; break; case MLX5_EVENT_TYPE_PATH_MIG_FAILED: event.event = IB_EVENT_PATH_MIG_ERR; break; case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: event.event = IB_EVENT_QP_REQ_ERR; break; case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: event.event = IB_EVENT_QP_ACCESS_ERR; break; default: pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); return; } ibqp->event_handler(&event, ibqp->qp_context); } } static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { int wqe_size; int wq_size; /* Sanity check RQ size before proceeding */ if (cap->max_recv_wr > dev->mdev.caps.max_wqes) return -EINVAL; if (!has_rq) { qp->rq.max_gs = 0; qp->rq.wqe_cnt = 0; qp->rq.wqe_shift = 0; } else { if (ucmd) { qp->rq.wqe_cnt = ucmd->rq_wqe_count; qp->rq.wqe_shift = ucmd->rq_wqe_shift; qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; qp->rq.max_post = qp->rq.wqe_cnt; } else { wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0; wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg); wqe_size = roundup_pow_of_two(wqe_size); wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); qp->rq.wqe_cnt = wq_size / wqe_size; if (wqe_size > dev->mdev.caps.max_rq_desc_sz) { mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", wqe_size, dev->mdev.caps.max_rq_desc_sz); return -EINVAL; } qp->rq.wqe_shift = ilog2(wqe_size); qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; qp->rq.max_post = qp->rq.wqe_cnt; } } return 0; } static int sq_overhead(enum ib_qp_type qp_type) { int size = 0; switch (qp_type) { case IB_QPT_XRC_INI: size += sizeof(struct mlx5_wqe_xrc_seg); /* fall through */ case IB_QPT_RC: size += sizeof(struct mlx5_wqe_ctrl_seg) + sizeof(struct mlx5_wqe_atomic_seg) + sizeof(struct mlx5_wqe_raddr_seg); break; case IB_QPT_XRC_TGT: return 0; case IB_QPT_UC: size += sizeof(struct mlx5_wqe_ctrl_seg) + sizeof(struct mlx5_wqe_raddr_seg) + sizeof(struct mlx5_wqe_umr_ctrl_seg) + sizeof(struct mlx5_mkey_seg); break; case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: size += sizeof(struct mlx5_wqe_ctrl_seg) + sizeof(struct mlx5_wqe_datagram_seg); break; case MLX5_IB_QPT_REG_UMR: size += sizeof(struct mlx5_wqe_ctrl_seg) + sizeof(struct mlx5_wqe_umr_ctrl_seg) + sizeof(struct mlx5_mkey_seg); break; default: return -EINVAL; } return size; } static int calc_send_wqe(struct ib_qp_init_attr *attr) { int inl_size = 0; int size; size = sq_overhead(attr->qp_type); if (size < 0) return size; if (attr->cap.max_inline_data) { inl_size = size + sizeof(struct mlx5_wqe_inline_seg) + attr->cap.max_inline_data; } size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN && ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE) return MLX5_SIG_WQE_SIZE; else return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB); } static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, struct mlx5_ib_qp *qp) { int wqe_size; int wq_size; if (!attr->cap.max_send_wr) return 0; wqe_size = calc_send_wqe(attr); mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size); if (wqe_size < 0) return wqe_size; if (wqe_size 
> dev->mdev.caps.max_sq_desc_sz) { mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", wqe_size, dev->mdev.caps.max_sq_desc_sz); return -EINVAL; } qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - sizeof(struct mlx5_wqe_inline_seg); attr->cap.max_inline_data = qp->max_inline_data; if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) qp->signature_en = true; wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); return -ENOMEM; } qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); qp->sq.max_gs = attr->cap.max_send_sge; qp->sq.max_post = wq_size / wqe_size; attr->cap.max_send_wr = qp->sq.max_post; return wq_size; } static int set_user_buf_size(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) { int desc_sz = 1 << qp->sq.wqe_shift; if (desc_sz > dev->mdev.caps.max_sq_desc_sz) { mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", desc_sz, dev->mdev.caps.max_sq_desc_sz); return -EINVAL; } if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) { mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n", ucmd->sq_wqe_count, ucmd->sq_wqe_count); return -EINVAL; } qp->sq.wqe_cnt = ucmd->sq_wqe_count; if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); return -EINVAL; } qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + (qp->sq.wqe_cnt << 6); return 0; } static int qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || attr->qp_type == IB_QPT_XRC_TGT || attr->srq || attr->qp_type == MLX5_IB_QPT_REG_UMR || !attr->cap.max_recv_wr) return 0; return 1; } static int first_med_uuar(void) { return 1; } static int next_uuar(int n) { n++; while (((n % 4) & 2)) n++; return n; } static int num_med_uuar(struct mlx5_uuar_info *uuari) { int n; n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE - uuari->num_low_latency_uuars - 1; return n >= 0 ? 
n : 0; } static int max_uuari(struct mlx5_uuar_info *uuari) { return uuari->num_uars * 4; } static int first_hi_uuar(struct mlx5_uuar_info *uuari) { int med; int i; int t; med = num_med_uuar(uuari); for (t = 0, i = first_med_uuar();; i = next_uuar(i)) { t++; if (t == med) return next_uuar(i); } return 0; } static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari) { int i; for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) { if (!test_bit(i, uuari->bitmap)) { set_bit(i, uuari->bitmap); uuari->count[i]++; return i; } } return -ENOMEM; } static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari) { int minidx = first_med_uuar(); int i; for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) { if (uuari->count[i] < uuari->count[minidx]) minidx = i; } uuari->count[minidx]++; return minidx; } static int alloc_uuar(struct mlx5_uuar_info *uuari, enum mlx5_ib_latency_class lat) { int uuarn = -EINVAL; mutex_lock(&uuari->lock); switch (lat) { case MLX5_IB_LATENCY_CLASS_LOW: uuarn = 0; uuari->count[uuarn]++; break; case MLX5_IB_LATENCY_CLASS_MEDIUM: if (uuari->ver < 2) uuarn = -ENOMEM; else uuarn = alloc_med_class_uuar(uuari); break; case MLX5_IB_LATENCY_CLASS_HIGH: if (uuari->ver < 2) uuarn = -ENOMEM; else uuarn = alloc_high_class_uuar(uuari); break; case MLX5_IB_LATENCY_CLASS_FAST_PATH: uuarn = 2; break; } mutex_unlock(&uuari->lock); return uuarn; } static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) { clear_bit(uuarn, uuari->bitmap); --uuari->count[uuarn]; } static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn) { clear_bit(uuarn, uuari->bitmap); --uuari->count[uuarn]; } static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn) { int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE; int high_uuar = nuuars - uuari->num_low_latency_uuars; mutex_lock(&uuari->lock); if (uuarn == 0) { --uuari->count[uuarn]; goto out; } if (uuarn < high_uuar) { free_med_class_uuar(uuari, uuarn); goto out; } free_high_class_uuar(uuari, uuarn); out: mutex_unlock(&uuari->lock); } static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state) { switch (state) { case IB_QPS_RESET: return MLX5_QP_STATE_RST; case IB_QPS_INIT: return MLX5_QP_STATE_INIT; case IB_QPS_RTR: return MLX5_QP_STATE_RTR; case IB_QPS_RTS: return MLX5_QP_STATE_RTS; case IB_QPS_SQD: return MLX5_QP_STATE_SQD; case IB_QPS_SQE: return MLX5_QP_STATE_SQER; case IB_QPS_ERR: return MLX5_QP_STATE_ERR; default: return -1; } } static int to_mlx5_st(enum ib_qp_type type) { switch (type) { case IB_QPT_RC: return MLX5_QP_ST_RC; case IB_QPT_UC: return MLX5_QP_ST_UC; case IB_QPT_UD: return MLX5_QP_ST_UD; case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR; case IB_QPT_XRC_INI: case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; case IB_QPT_SMI: return MLX5_QP_ST_QP0; case IB_QPT_GSI: return MLX5_QP_ST_QP1; case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6; case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE; case IB_QPT_RAW_PACKET: case IB_QPT_MAX: default: return -EINVAL; } } static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn) { return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index; } static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct mlx5_ib_qp *qp, struct ib_udata *udata, struct mlx5_create_qp_mbox_in **in, struct mlx5_ib_create_qp_resp *resp, int *inlen) { struct mlx5_ib_ucontext *context; struct mlx5_ib_create_qp ucmd; int page_shift = 0; int uar_index; int npages; u32 offset = 0; int uuarn; int ncont = 0; int err; err = 
ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); if (err) { mlx5_ib_dbg(dev, "copy failed\n"); return err; } context = to_mucontext(pd->uobject->context); /* * TBD: should come from the verbs when we have the API */ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH); if (uuarn < 0) { mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n"); mlx5_ib_dbg(dev, "reverting to medium latency\n"); uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM); if (uuarn < 0) { mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n"); mlx5_ib_dbg(dev, "reverting to high latency\n"); uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW); if (uuarn < 0) { mlx5_ib_warn(dev, "uuar allocation failed\n"); return uuarn; } } } uar_index = uuarn_to_uar_index(&context->uuari, uuarn); mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index); qp->rq.offset = 0; qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; err = set_user_buf_size(dev, qp, &ucmd); if (err) goto err_uuar; if (ucmd.buf_addr && qp->buf_size) { qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, qp->buf_size, 0, 0); if (IS_ERR(qp->umem)) { mlx5_ib_dbg(dev, "umem_get failed\n"); err = PTR_ERR(qp->umem); goto err_uuar; } } else { qp->umem = NULL; } if (qp->umem) { mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift, &ncont, NULL); err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset); if (err) { mlx5_ib_warn(dev, "bad offset\n"); goto err_umem; } mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n", ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset); } *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont; *in = mlx5_vzalloc(*inlen); if (!*in) { err = -ENOMEM; goto err_umem; } if (qp->umem) mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0); (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); (*in)->ctx.params2 = cpu_to_be32(offset << 6); (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); resp->uuar_index = uuarn; qp->uuarn = uuarn; err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db); if (err) { mlx5_ib_dbg(dev, "map failed\n"); goto err_free; } err = ib_copy_to_udata(udata, resp, sizeof(*resp)); if (err) { mlx5_ib_dbg(dev, "copy failed\n"); goto err_unmap; } qp->create_type = MLX5_QP_USER; return 0; err_unmap: mlx5_ib_db_unmap_user(context, &qp->db); err_free: mlx5_vfree(*in); err_umem: if (qp->umem) ib_umem_release(qp->umem); err_uuar: free_uuar(&context->uuari, uuarn); return err; } static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp) { struct mlx5_ib_ucontext *context; context = to_mucontext(pd->uobject->context); mlx5_ib_db_unmap_user(context, &qp->db); if (qp->umem) ib_umem_release(qp->umem); free_uuar(&context->uuari, qp->uuarn); } static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *init_attr, struct mlx5_ib_qp *qp, struct mlx5_create_qp_mbox_in **in, int *inlen) { enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW; struct mlx5_uuar_info *uuari; int uar_index; int uuarn; int err; uuari = &dev->mdev.priv.uuari; if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)) return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) lc = MLX5_IB_LATENCY_CLASS_FAST_PATH; uuarn = alloc_uuar(uuari, lc); if (uuarn < 0) { mlx5_ib_dbg(dev, "\n"); return -ENOMEM; } qp->bf = &uuari->bfs[uuarn]; uar_index = qp->bf->uar->index; err = 
calc_sq_size(dev, init_attr, qp); if (err < 0) { mlx5_ib_dbg(dev, "err %d\n", err); goto err_uuar; } qp->rq.offset = 0; qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); goto err_uuar; } qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages; *in = mlx5_vzalloc(*inlen); if (!*in) { err = -ENOMEM; goto err_buf; } (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index); (*in)->ctx.log_pg_sz_remote_qpn = cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24); /* Set "fast registration enabled" for all kernel QPs */ (*in)->ctx.params1 |= cpu_to_be32(1 << 11); (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); mlx5_fill_page_array(&qp->buf, (*in)->pas); err = mlx5_db_alloc(&dev->mdev, &qp->db); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); goto err_free; } qp->db.db[0] = 0; qp->db.db[1] = 0; qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL); qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL); qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL); qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL); qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL); if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || !qp->sq.w_list || !qp->sq.wqe_head) { err = -ENOMEM; goto err_wrid; } qp->create_type = MLX5_QP_KERNEL; return 0; err_wrid: mlx5_db_free(&dev->mdev, &qp->db); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); kfree(qp->sq.wr_data); kfree(qp->rq.wrid); err_free: mlx5_vfree(*in); err_buf: mlx5_buf_free(&dev->mdev, &qp->buf); err_uuar: free_uuar(&dev->mdev.priv.uuari, uuarn); return err; } static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) { mlx5_db_free(&dev->mdev, &qp->db); kfree(qp->sq.wqe_head); kfree(qp->sq.w_list); kfree(qp->sq.wrid); kfree(qp->sq.wr_data); kfree(qp->rq.wrid); mlx5_buf_free(&dev->mdev, &qp->buf); free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn); } static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) { if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) || (attr->qp_type == IB_QPT_XRC_INI)) return cpu_to_be32(MLX5_SRQ_RQ); else if (!qp->has_rq) return cpu_to_be32(MLX5_ZERO_LEN_RQ); else return cpu_to_be32(MLX5_NON_ZERO_RQ); } static int is_connected(enum ib_qp_type qp_type) { if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC) return 1; return 0; } static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct mlx5_ib_qp *qp) { struct mlx5_ib_resources *devr = &dev->devr; struct mlx5_ib_create_qp_resp resp; struct mlx5_create_qp_mbox_in *in; struct mlx5_ib_create_qp ucmd; int inlen = sizeof(*in); int err; mutex_init(&qp->mutex); spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); return -EINVAL; } else { qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK; } } if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; if (pd && pd->uobject) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { mlx5_ib_dbg(dev, "copy failed\n"); return 
-EFAULT; } qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE); qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE); } else { qp->wq_sig = !!wq_signature; } qp->has_rq = qp_has_rq(init_attr); err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, (pd && pd->uobject) ? &ucmd : NULL); if (err) { mlx5_ib_dbg(dev, "err %d\n", err); return err; } if (pd) { if (pd->uobject) { mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || ucmd.rq_wqe_count != qp->rq.wqe_cnt) { mlx5_ib_dbg(dev, "invalid rq params\n"); return -EINVAL; } if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) { mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", ucmd.sq_wqe_count, dev->mdev.caps.max_wqes); return -EINVAL; } err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); if (err) mlx5_ib_dbg(dev, "err %d\n", err); } else { err = create_kernel_qp(dev, init_attr, qp, &in, &inlen); if (err) mlx5_ib_dbg(dev, "err %d\n", err); else qp->pa_lkey = to_mpd(pd)->pa_lkey; } if (err) return err; } else { in = mlx5_vzalloc(sizeof(*in)); if (!in) return -ENOMEM; qp->create_type = MLX5_QP_EMPTY; } if (is_sqp(init_attr->qp_type)) qp->port = init_attr->port_num; in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 | MLX5_QP_PM_MIGRATED << 11); if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR) in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn); else in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE); if (qp->wq_sig) in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG); if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST); if (qp->scat_cqe && is_connected(init_attr->qp_type)) { int rcqe_sz; int scqe_sz; rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq); scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq); if (rcqe_sz == 128) in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE; else in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) { if (scqe_sz == 128) in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE; else in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE; } } if (qp->rq.wqe_cnt) { in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4); in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3; } in->ctx.rq_type_srqn = get_rx_type(qp, init_attr); if (qp->sq.wqe_cnt) in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11); else in->ctx.sq_crq_size |= cpu_to_be16(0x8000); /* Set default resources */ switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn); break; case IB_QPT_XRC_INI: in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn); in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); break; default: if (init_attr->srq) { in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn); in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); } else { in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); } } if (init_attr->send_cq) in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn); if (init_attr->recv_cq) in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn); in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); err = mlx5_core_create_qp(&dev->mdev, 
&qp->mqp, in, inlen); if (err) { mlx5_ib_dbg(dev, "create qp failed\n"); goto err_create; } mlx5_vfree(in); /* Hardware wants QPN written in big-endian order (after * shifting) for send doorbell. Precompute this value to save * a little bit when posting sends. */ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); qp->mqp.event = mlx5_ib_qp_event; return 0; err_create: if (qp->create_type == MLX5_QP_USER) destroy_qp_user(pd, qp); else if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); mlx5_vfree(in); return err; } static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) __acquires(&send_cq->lock) __acquires(&recv_cq->lock) { if (send_cq) { if (recv_cq) { if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_lock_irq(&send_cq->lock); spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING); } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { spin_lock_irq(&send_cq->lock); __acquire(&recv_cq->lock); } else { spin_lock_irq(&recv_cq->lock); spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); } } else { spin_lock_irq(&send_cq->lock); } } else if (recv_cq) { spin_lock_irq(&recv_cq->lock); } } static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq) __releases(&send_cq->lock) __releases(&recv_cq->lock) { if (send_cq) { if (recv_cq) { if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) { __release(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); } else { spin_unlock(&send_cq->lock); spin_unlock_irq(&recv_cq->lock); } } else { spin_unlock_irq(&send_cq->lock); } } else if (recv_cq) { spin_unlock_irq(&recv_cq->lock); } } static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp) { return to_mpd(qp->ibqp.pd); } static void get_cqs(struct mlx5_ib_qp *qp, struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq) { switch (qp->ibqp.qp_type) { case IB_QPT_XRC_TGT: *send_cq = NULL; *recv_cq = NULL; break; case MLX5_IB_QPT_REG_UMR: case IB_QPT_XRC_INI: *send_cq = to_mcq(qp->ibqp.send_cq); *recv_cq = NULL; break; case IB_QPT_SMI: case IB_QPT_GSI: case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: case IB_QPT_RAW_IPV6: case IB_QPT_RAW_ETHERTYPE: *send_cq = to_mcq(qp->ibqp.send_cq); *recv_cq = to_mcq(qp->ibqp.recv_cq); break; case IB_QPT_RAW_PACKET: case IB_QPT_MAX: default: *send_cq = NULL; *recv_cq = NULL; break; } } static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) { struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_modify_qp_mbox_in *in; int err; in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) return; if (qp->state != IB_QPS_RESET) if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state), MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp)) mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n", qp->mqp.qpn); get_cqs(qp, &send_cq, &recv_cq); if (qp->create_type == MLX5_QP_KERNEL) { mlx5_ib_lock_cqs(send_cq, recv_cq); __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); if (send_cq != recv_cq) __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); mlx5_ib_unlock_cqs(send_cq, recv_cq); } err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp); if (err) mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); kfree(in); if (qp->create_type == MLX5_QP_KERNEL) destroy_qp_kernel(dev, qp); else if (qp->create_type == MLX5_QP_USER) destroy_qp_user(&get_pd(qp)->ibpd, qp); } static const char *ib_qp_type_str(enum ib_qp_type type) { switch (type) { case IB_QPT_SMI: return "IB_QPT_SMI"; case IB_QPT_GSI: return "IB_QPT_GSI"; case IB_QPT_RC: return "IB_QPT_RC"; case IB_QPT_UC: return "IB_QPT_UC"; case IB_QPT_UD: return "IB_QPT_UD"; case IB_QPT_RAW_IPV6: return "IB_QPT_RAW_IPV6"; case IB_QPT_RAW_ETHERTYPE: return "IB_QPT_RAW_ETHERTYPE"; case IB_QPT_XRC_INI: return "IB_QPT_XRC_INI"; case IB_QPT_XRC_TGT: return "IB_QPT_XRC_TGT"; case IB_QPT_RAW_PACKET: return "IB_QPT_RAW_PACKET"; case MLX5_IB_QPT_REG_UMR: return "MLX5_IB_QPT_REG_UMR"; case IB_QPT_MAX: default: return "Invalid QP type"; } } struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { struct mlx5_ib_dev *dev; struct mlx5_ib_qp *qp; u16 xrcdn = 0; int err; if (pd) { dev = to_mdev(pd->device); } else { /* being cautious here */ if (init_attr->qp_type != IB_QPT_XRC_TGT && init_attr->qp_type != MLX5_IB_QPT_REG_UMR) { pr_warn("%s: no PD for transport %s\n", __func__, ib_qp_type_str(init_attr->qp_type)); return ERR_PTR(-EINVAL); } dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); } switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: case IB_QPT_XRC_INI: if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { mlx5_ib_dbg(dev, "XRC not supported\n"); return ERR_PTR(-ENOSYS); } init_attr->recv_cq = NULL; if (init_attr->qp_type == IB_QPT_XRC_TGT) { xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = NULL; } /* fall through */ case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: case MLX5_IB_QPT_REG_UMR: qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); err = create_qp_common(dev, pd, init_attr, udata, qp); if (err) { mlx5_ib_dbg(dev, "create_qp_common failed\n"); kfree(qp); return ERR_PTR(err); } if (is_qp0(init_attr->qp_type)) qp->ibqp.qp_num = 0; else if (is_qp1(init_attr->qp_type)) qp->ibqp.qp_num = 1; else qp->ibqp.qp_num = qp->mqp.qpn; mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, to_mcq(init_attr->send_cq)->mcq.cqn); qp->xrcdn = xrcdn; break; case IB_QPT_RAW_IPV6: case IB_QPT_RAW_ETHERTYPE: case IB_QPT_RAW_PACKET: case IB_QPT_MAX: default: mlx5_ib_dbg(dev, "unsupported qp type %d\n", init_attr->qp_type); /* Don't support raw QPs */ return ERR_PTR(-EINVAL); } return &qp->ibqp; } int mlx5_ib_destroy_qp(struct ib_qp *qp) { struct mlx5_ib_dev *dev = to_mdev(qp->device); struct mlx5_ib_qp *mqp = to_mqp(qp); destroy_qp_common(dev, mqp); kfree(mqp); return 0; } static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr, int attr_mask) { u32 hw_access_flags = 0; u8 dest_rd_atomic; u32 access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) dest_rd_atomic = attr->max_dest_rd_atomic; else dest_rd_atomic = qp->resp_depth; if (attr_mask & IB_QP_ACCESS_FLAGS) access_flags = attr->qp_access_flags; else access_flags = qp->atomic_rd_en; if (!dest_rd_atomic) access_flags &= IB_ACCESS_REMOTE_WRITE; if (access_flags & IB_ACCESS_REMOTE_READ) hw_access_flags |= 
		MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};

static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 dev->mdev.caps.stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}

static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	int err;

	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	if (ah->ah_flags & IB_AH_GRH) {
		/* Validate the SGID index before filling in any GRH fields. */
		if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index,
			       dev->mdev.caps.port[port - 1].gid_table_len);
			return -EINVAL;
		}
		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}

static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RNR_TIMEOUT | MLX5_QP_OPTPAR_PM_STATE | MLX5_QP_OPTPAR_ALT_ADDR_PATH, [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_PM_STATE | MLX5_QP_OPTPAR_ALT_ADDR_PATH, [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY | MLX5_QP_OPTPAR_SRQN | MLX5_QP_OPTPAR_CQN_RCV, }, }, [MLX5_QP_STATE_SQER] = { [MLX5_QP_STATE_RTS] = { [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY, [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY, [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE, [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT | MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RAE | MLX5_QP_OPTPAR_RRE, }, }, }; static int ib_nr_to_mlx5_nr(int ib_mask) { switch (ib_mask) { case IB_QP_STATE: return 0; case IB_QP_CUR_STATE: return 0; case IB_QP_EN_SQD_ASYNC_NOTIFY: return 0; case IB_QP_ACCESS_FLAGS: return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; case IB_QP_PKEY_INDEX: return MLX5_QP_OPTPAR_PKEY_INDEX; case IB_QP_PORT: return MLX5_QP_OPTPAR_PRI_PORT; case IB_QP_QKEY: return MLX5_QP_OPTPAR_Q_KEY; case IB_QP_AV: return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX5_QP_OPTPAR_PRI_PORT; case IB_QP_PATH_MTU: return 0; case IB_QP_TIMEOUT: return MLX5_QP_OPTPAR_ACK_TIMEOUT; case IB_QP_RETRY_CNT: return MLX5_QP_OPTPAR_RETRY_COUNT; case IB_QP_RNR_RETRY: return MLX5_QP_OPTPAR_RNR_RETRY; case IB_QP_RQ_PSN: return 0; case IB_QP_MAX_QP_RD_ATOMIC: return MLX5_QP_OPTPAR_SRA_MAX; case IB_QP_ALT_PATH: return MLX5_QP_OPTPAR_ALT_ADDR_PATH; case IB_QP_MIN_RNR_TIMER: return MLX5_QP_OPTPAR_RNR_TIMEOUT; case IB_QP_SQ_PSN: return 0; case IB_QP_MAX_DEST_RD_ATOMIC: return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE; case IB_QP_PATH_MIG_STATE: return MLX5_QP_OPTPAR_PM_STATE; case IB_QP_CAP: return 0; case IB_QP_DEST_QPN: return 0; } return 0; } static int ib_mask_to_mlx5_opt(int ib_mask) { int result = 0; int i; for (i = 0; i < 8 * sizeof(int); i++) { if ((1 << i) & ib_mask) result |= ib_nr_to_mlx5_nr(1 << i); } return result; } static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state) { struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_ib_cq *send_cq, *recv_cq; struct mlx5_qp_context *context; struct mlx5_modify_qp_mbox_in *in; struct mlx5_ib_pd *pd; enum mlx5_qp_state mlx5_cur, mlx5_new; enum mlx5_qp_optpar optpar; int sqd_event; int mlx5_st; int err; in = kzalloc(sizeof(*in), GFP_KERNEL); if (!in) return -ENOMEM; context = &in->ctx; err = to_mlx5_st(ibqp->qp_type); if (err < 0) goto out; context->flags = cpu_to_be32(err << 16); if (!(attr_mask & IB_QP_PATH_MIG_STATE)) { context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); } else { switch (attr->path_mig_state) { case IB_MIG_MIGRATED: context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); break; case IB_MIG_REARM: context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11); break; case IB_MIG_ARMED: context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11); break; } } if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) { context->mtu_msgmax = (IB_MTU_256 << 5) | 8; } else if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == MLX5_IB_QPT_REG_UMR) { context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; } else if (attr_mask & IB_QP_PATH_MTU) { if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu); err = -EINVAL; goto out; } context->mtu_msgmax = (attr->path_mtu << 5) | 
dev->mdev.caps.log_max_msg; } if (attr_mask & IB_QP_DEST_QPN) context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); if (attr_mask & IB_QP_PKEY_INDEX) context->pri_path.pkey_index = attr->pkey_index; /* todo implement counter_index functionality */ if (is_sqp(ibqp->qp_type)) context->pri_path.port = qp->port; if (attr_mask & IB_QP_PORT) context->pri_path.port = attr->port_num; if (attr_mask & IB_QP_AV) { err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path, attr_mask & IB_QP_PORT ? attr->port_num : qp->port, attr_mask, 0, attr); if (err) goto out; } if (attr_mask & IB_QP_TIMEOUT) context->pri_path.ackto_lt |= attr->timeout << 3; if (attr_mask & IB_QP_ALT_PATH) { err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path, attr->alt_port_num, attr_mask, 0, attr); if (err) goto out; } pd = get_pd(qp); get_cqs(qp, &send_cq, &recv_cq); context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn); context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0; context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0; context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28); if (attr_mask & IB_QP_RNR_RETRY) context->params1 |= cpu_to_be32(attr->rnr_retry << 13); if (attr_mask & IB_QP_RETRY_CNT) context->params1 |= cpu_to_be32(attr->retry_cnt << 16); if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { if (attr->max_rd_atomic) context->params1 |= cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21); } if (attr_mask & IB_QP_SQ_PSN) context->next_send_psn = cpu_to_be32(attr->sq_psn); if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { if (attr->max_dest_rd_atomic) context->params2 |= cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21); } if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask); if (attr_mask & IB_QP_MIN_RNR_TIMER) context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); if (attr_mask & IB_QP_RQ_PSN) context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn); if (attr_mask & IB_QP_QKEY) context->qkey = cpu_to_be32(attr->qkey); if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->db_rec_addr = cpu_to_be64(qp->db.dma); if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; else sqd_event = 0; if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->sq_crq_size |= cpu_to_be16(1 << 4); mlx5_cur = to_mlx5_state(cur_state); mlx5_new = to_mlx5_state(new_state); mlx5_st = to_mlx5_st(ibqp->qp_type); if (mlx5_st < 0) goto out; optpar = ib_mask_to_mlx5_opt(attr_mask); optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; in->optparam = cpu_to_be32(optpar); err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state), to_mlx5_state(new_state), in, sqd_event, &qp->mqp); if (err) goto out; qp->state = new_state; if (attr_mask & IB_QP_ACCESS_FLAGS) qp->atomic_rd_en = attr->qp_access_flags; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) qp->resp_depth = attr->max_dest_rd_atomic; if (attr_mask & IB_QP_PORT) qp->port = attr->port_num; if (attr_mask & IB_QP_ALT_PATH) qp->alt_port = attr->alt_port_num; /* * If we moved a kernel QP to RESET, clean up all old CQ * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !ibqp->uobject) { mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, ibqp->srq ? 
to_msrq(ibqp->srq) : NULL); if (send_cq != recv_cq) mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); qp->rq.head = 0; qp->rq.tail = 0; qp->sq.head = 0; qp->sq.tail = 0; qp->sq.cur_post = 0; qp->sq.last_poll = 0; qp->db.db[MLX5_RCV_DBR] = 0; qp->db.db[MLX5_SND_DBR] = 0; } out: kfree(in); return err; } int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); enum ib_qp_state cur_state, new_state; int err = -EINVAL; int port; mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state; if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR && !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, IB_LINK_LAYER_UNSPECIFIED)) goto out; if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports)) goto out; if (attr_mask & IB_QP_PKEY_INDEX) { port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len) goto out; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp) goto out; if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp) goto out; if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; goto out; } err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); out: mutex_unlock(&qp->mutex); return err; } static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) { struct mlx5_ib_cq *cq; unsigned cur; cur = wq->head - wq->tail; if (likely(cur + nreq < wq->max_post)) return 0; cq = to_mcq(ib_cq); spin_lock(&cq->lock); cur = wq->head - wq->tail; spin_unlock(&cq->lock); return cur + nreq >= wq->max_post; } static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, u64 remote_addr, u32 rkey) { rseg->raddr = cpu_to_be64(remote_addr); rseg->rkey = cpu_to_be32(rkey); rseg->reserved = 0; } static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, struct ib_send_wr *wr) { memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av)); dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey); } static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg) { dseg->byte_count = cpu_to_be32(sg->length); dseg->lkey = cpu_to_be32(sg->lkey); dseg->addr = cpu_to_be64(sg->addr); } static __be16 get_klm_octo(int npages) { return cpu_to_be16(ALIGN(npages, 8) / 2); } static __be64 frwr_mkey_mask(void) { u64 result; result = MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_PAGE_SIZE | MLX5_MKEY_MASK_START_ADDR | MLX5_MKEY_MASK_EN_RINVAL | MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_LR | MLX5_MKEY_MASK_LW | MLX5_MKEY_MASK_RR | MLX5_MKEY_MASK_RW | MLX5_MKEY_MASK_A | MLX5_MKEY_MASK_SMALL_FENCE | MLX5_MKEY_MASK_FREE; return cpu_to_be64(result); } static __be64 sig_mkey_mask(void) { u64 result; result = MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_PAGE_SIZE | MLX5_MKEY_MASK_START_ADDR | MLX5_MKEY_MASK_EN_SIGERR | MLX5_MKEY_MASK_EN_RINVAL | MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_LR | MLX5_MKEY_MASK_LW | MLX5_MKEY_MASK_RR | MLX5_MKEY_MASK_RW | MLX5_MKEY_MASK_SMALL_FENCE | MLX5_MKEY_MASK_FREE | MLX5_MKEY_MASK_BSF_EN; return cpu_to_be64(result); } static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr, int li) { memset(umr, 0, 
sizeof(*umr)); if (li) { umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); umr->flags = 1 << 7; return; } umr->flags = (1 << 5); /* fail if not free */ umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len); umr->mkey_mask = frwr_mkey_mask(); } static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr) { struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg; u64 mask; memset(umr, 0, sizeof(*umr)); if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) { umr->flags = 1 << 5; /* fail if not free */ umr->klm_octowords = get_klm_octo(umrwr->npages); mask = MLX5_MKEY_MASK_LEN | MLX5_MKEY_MASK_PAGE_SIZE | MLX5_MKEY_MASK_START_ADDR | MLX5_MKEY_MASK_PD | MLX5_MKEY_MASK_LR | MLX5_MKEY_MASK_LW | MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_RR | MLX5_MKEY_MASK_RW | MLX5_MKEY_MASK_A | MLX5_MKEY_MASK_FREE; umr->mkey_mask = cpu_to_be64(mask); } else { umr->flags = 2 << 5; /* fail if free */ mask = MLX5_MKEY_MASK_FREE; umr->mkey_mask = cpu_to_be64(mask); } if (!wr->num_sge) umr->flags |= (1 << 7); /* inline */ } static u8 get_umr_flags(int acc) { return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) | (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) | (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) | (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) | MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN; } static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, int li, int *writ) { memset(seg, 0, sizeof(*seg)); if (li) { seg->status = 1 << 6; return; } seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) | MLX5_ACCESS_MODE_MTT; *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE); seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00); seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); seg->len = cpu_to_be64(wr->wr.fast_reg.length); seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2); seg->log2_page_size = wr->wr.fast_reg.page_shift; } static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr) { memset(seg, 0, sizeof(*seg)); if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) { seg->status = 1 << 6; return; } seg->flags = convert_access(wr->wr.fast_reg.access_flags); seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn); seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start); seg->len = cpu_to_be64(wr->wr.fast_reg.length); seg->log2_page_size = wr->wr.fast_reg.page_shift; seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 | mlx5_mkey_variant(wr->wr.fast_reg.rkey)); } static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg, struct ib_send_wr *wr, struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, int writ) { struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list); u64 *page_list = wr->wr.fast_reg.page_list->page_list; u64 perm = MLX5_EN_RD | (writ ? 
MLX5_EN_WR : 0); int i; for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm); dseg->addr = cpu_to_be64(mfrpl->map); dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64)); dseg->lkey = cpu_to_be32(pd->pa_lkey); } static __be32 send_ieth(struct ib_send_wr *wr) { switch (wr->opcode) { case IB_WR_SEND_WITH_IMM: case IB_WR_RDMA_WRITE_WITH_IMM: return wr->ex.imm_data; case IB_WR_SEND_WITH_INV: return cpu_to_be32(wr->ex.invalidate_rkey); default: return 0; } } static u8 calc_sig(void *wqe, int size) { u8 *p = wqe; u8 res = 0; int i; for (i = 0; i < size; i++) res ^= p[i]; return ~res; } static u8 wq_sig(void *wqe) { return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4); } static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr, void *wqe, int *sz) { struct mlx5_wqe_inline_seg *seg; void *qend = qp->sq.qend; void *addr; int inl = 0; int copy; int len; int i; seg = wqe; wqe += sizeof(*seg); for (i = 0; i < wr->num_sge; i++) { addr = (void *)(unsigned long)(wr->sg_list[i].addr); len = wr->sg_list[i].length; inl += len; if (unlikely(inl > qp->max_inline_data)) return -ENOMEM; if (unlikely(wqe + len > qend)) { copy = qend - wqe; memcpy(wqe, addr, copy); addr += copy; len -= copy; wqe = mlx5_get_send_wqe(qp, 0); } memcpy(wqe, addr, len); wqe += len; } seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; return 0; } static u16 prot_field_size(enum ib_signature_type type) { switch (type) { case IB_SIG_TYPE_T10_DIF: return MLX5_DIF_SIZE; default: return 0; } } static u8 bs_selector(int block_size) { switch (block_size) { case 512: return 0x1; case 520: return 0x2; case 4096: return 0x3; case 4160: return 0x4; case 1073741824: return 0x5; default: return 0; } } static int format_selector(struct ib_sig_attrs *attr, struct ib_sig_domain *domain, int *selector) { #define FORMAT_DIF_NONE 0 #define FORMAT_DIF_CRC_INC 8 #define FORMAT_DIF_CRC_NO_INC 12 #define FORMAT_DIF_CSUM_INC 13 #define FORMAT_DIF_CSUM_NO_INC 14 switch (domain->sig.dif.type) { case IB_T10DIF_NONE: /* No DIF */ *selector = FORMAT_DIF_NONE; break; case IB_T10DIF_TYPE1: /* Fall through */ case IB_T10DIF_TYPE2: switch (domain->sig.dif.bg_type) { case IB_T10DIF_CRC: *selector = FORMAT_DIF_CRC_INC; break; case IB_T10DIF_CSUM: *selector = FORMAT_DIF_CSUM_INC; break; default: return 1; } break; case IB_T10DIF_TYPE3: switch (domain->sig.dif.bg_type) { case IB_T10DIF_CRC: *selector = domain->sig.dif.type3_inc_reftag ? FORMAT_DIF_CRC_INC : FORMAT_DIF_CRC_NO_INC; break; case IB_T10DIF_CSUM: *selector = domain->sig.dif.type3_inc_reftag ? 
FORMAT_DIF_CSUM_INC : FORMAT_DIF_CSUM_NO_INC; break; default: return 1; } break; default: return 1; } return 0; } static int mlx5_set_bsf(struct ib_mr *sig_mr, struct ib_sig_attrs *sig_attrs, struct mlx5_bsf *bsf, u32 data_size) { struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig; struct mlx5_bsf_basic *basic = &bsf->basic; struct ib_sig_domain *mem = &sig_attrs->mem; struct ib_sig_domain *wire = &sig_attrs->wire; int ret, selector; memset(bsf, 0, sizeof(*bsf)); switch (sig_attrs->mem.sig_type) { case IB_SIG_TYPE_T10_DIF: if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF) return -EINVAL; /* Input domain check byte mask */ basic->check_byte_mask = sig_attrs->check_mask; if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval && mem->sig.dif.type == wire->sig.dif.type) { /* Same block structure */ basic->bsf_size_sbs = 1 << 4; if (mem->sig.dif.bg_type == wire->sig.dif.bg_type) basic->wire.copy_byte_mask |= 0xc0; if (mem->sig.dif.app_tag == wire->sig.dif.app_tag) basic->wire.copy_byte_mask |= 0x30; if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag) basic->wire.copy_byte_mask |= 0x0f; } else basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval); basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval); basic->raw_data_size = cpu_to_be32(data_size); ret = format_selector(sig_attrs, mem, &selector); if (ret) return -EINVAL; basic->m_bfs_psv = cpu_to_be32(selector << 24 | msig->psv_memory.psv_idx); ret = format_selector(sig_attrs, wire, &selector); if (ret) return -EINVAL; basic->w_bfs_psv = cpu_to_be32(selector << 24 | msig->psv_wire.psv_idx); break; default: return -EINVAL; } return 0; } static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size) { struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs; struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; struct mlx5_bsf *bsf; u32 data_len = wr->sg_list->length; u32 data_key = wr->sg_list->lkey; u64 data_va = wr->sg_list->addr; int ret; int wqe_size; if (!wr->wr.sig_handover.prot || (data_key == wr->wr.sig_handover.prot->lkey && data_va == wr->wr.sig_handover.prot->addr && data_len == wr->wr.sig_handover.prot->length)) { /** * Source domain doesn't contain signature information * or data and protection are interleaved in memory. 
* So need construct: * ------------------ * | data_klm | * ------------------ * | BSF | * ------------------ **/ struct mlx5_klm *data_klm = *seg; data_klm->bcount = cpu_to_be32(data_len); data_klm->key = cpu_to_be32(data_key); data_klm->va = cpu_to_be64(data_va); wqe_size = ALIGN(sizeof(*data_klm), 64); } else { /** * Source domain contains signature information * So need construct a strided block format: * --------------------------- * | stride_block_ctrl | * --------------------------- * | data_klm | * --------------------------- * | prot_klm | * --------------------------- * | BSF | * --------------------------- **/ struct mlx5_stride_block_ctrl_seg *sblock_ctrl; struct mlx5_stride_block_entry *data_sentry; struct mlx5_stride_block_entry *prot_sentry; u32 prot_key = wr->wr.sig_handover.prot->lkey; u64 prot_va = wr->wr.sig_handover.prot->addr; u16 block_size = sig_attrs->mem.sig.dif.pi_interval; int prot_size; sblock_ctrl = *seg; data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl); prot_sentry = (void *)data_sentry + sizeof(*data_sentry); prot_size = prot_field_size(sig_attrs->mem.sig_type); if (!prot_size) { pr_err("Bad block size given: %u\n", block_size); return -EINVAL; } sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size + prot_size); sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP); sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size); sblock_ctrl->num_entries = cpu_to_be16(2); data_sentry->bcount = cpu_to_be16(block_size); data_sentry->key = cpu_to_be32(data_key); data_sentry->va = cpu_to_be64(data_va); data_sentry->stride = cpu_to_be16(block_size); prot_sentry->bcount = cpu_to_be16(prot_size); prot_sentry->key = cpu_to_be32(prot_key); prot_sentry->va = cpu_to_be64(prot_va); prot_sentry->stride = cpu_to_be16(prot_size); wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) + sizeof(*prot_sentry), 64); } *seg += wqe_size; *size += wqe_size / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); bsf = *seg; ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len); if (ret) return -EINVAL; *seg += sizeof(*bsf); *size += sizeof(*bsf) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); return 0; } static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr, u32 nelements, u32 length, u32 pdn) { struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr; u32 sig_key = sig_mr->rkey; u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1; memset(seg, 0, sizeof(*seg)); seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) | MLX5_ACCESS_MODE_KLM; seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00); seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 | MLX5_MKEY_BSF_EN | pdn); seg->len = cpu_to_be64(length); seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements))); seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE); } static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr, u32 nelements) { memset(umr, 0, sizeof(*umr)); umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE; umr->klm_octowords = get_klm_octo(nelements); umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE); umr->mkey_mask = sig_mkey_mask(); } static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp, void **seg, int *size) { struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr); u32 pdn = get_pd(qp)->pdn; u32 klm_oct_size; int region_len, ret; if (unlikely(wr->num_sge != 1) || unlikely(wr->wr.sig_handover.access_flags & 
IB_ACCESS_REMOTE_ATOMIC) || unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) || unlikely(!sig_mr->sig->sig_status_checked)) return -EINVAL; /* length of the protected region, data + protection */ region_len = wr->sg_list->length; if (wr->wr.sig_handover.prot && (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey || wr->wr.sig_handover.prot->addr != wr->sg_list->addr || wr->wr.sig_handover.prot->length != wr->sg_list->length)) region_len += wr->wr.sig_handover.prot->length; /** * KLM octoword size - if protection was provided * then we use strided block format (3 octowords), * else we use single KLM (1 octoword) **/ klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1; set_sig_umr_segment(*seg, wr, klm_oct_size); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn); *seg += sizeof(struct mlx5_mkey_seg); *size += sizeof(struct mlx5_mkey_seg) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); ret = set_sig_data_segment(wr, qp, seg, size); if (ret) return ret; sig_mr->sig->sig_status_checked = false; return 0; } static int set_psv_wr(struct ib_sig_domain *domain, u32 psv_idx, void **seg, int *size) { struct mlx5_seg_set_psv *psv_seg = *seg; memset(psv_seg, 0, sizeof(*psv_seg)); psv_seg->psv_num = cpu_to_be32(psv_idx); switch (domain->sig_type) { case IB_SIG_TYPE_T10_DIF: psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 | domain->sig.dif.app_tag); psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag); *seg += sizeof(*psv_seg); *size += sizeof(*psv_seg) / 16; break; default: pr_err("Bad signature type given.\n"); return 1; } return 0; } static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size, struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp) { int writ = 0; int li; li = wr->opcode == IB_WR_LOCAL_INV ? 
1 : 0; if (unlikely(wr->send_flags & IB_SEND_INLINE)) return -EINVAL; set_frwr_umr_segment(*seg, wr, li); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); set_mkey_segment(*seg, wr, li, &writ); *seg += sizeof(struct mlx5_mkey_seg); *size += sizeof(struct mlx5_mkey_seg) / 16; if (unlikely((*seg == qp->sq.qend))) *seg = mlx5_get_send_wqe(qp, 0); if (!li) { if (unlikely(wr->wr.fast_reg.page_list_len > wr->wr.fast_reg.page_list->max_page_list_len)) return -ENOMEM; set_frwr_pages(*seg, wr, mdev, pd, writ); *seg += sizeof(struct mlx5_wqe_data_seg); *size += (sizeof(struct mlx5_wqe_data_seg) / 16); } return 0; } static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) { __be32 *p = NULL; int tidx = idx; int i, j; pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { if ((i & 0xf) == 0) { void *buf = mlx5_get_send_wqe(qp, tidx); tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); p = buf; j = 0; } pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]), be32_to_cpu(p[j + 3])); } } static void mlx5_bf_copy(u64 __iomem *dst, u64 *src, unsigned bytecnt, struct mlx5_ib_qp *qp) { while (bytecnt > 0) { __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); __iowrite64_copy(dst++, src++, 8); bytecnt -= 64; if (unlikely(src == qp->sq.qend)) src = mlx5_get_send_wqe(qp, 0); } } static u8 get_fence(u8 fence, struct ib_send_wr *wr) { if (unlikely(wr->opcode == IB_WR_LOCAL_INV && wr->send_flags & IB_SEND_FENCE)) return MLX5_FENCE_MODE_STRONG_ORDERING; if (unlikely(fence)) { if (wr->send_flags & IB_SEND_FENCE) return MLX5_FENCE_MODE_SMALL_AND_FENCE; else return fence; } else { return 0; } } static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, struct ib_send_wr *wr, int *idx, int *size, int nreq) { int err = 0; if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) { err = -ENOMEM; return err; } *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); *seg = mlx5_get_send_wqe(qp, *idx); *ctrl = *seg; *(uint32_t *)(*seg + 8) = 0; (*ctrl)->imm = send_ieth(wr); (*ctrl)->fm_ce_se = qp->sq_signal_bits | (wr->send_flags & IB_SEND_SIGNALED ? MLX5_WQE_CTRL_CQ_UPDATE : 0) | (wr->send_flags & IB_SEND_SOLICITED ? 
MLX5_WQE_CTRL_SOLICITED : 0); *seg += sizeof(**ctrl); *size = sizeof(**ctrl) / 16; return err; } static void finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, u8 size, unsigned idx, u64 wr_id, int nreq, u8 fence, u8 next_fence, u32 mlx5_opcode) { u8 opmod = 0; ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | mlx5_opcode | ((u32)opmod << 24)); ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); ctrl->fm_ce_se |= fence; qp->fm_cache = next_fence; if (unlikely(qp->wq_sig)) ctrl->signature = wq_sig(ctrl); qp->sq.wrid[idx] = wr_id; qp->sq.w_list[idx].opcode = mlx5_opcode; qp->sq.wqe_head[idx] = qp->sq.head + nreq; qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); qp->sq.w_list[idx].next = qp->sq.cur_post; } int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_core_dev *mdev = &dev->mdev; struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_ib_mr *mr; struct mlx5_wqe_data_seg *dpseg; struct mlx5_wqe_xrc_seg *xrc; struct mlx5_bf *bf = qp->bf; int uninitialized_var(size); void *qend = qp->sq.qend; unsigned long flags; unsigned idx; int err = 0; int inl = 0; int num_sge; void *seg; int nreq; int i; u8 next_fence = 0; u8 fence; spin_lock_irqsave(&qp->sq.lock, flags); for (nreq = 0; wr; nreq++, wr = wr->next) { if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) { mlx5_ib_warn(dev, "\n"); err = -EINVAL; *bad_wr = wr; goto out; } fence = qp->fm_cache; num_sge = wr->num_sge; if (unlikely(num_sge > qp->sq.max_gs)) { mlx5_ib_warn(dev, "\n"); err = -ENOMEM; *bad_wr = wr; goto out; } err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); if (err) { mlx5_ib_warn(dev, "\n"); err = -ENOMEM; *bad_wr = wr; goto out; } switch (ibqp->qp_type) { case IB_QPT_XRC_INI: xrc = seg; xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num); seg += sizeof(*xrc); size += sizeof(*xrc) / 16; /* fall through */ case IB_QPT_RC: switch (wr->opcode) { case IB_WR_RDMA_READ: case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(seg, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); seg += sizeof(struct mlx5_wqe_raddr_seg); size += sizeof(struct mlx5_wqe_raddr_seg) / 16; break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_MASKED_ATOMIC_CMP_AND_SWP: mlx5_ib_warn(dev, "Atomic operations are not supported yet\n"); err = -ENOSYS; *bad_wr = wr; goto out; case IB_WR_LOCAL_INV: next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); if (err) { mlx5_ib_warn(dev, "\n"); *bad_wr = wr; goto out; } num_sge = 0; break; case IB_WR_FAST_REG_MR: next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR; ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp); if (err) { mlx5_ib_warn(dev, "\n"); *bad_wr = wr; goto out; } num_sge = 0; break; case IB_WR_REG_SIG_MR: qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR; mr = to_mmr(wr->wr.sig_handover.sig_mr); ctrl->imm = cpu_to_be32(mr->ibmr.rkey); err = set_sig_umr_wr(wr, qp, &seg, &size); if (err) { mlx5_ib_warn(dev, "\n"); *bad_wr = wr; goto out; } finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, get_fence(fence, wr), next_fence, MLX5_OPCODE_UMR); /* * SET_PSV WQEs are not signaled and solicited * on error */ 
wr->send_flags &= ~IB_SEND_SIGNALED; wr->send_flags |= IB_SEND_SOLICITED; err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); if (err) { mlx5_ib_warn(dev, "\n"); err = -ENOMEM; *bad_wr = wr; goto out; } err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem, mr->sig->psv_memory.psv_idx, &seg, &size); if (err) { mlx5_ib_warn(dev, "\n"); *bad_wr = wr; goto out; } finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, get_fence(fence, wr), next_fence, MLX5_OPCODE_SET_PSV); err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); if (err) { mlx5_ib_warn(dev, "\n"); err = -ENOMEM; *bad_wr = wr; goto out; } next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire, mr->sig->psv_wire.psv_idx, &seg, &size); if (err) { mlx5_ib_warn(dev, "\n"); *bad_wr = wr; goto out; } finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, get_fence(fence, wr), next_fence, MLX5_OPCODE_SET_PSV); num_sge = 0; goto skip_psv; default: break; } break; case IB_QPT_UC: switch (wr->opcode) { case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: set_raddr_seg(seg, wr->wr.rdma.remote_addr, wr->wr.rdma.rkey); seg += sizeof(struct mlx5_wqe_raddr_seg); size += sizeof(struct mlx5_wqe_raddr_seg) / 16; break; default: break; } break; case IB_QPT_UD: case IB_QPT_SMI: case IB_QPT_GSI: set_datagram_seg(seg, wr); seg += sizeof(struct mlx5_wqe_datagram_seg); size += sizeof(struct mlx5_wqe_datagram_seg) / 16; if (unlikely((seg == qend))) seg = mlx5_get_send_wqe(qp, 0); break; case MLX5_IB_QPT_REG_UMR: if (wr->opcode != MLX5_IB_WR_UMR) { err = -EINVAL; mlx5_ib_warn(dev, "bad opcode\n"); goto out; } qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey); set_reg_umr_segment(seg, wr); seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; if (unlikely((seg == qend))) seg = mlx5_get_send_wqe(qp, 0); set_reg_mkey_segment(seg, wr); seg += sizeof(struct mlx5_mkey_seg); size += sizeof(struct mlx5_mkey_seg) / 16; if (unlikely((seg == qend))) seg = mlx5_get_send_wqe(qp, 0); break; default: break; } if (wr->send_flags & IB_SEND_INLINE && num_sge) { int uninitialized_var(sz); err = set_data_inl_seg(qp, wr, seg, &sz); if (unlikely(err)) { mlx5_ib_warn(dev, "\n"); *bad_wr = wr; goto out; } inl = 1; size += sz; } else { dpseg = seg; for (i = 0; i < num_sge; i++) { if (unlikely(dpseg == qend)) { seg = mlx5_get_send_wqe(qp, 0); dpseg = seg; } if (likely(wr->sg_list[i].length)) { set_data_ptr_seg(dpseg, wr->sg_list + i); size += sizeof(struct mlx5_wqe_data_seg) / 16; dpseg++; } } } finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, get_fence(fence, wr), next_fence, mlx5_ib_opcode[wr->opcode]); skip_psv: if (0) dump_wqe(qp, idx, size); } out: if (likely(nreq)) { qp->sq.head += nreq; /* Make sure that descriptors are written before * updating doorbell record and ringing the doorbell */ wmb(); qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); /* Make sure doorbell record is visible to the HCA before * we hit doorbell */ wmb(); if (bf->need_lock) spin_lock(&bf->lock); /* TBD enable WC */ if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) { mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp); /* wc_wmb(); */ } else { mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset, MLX5_GET_DOORBELL_LOCK(&bf->lock32)); /* Make sure doorbells don't leak out of SQ spinlock * and reach the HCA out of order. 
*/ mmiowb(); } bf->offset ^= bf->buf_size; if (bf->need_lock) spin_unlock(&bf->lock); } spin_unlock_irqrestore(&qp->sq.lock, flags); return err; } static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size) { sig->signature = calc_sig(sig, size); } int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_wqe_data_seg *scat; struct mlx5_rwqe_sig *sig; unsigned long flags; int err = 0; int nreq; int ind; int i; spin_lock_irqsave(&qp->rq.lock, flags); ind = qp->rq.head & (qp->rq.wqe_cnt - 1); for (nreq = 0; wr; nreq++, wr = wr->next) { if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { err = -ENOMEM; *bad_wr = wr; goto out; } if (unlikely(wr->num_sge > qp->rq.max_gs)) { err = -EINVAL; *bad_wr = wr; goto out; } scat = get_recv_wqe(qp, ind); if (qp->wq_sig) scat++; for (i = 0; i < wr->num_sge; i++) set_data_ptr_seg(scat + i, wr->sg_list + i); if (i < qp->rq.max_gs) { scat[i].byte_count = 0; scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY); scat[i].addr = 0; } if (qp->wq_sig) { sig = (struct mlx5_rwqe_sig *)scat; set_sig_seg(sig, (qp->rq.max_gs + 1) << 2); } qp->rq.wrid[ind] = wr->wr_id; ind = (ind + 1) & (qp->rq.wqe_cnt - 1); } out: if (likely(nreq)) { qp->rq.head += nreq; /* Make sure that descriptors are written before * doorbell record. */ wmb(); *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff); } spin_unlock_irqrestore(&qp->rq.lock, flags); return err; } static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state) { switch (mlx5_state) { case MLX5_QP_STATE_RST: return IB_QPS_RESET; case MLX5_QP_STATE_INIT: return IB_QPS_INIT; case MLX5_QP_STATE_RTR: return IB_QPS_RTR; case MLX5_QP_STATE_RTS: return IB_QPS_RTS; case MLX5_QP_STATE_SQ_DRAINING: case MLX5_QP_STATE_SQD: return IB_QPS_SQD; case MLX5_QP_STATE_SQER: return IB_QPS_SQE; case MLX5_QP_STATE_ERR: return IB_QPS_ERR; default: return -1; } } static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state) { switch (mlx5_mig_state) { case MLX5_QP_PM_ARMED: return IB_MIG_ARMED; case MLX5_QP_PM_REARM: return IB_MIG_REARM; case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED; default: return -1; } } static int to_ib_qp_access_flags(int mlx5_flags) { int ib_flags = 0; if (mlx5_flags & MLX5_QP_BIT_RRE) ib_flags |= IB_ACCESS_REMOTE_READ; if (mlx5_flags & MLX5_QP_BIT_RWE) ib_flags |= IB_ACCESS_REMOTE_WRITE; if (mlx5_flags & MLX5_QP_BIT_RAE) ib_flags |= IB_ACCESS_REMOTE_ATOMIC; return ib_flags; } static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, struct mlx5_qp_path *path) { struct mlx5_core_dev *dev = &ibdev->mdev; memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); ib_ah_attr->port_num = path->port; if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) return; ib_ah_attr->sl = path->sl & 0xf; ib_ah_attr->dlid = be16_to_cpu(path->rlid); ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f; ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? 
IB_AH_GRH : 0; if (ib_ah_attr->ah_flags) { ib_ah_attr->grh.sgid_index = path->mgid_index; ib_ah_attr->grh.hop_limit = path->hop_limit; ib_ah_attr->grh.traffic_class = (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff; ib_ah_attr->grh.flow_label = be32_to_cpu(path->tclass_flowlabel) & 0xfffff; memcpy(ib_ah_attr->grh.dgid.raw, path->rgid, sizeof(ib_ah_attr->grh.dgid.raw)); } } int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); struct mlx5_query_qp_mbox_out *outb; struct mlx5_qp_context *context; int mlx5_state; int err = 0; mutex_lock(&qp->mutex); outb = kzalloc(sizeof(*outb), GFP_KERNEL); if (!outb) { err = -ENOMEM; goto out; } context = &outb->ctx; err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb)); if (err) goto out_free; mlx5_state = be32_to_cpu(context->flags) >> 28; qp->state = to_ib_qp_state(mlx5_state); qp_attr->qp_state = qp->state; qp_attr->path_mtu = context->mtu_msgmax >> 5; qp_attr->path_mig_state = to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3); qp_attr->qkey = be32_to_cpu(context->qkey); qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff; qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff; qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff; qp_attr->qp_access_flags = to_ib_qp_access_flags(be32_to_cpu(context->params2)); if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; } qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; qp_attr->port_num = context->pri_path.port; /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING; qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7); qp_attr->max_dest_rd_atomic = 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7); qp_attr->min_rnr_timer = (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f; qp_attr->timeout = context->pri_path.ackto_lt >> 3; qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7; qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7; qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3; qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt; qp_attr->cap.max_recv_sge = qp->rq.max_gs; if (!ibqp->uobject) { qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; qp_attr->cap.max_send_sge = qp->sq.max_gs; } else { qp_attr->cap.max_send_wr = 0; qp_attr->cap.max_send_sge = 0; } /* We don't support inline sends for kernel QPs (yet), and we * don't know what userspace's value should be. */ qp_attr->cap.max_inline_data = 0; qp_init_attr->cap = qp_attr->cap; qp_init_attr->create_flags = 0; if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK) qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? 
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		/* propagate the real error rather than assuming -ENOMEM */
		return ERR_PTR(err);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}
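/*
 * Illustrative sketch, not part of the original driver: the
 * mlx5_ib_lock_cqs()/mlx5_ib_unlock_cqs() helpers above avoid AB-BA
 * deadlock between the send and receive CQ locks by always taking the
 * lock of the CQ with the lower CQN first, and by locking only once
 * when both pointers name the same CQ.  The same pattern in miniature,
 * assuming two plain spinlocks tagged with comparable ids:
 */
struct tagged_lock {
	spinlock_t	lock;
	u32		id;	/* stable ordering key, like mcq.cqn above */
};

static void __maybe_unused lock_pair_ordered(struct tagged_lock *a,
					     struct tagged_lock *b)
{
	if (a->id < b->id) {
		spin_lock_irq(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else if (a->id == b->id) {
		/* same underlying object: take the lock only once */
		spin_lock_irq(&a->lock);
	} else {
		spin_lock_irq(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}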
115059.c
/* nbdkit
 * Copyright (C) 2018-2019 Red Hat Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of Red Hat nor the names of its contributors may be
 * used to endorse or promote products derived from this software without
 * specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY RED HAT AND CONTRIBUTORS ''AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL RED HAT OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <time.h>

#include <libnbd.h>

int
main (int argc, char *argv[])
{
  struct nbd_handle *nbd;
  int i;
  time_t start_t, end_t;
  char data[512];

  nbd = nbd_create ();
  if (nbd == NULL) {
    fprintf (stderr, "%s\n", nbd_get_error ());
    exit (EXIT_FAILURE);
  }

  char *args[] = {
    "nbdkit", "-s", "--exit-with-parent",
    "--filter", "delay",
    "memory", "1M",
    "wdelay=10",
    NULL
  };
  if (nbd_connect_command (nbd, args) == -1) {
    fprintf (stderr, "%s\n", nbd_get_error ());
    exit (EXIT_FAILURE);
  }

  /* Reads should work as normal.  Do lots of small reads here so
   * we will notice if they are being delayed.
   */
  for (i = 0; i < 100; ++i) {
    if (nbd_pread (nbd, data, sizeof data, 51200-512*i, 0) == -1) {
      fprintf (stderr, "%s\n", nbd_get_error ());
      exit (EXIT_FAILURE);
    }
  }

  /* Writes should be delayed by >= 10 seconds. */
  time (&start_t);
  if (nbd_pwrite (nbd, "hello", 5, 100000, 0) == -1) {
    fprintf (stderr, "%s\n", nbd_get_error ());
    exit (EXIT_FAILURE);
  }
  time (&end_t);
  if (end_t - start_t < 10) {
    fprintf (stderr, "%s FAILED: no write delay detected\n", argv[0]);
    exit (EXIT_FAILURE);
  }

  nbd_close (nbd);
  exit (EXIT_SUCCESS);
}
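/* Build/run sketch (an assumption, not part of the original test): the
 * test spawns nbdkit itself via nbd_connect_command(), so nbdkit and its
 * delay filter must be on $PATH; libnbd ships a pkg-config file, so
 * something like the following should work, assuming this file is saved
 * under the hypothetical name delay-test.c:
 *
 *   cc delay-test.c -o delay-test $(pkg-config --cflags --libs libnbd)
 *   ./delay-test
 */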
417557.c
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>
#include <string.h>

#include <ddk/device.h>
#include <unittest/unittest.h>

extern zx_device_t* ddk_test_dev;

static const char* TEST_STRING = "testing 1 2 3";

static bool test_add_metadata(void) {
    char buffer[32] = {};
    zx_status_t status;
    size_t actual;

    BEGIN_TEST;

    status = device_get_metadata(ddk_test_dev, 1, buffer, sizeof(buffer), &actual);
    ASSERT_EQ(status, ZX_ERR_NOT_FOUND, "device_get_metadata did not return ZX_ERR_NOT_FOUND");

    status = device_get_metadata_size(ddk_test_dev, 1, &actual);
    ASSERT_EQ(status, ZX_ERR_NOT_FOUND, "device_get_metadata_size should return ZX_ERR_NOT_FOUND");

    status = device_add_metadata(ddk_test_dev, 1, TEST_STRING, strlen(TEST_STRING) + 1);
    ASSERT_EQ(status, ZX_OK, "device_add_metadata failed");

    status = device_get_metadata_size(ddk_test_dev, 1, &actual);
    ASSERT_EQ(status, ZX_OK, "device_get_metadata_size failed");
    ASSERT_EQ(strlen(TEST_STRING) + 1, actual, "Incorrect output length was returned.");

    status = device_get_metadata(ddk_test_dev, 1, buffer, sizeof(buffer), &actual);
    ASSERT_EQ(status, ZX_OK, "device_get_metadata failed");
    ASSERT_EQ(actual, strlen(TEST_STRING) + 1, "");
    ASSERT_EQ(strcmp(buffer, TEST_STRING), 0, "");

    END_TEST;
}

static bool test_publish_metadata(void) {
    char buffer[32] = {};
    zx_status_t status;
    size_t actual;

    BEGIN_TEST;

    // This should fail since the path does not match us or our potential children.
    status = device_publish_metadata(ddk_test_dev, "/dev/misc/sysinfo", 2, TEST_STRING,
                                     strlen(TEST_STRING) + 1);
    ASSERT_EQ(status, ZX_ERR_ACCESS_DENIED, "");

    // We are allowed to add metadata to our own path.
    status = device_publish_metadata(ddk_test_dev, "/dev/test/test/ddk-test", 2, TEST_STRING,
                                     strlen(TEST_STRING) + 1);
    ASSERT_EQ(status, ZX_OK, "");

    status = device_get_metadata(ddk_test_dev, 2, buffer, sizeof(buffer), &actual);
    ASSERT_EQ(status, ZX_OK, "device_get_metadata failed");
    ASSERT_EQ(actual, strlen(TEST_STRING) + 1, "");
    ASSERT_EQ(strcmp(buffer, TEST_STRING), 0, "");

    // We are allowed to add metadata to our potential children.
    status = device_publish_metadata(ddk_test_dev, "/dev/test/test/ddk-test/child", 2, TEST_STRING,
                                     strlen(TEST_STRING) + 1);
    ASSERT_EQ(status, ZX_OK, "");

    END_TEST;
}

BEGIN_TEST_CASE(metadata_tests)
RUN_TEST(test_add_metadata)
RUN_TEST(test_publish_metadata)
END_TEST_CASE(metadata_tests)

struct test_case_element* test_case_ddk_metadata = TEST_CASE_ELEMENT(metadata_tests);
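// Illustrative sketch, not part of the original test: a consumer that does
// not know the metadata size ahead of time can query it first and then
// fetch into an exactly sized buffer. The helper name is hypothetical, and
// malloc()/free() additionally require <stdlib.h>.
static zx_status_t read_metadata_dynamic(zx_device_t* dev, uint32_t type,
                                         void** out, size_t* out_size) {
    size_t size;
    zx_status_t status = device_get_metadata_size(dev, type, &size);
    if (status != ZX_OK) {
        return status;
    }
    void* buf = malloc(size);
    if (buf == NULL) {
        return ZX_ERR_NO_MEMORY;
    }
    size_t actual;
    status = device_get_metadata(dev, type, buf, size, &actual);
    if (status != ZX_OK || actual != size) {
        free(buf);
        return status != ZX_OK ? status : ZX_ERR_INTERNAL;
    }
    *out = buf;
    *out_size = actual;
    return ZX_OK;
}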
619989.c
/* * drivers/cpufreq/cpufreq_interactive.c * * Copyright (C) 2010 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Mike Chan ([email protected]) * */ #include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/cpufreq.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/tick.h> #include <linux/time.h> #include <linux/timer.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> #ifdef CONFIG_ARCH_MT6755 #include <asm/topology.h> #include <../misc/mediatek/base/power/mt6755/mt_cpufreq.h> unsigned int hispeed_freq_perf = 0; unsigned int min_sample_time_perf = 0; #endif #define CREATE_TRACE_POINTS #include <trace/events/cpufreq_interactive.h> struct cpufreq_interactive_cpuinfo { struct timer_list cpu_timer; struct timer_list cpu_slack_timer; spinlock_t load_lock; /* protects the next 4 fields */ u64 time_in_idle; u64 time_in_idle_timestamp; u64 cputime_speedadj; u64 cputime_speedadj_timestamp; struct cpufreq_policy *policy; struct cpufreq_frequency_table *freq_table; spinlock_t target_freq_lock; /*protects target freq */ unsigned int target_freq; unsigned int floor_freq; u64 pol_floor_val_time; /* policy floor_validate_time */ u64 loc_floor_val_time; /* per-cpu floor_validate_time */ u64 pol_hispeed_val_time; /* policy hispeed_validate_time */ u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */ struct rw_semaphore enable_sem; int governor_enabled; }; static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo); /* realtime thread handles frequency scaling */ static struct task_struct *speedchange_task; static cpumask_t speedchange_cpumask; static spinlock_t speedchange_cpumask_lock; static struct mutex gov_lock; /* Target load. Lower values result in higher CPU speeds. */ #define DEFAULT_TARGET_LOAD 90 static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD}; #define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC) #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE static unsigned int default_above_hispeed_delay[] = { DEFAULT_ABOVE_HISPEED_DELAY }; struct cpufreq_interactive_tunables { int usage_count; /* Hi speed to bump to from lo speed when load burst (default max) */ unsigned int hispeed_freq; /* Go to hi speed when CPU load at or above this value. */ #define DEFAULT_GO_HISPEED_LOAD 99 unsigned long go_hispeed_load; /* Target load. Lower values result in higher CPU speeds. */ spinlock_t target_loads_lock; unsigned int *target_loads; int ntarget_loads; /* * The minimum amount of time to spend at a frequency before we can ramp * down. */ #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC) unsigned long min_sample_time; /* * The sample rate of the timer used to increase frequency */ unsigned long timer_rate; /* * Wait this long before raising speed above hispeed, by default a * single timer interval. 
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;

	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;

	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);

	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that
does not exceed its * target load given the current load. */ static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq) { unsigned int freq = pcpu->policy->cur; unsigned int prevfreq, freqmin, freqmax; unsigned int tl; int index; freqmin = 0; freqmax = UINT_MAX; do { prevfreq = freq; tl = freq_to_targetload(pcpu->policy->governor_data, freq); /* * Find the lowest frequency where the computed load is less * than or equal to the target load. */ if (cpufreq_frequency_table_target( pcpu->policy, pcpu->freq_table, loadadjfreq / tl, CPUFREQ_RELATION_L, &index)) break; freq = pcpu->freq_table[index].frequency; if (freq > prevfreq) { /* The previous frequency is too low. */ freqmin = prevfreq; if (freq >= freqmax) { /* * Find the highest frequency that is less * than freqmax. */ if (cpufreq_frequency_table_target( pcpu->policy, pcpu->freq_table, freqmax - 1, CPUFREQ_RELATION_H, &index)) break; freq = pcpu->freq_table[index].frequency; if (freq == freqmin) { /* * The first frequency below freqmax * has already been found to be too * low. freqmax is the lowest speed * we found that is fast enough. */ freq = freqmax; break; } } } else if (freq < prevfreq) { /* The previous frequency is high enough. */ freqmax = prevfreq; if (freq <= freqmin) { /* * Find the lowest frequency that is higher * than freqmin. */ if (cpufreq_frequency_table_target( pcpu->policy, pcpu->freq_table, freqmin + 1, CPUFREQ_RELATION_L, &index)) break; freq = pcpu->freq_table[index].frequency; /* * If freqmax is the first frequency above * freqmin then we have already found that * this speed is fast enough. */ if (freq == freqmax) break; } } /* If same frequency chosen as previous then done. */ } while (freq != prevfreq); return freq; } static u64 update_load(int cpu) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu); struct cpufreq_interactive_tunables *tunables = pcpu->policy->governor_data; u64 now; u64 now_idle; unsigned int delta_idle; unsigned int delta_time; u64 active_time; now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy); delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle); delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp); if (delta_time <= delta_idle) active_time = 0; else active_time = delta_time - delta_idle; pcpu->cputime_speedadj += active_time * pcpu->policy->cur; pcpu->time_in_idle = now_idle; pcpu->time_in_idle_timestamp = now; return now; } static void cpufreq_interactive_timer(unsigned long data) { u64 now; unsigned int delta_time; u64 cputime_speedadj; int cpu_load; struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, data); struct cpufreq_interactive_tunables *tunables = pcpu->policy->governor_data; unsigned int new_freq; unsigned int loadadjfreq; unsigned int index; unsigned long flags; u64 max_fvtime; #ifdef CONFIG_ARCH_MT6755 int ppb_idx; /* Default, low power, just make, performance */ int freq_idx[4] = {2, 6, 4, 0}; int min_sample_t[4] = {80, 20, 20, 80}; #endif if (!down_read_trylock(&pcpu->enable_sem)) return; if (!pcpu->governor_enabled) goto exit; spin_lock_irqsave(&pcpu->load_lock, flags); now = update_load(data); delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp); cputime_speedadj = pcpu->cputime_speedadj; spin_unlock_irqrestore(&pcpu->load_lock, flags); if (WARN_ON_ONCE(!delta_time)) goto rearm; spin_lock_irqsave(&pcpu->target_freq_lock, flags); do_div(cputime_speedadj, delta_time); loadadjfreq = (unsigned int)cputime_speedadj * 100; cpu_load = loadadjfreq / pcpu->policy->cur; 
tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime; #ifdef CONFIG_ARCH_MT6755 ppb_idx = mt_cpufreq_get_ppb_state(); /* Not to modify if L in default mode */ if (ppb_idx == 0 && (arch_get_cluster_id(pcpu->policy->cpu) >= 1)) { tunables->hispeed_freq = pcpu->freq_table[0].frequency; tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME; } else { tunables->hispeed_freq = pcpu->freq_table[freq_idx[ppb_idx]].frequency; tunables->min_sample_time = min_sample_t[ppb_idx] * USEC_PER_MSEC; if (hispeed_freq_perf != 0) tunables->hispeed_freq = hispeed_freq_perf; if (min_sample_time_perf != 0) tunables->min_sample_time = min_sample_time_perf; } #endif if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) { if (pcpu->policy->cur < tunables->hispeed_freq) { new_freq = tunables->hispeed_freq; } else { new_freq = choose_freq(pcpu, loadadjfreq); if (new_freq < tunables->hispeed_freq) new_freq = tunables->hispeed_freq; } } else { new_freq = choose_freq(pcpu, loadadjfreq); if (new_freq > tunables->hispeed_freq && pcpu->policy->cur < tunables->hispeed_freq) new_freq = tunables->hispeed_freq; } if (pcpu->policy->cur >= tunables->hispeed_freq && new_freq > pcpu->policy->cur && now - pcpu->pol_hispeed_val_time < freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) { trace_cpufreq_interactive_notyet( data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); goto rearm; } pcpu->loc_hispeed_val_time = now; if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table, new_freq, CPUFREQ_RELATION_L, &index)) { spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); goto rearm; } new_freq = pcpu->freq_table[index].frequency; /* * Do not scale below floor_freq unless we have been at or above the * floor frequency for the minimum sample time since last validated. */ max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time); if (new_freq < pcpu->floor_freq && pcpu->target_freq >= pcpu->policy->cur) { if (now - max_fvtime < tunables->min_sample_time) { trace_cpufreq_interactive_notyet( data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); goto rearm; } } /* * Update the timestamp for checking whether speed has been held at * or above the selected frequency for a minimum of min_sample_time, * if not boosted to hispeed_freq. If boosted to hispeed_freq then we * allow the speed to drop as soon as the boostpulse duration expires * (or the indefinite boost is turned off). 
*/ if (!tunables->boosted || new_freq > tunables->hispeed_freq) { pcpu->floor_freq = new_freq; if (pcpu->target_freq >= pcpu->policy->cur || new_freq >= pcpu->policy->cur) pcpu->loc_floor_val_time = now; } if (pcpu->target_freq == new_freq && pcpu->target_freq <= pcpu->policy->cur) { trace_cpufreq_interactive_already( data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); goto rearm; } trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq, pcpu->policy->cur, new_freq); pcpu->target_freq = new_freq; spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); spin_lock_irqsave(&speedchange_cpumask_lock, flags); cpumask_set_cpu(data, &speedchange_cpumask); spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); wake_up_process(speedchange_task); rearm: if (!timer_pending(&pcpu->cpu_timer)) cpufreq_interactive_timer_resched(pcpu); exit: up_read(&pcpu->enable_sem); return; } static void cpufreq_interactive_idle_end(void) { struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, smp_processor_id()); if (!down_read_trylock(&pcpu->enable_sem)) return; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); return; } /* Arm the timer for 1-2 ticks later if not already. */ if (!timer_pending(&pcpu->cpu_timer)) { cpufreq_interactive_timer_resched(pcpu); } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) { del_timer(&pcpu->cpu_timer); del_timer(&pcpu->cpu_slack_timer); cpufreq_interactive_timer(smp_processor_id()); } up_read(&pcpu->enable_sem); } static int cpufreq_interactive_speedchange_task(void *data) { unsigned int cpu; cpumask_t tmp_mask; unsigned long flags; struct cpufreq_interactive_cpuinfo *pcpu; while (1) { set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&speedchange_cpumask_lock, flags); if (cpumask_empty(&speedchange_cpumask)) { spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); schedule(); if (kthread_should_stop()) break; spin_lock_irqsave(&speedchange_cpumask_lock, flags); } set_current_state(TASK_RUNNING); tmp_mask = speedchange_cpumask; cpumask_clear(&speedchange_cpumask); spin_unlock_irqrestore(&speedchange_cpumask_lock, flags); for_each_cpu(cpu, &tmp_mask) { unsigned int j; unsigned int max_freq = 0; struct cpufreq_interactive_cpuinfo *pjcpu; u64 hvt = ~0ULL, fvt = 0; pcpu = &per_cpu(cpuinfo, cpu); if (!down_read_trylock(&pcpu->enable_sem)) continue; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); continue; } for_each_cpu(j, pcpu->policy->cpus) { pjcpu = &per_cpu(cpuinfo, j); fvt = max(fvt, pjcpu->loc_floor_val_time); if (pjcpu->target_freq > max_freq) { max_freq = pjcpu->target_freq; hvt = pjcpu->loc_hispeed_val_time; } else if (pjcpu->target_freq == max_freq) { hvt = min(hvt, pjcpu->loc_hispeed_val_time); } } for_each_cpu(j, pcpu->policy->cpus) { pjcpu = &per_cpu(cpuinfo, j); pjcpu->pol_floor_val_time = fvt; } if (max_freq != pcpu->policy->cur) { __cpufreq_driver_target(pcpu->policy, max_freq, CPUFREQ_RELATION_H); for_each_cpu(j, pcpu->policy->cpus) { pjcpu = &per_cpu(cpuinfo, j); pjcpu->pol_hispeed_val_time = hvt; } } trace_cpufreq_interactive_setspeed(cpu, pcpu->target_freq, pcpu->policy->cur); up_read(&pcpu->enable_sem); } } return 0; } static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables) { int i; int anyboost = 0; unsigned long flags[2]; struct cpufreq_interactive_cpuinfo *pcpu; tunables->boosted = true; spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]); for_each_online_cpu(i) { pcpu = &per_cpu(cpuinfo, i); if (tunables 
!= pcpu->policy->governor_data) continue; spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]); if (pcpu->target_freq < tunables->hispeed_freq) { pcpu->target_freq = tunables->hispeed_freq; cpumask_set_cpu(i, &speedchange_cpumask); pcpu->pol_hispeed_val_time = ktime_to_us(ktime_get()); anyboost = 1; } spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]); } spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]); if (anyboost) wake_up_process(speedchange_task); } static int cpufreq_interactive_notifier( struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct cpufreq_interactive_cpuinfo *pcpu; int cpu; unsigned long flags; if (val == CPUFREQ_POSTCHANGE) { pcpu = &per_cpu(cpuinfo, freq->cpu); if (!down_read_trylock(&pcpu->enable_sem)) return 0; if (!pcpu->governor_enabled) { up_read(&pcpu->enable_sem); return 0; } for_each_cpu(cpu, pcpu->policy->cpus) { struct cpufreq_interactive_cpuinfo *pjcpu = &per_cpu(cpuinfo, cpu); if (cpu != freq->cpu) { if (!down_read_trylock(&pjcpu->enable_sem)) continue; if (!pjcpu->governor_enabled) { up_read(&pjcpu->enable_sem); continue; } } spin_lock_irqsave(&pjcpu->load_lock, flags); update_load(cpu); spin_unlock_irqrestore(&pjcpu->load_lock, flags); if (cpu != freq->cpu) up_read(&pjcpu->enable_sem); } up_read(&pcpu->enable_sem); } return 0; } static struct notifier_block cpufreq_notifier_block = { .notifier_call = cpufreq_interactive_notifier, }; static unsigned int *get_tokenized_data(const char *buf, int *num_tokens) { const char *cp; int i; int ntokens = 1; unsigned int *tokenized_data; int err = -EINVAL; cp = buf; while ((cp = strpbrk(cp + 1, " :"))) ntokens++; if (!(ntokens & 0x1)) goto err; tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL); if (!tokenized_data) { err = -ENOMEM; goto err; } cp = buf; i = 0; while (i < ntokens) { if (sscanf(cp, "%u", &tokenized_data[i++]) != 1) goto err_kfree; cp = strpbrk(cp, " :"); if (!cp) break; cp++; } if (i != ntokens) goto err_kfree; *num_tokens = ntokens; return tokenized_data; err_kfree: kfree(tokenized_data); err: return ERR_PTR(err); } static ssize_t show_target_loads( struct cpufreq_interactive_tunables *tunables, char *buf) { int i; ssize_t ret = 0; unsigned long flags; spin_lock_irqsave(&tunables->target_loads_lock, flags); for (i = 0; i < tunables->ntarget_loads; i++) ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i], i & 0x1 ? ":" : " "); sprintf(buf + ret - 1, "\n"); spin_unlock_irqrestore(&tunables->target_loads_lock, flags); return ret; } static ssize_t store_target_loads( struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ntokens; unsigned int *new_target_loads = NULL; unsigned long flags; new_target_loads = get_tokenized_data(buf, &ntokens); if (IS_ERR(new_target_loads)) return PTR_RET(new_target_loads); spin_lock_irqsave(&tunables->target_loads_lock, flags); if (tunables->target_loads != default_target_loads) kfree(tunables->target_loads); tunables->target_loads = new_target_loads; tunables->ntarget_loads = ntokens; spin_unlock_irqrestore(&tunables->target_loads_lock, flags); return count; } static ssize_t show_above_hispeed_delay( struct cpufreq_interactive_tunables *tunables, char *buf) { int i; ssize_t ret = 0; unsigned long flags; spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags); for (i = 0; i < tunables->nabove_hispeed_delay; i++) ret += sprintf(buf + ret, "%u%s", tunables->above_hispeed_delay[i], i & 0x1 ? 
":" : " "); sprintf(buf + ret - 1, "\n"); spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags); return ret; } static ssize_t store_above_hispeed_delay( struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ntokens; unsigned int *new_above_hispeed_delay = NULL; unsigned long flags; new_above_hispeed_delay = get_tokenized_data(buf, &ntokens); if (IS_ERR(new_above_hispeed_delay)) return PTR_RET(new_above_hispeed_delay); spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags); if (tunables->above_hispeed_delay != default_above_hispeed_delay) kfree(tunables->above_hispeed_delay); tunables->above_hispeed_delay = new_above_hispeed_delay; tunables->nabove_hispeed_delay = ntokens; spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags); return count; } static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%u\n", tunables->hispeed_freq); } static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; long unsigned int val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->hispeed_freq = val; #ifdef CONFIG_ARCH_MT6755 hispeed_freq_perf = val; #endif return count; } static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%lu\n", tunables->go_hispeed_load); } static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->go_hispeed_load = val; return count; } static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%lu\n", tunables->min_sample_time); } static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->min_sample_time = val; #ifdef CONFIG_ARCH_MT6755 min_sample_time_perf = val; #endif return count; } static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%lu\n", tunables->timer_rate); } static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val, val_round; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; val_round = jiffies_to_usecs(usecs_to_jiffies(val)); if (val != val_round) pr_warn("timer_rate not aligned to jiffy. 
Rounded up to %lu\n", val_round); tunables->timer_rate = val_round; return count; } static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%d\n", tunables->timer_slack_val); } static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtol(buf, 10, &val); if (ret < 0) return ret; tunables->timer_slack_val = val; return count; } static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%d\n", tunables->boost_val); } static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->boost_val = val; if (tunables->boost_val) { trace_cpufreq_interactive_boost("on"); if (!tunables->boosted) cpufreq_interactive_boost(tunables); } else { tunables->boostpulse_endtime = ktime_to_us(ktime_get()); trace_cpufreq_interactive_unboost("off"); } return count; } static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->boostpulse_endtime = ktime_to_us(ktime_get()) + tunables->boostpulse_duration_val; trace_cpufreq_interactive_boost("pulse"); if (!tunables->boosted) cpufreq_interactive_boost(tunables); return count; } static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%d\n", tunables->boostpulse_duration_val); } static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->boostpulse_duration_val = val; return count; } static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables, char *buf) { return sprintf(buf, "%u\n", tunables->io_is_busy); } static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables, const char *buf, size_t count) { int ret; unsigned long val; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; tunables->io_is_busy = val; return count; } /* * Create show/store routines * - sys: One governor instance for complete SYSTEM * - pol: One governor instance per struct cpufreq_policy */ #define show_gov_pol_sys(file_name) \ static ssize_t show_##file_name##_gov_sys \ (struct kobject *kobj, struct attribute *attr, char *buf) \ { \ return show_##file_name(common_tunables, buf); \ } \ \ static ssize_t show_##file_name##_gov_pol \ (struct cpufreq_policy *policy, char *buf) \ { \ return show_##file_name(policy->governor_data, buf); \ } #define store_gov_pol_sys(file_name) \ static ssize_t store_##file_name##_gov_sys \ (struct kobject *kobj, struct attribute *attr, const char *buf, \ size_t count) \ { \ return store_##file_name(common_tunables, buf, count); \ } \ \ static ssize_t store_##file_name##_gov_pol \ (struct cpufreq_policy *policy, const char *buf, size_t count) \ { \ return store_##file_name(policy->governor_data, buf, count); \ } #define show_store_gov_pol_sys(file_name) \ show_gov_pol_sys(file_name); \ store_gov_pol_sys(file_name) show_store_gov_pol_sys(target_loads); show_store_gov_pol_sys(above_hispeed_delay); show_store_gov_pol_sys(hispeed_freq); show_store_gov_pol_sys(go_hispeed_load); show_store_gov_pol_sys(min_sample_time); show_store_gov_pol_sys(timer_rate); 
show_store_gov_pol_sys(timer_slack); show_store_gov_pol_sys(boost); store_gov_pol_sys(boostpulse); show_store_gov_pol_sys(boostpulse_duration); show_store_gov_pol_sys(io_is_busy); #define gov_sys_attr_rw(_name) \ static struct global_attr _name##_gov_sys = \ __ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys) #define gov_pol_attr_rw(_name) \ static struct freq_attr _name##_gov_pol = \ __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol) #define gov_sys_pol_attr_rw(_name) \ gov_sys_attr_rw(_name); \ gov_pol_attr_rw(_name) gov_sys_pol_attr_rw(target_loads); gov_sys_pol_attr_rw(above_hispeed_delay); gov_sys_pol_attr_rw(hispeed_freq); gov_sys_pol_attr_rw(go_hispeed_load); gov_sys_pol_attr_rw(min_sample_time); gov_sys_pol_attr_rw(timer_rate); gov_sys_pol_attr_rw(timer_slack); gov_sys_pol_attr_rw(boost); gov_sys_pol_attr_rw(boostpulse_duration); gov_sys_pol_attr_rw(io_is_busy); static struct global_attr boostpulse_gov_sys = __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys); static struct freq_attr boostpulse_gov_pol = __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol); /* One Governor instance for entire system */ static struct attribute *interactive_attributes_gov_sys[] = { &target_loads_gov_sys.attr, &above_hispeed_delay_gov_sys.attr, &hispeed_freq_gov_sys.attr, &go_hispeed_load_gov_sys.attr, &min_sample_time_gov_sys.attr, &timer_rate_gov_sys.attr, &timer_slack_gov_sys.attr, &boost_gov_sys.attr, &boostpulse_gov_sys.attr, &boostpulse_duration_gov_sys.attr, &io_is_busy_gov_sys.attr, NULL, }; static struct attribute_group interactive_attr_group_gov_sys = { .attrs = interactive_attributes_gov_sys, .name = "interactive", }; /* Per policy governor instance */ static struct attribute *interactive_attributes_gov_pol[] = { &target_loads_gov_pol.attr, &above_hispeed_delay_gov_pol.attr, &hispeed_freq_gov_pol.attr, &go_hispeed_load_gov_pol.attr, &min_sample_time_gov_pol.attr, &timer_rate_gov_pol.attr, &timer_slack_gov_pol.attr, &boost_gov_pol.attr, &boostpulse_gov_pol.attr, &boostpulse_duration_gov_pol.attr, &io_is_busy_gov_pol.attr, NULL, }; static struct attribute_group interactive_attr_group_gov_pol = { .attrs = interactive_attributes_gov_pol, .name = "interactive", }; static struct attribute_group *get_sysfs_attr(void) { if (have_governor_per_policy()) return &interactive_attr_group_gov_pol; else return &interactive_attr_group_gov_sys; } static int cpufreq_interactive_idle_notifier(struct notifier_block *nb, unsigned long val, void *data) { if (val == IDLE_END) cpufreq_interactive_idle_end(); return 0; } static struct notifier_block cpufreq_interactive_idle_nb = { .notifier_call = cpufreq_interactive_idle_notifier, }; static int cpufreq_governor_interactive(struct cpufreq_policy *policy, unsigned int event) { int rc; unsigned int j; struct cpufreq_interactive_cpuinfo *pcpu; struct cpufreq_frequency_table *freq_table; struct cpufreq_interactive_tunables *tunables; unsigned long flags; if (have_governor_per_policy()) tunables = policy->governor_data; else tunables = common_tunables; WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT)); switch (event) { case CPUFREQ_GOV_POLICY_INIT: if (have_governor_per_policy()) { WARN_ON(tunables); } else if (tunables) { tunables->usage_count++; policy->governor_data = tunables; return 0; } tunables = kzalloc(sizeof(*tunables), GFP_KERNEL); if (!tunables) { pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__); return -ENOMEM; } tunables->usage_count = 1; tunables->above_hispeed_delay = default_above_hispeed_delay; 
tunables->nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay); tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD; tunables->target_loads = default_target_loads; tunables->ntarget_loads = ARRAY_SIZE(default_target_loads); tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME; tunables->timer_rate = DEFAULT_TIMER_RATE; tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME; tunables->timer_slack_val = DEFAULT_TIMER_SLACK; spin_lock_init(&tunables->target_loads_lock); spin_lock_init(&tunables->above_hispeed_delay_lock); policy->governor_data = tunables; if (!have_governor_per_policy()) { common_tunables = tunables; WARN_ON(cpufreq_get_global_kobject()); } rc = sysfs_create_group(get_governor_parent_kobj(policy), get_sysfs_attr()); if (rc) { kfree(tunables); policy->governor_data = NULL; if (!have_governor_per_policy()) { common_tunables = NULL; cpufreq_put_global_kobject(); } return rc; } if (!policy->governor->initialized) { idle_notifier_register(&cpufreq_interactive_idle_nb); cpufreq_register_notifier(&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } break; case CPUFREQ_GOV_POLICY_EXIT: if (!--tunables->usage_count) { if (policy->governor->initialized == 1) { cpufreq_unregister_notifier(&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); idle_notifier_unregister(&cpufreq_interactive_idle_nb); } sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr()); if (!have_governor_per_policy()) cpufreq_put_global_kobject(); kfree(tunables); common_tunables = NULL; } policy->governor_data = NULL; break; case CPUFREQ_GOV_START: mutex_lock(&gov_lock); freq_table = cpufreq_frequency_get_table(policy->cpu); if (tunables) { if (!tunables->hispeed_freq) tunables->hispeed_freq = policy->max; } for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); pcpu->policy = policy; pcpu->target_freq = policy->cur; pcpu->freq_table = freq_table; pcpu->floor_freq = pcpu->target_freq; pcpu->pol_floor_val_time = ktime_to_us(ktime_get()); pcpu->loc_floor_val_time = pcpu->pol_floor_val_time; pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time; pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time; down_write(&pcpu->enable_sem); del_timer_sync(&pcpu->cpu_timer); del_timer_sync(&pcpu->cpu_slack_timer); cpufreq_interactive_timer_start(tunables, j); pcpu->governor_enabled = 1; up_write(&pcpu->enable_sem); } mutex_unlock(&gov_lock); break; case CPUFREQ_GOV_STOP: mutex_lock(&gov_lock); for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); down_write(&pcpu->enable_sem); pcpu->governor_enabled = 0; del_timer_sync(&pcpu->cpu_timer); del_timer_sync(&pcpu->cpu_slack_timer); up_write(&pcpu->enable_sem); } mutex_unlock(&gov_lock); break; case CPUFREQ_GOV_LIMITS: if (policy->max < policy->cur) __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); else if (policy->min > policy->cur) __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); for_each_cpu(j, policy->cpus) { pcpu = &per_cpu(cpuinfo, j); down_read(&pcpu->enable_sem); if (pcpu->governor_enabled == 0) { up_read(&pcpu->enable_sem); continue; } spin_lock_irqsave(&pcpu->target_freq_lock, flags); if (policy->max < pcpu->target_freq) pcpu->target_freq = policy->max; else if (policy->min > pcpu->target_freq) pcpu->target_freq = policy->min; spin_unlock_irqrestore(&pcpu->target_freq_lock, flags); up_read(&pcpu->enable_sem); } break; } return 0; } #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE static #endif struct cpufreq_governor cpufreq_gov_interactive = { .name = "interactive", .governor = 
cpufreq_governor_interactive, .max_transition_latency = 10000000, .owner = THIS_MODULE, }; static void cpufreq_interactive_nop_timer(unsigned long data) { } static int __init cpufreq_interactive_init(void) { unsigned int i; struct cpufreq_interactive_cpuinfo *pcpu; struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; /* Initalize per-cpu timers */ for_each_possible_cpu(i) { pcpu = &per_cpu(cpuinfo, i); init_timer_deferrable(&pcpu->cpu_timer); pcpu->cpu_timer.function = cpufreq_interactive_timer; pcpu->cpu_timer.data = i; init_timer(&pcpu->cpu_slack_timer); pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer; spin_lock_init(&pcpu->load_lock); spin_lock_init(&pcpu->target_freq_lock); init_rwsem(&pcpu->enable_sem); } spin_lock_init(&speedchange_cpumask_lock); mutex_init(&gov_lock); speedchange_task = kthread_create(cpufreq_interactive_speedchange_task, NULL, "cfinteractive"); if (IS_ERR(speedchange_task)) return PTR_ERR(speedchange_task); sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param); get_task_struct(speedchange_task); /* NB: wake up so the thread does not look hung to the freezer */ wake_up_process(speedchange_task); return cpufreq_register_governor(&cpufreq_gov_interactive); } #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE fs_initcall(cpufreq_interactive_init); #else module_init(cpufreq_interactive_init); #endif static void __exit cpufreq_interactive_exit(void) { cpufreq_unregister_governor(&cpufreq_gov_interactive); kthread_stop(speedchange_task); put_task_struct(speedchange_task); } module_exit(cpufreq_interactive_exit); MODULE_AUTHOR("Mike Chan <[email protected]>"); MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for " "Latency sensitive workloads"); MODULE_LICENSE("GPL");
430211.c
/* Usage: find path name 实现在给定路径的文件夹树中找到全部匹配name的 */ #include "kernel/types.h" #include "kernel/stat.h" #include "user/user.h" #include "kernel/fs.h" #include "kernel/fcntl.h" // 返回path中最后一个斜杠之后的元素 char *getLastElem(char *p){ char *t=p; char *last=0; while(*t!='\0'){ if(*t=='/'){ last=t; } t++; } // 也可能没有/,那么p指向的文件名 if(last==0){ return p; } return last+1; } void find(char* path,char *name){ // printf("find (%s,,fmt(path):%s,fmtlen:%d,%s)\n",path,fmtname(path),strlen(fmtname(path)),name); char buf[512], *p=0; int fd; struct dirent de; struct stat st; if((fd = open(path, O_RDONLY)) < 0){ fprintf(2, "ls: cannot open %s\n", path); return; } if(fstat(fd, &st) < 0){ fprintf(2, "ls: cannot stat %s\n", path); close(fd); return; } switch(st.type){ case T_FILE: p=getLastElem(path); if(strcmp(p,name)==0) printf("%s\n",path); break; case T_DIR: if(strlen(path) + 1 + DIRSIZ + 1 > sizeof buf){ printf("ls: path too long\n"); break; } strcpy(buf, path); p = buf+strlen(buf); *p++ = '/'; while(read(fd, &de, sizeof(de)) == sizeof(de)){ if(de.inum == 0) continue; // 计算出当前dirent的路径 int t=strlen(de.name)>DIRSIZ?DIRSIZ:strlen(de.name); memmove(p, de.name, t); p[t] = 0; // p让buf这个字符串现在是当前dirent的完整路径 if(stat(buf, &st) < 0){ printf("ls: cannot stat %s\n", buf); continue; } if(strcmp(de.name,".")==0||strcmp(de.name,"..")==0){ continue; } find(buf,name); } break; } close(fd); } int main(int argc,char* argv[]){ // 这里为了简单,假定一定按照usage使用 // 实际上如果只有一个参数,那么搜索路径为当前路径 if(argc<3){ exit(0); } find(argv[1],argv[2]); exit(0); }
670043.c
/* Qn : Write a c program that takes student ID as input and then finds the sum and average of those digits which are greater than 1 and also divisible by 1 and itself. Sample input Sample output 202012047 Sum is 17, Average is : 3.40 */ /* Author : Arnob Mahmud mail : [email protected] */ #include <stdio.h> int main(int argc, char const *argv[]) { int id, rem, count = 0, sum = 0; float avg; scanf("%d", &id); while (id != 0) { rem = id % 10; if (rem > 1) { count++; sum += rem; } avg = sum / (float)count; id /= 10; } printf("Sum is %d", sum); printf("\nAverage is %.2f", avg); return 0; }
657705.c
#include<stdio.h> /* at = Arrival Time bt = Burst Time rt = Response Time time_q = Time Quantum */ int at=0,bt[100],rt[100],temp[100]; // Global variables float wait_time=0,turn_time=0; void main() { int c,j,n,time,r,flag=0,time_q,ltt,i,wt=0; printf("Enter no.of process:"); scanf("%d",&n); // no. processes r=n; for(c=0; c<n; c++) // Take inputs from the user { printf("Enter burst time of p%d: \t",c+1); scanf("%d",&bt[c]); rt[c]=bt[c]; temp[c]=bt[c]; printf("\n"); } printf("Enter time quantum:\t"); scanf("%d",&time_q); // Quantuam time printf("\n\n\tprocess\tAT\tTAT\tWT\torder\n\n"); for(time=0,c=0; r!=0;) { if(rt[c]<=time_q && rt[c]>0) // rt[] have the same values of burst time S { time=time+rt[c]; rt[c]=0; flag=1; } else if (rt[c]>0) { rt[c]=rt[c]-time_q; time=time+time_q; } if(rt[c]==0 && flag==1) { wt=0; wt = time-at-bt[c]; r--; printf("\tP%d\t%d\t%d\t%d\t%d\n",c+1,at,time-at,wt,c+1); ltt=time-at; wait_time=wait_time+time-at-bt[c]; turn_time=turn_time+time-at; flag=0; } if( c == n-1) c=0; else if(at<=time) c++; else c=0; } j=0; printf("\n\n\n"); printf("Gantt Chart "); printf("\n\n\n"); printf("\t"); /********************************************************/ for (int i=at;i<=time;i++){ printf("--"); } printf("--"); printf("\n"); printf("\t"); /****************************************************************/ for(i=at; i<time;) { if(bt[j]>=time_q) { printf("P%d |\t",j+1); i+=time_q; bt[j]=bt[j]-time_q; } else if(bt[j]>0) { printf("P%d |\t",j+1); i+=bt[j]; bt[j]=0; } j++; if(j>=n) { j=0; } } printf("\n"); j=0; printf("\t"); /*****************/ for (int i=at;i<=time;i++){ printf("--"); } printf("--"); printf("\n"); printf("\t "); /*********************/ for(i=at; i<time;) { if(temp[j]>=time_q) { printf(" "); printf(" %d\t",i+time_q); i+=time_q; temp[j]=temp[j]-time_q; } else if(temp[j]>0) { printf(" "); printf("%d\t",i+temp[j]); i+=temp[j]; temp[j]=0; } j++; if(j>=n) { j=0; } } printf("\n\n\n"); printf("\nAverage_waiting_time=%.2f\n",wait_time/n); printf("Average_turn_around_time=%.2f\n",turn_time/n); printf("\n\n"); }
779036.c
/***************************************************************************** Copyright (c) 2011, Intel Corp. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************** * Contents: Native high-level C interface to LAPACK function sspsvx * Author: Intel Corporation * Generated November, 2011 *****************************************************************************/ #include "lapacke_utils.h" lapack_int LAPACKE_sspsvx( int matrix_order, char fact, char uplo, lapack_int n, lapack_int nrhs, const float* ap, float* afp, lapack_int* ipiv, const float* b, lapack_int ldb, float* x, lapack_int ldx, float* rcond, float* ferr, float* berr ) { lapack_int info = 0; lapack_int* iwork = NULL; float* work = NULL; if( matrix_order != LAPACK_COL_MAJOR && matrix_order != LAPACK_ROW_MAJOR ) { LAPACKE_xerbla( "LAPACKE_sspsvx", -1 ); return -1; } #ifndef LAPACK_DISABLE_NAN_CHECK /* Optionally check input matrices for NaNs */ if( LAPACKE_lsame( fact, 'f' ) ) { if( LAPACKE_ssp_nancheck( n, afp ) ) { return -7; } } if( LAPACKE_ssp_nancheck( n, ap ) ) { return -6; } if( LAPACKE_sge_nancheck( matrix_order, n, nrhs, b, ldb ) ) { return -9; } #endif /* Allocate memory for working array(s) */ iwork = (lapack_int*)LAPACKE_malloc( sizeof(lapack_int) * MAX(1,n) ); if( iwork == NULL ) { info = LAPACK_WORK_MEMORY_ERROR; goto exit_level_0; } work = (float*)LAPACKE_malloc( sizeof(float) * MAX(1,3*n) ); if( work == NULL ) { info = LAPACK_WORK_MEMORY_ERROR; goto exit_level_1; } /* Call middle-level interface */ info = LAPACKE_sspsvx_work( matrix_order, fact, uplo, n, nrhs, ap, afp, ipiv, b, ldb, x, ldx, rcond, ferr, berr, work, iwork ); /* Release memory and exit */ LAPACKE_free( work ); exit_level_1: LAPACKE_free( iwork ); exit_level_0: if( info == LAPACK_WORK_MEMORY_ERROR ) { LAPACKE_xerbla( "LAPACKE_sspsvx", info ); } return info; }
824918.c
/* radare - LGPL - 2015-2019 - pancake */ #include <r_types.h> #include <r_util.h> #include <r_lib.h> #include <r_bin.h> #include <string.h> /* Start End Length Description 0x0 0x3 4 File offset to start of Text0 0x04 0x1b 24 File offsets for Text1..6 0x1c 0x47 44 File offsets for Data0..10 0x48 0x4B 4 Loading address for Text0 0x4C 0x8F 68 Loading addresses for Text1..6, Data0..10 0x90 0xD7 72 Section sizes for Text0..6, Data0..10 0xD8 0xDB 4 BSS address 0xDC 0xDF 4 BSS size 0xE0 0xE3 4 Entry point 0xE4 0xFF padding */ #define N_TEXT 7 #define N_DATA 11 R_PACKED ( typedef struct { ut32 text_paddr[N_TEXT]; ut32 data_paddr[N_DATA]; ut32 text_vaddr[N_TEXT]; ut32 data_vaddr[N_DATA]; ut32 text_size[N_TEXT]; ut32 data_size[N_DATA]; ut32 bss_addr; ut32 bss_size; ut32 entrypoint; ut32 padding[10]; // 0x100 -- start of data section }) DolHeader; static bool check_buffer(RBuffer *buf) { ut8 tmp[6]; int r = r_buf_read_at (buf, 0, tmp, sizeof (tmp)); bool one = r == sizeof (tmp) && !memcmp (tmp, "\x00\x00\x01\x00\x00\x00", sizeof (tmp)); if (one) { int r = r_buf_read_at (buf, 6, tmp, sizeof (tmp)); if (r != 6) { return false; } return sizeof (tmp) && !memcmp (tmp, "\x00\x00\x00\x00\x00\x00", sizeof (tmp)); } return false; } static bool load_buffer(RBinFile *bf, void **bin_obj, RBuffer *buf, ut64 loadaddr, Sdb *sdb) { if (r_buf_size (buf) < sizeof (DolHeader)) { return false; } DolHeader *dol = R_NEW0 (DolHeader); if (!dol) { return false; } char *lowername = strdup (bf->file); if (!lowername) { goto dol_err; } r_str_case (lowername, 0); char *ext = strstr (lowername, ".dol"); if (!ext || ext[4] != 0) { goto lowername_err; } free (lowername); r_buf_fread_at (bf->buf, 0, (void *) dol, "67I", 1); *bin_obj = dol; return true; lowername_err: free (lowername); dol_err: free (dol); return false; } static RList *sections(RBinFile *bf) { r_return_val_if_fail (bf && bf->o && bf->o->bin_obj, NULL); int i; RList *ret; RBinSection *s; DolHeader *dol = bf->o->bin_obj; if (!(ret = r_list_new ())) { return NULL; } /* text sections */ for (i = 0; i < N_TEXT; i++) { if (!dol->text_paddr[i] || !dol->text_vaddr[i]) { continue; } s = R_NEW0 (RBinSection); s->name = r_str_newf ("text_%d", i); s->paddr = dol->text_paddr[i]; s->vaddr = dol->text_vaddr[i]; s->size = dol->text_size[i]; s->vsize = s->size; s->perm = r_str_rwx ("r-x"); s->add = true; r_list_append (ret, s); } /* data sections */ for (i = 0; i < N_DATA; i++) { if (!dol->data_paddr[i] || !dol->data_vaddr[i]) { continue; } s = R_NEW0 (RBinSection); s->name = r_str_newf ("data_%d", i); s->paddr = dol->data_paddr[i]; s->vaddr = dol->data_vaddr[i]; s->size = dol->data_size[i]; s->vsize = s->size; s->perm = r_str_rwx ("r--"); s->add = true; r_list_append (ret, s); } /* bss section */ s = R_NEW0 (RBinSection); s->name = strdup ("bss"); s->paddr = 0; s->vaddr = dol->bss_addr; s->size = dol->bss_size; s->vsize = s->size; s->perm = r_str_rwx ("rw-"); s->add = true; r_list_append (ret, s); return ret; } static RList *entries(RBinFile *bf) { r_return_val_if_fail (bf && bf->o && bf->o->bin_obj, NULL); RList *ret = r_list_new (); RBinAddr *addr = R_NEW0 (RBinAddr); DolHeader *dol = bf->o->bin_obj; addr->vaddr = (ut64) dol->entrypoint; addr->paddr = addr->vaddr & 0xFFFF; r_list_append (ret, addr); return ret; } static RBinInfo *info(RBinFile *bf) { r_return_val_if_fail (bf && bf->buf, NULL); RBinInfo *ret = R_NEW0 (RBinInfo); if (!ret) { return NULL; } ret->file = strdup (bf->file); ret->big_endian = true; ret->type = strdup ("ROM"); ret->machine = strdup ("Nintendo Wii"); 
ret->os = strdup ("wii-ios"); ret->arch = strdup ("ppc"); ret->has_va = true; ret->bits = 32; return ret; } static ut64 baddr(RBinFile *bf) { return 0x80b00000; // XXX } RBinPlugin r_bin_plugin_dol = { .name = "dol", .desc = "Nintendo Dolphin binary format", .license = "BSD", .load_buffer = &load_buffer, .baddr = &baddr, .check_buffer = &check_buffer, .entries = &entries, .sections = &sections, .info = &info, }; #ifndef R2_PLUGIN_INCORE R_API RLibStruct radare_plugin = { .type = R_LIB_TYPE_BIN, .data = &r_bin_plugin_dol, .version = R2_VERSION }; #endif
211229.c
#include QMK_KEYBOARD_H #include "debug.h" #include "action_layer.h" #include "keymap_norwegian.h" #define BASE 0 // default layer #define BASE_MAC 1 // default layer mac #define NUMB_FUNC 2 // numbers and function keys const uint16_t PROGMEM keymaps[][MATRIX_ROWS][MATRIX_COLS] = { /* Keymap 0: Basic layer PC * * ,--------------------------------------------------. ,--------------------------------------------------. * | * | [ | ] | { | } | ~ |Mac/PC| | ^ | $ | ( | ) | < | > | @ | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | + | Q | W | E | R | T | " | | ' | Y | U | I | O | P | Å | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | / | A | S | D | F | G |------| |------| H | J | K | L | Ø | Æ | * |--------+------+------+------+------+------| ; | | = |------+------+------+------+------+--------| * | - | Z | X | C | V | B | | | | N | M | RIGHT| DOWN | UP | _ | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * |Alt/esc| ! | ? | : | TAB | | LEFT | _ | & | | |Num/fn| * `----------------------------------' `----------------------------------' * ,-------------. ,--------------. * | # | ` | |Insert| % | * ,------|------|------| |------+-------+------. * | | | ´ | | Del | | | * | Shift|Ctrl/.|------| |------| Enter |Space | * | | |GUI/, | | Bspc | | | * `--------------------' `---------------------' */ [BASE] = LAYOUT_ergodox( // left hand KC_PAST, NO_LBRC, NO_RBRC, NO_LCBR, NO_RCBR, KC_FN2, TG(1), KC_PPLS, KC_Q, KC_W, KC_E, KC_R, KC_T, NO_QUO2, KC_PSLS, KC_A, KC_S, KC_D, KC_F, KC_G, KC_PMNS, KC_Z, KC_X, KC_C, KC_V, KC_B, NO_SCLN, ALT_T(KC_ESC), KC_EXLM , NO_QUES, NO_COLN, KC_TAB, KC_HASH, KC_FN7, KC_FN6, OSM(MOD_LSFT),CTL_T(KC_DOT),GUI_T(KC_COMMA), // right hand KC_FN5, NO_DLR, NO_LPRN, NO_RPRN ,KC_FN3, KC_FN4,NO_AT, NO_APOS, KC_Y, KC_U, KC_I, KC_O, KC_P, NO_AA , KC_H, KC_J, KC_K, KC_L, NO_OSLH, NO_AE, NO_EQL, KC_N, KC_M, KC_RIGHT, KC_DOWN, KC_UP, NO_BSLS, KC_LEFT, NO_UNDS, NO_AMPR, NO_PIPE, OSL(2), KC_INSERT, KC_PERC, KC_DEL, KC_BSPC,KC_ENT,KC_SPC ), /* Keymap 1: Basic layer MACS (Same as pc, except for cmd/ctrl, which are swapped) * * ,--------------------------------------------------. ,--------------------------------------------------. * | \ | | | { | } | | | | | $ | | | < | > | @ | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | | | | | | | | | | | | | | | | * |--------+------+------+------+------+------| | | ' |------+------+------+------+------+--------| * | | | | | | |------| |------| | | | | | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | | | | | | | | | | | | | | | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | | | | |,/Ctrl| | | | ` | |Num/fn| * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. * | | ` | | | | * ,------|------|------| |------+------+------. * | | | ´ | | | | | * | |GUI/. 
|------| |------| | | * | | |crtl/,| | | | | * `--------------------' `--------------------' */ [BASE_MAC] = LAYOUT_ergodox( KC_TRNS, KC_TRNS,KC_TRNS, NO_LCBR_MAC,NO_RCBR_MAC, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS , KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, NO_GRV_MAC, KC_FN10, KC_TRNS,GUI_T(KC_DOT) , CTL_T(KC_COMMA), // right hand KC_TRNS, NO_DLR_MAC, KC_TRNS,KC_TRNS,KC_FN8, KC_FN9,NO_AT_MAC, NO_APOS_MAC, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, NO_BSLS_MAC, KC_TRNS, KC_TRNS, KC_TRNS, NO_PIPE_MAC, OSL(2), KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS ), /* Keymap 2: Number ++´ánd Fn layer * * ,--------------------------------------------------. ,--------------------------------------------------. * | | | | | | | | | | | F9 | F10 | F11 | F12 | | * |--------+------+------+------+------+-------------| |------+------+------+------+------+------+--------| * | | 8 | 7 | 6 | 5 | 9 | | | | | F5 | F6 | F7 | F8 | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | 4 | 3 | 2 | 1 | 0 |------| |------| | F1 | F2 | F3 | F4 | | * |--------+------+------+------+------+------| | | |------+------+------+------+------+--------| * | | | | | | | | | | | | END | PGDWN| PGUP | | * `--------+------+------+------+------+-------------' `-------------+------+------+------+------+--------' * | | | | | | | HOME | | | | | * `----------------------------------' `----------------------------------' * ,-------------. ,-------------. * | | | | | | * ,------|------|------| |------+------+------. * | | | | | | | | * | | |------| |------| | | * | | | | | | | | * `--------------------' `--------------------' */ [NUMB_FUNC] = LAYOUT_ergodox( NO_ASTR, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, NO_PLUS, KC_8, KC_7,KC_6,KC_5, KC_9 , KC_TRNS, NO_SLSH , KC_4 , KC_3 , KC_2 , KC_1 , KC_0, NO_MINS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, // right hand KC_TRNS, KC_TRNS, KC_F9, KC_F10, KC_F11, KC_F12, KC_TRNS, KC_TRNS, KC_TRNS, KC_F5, KC_F6 , KC_F7, KC_F8, KC_TRNS, KC_TRNS, KC_F1, KC_F2, KC_F3, KC_F4, KC_TRNS, KC_TRNS, KC_HOME, KC_TRNS, KC_END, KC_PGDN, KC_PGUP, KC_TRNS, KC_HOME, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS, KC_TRNS ) }; enum macro_id { TILDE_NO, LESS_NO, GRTR_NO, CIRC_NO, ACUT_NO, GRV_NO, LESS_NO_MAC, GRTR_NO_MAC, ACUT_NO_MAC }; const uint16_t PROGMEM fn_actions[] = { [2] = ACTION_MACRO(TILDE_NO), // Completed ~ character(pc and mac), no space needed. [3] = ACTION_MACRO(LESS_NO), // < completed on keypress down, to avoid shifting the next character if it is not released first. [4] = ACTION_MACRO(GRTR_NO), // > completed on keypress down, to avoid shifting the next character if it is not released first. [5] = ACTION_MACRO(CIRC_NO), // Completed ^ character, no space needed. [6] = ACTION_MACRO(ACUT_NO), // Completed ´ character, no space needed. [7] = ACTION_MACRO(GRV_NO), // Completed ` character, no space needed. 
[8] = ACTION_MACRO(LESS_NO_MAC), // < completed on keypress down, to avoid same button problem when typing <> quickly [9] = ACTION_MACRO(GRTR_NO_MAC), // > completed on keypress down, to avoid same button problem when typing <> quickly [10] = ACTION_MACRO(ACUT_NO_MAC), // Completed ´ character, no space needed }; const macro_t *action_get_macro(keyrecord_t *record, uint8_t id, uint8_t opt) { keyevent_t event = record->event; switch (id) { case TILDE_NO: return (event.pressed ? MACRO( D(RALT), T(RBRC), U(RALT), T(SPC), END ) : MACRO_NONE); case LESS_NO: return (event.pressed ? MACRO( T(NUBS), END ) : MACRO_NONE); case GRTR_NO: return (event.pressed ? MACRO( D(LSFT), T(NUBS), U(LSFT), END ) : MACRO_NONE); case CIRC_NO: return (event.pressed ? MACRO( D(LSFT), T(RBRC), U(LSFT), T(SPC), END ) : MACRO_NONE); case ACUT_NO: return (event.pressed ? MACRO( D(RALT), T(EQL), U(RALT), T(SPC), END ) : MACRO_NONE); case GRV_NO: return (event.pressed ? MACRO( D(LSFT), T(EQL), T(SPC), U(LSFT), END ) : MACRO_NONE); case LESS_NO_MAC: return (event.pressed ? MACRO( T(GRV), END ) : MACRO_NONE); case GRTR_NO_MAC: return (event.pressed ? MACRO( D(LSFT), T(GRV), U(LSFT), END ) : MACRO_NONE); case ACUT_NO_MAC: return (event.pressed ? MACRO( T(EQL), T(SPC), END ) : MACRO_NONE); } return MACRO_NONE; };
69552.c
/* * Copyright (c) 1994-2009 Red Hat, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* FUNCTION <<strchr>>---search for character in string INDEX strchr ANSI_SYNOPSIS #include <string.h> char * strchr(const char *<[string]>, int <[c]>); TRAD_SYNOPSIS #include <string.h> char * strchr(<[string]>, <[c]>); const char *<[string]>; int <[c]>; DESCRIPTION This function finds the first occurence of <[c]> (converted to a char) in the string pointed to by <[string]> (including the terminating null character). RETURNS Returns a pointer to the located character, or a null pointer if <[c]> does not occur in <[string]>. PORTABILITY <<strchr>> is ANSI C. <<strchr>> requires no supporting OS subroutines. QUICKREF strchr ansi pure */ #include <string.h> #include <limits.h> #include "_ansi.h" /* Nonzero if X is not aligned on a "long" boundary. */ #define UNALIGNED(X) ((long)X & (sizeof (long) - 1)) /* How many bytes are loaded each iteration of the word copy loop. */ #define LBLOCKSIZE (sizeof (long)) #if LONG_MAX == 2147483647L #define DETECTNULL(X) (((X) - 0x01010101) & ~(X) & 0x80808080) #else #if LONG_MAX == 9223372036854775807L /* Nonzero if X (a long int) contains a NULL byte. */ #define DETECTNULL(X) (((X) - 0x0101010101010101) & ~(X) & 0x8080808080808080) #else #error long int is not a 32bit or 64bit type. #endif #endif /* DETECTCHAR returns nonzero if (long)X contains the byte used to fill (long)MASK. */ #define DETECTCHAR(X,MASK) (DETECTNULL(X ^ MASK)) char * _DEFUN (strchr, (s1, i), _CONST char *s1 _AND int i) { _CONST unsigned char *s = (_CONST unsigned char *)s1; unsigned char c = i; #if !defined(PREFER_SIZE_OVER_SPEED) && !defined(__OPTIMIZE_SIZE__) unsigned long mask,j; unsigned long *aligned_addr; /* Special case for finding 0. */ if (!c) { while (UNALIGNED (s)) { if (!*s) return (char *) s; s++; } /* Operate a word at a time. */ aligned_addr = (unsigned long *) s; while (!DETECTNULL (*aligned_addr)) aligned_addr++; /* Found the end of string. */ s = (const unsigned char *) aligned_addr; while (*s) s++; return (char *) s; } /* All other bytes. 
Align the pointer, then search a long at a time. */ while (UNALIGNED (s)) { if (!*s) return NULL; if (*s == c) return (char *) s; s++; } mask = c; for (j = 8; j < LBLOCKSIZE * 8; j <<= 1) mask = (mask << j) | mask; aligned_addr = (unsigned long *) s; while (!DETECTNULL (*aligned_addr) && !DETECTCHAR (*aligned_addr, mask)) aligned_addr++; /* The block of bytes currently pointed to by aligned_addr contains either a null or the target char, or both. We catch it using the bytewise search. */ s = (unsigned char *) aligned_addr; #endif /* not PREFER_SIZE_OVER_SPEED */ while (*s && *s != c) s++; if (*s == c) return (char *)s; return NULL; }
539264.c
/* * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. The rights granted to you under the License * may not be used to create, or enable the creation or redistribution of, * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include <sys/types.h> #include <sys/malloc.h> #include <sys/proc.h> #include <sys/sysctl.h> #include <sys/syslog.h> #include <libkern/crypto/sha1.h> #include <net/if.h> #include <netinet/in.h> #include <netinet6/in6_var.h> #include <netinet/ip6.h> #include <netinet6/ip6_var.h> #include <netinet6/nd6.h> #if CONFIG_MACF #include <sys/kauth.h> #include <security/mac_framework.h> #endif SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ SYSCTL_NODE(_net_inet6, OID_AUTO, send, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPv6 Secure Neighbor Discovery"); static int nd6_send_opmode = ND6_SEND_OPMODE_DISABLED; SYSCTL_INT(_net_inet6_send, OID_AUTO, opstate, CTLFLAG_RD | CTLFLAG_LOCKED, &nd6_send_opstate, 0, "current SEND operating state"); int nd6_send_opstate = ND6_SEND_OPMODE_DISABLED; SYSCTL_INT(_net_inet6_send, OID_AUTO, opmode, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_send_opmode, 0, "configured SEND operating mode"); static int sysctl_cga_parameters SYSCTL_HANDLER_ARGS; SYSCTL_PROC(_net_inet6_send, OID_AUTO, cga_parameters, CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_cga_parameters, "S,nd6_send_nodecfg", ""); /* * The size of the buffer is sufficient to contain a public key, its size in * machine binary type for the kernel, and the CGA precalc for the global * scope. This interface is not a public API, so we don't anticipate that the * userland and the kernel will be mismatched between ILP32 and LP64. 
*/ #define SYSCTL_CGA_PARAMETERS_BUFFER_SIZE \ 2 * (sizeof (u_int16_t) + IN6_CGA_KEY_MAXSIZE) + \ sizeof (struct in6_cga_prepare) static int sysctl_cga_parameters SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1) u_int namelen; char *oldp, *newp; const char *fin; struct in6_cga_nodecfg cfg; struct iovec *iov; int error; char *buffer; u_int16_t u16; #if CONFIG_MACF kauth_cred_t cred; #endif namelen = arg2; if (namelen != 0) { log(LOG_ERR, "%s: name length err [len=%u]\n", __func__, namelen); return (EINVAL); } if (req->newlen > SYSCTL_CGA_PARAMETERS_BUFFER_SIZE) { log(LOG_ERR, "%s: input buffer size error [len=%u]\n", __func__, req->newlen); return (EINVAL); } #if CONFIG_MACF cred = kauth_cred_proc_ref(current_proc()); error = mac_system_check_info(cred, "net.inet6.send.cga_parameters"); kauth_cred_unref(&cred); if (error != 0) { log(LOG_ERR, "%s: mac_system_check_info denied.\n", __func__); return (EPERM); } #endif MALLOC(buffer, char *, SYSCTL_CGA_PARAMETERS_BUFFER_SIZE, M_IP6CGA, M_WAITOK); if (buffer == NULL) { log(LOG_ERR, "%s: could not allocate marshaling buffer.\n", __func__); return (ENOMEM); } in6_cga_node_lock(); if (req->oldptr != USER_ADDR_NULL && req->oldlen > 0) { oldp = buffer; fin = &buffer[SYSCTL_CGA_PARAMETERS_BUFFER_SIZE]; if (req->oldlen < SYSCTL_CGA_PARAMETERS_BUFFER_SIZE) fin = &buffer[req->oldlen]; in6_cga_query(&cfg); iov = &cfg.cga_pubkey; if (iov->iov_len > 0) { VERIFY(iov->iov_len < UINT16_MAX); if (&oldp[sizeof (cfg.cga_prepare)] <= fin) bcopy(&cfg.cga_prepare, oldp, sizeof (cfg.cga_prepare)); oldp += sizeof (cfg.cga_prepare); if (&oldp[sizeof (u16)] < fin) { u16 = (u_int16_t) iov->iov_len; bcopy(&u16, oldp, sizeof (u16)); } oldp += sizeof (u16); if (&oldp[iov->iov_len] < fin) bcopy(iov->iov_base, oldp, iov->iov_len); oldp += iov->iov_len; if (oldp > fin) { req->oldlen = oldp - buffer; log(LOG_ERR, "%s: marshalled data too large.\n", __func__); error = ENOMEM; goto done; } } error = SYSCTL_OUT(req, buffer, oldp - buffer); if (error) goto done; } if (req->newptr == USER_ADDR_NULL) goto done; error = proc_suser(current_proc()); if (error) goto done; if (req->newlen == 0) { in6_cga_stop(); nd6_send_opstate = ND6_SEND_OPMODE_DISABLED; goto done; } error = SYSCTL_IN(req, buffer, req->newlen); if (error) goto done; newp = buffer; fin = &buffer[req->newlen]; bzero(&cfg, sizeof cfg); if (&newp[sizeof (cfg.cga_prepare)] <= fin) bcopy(newp, &cfg.cga_prepare, sizeof (cfg.cga_prepare)); newp += sizeof (cfg.cga_prepare); iov = &cfg.cga_privkey; if (&newp[sizeof (u16)] < fin) { bcopy(newp, &u16, sizeof (u16)); iov->iov_len = u16; if (iov->iov_len > IN6_CGA_KEY_MAXSIZE) { error = EINVAL; goto done; } } newp += sizeof (u16); iov->iov_base = newp; newp += iov->iov_len; iov = &cfg.cga_pubkey; if (&newp[sizeof (u16)] < fin) { bcopy(newp, &u16, sizeof (u16)); iov->iov_len = u16; if (iov->iov_len > IN6_CGA_KEY_MAXSIZE) { error = EINVAL; goto done; } } newp += sizeof (u16); iov->iov_base = newp; newp += iov->iov_len; if (newp > fin) { log(LOG_ERR, "%s: input too large [octets=%ld].\n", __func__, newp - fin); error = ENOMEM; goto done; } error = in6_cga_start(&cfg); if (!error) nd6_send_opstate = nd6_send_opmode; else log(LOG_ERR, "%s: in6_cga_start error=%d.\n", __func__, error); done: in6_cga_node_unlock(); FREE(buffer, M_IP6CGA); return (error); } /* End of file */
159386.c
/*------------------------------------------------------------------------- * * clog.c * PostgreSQL transaction-commit-log manager * * This module replaces the old "pg_log" access code, which treated pg_log * essentially like a relation, in that it went through the regular buffer * manager. The problem with that was that there wasn't any good way to * recycle storage space for transactions so old that they'll never be * looked up again. Now we use specialized access code so that the commit * log can be broken into relatively small, independent segments. * * XLOG interactions: this module generates an XLOG record whenever a new * CLOG page is initialized to zeroes. Other writes of CLOG come from * recording of transaction commit or abort in xact.c, which generates its * own XLOG records for these events and will re-perform the status update * on redo; so we need make no additional XLOG entry here. For synchronous * transaction commits, the XLOG is guaranteed flushed through the XLOG commit * record before we are called to log a commit, so the WAL rule "write xlog * before data" is satisfied automatically. However, for async commits we * must track the latest LSN affecting each CLOG page, so that we can flush * XLOG that far and satisfy the WAL rule. We don't have to worry about this * for aborts (whether sync or async), since the post-crash assumption would * be that such transactions failed anyway. * * Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.55 2010/01/02 16:57:35 momjian Exp $ * *------------------------------------------------------------------------- */ #include "postgres.h" #include "access/clog.h" #include "access/slru.h" #include "access/transam.h" #include "pg_trace.h" #include "postmaster/bgwriter.h" /* * Defines for CLOG page sizes. A page is the same BLCKSZ as is used * everywhere else in Postgres. * * Note: because TransactionIds are 32 bits and wrap around at 0xFFFFFFFF, * CLOG page numbering also wraps around at 0xFFFFFFFF/CLOG_XACTS_PER_PAGE, * and CLOG segment numbering at 0xFFFFFFFF/CLOG_XACTS_PER_SEGMENT. We need * take no explicit notice of that fact in this module, except when comparing * segment and page numbers in TruncateCLOG (see CLOGPagePrecedes). 
*/ /* We need two bits per xact, so four xacts fit in a byte */ #define CLOG_BITS_PER_XACT 2 #define CLOG_XACTS_PER_BYTE 4 #define CLOG_XACTS_PER_PAGE (BLCKSZ * CLOG_XACTS_PER_BYTE) #define CLOG_XACT_BITMASK ((1 << CLOG_BITS_PER_XACT) - 1) #define TransactionIdToPage(xid) ((xid) / (TransactionId) CLOG_XACTS_PER_PAGE) #define TransactionIdToPgIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) #define TransactionIdToByte(xid) (TransactionIdToPgIndex(xid) / CLOG_XACTS_PER_BYTE) #define TransactionIdToBIndex(xid) ((xid) % (TransactionId) CLOG_XACTS_PER_BYTE) /* We store the latest async LSN for each group of transactions */ #define CLOG_XACTS_PER_LSN_GROUP 32 /* keep this a power of 2 */ #define CLOG_LSNS_PER_PAGE (CLOG_XACTS_PER_PAGE / CLOG_XACTS_PER_LSN_GROUP) #define GetLSNIndex(slotno, xid) ((slotno) * CLOG_LSNS_PER_PAGE + \ ((xid) % (TransactionId) CLOG_XACTS_PER_PAGE) / CLOG_XACTS_PER_LSN_GROUP) /* * Link to shared-memory data structures for CLOG control */ static SlruCtlData ClogCtlData; #define ClogCtl (&ClogCtlData) static int ZeroCLOGPage(int pageno, bool writeXlog); static bool CLOGPagePrecedes(int page1, int page2); static void WriteZeroPageXlogRec(int pageno); static void WriteTruncateXlogRec(int pageno); static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids, TransactionId *subxids, XidStatus status, XLogRecPtr lsn, int pageno); static void TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, int slotno); static void set_status_by_pages(int nsubxids, TransactionId *subxids, XidStatus status, XLogRecPtr lsn); /* * TransactionIdSetTreeStatus * * Record the final state of transaction entries in the commit log for * a transaction and its subtransaction tree. Take care to ensure this is * efficient, and as atomic as possible. * * xid is a single xid to set status for. This will typically be * the top level transactionid for a top level commit or abort. It can * also be a subtransaction when we record transaction aborts. * * subxids is an array of xids of length nsubxids, representing subtransactions * in the tree of xid. In various cases nsubxids may be zero. * * lsn must be the WAL location of the commit record when recording an async * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the * caller guarantees the commit record is already flushed in that case. It * should be InvalidXLogRecPtr for abort cases, too. * * In the commit case, atomicity is limited by whether all the subxids are in * the same CLOG page as xid. If they all are, then the lock will be grabbed * only once, and the status will be set to committed directly. Otherwise * we must * 1. set sub-committed all subxids that are not on the same page as the * main xid * 2. atomically set committed the main xid and the subxids on the same page * 3. go over the first bunch again and set them committed * Note that as far as concurrent checkers are concerned, main transaction * commit as a whole is still atomic. * * Example: * TransactionId t commits and has subxids t1, t2, t3, t4 * t is on page p1, t1 is also on p1, t2 and t3 are on p2, t4 is on p3 * 1. update pages2-3: * page2: set t2,t3 as sub-committed * page3: set t4 as sub-committed * 2. update page1: * set t1 as sub-committed, * then set t as committed, then set t1 as committed * 3. update pages2-3: * page2: set t2,t3 as committed * page3: set t4 as committed * * NB: this is a low-level routine and is NOT the preferred entry point * for most uses; functions in transam.c are the intended callers. 
 *
 * XXX Think about issuing FADVISE_WILLNEED on pages that we will need,
 * but aren't yet in cache, as well as hinting pages not to fall out of
 * cache yet.
 */
void
TransactionIdSetTreeStatus(TransactionId xid, int nsubxids,
					TransactionId *subxids, XidStatus status, XLogRecPtr lsn)
{
	int			pageno = TransactionIdToPage(xid);		/* get page of parent */
	int			i;

	Assert(status == TRANSACTION_STATUS_COMMITTED ||
		   status == TRANSACTION_STATUS_ABORTED);

	/*
	 * See how many subxids, if any, are on the same page as the parent, if
	 * any.
	 */
	for (i = 0; i < nsubxids; i++)
	{
		if (TransactionIdToPage(subxids[i]) != pageno)
			break;
	}

	/*
	 * Do all items fit on a single page?
	 */
	if (i == nsubxids)
	{
		/*
		 * Set the parent and all subtransactions in a single call
		 */
		TransactionIdSetPageStatus(xid, nsubxids, subxids, status, lsn,
								   pageno);
	}
	else
	{
		int			nsubxids_on_first_page = i;

		/*
		 * If this is a commit then we care about doing this correctly (i.e.
		 * using the subcommitted intermediate status). By here, we know
		 * we're updating more than one page of clog, so we must mark entries
		 * that are *not* on the first page so that they show as subcommitted
		 * before we then return to update the status to fully committed.
		 *
		 * To avoid touching the first page twice, skip marking subcommitted
		 * for the subxids on that first page.
		 */
		if (status == TRANSACTION_STATUS_COMMITTED)
			set_status_by_pages(nsubxids - nsubxids_on_first_page,
								subxids + nsubxids_on_first_page,
								TRANSACTION_STATUS_SUB_COMMITTED, lsn);

		/*
		 * Now set the parent and subtransactions on same page as the parent,
		 * if any
		 */
		pageno = TransactionIdToPage(xid);
		TransactionIdSetPageStatus(xid, nsubxids_on_first_page, subxids,
								   status, lsn, pageno);

		/*
		 * Now work through the rest of the subxids one clog page at a time,
		 * starting from the second page onwards, like we did above.
		 */
		set_status_by_pages(nsubxids - nsubxids_on_first_page,
							subxids + nsubxids_on_first_page,
							status, lsn);
	}
}

/*
 * Helper for TransactionIdSetTreeStatus: set the status for a bunch of
 * transactions, chunking in the separate CLOG pages involved. We never
 * pass the whole transaction tree to this function, only subtransactions
 * that are on different pages to the top level transaction id.
 */
static void
set_status_by_pages(int nsubxids, TransactionId *subxids,
					XidStatus status, XLogRecPtr lsn)
{
	int			pageno = TransactionIdToPage(subxids[0]);
	int			offset = 0;
	int			i = 0;

	while (i < nsubxids)
	{
		int			num_on_page = 0;

		/*
		 * Test the array bound before reading subxids[i]; checking the page
		 * number first would read one element past the end of the array on
		 * the final iteration.
		 */
		while (i < nsubxids && TransactionIdToPage(subxids[i]) == pageno)
		{
			num_on_page++;
			i++;
		}
		TransactionIdSetPageStatus(InvalidTransactionId,
								   num_on_page, subxids + offset,
								   status, lsn, pageno);
		offset = i;
		/* Likewise, only fetch the next page number if any subxids remain */
		if (i < nsubxids)
			pageno = TransactionIdToPage(subxids[offset]);
	}
}

/*
 * Record the final state of transaction entries in the commit log for
 * all entries on a single page. Atomic only on this page.
 *
 * Otherwise API is same as TransactionIdSetTreeStatus()
 */
static void
TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
						   TransactionId *subxids, XidStatus status,
						   XLogRecPtr lsn, int pageno)
{
	int			slotno;
	int			i;

	Assert(status == TRANSACTION_STATUS_COMMITTED ||
		   status == TRANSACTION_STATUS_ABORTED ||
		   (status == TRANSACTION_STATUS_SUB_COMMITTED && !TransactionIdIsValid(xid)));

	LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

	/*
	 * If we're doing an async commit (ie, lsn is valid), then we must wait
	 * for any active write on the page slot to complete. Otherwise our
	 * update could reach disk in that write, which will not do since we
	 * mustn't let it reach disk until we've done the appropriate WAL flush.
* But when lsn is invalid, it's OK to scribble on a page while it is * write-busy, since we don't care if the update reaches disk sooner than * we think. */ slotno = SimpleLruReadPage(ClogCtl, pageno, XLogRecPtrIsInvalid(lsn), xid); /* * Set the main transaction id, if any. * * If we update more than one xid on this page while it is being written * out, we might find that some of the bits go to disk and others don't. * If we are updating commits on the page with the top-level xid that * could break atomicity, so we subcommit the subxids first before we mark * the top-level commit. */ if (TransactionIdIsValid(xid)) { /* Subtransactions first, if needed ... */ if (status == TRANSACTION_STATUS_COMMITTED) { for (i = 0; i < nsubxids; i++) { Assert(ClogCtl->shared->page_number[slotno] == TransactionIdToPage(subxids[i])); TransactionIdSetStatusBit(subxids[i], TRANSACTION_STATUS_SUB_COMMITTED, lsn, slotno); } } /* ... then the main transaction */ TransactionIdSetStatusBit(xid, status, lsn, slotno); } /* Set the subtransactions */ for (i = 0; i < nsubxids; i++) { Assert(ClogCtl->shared->page_number[slotno] == TransactionIdToPage(subxids[i])); TransactionIdSetStatusBit(subxids[i], status, lsn, slotno); } ClogCtl->shared->page_dirty[slotno] = true; LWLockRelease(CLogControlLock); } /* * Sets the commit status of a single transaction. * * Must be called with CLogControlLock held */ static void TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, int slotno) { int byteno = TransactionIdToByte(xid); int bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT; char *byteptr; char byteval; char curval; byteptr = ClogCtl->shared->page_buffer[slotno] + byteno; curval = (*byteptr >> bshift) & CLOG_XACT_BITMASK; /* * When replaying transactions during recovery we still need to perform * the two phases of subcommit and then commit. However, some transactions * are already correctly marked, so we just treat those as a no-op which * allows us to keep the following Assert as restrictive as possible. */ if (InRecovery && status == TRANSACTION_STATUS_SUB_COMMITTED && curval == TRANSACTION_STATUS_COMMITTED) return; /* * Current state change should be from 0 or subcommitted to target state * or we should already be there when replaying changes during recovery. */ Assert(curval == 0 || (curval == TRANSACTION_STATUS_SUB_COMMITTED && status != TRANSACTION_STATUS_IN_PROGRESS) || curval == status); /* note this assumes exclusive access to the clog page */ byteval = *byteptr; byteval &= ~(((1 << CLOG_BITS_PER_XACT) - 1) << bshift); byteval |= (status << bshift); *byteptr = byteval; /* * Update the group LSN if the transaction completion LSN is higher. * * Note: lsn will be invalid when supplied during InRecovery processing, * so we don't need to do anything special to avoid LSN updates during * recovery. After recovery completes the next clog change will set the * LSN correctly. */ if (!XLogRecPtrIsInvalid(lsn)) { int lsnindex = GetLSNIndex(slotno, xid); if (XLByteLT(ClogCtl->shared->group_lsn[lsnindex], lsn)) ClogCtl->shared->group_lsn[lsnindex] = lsn; } } /* * Interrogate the state of a transaction in the commit log. * * Aside from the actual commit status, this function returns (into *lsn) * an LSN that is late enough to be able to guarantee that if we flush up to * that LSN then we will have flushed the transaction's commit record to disk. * The result is not necessarily the exact LSN of the transaction's commit * record! 
For example, for long-past transactions (those whose clog pages
 * already migrated to disk), we'll return InvalidXLogRecPtr. Also, because
 * we group transactions on the same clog page to conserve storage, we might
 * return the LSN of a later transaction that falls into the same group.
 *
 * NB: this is a low-level routine and is NOT the preferred entry point
 * for most uses; TransactionLogFetch() in transam.c is the intended caller.
 */
XidStatus
TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
{
	int			pageno = TransactionIdToPage(xid);
	int			byteno = TransactionIdToByte(xid);
	int			bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT;
	int			slotno;
	int			lsnindex;
	char	   *byteptr;
	XidStatus	status;

	/* lock is acquired by SimpleLruReadPage_ReadOnly */

	slotno = SimpleLruReadPage_ReadOnly(ClogCtl, pageno, xid);
	byteptr = ClogCtl->shared->page_buffer[slotno] + byteno;

	status = (*byteptr >> bshift) & CLOG_XACT_BITMASK;

	lsnindex = GetLSNIndex(slotno, xid);
	*lsn = ClogCtl->shared->group_lsn[lsnindex];

	LWLockRelease(CLogControlLock);

	return status;
}

/*
 * Find the next lowest transaction with a logged or recorded status,
 * i.e. one whose status is not the default (0), in-progress.
 */
bool
CLOGScanForPrevStatus(TransactionId *indexXid,
					  XidStatus *status)
{
	TransactionId highXid;
	int			pageno;
	TransactionId lowXid;
	int			slotno;
	int			byteno;
	int			bshift;
	TransactionId xid;
	char	   *byteptr;

	*status = TRANSACTION_STATUS_IN_PROGRESS;	/* set it to something */

	if ((*indexXid) == InvalidTransactionId)
		return false;
	highXid = (*indexXid) - 1;
	if (highXid < FirstNormalTransactionId)
		return false;

	while (true)
	{
		pageno = TransactionIdToPage(highXid);

		/*
		 * Compute the xid floor for the page.
		 */
		lowXid = pageno * (TransactionId) CLOG_XACTS_PER_PAGE;
		if (lowXid == InvalidTransactionId)
			lowXid = FirstNormalTransactionId;

		LWLockAcquire(CLogControlLock, LW_EXCLUSIVE);

		/*
		 * Peek to see if page exists.
		 */
		if (!SimpleLruPageExists(ClogCtl, pageno))
		{
			LWLockRelease(CLogControlLock);

			*indexXid = InvalidTransactionId;
			*status = TRANSACTION_STATUS_IN_PROGRESS;	/* set it to something */
			return false;
		}

		slotno = SimpleLruReadPage(ClogCtl, pageno, false, highXid);

		for (xid = highXid; xid >= lowXid; xid--)
		{
			byteno = TransactionIdToByte(xid);
			bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT;
			byteptr = ClogCtl->shared->page_buffer[slotno] + byteno;

			*status = (*byteptr >> bshift) & CLOG_XACT_BITMASK;
			if (*status != TRANSACTION_STATUS_IN_PROGRESS)
			{
				LWLockRelease(CLogControlLock);

				*indexXid = xid;
				return true;
			}
		}

		LWLockRelease(CLogControlLock);

		if (lowXid == FirstNormalTransactionId)
		{
			*indexXid = InvalidTransactionId;
			*status = TRANSACTION_STATUS_IN_PROGRESS;	/* set it to something */
			return false;
		}

		highXid = lowXid - 1;	/* go to the last xid of the previous page */
	}

	return false;	/* never reached */
}

/*
 * Determine the "age" of a transaction id.
 */
bool
CLOGTransactionIsOld(TransactionId xid)
{
	TransactionId nextXid;
	int			pagesBack;

	if (ShmemVariableCache == NULL)
		return false;	/* we may be called very early in the life of the backend */

	nextXid = ShmemVariableCache->nextXid;

	if (nextXid < xid)
		return false;	/* xid is later than nextXid; treat it as not old */

	pagesBack = (nextXid - xid) / CLOG_XACTS_PER_PAGE;

	/*
	 * Declare the transaction old if it is in the older half of the hot
	 * CLOG cache window, or before the window entirely.
*/ return (pagesBack > NUM_CLOG_BUFFERS/2); } /* * Initialization of shared memory for CLOG */ Size CLOGShmemSize(void) { return SimpleLruShmemSize(NUM_CLOG_BUFFERS, CLOG_LSNS_PER_PAGE); } void CLOGShmemInit(void) { ClogCtl->PagePrecedes = CLOGPagePrecedes; SimpleLruInit(ClogCtl, "CLOG Ctl", NUM_CLOG_BUFFERS, CLOG_LSNS_PER_PAGE, CLogControlLock, "pg_clog"); } /* * This func must be called ONCE on system install. It creates * the initial CLOG segment. (The CLOG directory is assumed to * have been created by the initdb shell script, and CLOGShmemInit * must have been called already.) */ void BootStrapCLOG(void) { int slotno; LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); /* Create and zero the first page of the commit log */ slotno = ZeroCLOGPage(0, false); /* Make sure it's written out */ SimpleLruWritePage(ClogCtl, slotno, NULL); Assert(!ClogCtl->shared->page_dirty[slotno]); LWLockRelease(CLogControlLock); } /* * Initialize (or reinitialize) a page of CLOG to zeroes. * If writeXlog is TRUE, also emit an XLOG record saying we did this. * * The page is not actually written, just set up in shared memory. * The slot number of the new page is returned. * * Control lock must be held at entry, and will be held at exit. */ static int ZeroCLOGPage(int pageno, bool writeXlog) { int slotno; slotno = SimpleLruZeroPage(ClogCtl, pageno); if (writeXlog) WriteZeroPageXlogRec(pageno); return slotno; } /* * This must be called ONCE during postmaster or standalone-backend startup, * after StartupXLOG has initialized ShmemVariableCache->nextXid. */ void StartupCLOG(void) { TransactionId xid = ShmemVariableCache->nextXid; int pageno = TransactionIdToPage(xid); LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); /* * Initialize our idea of the latest page number. */ ClogCtl->shared->latest_page_number = pageno; /* * Zero out the remainder of the current clog page. Under normal * circumstances it should be zeroes already, but it seems at least * theoretically possible that XLOG replay will have settled on a nextXID * value that is less than the last XID actually used and marked by the * previous database lifecycle (since subtransaction commit writes clog * but makes no WAL entry). Let's just be safe. (We need not worry about * pages beyond the current one, since those will be zeroed when first * used. For the same reason, there is no need to do anything when * nextXid is exactly at a page boundary; and it's likely that the * "current" page doesn't exist yet in that case.) 
*/ if (TransactionIdToPgIndex(xid) != 0) { int byteno = TransactionIdToByte(xid); int bshift = TransactionIdToBIndex(xid) * CLOG_BITS_PER_XACT; int slotno; char *byteptr; slotno = SimpleLruReadPage(ClogCtl, pageno, false, xid); byteptr = ClogCtl->shared->page_buffer[slotno] + byteno; /* Zero so-far-unused positions in the current byte */ *byteptr &= (1 << bshift) - 1; /* Zero the rest of the page */ MemSet(byteptr + 1, 0, BLCKSZ - byteno - 1); ClogCtl->shared->page_dirty[slotno] = true; } LWLockRelease(CLogControlLock); } /* * This must be called ONCE during postmaster or standalone-backend shutdown */ void ShutdownCLOG(void) { /* Flush dirty CLOG pages to disk */ TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(false); SimpleLruFlush(ClogCtl, false); TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(false); } /* * Perform a checkpoint --- either during shutdown, or on-the-fly */ void CheckPointCLOG(void) { /* Flush dirty CLOG pages to disk */ TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(true); SimpleLruFlush(ClogCtl, true); TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(true); } /* * Make sure that CLOG has room for a newly-allocated XID. * * NB: this is called while holding XidGenLock. We want it to be very fast * most of the time; even when it's not so fast, no actual I/O need happen * unless we're forced to write out a dirty clog or xlog page to make room * in shared memory. */ void ExtendCLOG(TransactionId newestXact) { int pageno; /* * No work except at first XID of a page. But beware: just after * wraparound, the first XID of page zero is FirstNormalTransactionId. */ if (TransactionIdToPgIndex(newestXact) != 0 && !TransactionIdEquals(newestXact, FirstNormalTransactionId)) return; pageno = TransactionIdToPage(newestXact); LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); /* Zero the page and make an XLOG entry about it */ ZeroCLOGPage(pageno, !InRecovery); LWLockRelease(CLogControlLock); } /* * Remove all CLOG segments before the one holding the passed transaction ID * * Before removing any CLOG data, we must flush XLOG to disk, to ensure * that any recently-emitted HEAP_FREEZE records have reached disk; otherwise * a crash and restart might leave us with some unfrozen tuples referencing * removed CLOG data. We choose to emit a special TRUNCATE XLOG record too. * Replaying the deletion from XLOG is not critical, since the files could * just as well be removed later, but doing so prevents a long-running hot * standby server from acquiring an unreasonably bloated CLOG directory. * * Since CLOG segments hold a large number of transactions, the opportunity to * actually remove a segment is fairly rare, and so it seems best not to do * the XLOG flush unless we have confirmed that there is a removable segment. */ void TruncateCLOG(TransactionId oldestXact) { int cutoffPage; /* * The cutoff point is the start of the segment containing oldestXact. We * pass the *page* containing oldestXact to SimpleLruTruncate. */ cutoffPage = TransactionIdToPage(oldestXact); /* Check to see if there's any files that could be removed */ if (!SlruScanDirectory(ClogCtl, cutoffPage, false)) return; /* nothing to remove */ /* Write XLOG record and flush XLOG to disk */ WriteTruncateXlogRec(cutoffPage); /* Now we can remove the old CLOG segment(s) */ SimpleLruTruncate(ClogCtl, cutoffPage); } /* * Decide which of two CLOG page numbers is "older" for truncation purposes. * * We need to use comparison of TransactionIds here in order to do the right * thing with wraparound XID arithmetic. 
However, if we are asked about * page number zero, we don't want to hand InvalidTransactionId to * TransactionIdPrecedes: it'll get weird about permanent xact IDs. So, * offset both xids by FirstNormalTransactionId to avoid that. */ static bool CLOGPagePrecedes(int page1, int page2) { TransactionId xid1; TransactionId xid2; xid1 = ((TransactionId) page1) * CLOG_XACTS_PER_PAGE; xid1 += FirstNormalTransactionId; xid2 = ((TransactionId) page2) * CLOG_XACTS_PER_PAGE; xid2 += FirstNormalTransactionId; return TransactionIdPrecedes(xid1, xid2); } /* * Write a ZEROPAGE xlog record */ static void WriteZeroPageXlogRec(int pageno) { XLogRecData rdata; rdata.data = (char *) (&pageno); rdata.len = sizeof(int); rdata.buffer = InvalidBuffer; rdata.next = NULL; (void) XLogInsert(RM_CLOG_ID, CLOG_ZEROPAGE, &rdata); } /* * Write a TRUNCATE xlog record * * We must flush the xlog record to disk before returning --- see notes * in TruncateCLOG(). */ static void WriteTruncateXlogRec(int pageno) { XLogRecData rdata; XLogRecPtr recptr; rdata.data = (char *) (&pageno); rdata.len = sizeof(int); rdata.buffer = InvalidBuffer; rdata.next = NULL; recptr = XLogInsert(RM_CLOG_ID, CLOG_TRUNCATE, &rdata); XLogFlush(recptr); } /* * CLOG resource manager's routines */ void clog_redo(XLogRecPtr beginLoc, XLogRecPtr lsn, XLogRecord *record) { uint8 info = record->xl_info & ~XLR_INFO_MASK; /* Backup blocks are not used in clog records */ Assert(!(record->xl_info & XLR_BKP_BLOCK_MASK)); if (info == CLOG_ZEROPAGE) { int pageno; int slotno; memcpy(&pageno, XLogRecGetData(record), sizeof(int)); LWLockAcquire(CLogControlLock, LW_EXCLUSIVE); slotno = ZeroCLOGPage(pageno, false); SimpleLruWritePage(ClogCtl, slotno, NULL); Assert(!ClogCtl->shared->page_dirty[slotno]); LWLockRelease(CLogControlLock); } else if (info == CLOG_TRUNCATE) { int pageno; memcpy(&pageno, XLogRecGetData(record), sizeof(int)); /* * During XLOG replay, latest_page_number isn't set up yet; insert a * suitable value to bypass the sanity test in SimpleLruTruncate. */ ClogCtl->shared->latest_page_number = pageno; SimpleLruTruncate(ClogCtl, pageno); } else elog(PANIC, "clog_redo: unknown op code %u", info); } void clog_desc(StringInfo buf, XLogRecPtr beginLoc, XLogRecord *record) { uint8 info = record->xl_info & ~XLR_INFO_MASK; char *rec = XLogRecGetData(record); if (info == CLOG_ZEROPAGE) { int pageno; memcpy(&pageno, rec, sizeof(int)); appendStringInfo(buf, "zeropage: %d", pageno); } else if (info == CLOG_TRUNCATE) { int pageno; memcpy(&pageno, rec, sizeof(int)); appendStringInfo(buf, "truncate before: %d", pageno); } else appendStringInfo(buf, "UNKNOWN"); }
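/*
 * Illustrative sketch (not part of the original file): a standalone demo of
 * the two-bits-per-xact addressing implemented by the TransactionIdTo* macros
 * near the top of this file.  BLCKSZ is assumed to be 8192 here purely for
 * concreteness.
 */
#include <stdio.h>

#define DEMO_BLCKSZ				8192
#define DEMO_XACTS_PER_BYTE		4
#define DEMO_BITS_PER_XACT		2
#define DEMO_XACTS_PER_PAGE		(DEMO_BLCKSZ * DEMO_XACTS_PER_BYTE)

static void
demo_clog_addressing(unsigned int xid)
{
	unsigned int page = xid / DEMO_XACTS_PER_PAGE;
	unsigned int pgindex = xid % DEMO_XACTS_PER_PAGE;
	unsigned int byteno = pgindex / DEMO_XACTS_PER_BYTE;
	unsigned int bshift = (xid % DEMO_XACTS_PER_BYTE) * DEMO_BITS_PER_XACT;

	/* e.g. xid 100000 -> page 3, byte 424, bit shift 0 */
	printf("xid %u -> page %u, byte %u, bit shift %u\n",
		   xid, page, byteno, bshift);
}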
962469.c
/* Missing system headers: the code below uses malloc/calloc/free, memmove,
 * and the math routines cos/sin/sqrt/log10 plus M_PI, so it needs these
 * unless CNNFTTTransforms.h already pulls them in. */
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "CNNFTTTransforms.h" #include <stdio.h> #define P_REF -93.9794 Transform* newTransform(int window, int framesPerSecond) { Transform* newTransform = (Transform*)malloc(sizeof(Transform)); newTransform->windowSize = window; newTransform->framesPerSecond = framesPerSecond; int pow2Size = 0x01; while (pow2Size < window) { pow2Size = pow2Size << 1; } newTransform->points = pow2Size; newTransform->real = (float*)malloc(pow2Size*sizeof(float)); newTransform->imaginary = (float*)malloc(pow2Size*sizeof(float)); newTransform->power = (float*)malloc(pow2Size*sizeof(float)); newTransform->sine = (float*)malloc((pow2Size/2)*sizeof(float)); newTransform->cosine = (float*)malloc((pow2Size/2)*sizeof(float)); newTransform->dbpowerBuffer = (float*)calloc(framesPerSecond, sizeof(float)); newTransform->dbpower = 0; //precompute twiddle factors float arg; int i; for (i=0;i<pow2Size/2;i++) { arg = (-2.0*M_PI*i)/pow2Size; newTransform->cosine[i] = cos(arg); newTransform->sine[i] = sin(arg); } newTransform->window = (float*)malloc(pow2Size*sizeof(float)); for(i=0;i<window;i++) { //Hanning newTransform->window[i] = (float)((1.0-cos(2.0*M_PI*(i+1)/(window+1)))*0.5); } for(i=window;i<pow2Size;i++){ newTransform->window[i] = 0; } return newTransform; } void ForwardFFT(Transform* fft, float* realInput) { int i,j,k,L,m,n,o,p,q; float tempReal, tempImaginary, cos, sin, xt, yt, temp; k = fft->points; fft->totalPower = 0; for(i=0;i<k;i++) { fft->real[i] = 0; fft->imaginary[i] = 0; } for(i=0;i<fft->windowSize;i++) { //Windowing fft->real[i] = realInput[i]*fft->window[i]; } j=0; m=k/2; //bit reversal for(i=1;i<(k-1);i++) { L=m; while(j>=L) { j=j-L; L=L/2; } j=j+L; if(i<j) { tempReal=fft->real[i]; tempImaginary=fft->imaginary[i]; fft->real[i]=fft->real[j]; fft->imaginary[i]=fft->imaginary[j]; fft->real[j]=tempReal; fft->imaginary[j]=tempImaginary; } } L=0; m=1; n=k/2; //computation for(i=k;i>1;i=(i>>1)) { L=m; m=2*m; o=0; for(j=0;j<L;j++) { cos=fft->cosine[o]; sin=fft->sine[o]; o=o+n; for(p=j;p<k;p=p+m) { q=p+L; xt=cos*fft->real[q]-sin*fft->imaginary[q]; yt=sin*fft->real[q]+cos*fft->imaginary[q]; fft->real[q]=(fft->real[p]-xt); fft->real[p]=(fft->real[p]+xt); fft->imaginary[q]=(fft->imaginary[p]-yt); fft->imaginary[p]=(fft->imaginary[p]+yt); } } n=n>>1; } for (i=0; i<k; i++) { fft->power[i] = sqrt(fft->real[i]*fft->real[i]+fft->imaginary[i]*fft->imaginary[i]); fft->totalPower += fft->power[i]/k; } fft->dBSPL = 10*log10(fft->totalPower + 1e-6) - P_REF; temp = fft->dBSPL; fft->dbpower = fft->dbpower + (temp - fft->dbpowerBuffer[0])/fft->framesPerSecond; memmove(fft->dbpowerBuffer, fft->dbpowerBuffer + 1, sizeof(*fft->dbpowerBuffer)*(fft->framesPerSecond-1)); fft->dbpowerBuffer[fft->framesPerSecond -1] = temp; } void InverseFFT(Transform* fft) { int i,j,k,L,m,n,o,p,q; float tempReal, tempImaginary, cos, sin, xt, yt; k = fft->points; j=0; m=k/2; //bit reversal for(i=1;i<(k-1);i++) { L=m; while(j>=L) { j=j-L; L=L/2; } j=j+L; if(i<j) { tempReal=fft->real[i]; tempImaginary=fft->imaginary[i]; fft->real[i]=fft->real[j]; fft->imaginary[i]=fft->imaginary[j]; fft->real[j]=tempReal; fft->imaginary[j]=tempImaginary; } } L=0; m=1; n=k/2; //computation for(i=k;i>1;i=(i>>1)) { L=m; m=2*m; o=0; for(j=0;j<L;j++) { cos=fft->cosine[o]; sin=-fft->sine[o]; o=o+n; for(p=j;p<k;p=p+m) { q=p+L; xt=cos*fft->real[q]-sin*fft->imaginary[q]; yt=sin*fft->real[q]+cos*fft->imaginary[q]; fft->real[q]=(fft->real[p]-xt); fft->real[p]=(fft->real[p]+xt); fft->imaginary[q]=(fft->imaginary[p]-yt); fft->imaginary[p]=(fft->imaginary[p]+yt); } } n=n>>1; } 
	for (i = 0; i < k; i++) {
		fft->real[i] /= k;	/* scale by 1/N to complete the inverse transform */
	}
}

void destroyTransform(Transform** transform) {
	if (*transform != NULL) {
		if ((*transform)->cosine != NULL) {
			free((*transform)->cosine);
			(*transform)->cosine = NULL;
		}
		if ((*transform)->sine != NULL) {
			free((*transform)->sine);
			(*transform)->sine = NULL;
		}
		if ((*transform)->real != NULL) {
			free((*transform)->real);
			(*transform)->real = NULL;
		}
		if ((*transform)->imaginary != NULL) {
			free((*transform)->imaginary);
			(*transform)->imaginary = NULL;
		}
		/* Also release the buffers allocated in newTransform() that the
		 * original cleanup missed (power, window, dbpowerBuffer). */
		if ((*transform)->power != NULL) {
			free((*transform)->power);
			(*transform)->power = NULL;
		}
		if ((*transform)->window != NULL) {
			free((*transform)->window);
			(*transform)->window = NULL;
		}
		if ((*transform)->dbpowerBuffer != NULL) {
			free((*transform)->dbpowerBuffer);
			(*transform)->dbpowerBuffer = NULL;
		}
		free(*transform);
		*transform = NULL;
	}
}
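/*
 * Usage sketch (not part of the original file): push one frame of samples
 * through the transform.  A 1024-sample window at 30 frames per second is
 * assumed; in practice the input would come from an audio capture callback.
 */
void exampleFrame(float* samples /* at least 1024 samples */) {
	Transform* fft = newTransform(1024, 30);
	ForwardFFT(fft, samples);	/* fills real/imaginary/power, updates dBSPL */
	printf("dBSPL: %f, rolling 1s average: %f\n", fft->dBSPL, fft->dbpower);
	destroyTransform(&fft);		/* fft is NULL afterwards */
}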
458853.c
/* * Copyright (c) 2014 Wind River Systems, Inc. * * SPDX-License-Identifier: Apache-2.0 */ /* * @file test access to the minimal C libraries * * This module verifies that the various minimal C libraries can be used. * * IMPORTANT: The module only ensures that each supported library is present, * and that a bare minimum of its functionality is operating correctly. It does * NOT guarantee that ALL standards-defined functionality is present, nor does * it guarantee that ALL functionality provided is working correctly. */ #include <zephyr.h> #include <misc/__assert.h> #include <ztest.h> #include <limits.h> #include <stdbool.h> #include <stddef.h> #include <zephyr/types.h> #include <string.h> /* * variables used during limits library testing; must be marked as "volatile" * to prevent compiler from computing results at compile time */ volatile long long_max = LONG_MAX; volatile long long_one = 1L; /** * * @brief Test implementation-defined constants library * */ void limits_test(void) { zassert_true((long_max + long_one == LONG_MIN), NULL); } /** * * @brief Test boolean types and values library * */ void stdbool_test(void) { zassert_true((true == 1), "true value"); zassert_true((false == 0), "false value"); } /* * variables used during stddef library testing; must be marked as "volatile" * to prevent compiler from computing results at compile time */ volatile long long_variable; volatile size_t size_of_long_variable = sizeof(long_variable); /** * * @brief Test standard type definitions library * */ void stddef_test(void) { zassert_true((size_of_long_variable == 4), "sizeof"); } /* * variables used during stdint library testing; must be marked as "volatile" * to prevent compiler from computing results at compile time */ volatile u8_t unsigned_byte = 0xff; volatile u32_t unsigned_int = 0xffffff00; /** * * @brief Test integer types library * */ void stdint_test(void) { zassert_true((unsigned_int + unsigned_byte + 1u == 0), NULL); } /* * variables used during string library testing */ #define BUFSIZE 10 char buffer[BUFSIZE]; /** * * @brief Test string memset * */ void memset_test(void) { memset(buffer, 'a', BUFSIZE); zassert_true((buffer[0] == 'a'), "memset"); zassert_true((buffer[BUFSIZE - 1] == 'a'), "memset"); } /** * * @brief Test string length function * */ void strlen_test(void) { memset(buffer, '\0', BUFSIZE); memset(buffer, 'b', 5); /* 5 is BUFSIZE / 2 */ zassert_equal(strlen(buffer), 5, "strlen"); } /** * * @brief Test string compare function * */ void strcmp_test(void) { strcpy(buffer, "eeeee"); zassert_true((strcmp(buffer, "fffff") < 0), "strcmp less ..."); zassert_true((strcmp(buffer, "eeeee") == 0), "strcmp equal ..."); zassert_true((strcmp(buffer, "ddddd") > 0), "strcmp greater ..."); } /** * * @brief Test string N compare function * */ void strncmp_test(void) { const char pattern[] = "eeeeeeeeeeee"; /* Note we don't want to count the final \0 that sizeof will */ __ASSERT_NO_MSG(sizeof(pattern) - 1 > BUFSIZE); memcpy(buffer, pattern, BUFSIZE); zassert_true((strncmp(buffer, "fffff", 0) == 0), "strncmp 0"); zassert_true((strncmp(buffer, "eeeff", 3) == 0), "strncmp 3"); zassert_true((strncmp(buffer, "eeeeeeeeeeeff", BUFSIZE) == 0), "strncmp 10"); } /** * * @brief Test string copy function * */ void strcpy_test(void) { memset(buffer, '\0', BUFSIZE); strcpy(buffer, "10 chars!\0"); zassert_true((strcmp(buffer, "10 chars!\0") == 0), "strcpy"); } /** * * @brief Test string N copy function * */ void strncpy_test(void) { int ret; memset(buffer, '\0', BUFSIZE); strncpy(buffer, "This is over 10 
characters", BUFSIZE); /* Purposely different values */ ret = strncmp(buffer, "This is over 20 characters", BUFSIZE); zassert_true((ret == 0), "strncpy"); } /** * * @brief Test string scanning function * */ void strchr_test(void) { char *rs = NULL; int ret; memset(buffer, '\0', BUFSIZE); strncpy(buffer, "Copy 10", BUFSIZE); rs = strchr(buffer, '1'); zassert_not_null(rs, "strchr"); ret = strncmp(rs, "10", 2); zassert_true((ret == 0), "strchr"); } /** * * @brief Test memory comparison function * */ void memcmp_test(void) { int ret; unsigned char m1[5] = { 1, 2, 3, 4, 5 }; unsigned char m2[5] = { 1, 2, 3, 4, 6 }; ret = memcmp(m1, m2, 4); zassert_true((ret == 0), "memcmp 4"); ret = memcmp(m1, m2, 5); zassert_true((ret != 0), "memcmp 5"); } /** * * @brief Test string operations library */ void string_test(void) { memset_test(); strlen_test(); strcmp_test(); strcpy_test(); strncpy_test(); strncmp_test(); strchr_test(); memcmp_test(); } void test_main(void) { ztest_test_suite(test_libs, ztest_unit_test(limits_test), ztest_unit_test(stdbool_test), ztest_unit_test(stddef_test), ztest_unit_test(stdint_test), ztest_unit_test(string_test)); ztest_run_test_suite(test_libs); }
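/**
 *
 * @brief Illustrative sketch: an additional string test
 *
 * Not part of the original file; shows how a further check would follow the
 * same pattern.  To run, it would also need to be called from string_test()
 * or registered via ztest_unit_test().
 */
void memcpy_test(void)
{
	char dst[BUFSIZE];

	memset(dst, 0, BUFSIZE);
	memcpy(dst, "12345", 5);
	zassert_true((strncmp(dst, "12345", 5) == 0), "memcpy");
}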
431199.c
#include <std.h> #include "../valley.h" inherit GRASS; void create() { ::create(); set_exits( ([ "west" : ROOMS+"g1", "east" : ROOMS+"g3", "north" : ROOMS+"g21", "south" : ROOMS+"v20", ]) ); }
690505.c
#include <stdlib.h>
#include <string.h>

#include "libbmsdi/protocol.h"

/* Size in bytes of one element of the given BM_TYPE_* wire type;
 * returns 0 for unrecognized types. */
uint8_t sizeOfType( uint8_t t ) {
  switch( t ) {
    case BM_TYPE_BOOLEAN:
    case BM_TYPE_INT8:
    case BM_TYPE_STR:
          return 1;
    case BM_TYPE_FIXED16:
    case BM_TYPE_INT16:
          return 2;
    case BM_TYPE_INT32:
          return 4;
    case BM_TYPE_INT64:
          return 8;
    default:
        return 0;
  }
}

/* Round x up to the next multiple of 4 (32-bit alignment).  Note the
 * argument is a uint8_t, so inputs are limited to 0..255. */
uint32_t align32( uint8_t x ) {
  return ((x + 3) & ~0x03);
}
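/* Usage sketch (not part of the original file): size a buffer for `count`
 * elements of a given wire type, rounded up to a 32-bit boundary.  Because
 * align32() takes a uint8_t, the unaligned size must fit in 0..255. */
uint32_t exampleAlignedArraySize( uint8_t type, uint8_t count ) {
  uint8_t raw = count * sizeOfType( type );
  return align32( raw );   /* e.g. 3 x BM_TYPE_INT16 = 6 bytes -> 8 */
}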
799570.c
/* -*- linux-c -*- * * (C) Copyright IBM Corp. 2005 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This * file and program are licensed under a BSD style license. See * the Copying file included with the OpenHPI distribution for * full licensing terms. * * Author: Renier Morales <[email protected]> */ #include <SaHpi.h> #include <oh_utils.h> /******************************************************************* * WARNING! This file is auto-magically generated by: * ./gen_epath_pattern_tests.py * Do not change this file manually. Update script instead *******************************************************************/ /** * This takes an entity path and an entity path's pattern, * and knowing the proper result beforehand, checks if the * pattern matches the entity path. If the proper result is * achieved, the test passes. **/ int main(int argc, char **argv) { char *ep_str = "{SBC_BLADE,3}"; char *epp_str = "*{SBC_BLADE,.}"; oh_entitypath_pattern epp; SaHpiEntityPathT ep; SaErrorT error = SA_OK; SaHpiBoolT match = SAHPI_TRUE; error = oh_encode_entitypath(ep_str, &ep); if (error) { printf("Encoding of entitypath failed.\n"); return -1; } error = oh_compile_entitypath_pattern(epp_str, &epp); if (error) { printf("Compilation of pattern failed.\n"); return -1; } if (oh_match_entitypath_pattern(&epp, &ep) != match) return -1; return 0; }
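/*
 * Illustrative sketch (not part of the generated test): the same three calls
 * can be wrapped to check any pattern/path pair, including negative cases.
 * The helper name and return convention here are made up for illustration.
 */
static int check_pattern(const char *epp_str, const char *ep_str,
                         SaHpiBoolT expected)
{
        oh_entitypath_pattern epp;
        SaHpiEntityPathT ep;

        if (oh_encode_entitypath(ep_str, &ep) != SA_OK)
                return -1;
        if (oh_compile_entitypath_pattern(epp_str, &epp) != SA_OK)
                return -1;
        return (oh_match_entitypath_pattern(&epp, &ep) == expected) ? 0 : -1;
}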
26856.c
/* * PostScript output functions * * Copyright 1998 Huw D M Davies * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <locale.h> #define NONAMELESSUNION #define NONAMELESSSTRUCT #include "windef.h" #include "winbase.h" #include "wingdi.h" #include "psdrv.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(psdrv); static const char psadobe[] = "%!PS-Adobe-3.0\n"; static const char media[] = "%cupsJobTicket: media="; static const char cups_one_sided[] = "%cupsJobTicket: sides=one-sided\n"; static const char cups_two_sided_long[] = "%cupsJobTicket: sides=two-sided-long-edge\n"; static const char cups_two_sided_short[] = "%cupsJobTicket: sides=two-sided-short-edge\n"; static const char *cups_duplexes[3] = { cups_one_sided, /* DMDUP_SIMPLEX */ cups_two_sided_long, /* DMDUP_VERTICAL */ cups_two_sided_short /* DMDUP_HORIZONTAL */ }; static const char cups_collate_false[] = "%cupsJobTicket: collate=false\n"; static const char cups_collate_true[] = "%cupsJobTicket: collate=true\n"; static const char psheader[] = /* title llx lly urx ury orientation */ "%%%%Creator: Wine PostScript Driver\n" "%%%%Title: %s\n" "%%%%BoundingBox: %d %d %d %d\n" "%%%%Pages: (atend)\n" "%%%%Orientation: %s\n" "%%%%EndComments\n"; static const char psbeginprolog[] = "%%BeginProlog\n"; static const char psendprolog[] = "%%EndProlog\n"; static const char psprolog[] = "/tmpmtrx matrix def\n" "/hatch {\n" " pathbbox\n" " /b exch def /r exch def /t exch def /l exch def /gap 32 def\n" " l cvi gap idiv gap mul\n" " gap\n" " r cvi gap idiv gap mul\n" " {t moveto 0 b t sub rlineto}\n" " for\n" "} bind def\n" "/B {pop pop pop pop} def\n" "/N {newpath} def\n" "/havetype42gdir {version cvi 2015 ge} bind def\n"; static const char psbeginsetup[] = "%%BeginSetup\n"; static const char psendsetup[] = "%%EndSetup\n"; static const char psbeginfeature[] = /* feature, value */ "mark {\n" "%%%%BeginFeature: %s %s\n"; static const char psendfeature[] = "\n%%EndFeature\n" "} stopped cleartomark\n"; static const char psnewpage[] = /* name, number, xres, yres, xtrans, ytrans, rot */ "%%%%Page: %s %d\n" "%%%%BeginPageSetup\n" "/pgsave save def\n" "72 %d div 72 %d div scale\n" "%d %d translate\n" "1 -1 scale\n" "%d rotate\n" "%%%%EndPageSetup\n"; static const char psendpage[] = "pgsave restore\n" "showpage\n"; static const char psfooter[] = /* pages */ "%%%%Trailer\n" "%%%%Pages: %d\n" "%%%%EOF\n"; static const char psmoveto[] = /* x, y */ "%d %d moveto\n"; static const char pslineto[] = /* x, y */ "%d %d lineto\n"; static const char psstroke[] = "stroke\n"; static const char psrectangle[] = /* x, y, width, height, -width */ "%d %d moveto\n" "%d 0 rlineto\n" "0 %d rlineto\n" "%d 0 rlineto\n" "closepath\n"; static const char psglyphshow[] = /* glyph name */ "/%s glyphshow\n"; 
static const char psfindfont[] = /* fontname */ "/%s findfont\n"; static const char psfakeitalic[] = "[1 0 0.25 1 0 0]\n"; static const char pssizematrix[] = "[%d %d %d %d 0 0]\n"; static const char psconcat[] = "matrix concatmatrix\n"; static const char psrotatefont[] = /* escapement */ "%d 10 div matrix rotate\n" "matrix concatmatrix\n"; static const char pssetfont[] = "makefont setfont\n"; static const char pssetline[] = /* width, join, endcap */ "%d setlinewidth %u setlinejoin %u setlinecap\n"; static const char pssetgray[] = /* gray */ "%.2f setgray\n"; static const char pssetrgbcolor[] = /* r, g, b */ "%.2f %.2f %.2f setrgbcolor\n"; static const char psarc[] = /* x, y, w, h, ang1, ang2 */ "tmpmtrx currentmatrix pop\n" "%d %d translate\n" "%d %d scale\n" "0 0 0.5 %.1f %.1f arc\n" "tmpmtrx setmatrix\n"; static const char pscurveto[] = /* x1, y1, x2, y2, x3, y3 */ "%d %d %d %d %d %d curveto\n"; static const char psgsave[] = "gsave\n"; static const char psgrestore[] = "grestore\n"; static const char psfill[] = "fill\n"; static const char pseofill[] = "eofill\n"; static const char psnewpath[] = "newpath\n"; static const char psclosepath[] = "closepath\n"; static const char psclip[] = "clip\n"; static const char pseoclip[] = "eoclip\n"; static const char psrectclip[] = "%d %d %d %d rectclip\n"; static const char psrectclip2[] = "%s rectclip\n"; static const char pshatch[] = "hatch\n"; static const char psrotate[] = /* ang */ "%.1f rotate\n"; static const char psarrayput[] = "%s %d %d put\n"; static const char psarraydef[] = "/%s %d array def\n"; static const char psenddocument[] = "\n%%EndDocument\n"; DWORD PSDRV_WriteSpool(PHYSDEV dev, LPCSTR lpData, DWORD cch) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); int num, num_left = cch; if(physDev->job.quiet) { TRACE("ignoring output\n"); return 0; } if(physDev->job.in_passthrough) { /* Was in PASSTHROUGH mode */ write_spool( dev, psenddocument, sizeof(psenddocument)-1 ); physDev->job.in_passthrough = physDev->job.had_passthrough_rect = FALSE; } if(physDev->job.OutOfPage) { /* Will get here after NEWFRAME Escape */ if( !PSDRV_StartPage(dev) ) return 0; } do { num = min(num_left, 0x8000); if(write_spool( dev, lpData, num ) != num) return 0; lpData += num; num_left -= num; } while(num_left); return cch; } static INT PSDRV_WriteFeature(PHYSDEV dev, LPCSTR feature, LPCSTR value, LPCSTR invocation) { char *buf = HeapAlloc( GetProcessHeap(), 0, sizeof(psbeginfeature) + strlen(feature) + strlen(value)); sprintf(buf, psbeginfeature, feature, value); write_spool( dev, buf, strlen(buf) ); write_spool( dev, invocation, strlen(invocation) ); write_spool( dev, psendfeature, strlen(psendfeature) ); HeapFree( GetProcessHeap(), 0, buf ); return 1; } /******************************************************** * escape_title * * Helper for PSDRV_WriteHeader. Escape any non-printable characters * as octal. If we've had to use an escape then surround the entire string * in brackets. Truncate string to represent at most 0x80 characters. 
* */ static char *escape_title(LPCWSTR wstr) { char *ret, *cp, *str; int i, extra = 0; if(!wstr) { ret = HeapAlloc(GetProcessHeap(), 0, 1); *ret = '\0'; return ret; } i = WideCharToMultiByte( CP_ACP, 0, wstr, -1, NULL, 0, NULL, NULL ); str = HeapAlloc( GetProcessHeap(), 0, i ); if (!str) return NULL; WideCharToMultiByte( CP_ACP, 0, wstr, -1, str, i, NULL, NULL ); for(i = 0; i < 0x80 && str[i]; i++) { if(!isprint(str[i])) extra += 3; } if(!extra) { ret = HeapAlloc(GetProcessHeap(), 0, i + 1); memcpy(ret, str, i); ret[i] = '\0'; goto done; } extra += 2; /* two for the brackets */ cp = ret = HeapAlloc(GetProcessHeap(), 0, i + extra + 1); *cp++ = '('; for(i = 0; i < 0x80 && str[i]; i++) { if(!isprint(str[i])) { BYTE b = (BYTE)str[i]; *cp++ = '\\'; *cp++ = ((b >> 6) & 0x7) + '0'; *cp++ = ((b >> 3) & 0x7) + '0'; *cp++ = ((b) & 0x7) + '0'; } else *cp++ = str[i]; } *cp++ = ')'; *cp = '\0'; done: HeapFree( GetProcessHeap(), 0, str ); return ret; } struct ticket_info { PAGESIZE *page; DUPLEX *duplex; }; static void write_cups_job_ticket( PHYSDEV dev, const struct ticket_info *info ) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char buf[256]; int len; if (info->page && info->page->InvocationString) { len = sizeof(media) + strlen( info->page->Name ) + 1; if (len <= sizeof(buf)) { memcpy( buf, media, sizeof(media) ); strcat( buf, info->page->Name ); strcat( buf, "\n"); write_spool( dev, buf, len - 1 ); } else WARN( "paper name %s will be too long for DSC\n", info->page->Name ); } if (info->duplex && info->duplex->InvocationString) { if (info->duplex->WinDuplex >= 1 && info->duplex->WinDuplex <= 3) { const char *str = cups_duplexes[ info->duplex->WinDuplex - 1 ]; write_spool( dev, str, strlen( str ) ); } } if (physDev->Devmode->dmPublic.u1.s1.dmCopies > 1) { len = snprintf( buf, sizeof(buf), "%%cupsJobTicket: copies=%d\n", physDev->Devmode->dmPublic.u1.s1.dmCopies ); if (len > 0 && len < sizeof(buf)) write_spool( dev, buf, len ); if (physDev->Devmode->dmPublic.dmFields & DM_COLLATE) { if (physDev->Devmode->dmPublic.dmCollate == DMCOLLATE_FALSE) write_spool( dev, cups_collate_false, sizeof(cups_collate_false) - 1 ); else if (physDev->Devmode->dmPublic.dmCollate == DMCOLLATE_TRUE) write_spool( dev, cups_collate_true, sizeof(cups_collate_true) - 1 ); } } } INT PSDRV_WriteHeader( PHYSDEV dev, LPCWSTR title ) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char *buf, *escaped_title; INPUTSLOT *slot = find_slot( physDev->pi->ppd, physDev->Devmode ); PAGESIZE *page = find_pagesize( physDev->pi->ppd, physDev->Devmode ); DUPLEX *duplex = find_duplex( physDev->pi->ppd, physDev->Devmode ); int llx, lly, urx, ury; int ret, len; const char * dmOrientation; struct ticket_info ticket_info = { page, duplex }; TRACE("%s\n", debugstr_w(title)); len = strlen( psadobe ); ret = write_spool( dev, psadobe, len ); if (ret != len) { WARN("WriteSpool error\n"); return 0; } write_cups_job_ticket( dev, &ticket_info ); escaped_title = escape_title(title); buf = HeapAlloc( GetProcessHeap(), 0, sizeof(psheader) + strlen(escaped_title) + 30 ); if(!buf) { WARN("HeapAlloc failed\n"); HeapFree(GetProcessHeap(), 0, escaped_title); return 0; } /* BBox co-ords are in default user co-ord system so urx < ury even in landscape mode */ llx = physDev->ImageableArea.left * 72.0 / physDev->logPixelsX; lly = physDev->ImageableArea.bottom * 72.0 / physDev->logPixelsY; urx = physDev->ImageableArea.right * 72.0 / physDev->logPixelsX; ury = physDev->ImageableArea.top * 72.0 / physDev->logPixelsY; /* FIXME should do something better with BBox */ 
dmOrientation = (physDev->Devmode->dmPublic.u1.s1.dmOrientation == DMORIENT_LANDSCAPE ? "Landscape" : "Portrait"); sprintf(buf, psheader, escaped_title, llx, lly, urx, ury, dmOrientation); HeapFree(GetProcessHeap(), 0, escaped_title); len = strlen( buf ); write_spool( dev, buf, len ); HeapFree( GetProcessHeap(), 0, buf ); write_spool( dev, psbeginprolog, strlen(psbeginprolog) ); write_spool( dev, psprolog, strlen(psprolog) ); write_spool( dev, psendprolog, strlen(psendprolog) ); write_spool( dev, psbeginsetup, strlen(psbeginsetup) ); if (slot && slot->InvocationString) PSDRV_WriteFeature( dev, "*InputSlot", slot->Name, slot->InvocationString ); if (page && page->InvocationString) PSDRV_WriteFeature( dev, "*PageSize", page->Name, page->InvocationString ); if (duplex && duplex->InvocationString) PSDRV_WriteFeature( dev, "*Duplex", duplex->Name, duplex->InvocationString ); write_spool( dev, psendsetup, strlen(psendsetup) ); return 1; } INT PSDRV_WriteFooter( PHYSDEV dev ) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char *buf; int ret = 1; buf = HeapAlloc( GetProcessHeap(), 0, sizeof(psfooter) + 100 ); if(!buf) { WARN("HeapAlloc failed\n"); return 0; } sprintf(buf, psfooter, physDev->job.PageNo); if( write_spool( dev, buf, strlen(buf) ) != strlen(buf) ) { WARN("WriteSpool error\n"); ret = 0; } HeapFree( GetProcessHeap(), 0, buf ); return ret; } INT PSDRV_WriteEndPage( PHYSDEV dev ) { if( write_spool( dev, psendpage, sizeof(psendpage)-1 ) != sizeof(psendpage)-1 ) { WARN("WriteSpool error\n"); return 0; } return 1; } INT PSDRV_WriteNewPage( PHYSDEV dev ) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char *buf; char name[100]; signed int xtrans, ytrans, rotation; int ret = 1; sprintf(name, "%d", physDev->job.PageNo); buf = HeapAlloc( GetProcessHeap(), 0, sizeof(psnewpage) + 200 ); if(!buf) { WARN("HeapAlloc failed\n"); return 0; } if(physDev->Devmode->dmPublic.u1.s1.dmOrientation == DMORIENT_LANDSCAPE) { if(physDev->pi->ppd->LandscapeOrientation == -90) { xtrans = physDev->ImageableArea.right; ytrans = physDev->ImageableArea.top; rotation = 90; } else { xtrans = physDev->ImageableArea.left; ytrans = physDev->ImageableArea.bottom; rotation = -90; } } else { xtrans = physDev->ImageableArea.left; ytrans = physDev->ImageableArea.top; rotation = 0; } sprintf(buf, psnewpage, name, physDev->job.PageNo, physDev->logPixelsX, physDev->logPixelsY, xtrans, ytrans, rotation); if( write_spool( dev, buf, strlen(buf) ) != strlen(buf) ) { WARN("WriteSpool error\n"); ret = 0; } HeapFree( GetProcessHeap(), 0, buf ); return ret; } BOOL PSDRV_WriteMoveTo(PHYSDEV dev, INT x, INT y) { char buf[100]; sprintf(buf, psmoveto, x, y); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteLineTo(PHYSDEV dev, INT x, INT y) { char buf[100]; sprintf(buf, pslineto, x, y); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteStroke(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psstroke, sizeof(psstroke)-1); } BOOL PSDRV_WriteRectangle(PHYSDEV dev, INT x, INT y, INT width, INT height) { char buf[100]; sprintf(buf, psrectangle, x, y, width, height, -width); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteArc(PHYSDEV dev, INT x, INT y, INT w, INT h, double ang1, double ang2) { char buf[256]; /* Make angles -ve and swap order because we're working with an upside down y-axis */ push_lc_numeric("C"); sprintf(buf, psarc, x, y, w, h, -ang2, -ang1); pop_lc_numeric(); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteCurveTo(PHYSDEV dev, POINT pts[3]) { char buf[256]; 
sprintf(buf, pscurveto, pts[0].x, pts[0].y, pts[1].x, pts[1].y, pts[2].x, pts[2].y ); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteSetFont(PHYSDEV dev, const char *name, matrix size, INT escapement, BOOL fake_italic) { char *buf; buf = HeapAlloc( GetProcessHeap(), 0, strlen(name) + 256 ); if(!buf) { WARN("HeapAlloc failed\n"); return FALSE; } sprintf( buf, psfindfont, name ); PSDRV_WriteSpool( dev, buf, strlen(buf) ); if (fake_italic) PSDRV_WriteSpool( dev, psfakeitalic, sizeof(psfakeitalic) - 1 ); sprintf( buf, pssizematrix, size.xx, size.xy, size.yx, size.yy ); PSDRV_WriteSpool( dev, buf, strlen(buf) ); if (fake_italic) PSDRV_WriteSpool( dev, psconcat, sizeof(psconcat) - 1 ); if (escapement) { sprintf( buf, psrotatefont, -escapement ); PSDRV_WriteSpool( dev, buf, strlen(buf) ); } PSDRV_WriteSpool( dev, pssetfont, sizeof(pssetfont) - 1 ); HeapFree( GetProcessHeap(), 0, buf ); return TRUE; } BOOL PSDRV_WriteSetColor(PHYSDEV dev, PSCOLOR *color) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char buf[256]; PSDRV_CopyColor(&physDev->inkColor, color); switch(color->type) { case PSCOLOR_RGB: push_lc_numeric("C"); sprintf(buf, pssetrgbcolor, color->value.rgb.r, color->value.rgb.g, color->value.rgb.b); pop_lc_numeric(); return PSDRV_WriteSpool(dev, buf, strlen(buf)); case PSCOLOR_GRAY: push_lc_numeric("C"); sprintf(buf, pssetgray, color->value.gray.i); pop_lc_numeric(); return PSDRV_WriteSpool(dev, buf, strlen(buf)); default: ERR("Unknown colour type %d\n", color->type); break; } return FALSE; } BOOL PSDRV_WriteSetPen(PHYSDEV dev) { PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char buf[256]; DWORD i, pos; sprintf(buf, pssetline, physDev->pen.width, physDev->pen.join, physDev->pen.endcap); PSDRV_WriteSpool(dev, buf, strlen(buf)); if (physDev->pen.dash_len) { for (i = pos = 0; i < physDev->pen.dash_len; i++) pos += sprintf( buf + pos, " %u", physDev->pen.dash[i] ); buf[0] = '['; sprintf(buf + pos, "] %u setdash\n", 0); } else sprintf(buf, "[] %u setdash\n", 0); PSDRV_WriteSpool(dev, buf, strlen(buf)); return TRUE; } BOOL PSDRV_WriteGlyphShow(PHYSDEV dev, LPCSTR g_name) { char buf[128]; int l; l = snprintf(buf, sizeof(buf), psglyphshow, g_name); if (l < sizeof(psglyphshow) - 2 || l > sizeof(buf) - 1) { WARN("Unusable glyph name '%s' - ignoring\n", g_name); return FALSE; } PSDRV_WriteSpool(dev, buf, l); return TRUE; } BOOL PSDRV_WriteFill(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psfill, sizeof(psfill)-1); } BOOL PSDRV_WriteEOFill(PHYSDEV dev) { return PSDRV_WriteSpool(dev, pseofill, sizeof(pseofill)-1); } BOOL PSDRV_WriteGSave(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psgsave, sizeof(psgsave)-1); } BOOL PSDRV_WriteGRestore(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psgrestore, sizeof(psgrestore)-1); } BOOL PSDRV_WriteNewPath(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psnewpath, sizeof(psnewpath)-1); } BOOL PSDRV_WriteClosePath(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psclosepath, sizeof(psclosepath)-1); } BOOL PSDRV_WriteClip(PHYSDEV dev) { return PSDRV_WriteSpool(dev, psclip, sizeof(psclip)-1); } BOOL PSDRV_WriteEOClip(PHYSDEV dev) { return PSDRV_WriteSpool(dev, pseoclip, sizeof(pseoclip)-1); } BOOL PSDRV_WriteHatch(PHYSDEV dev) { return PSDRV_WriteSpool(dev, pshatch, sizeof(pshatch)-1); } BOOL PSDRV_WriteRotate(PHYSDEV dev, float ang) { char buf[256]; push_lc_numeric("C"); sprintf(buf, psrotate, ang); pop_lc_numeric(); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteIndexColorSpaceBegin(PHYSDEV dev, int size) { char buf[256]; sprintf(buf, 
"[/Indexed /DeviceRGB %d\n<\n", size); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteIndexColorSpaceEnd(PHYSDEV dev) { static const char buf[] = ">\n] setcolorspace\n"; return PSDRV_WriteSpool(dev, buf, sizeof(buf) - 1); } static BOOL PSDRV_WriteRGB(PHYSDEV dev, COLORREF *map, int number) { char *buf = HeapAlloc( GetProcessHeap(), 0, number * 7 + 1 ), *ptr; int i; ptr = buf; for(i = 0; i < number; i++) { sprintf(ptr, "%02x%02x%02x%c", (int)GetRValue(map[i]), (int)GetGValue(map[i]), (int)GetBValue(map[i]), ((i & 0x7) == 0x7) || (i == number - 1) ? '\n' : ' '); ptr += 7; } PSDRV_WriteSpool(dev, buf, number * 7); HeapFree( GetProcessHeap(), 0, buf ); return TRUE; } BOOL PSDRV_WriteRGBQUAD(PHYSDEV dev, const RGBQUAD *rgb, int number) { char *buf = HeapAlloc( GetProcessHeap(), 0, number * 7 + 1 ), *ptr; int i; ptr = buf; for(i = 0; i < number; i++, rgb++) ptr += sprintf(ptr, "%02x%02x%02x%c", rgb->rgbRed, rgb->rgbGreen, rgb->rgbBlue, ((i & 0x7) == 0x7) || (i == number - 1) ? '\n' : ' '); PSDRV_WriteSpool(dev, buf, ptr - buf); HeapFree( GetProcessHeap(), 0, buf ); return TRUE; } static BOOL PSDRV_WriteImageDict(PHYSDEV dev, WORD depth, BOOL grayscale, INT widthSrc, INT heightSrc, char *bits, BOOL top_down) { static const char start[] = "<<\n" " /ImageType 1\n /Width %d\n /Height %d\n /BitsPerComponent %d\n" " /ImageMatrix [%d 0 0 %d 0 %d]\n"; static const char decode1[] = " /Decode [0 %d]\n"; static const char decode3[] = " /Decode [0 1 0 1 0 1]\n"; static const char end[] = " /DataSource currentfile /ASCII85Decode filter /RunLengthDecode filter\n>>\n"; static const char endbits[] = " /DataSource <%s>\n>>\n"; char buf[1000]; if (top_down) sprintf(buf, start, widthSrc, heightSrc, (depth < 8) ? depth : 8, widthSrc, heightSrc, 0); else sprintf(buf, start, widthSrc, heightSrc, (depth < 8) ? 
depth : 8, widthSrc, -heightSrc, heightSrc); PSDRV_WriteSpool(dev, buf, strlen(buf)); switch(depth) { case 8: sprintf(buf, decode1, 255); break; case 4: sprintf(buf, decode1, 15); break; case 1: sprintf(buf, decode1, 1); break; default: if (grayscale) sprintf(buf, decode1, 1); else strcpy(buf, decode3); break; } PSDRV_WriteSpool(dev, buf, strlen(buf)); if(!bits) { PSDRV_WriteSpool(dev, end, sizeof(end) - 1); } else { sprintf(buf, endbits, bits); PSDRV_WriteSpool(dev, buf, strlen(buf)); } return TRUE; } BOOL PSDRV_WriteImage(PHYSDEV dev, WORD depth, BOOL grayscale, INT xDst, INT yDst, INT widthDst, INT heightDst, INT widthSrc, INT heightSrc, BOOL mask, BOOL top_down) { static const char start[] = "%d %d translate\n%d %d scale\n"; static const char image[] = "image\n"; static const char imagemask[] = "imagemask\n"; char buf[100]; sprintf(buf, start, xDst, yDst, widthDst, heightDst); PSDRV_WriteSpool(dev, buf, strlen(buf)); PSDRV_WriteImageDict(dev, depth, grayscale, widthSrc, heightSrc, NULL, top_down); if(mask) PSDRV_WriteSpool(dev, imagemask, sizeof(imagemask) - 1); else PSDRV_WriteSpool(dev, image, sizeof(image) - 1); return TRUE; } BOOL PSDRV_WriteBytes(PHYSDEV dev, const BYTE *bytes, DWORD number) { char *buf = HeapAlloc( GetProcessHeap(), 0, number * 3 + 1 ); char *ptr; unsigned int i; ptr = buf; for(i = 0; i < number; i++) { sprintf(ptr, "%02x", bytes[i]); ptr += 2; if(((i & 0xf) == 0xf) || (i == number - 1)) { strcpy(ptr, "\n"); ptr++; } } PSDRV_WriteSpool(dev, buf, ptr - buf); HeapFree( GetProcessHeap(), 0, buf ); return TRUE; } BOOL PSDRV_WriteData(PHYSDEV dev, const BYTE *data, DWORD number) { int num, num_left = number; do { num = min(num_left, 60); PSDRV_WriteSpool(dev, (LPCSTR)data, num); PSDRV_WriteSpool(dev, "\n", 1); data += num; num_left -= num; } while(num_left); return TRUE; } BOOL PSDRV_WriteArrayPut(PHYSDEV dev, CHAR *pszArrayName, INT nIndex, LONG lObject) { char buf[100]; sprintf(buf, psarrayput, pszArrayName, nIndex, lObject); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteArrayDef(PHYSDEV dev, CHAR *pszArrayName, INT nSize) { char buf[100]; sprintf(buf, psarraydef, pszArrayName, nSize); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteRectClip(PHYSDEV dev, INT x, INT y, INT w, INT h) { char buf[100]; sprintf(buf, psrectclip, x, y, w, h); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteRectClip2(PHYSDEV dev, CHAR *pszArrayName) { char buf[100]; sprintf(buf, psrectclip2, pszArrayName); return PSDRV_WriteSpool(dev, buf, strlen(buf)); } BOOL PSDRV_WriteDIBPatternDict(PHYSDEV dev, const BITMAPINFO *bmi, BYTE *bits, UINT usage) { static const char mypat[] = "/mypat\n"; static const char do_pattern[] = "<<\n /PaintType 1\n /PatternType 1\n /TilingType 1\n " "/BBox [0 0 %d %d]\n /XStep %d\n /YStep %d\n /PaintProc {\n begin\n 0 0 translate\n" " %d %d scale\n mypat image\n end\n }\n>>\n matrix makepattern setpattern\n"; PSDRV_PDEVICE *physDev = get_psdrv_dev( dev ); char *buf, *ptr; INT w, h, x, y, w_mult, h_mult, abs_height = abs( bmi->bmiHeader.biHeight ); COLORREF map[2]; TRACE( "size %dx%dx%d\n", bmi->bmiHeader.biWidth, bmi->bmiHeader.biHeight, bmi->bmiHeader.biBitCount); if(bmi->bmiHeader.biBitCount != 1) { FIXME("dib depth %d not supported\n", bmi->bmiHeader.biBitCount); return FALSE; } w = bmi->bmiHeader.biWidth & ~0x7; h = abs_height & ~0x7; buf = HeapAlloc( GetProcessHeap(), 0, sizeof(do_pattern) + 100 ); ptr = buf; for(y = h-1; y >= 0; y--) { for(x = 0; x < w/8; x++) { sprintf(ptr, "%02x", *(bits + x/8 + y * 
(bmi->bmiHeader.biWidth + 31) / 32 * 4)); ptr += 2; } } PSDRV_WriteSpool(dev, mypat, sizeof(mypat) - 1); PSDRV_WriteImageDict(dev, 1, FALSE, 8, 8, buf, bmi->bmiHeader.biHeight < 0); PSDRV_WriteSpool(dev, "def\n", 4); PSDRV_WriteIndexColorSpaceBegin(dev, 1); map[0] = GetTextColor( dev->hdc ); map[1] = GetBkColor( dev->hdc ); PSDRV_WriteRGB(dev, map, 2); PSDRV_WriteIndexColorSpaceEnd(dev); /* Windows seems to scale patterns so that a one pixel corresponds to 1/300" */ w_mult = (physDev->logPixelsX + 150) / 300; h_mult = (physDev->logPixelsY + 150) / 300; sprintf(buf, do_pattern, w * w_mult, h * h_mult, w * w_mult, h * h_mult, w * w_mult, h * h_mult); PSDRV_WriteSpool(dev, buf, strlen(buf)); HeapFree( GetProcessHeap(), 0, buf ); return TRUE; }
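/*
 * Usage sketch (not part of the original file): the low-level writers above
 * compose into complete PostScript path constructs.  Stroking a single line
 * segment, for instance, emits "newpath", "x y moveto", "x y lineto" and
 * "stroke" in sequence.
 */
static BOOL example_stroke_segment(PHYSDEV dev, INT x1, INT y1, INT x2, INT y2)
{
    return PSDRV_WriteNewPath(dev) &&
           PSDRV_WriteMoveTo(dev, x1, y1) &&
           PSDRV_WriteLineTo(dev, x2, y2) &&
           PSDRV_WriteStroke(dev);
}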
551826.c
/* ************************************************************************** */
/*                                                                            */
/*                                                        :::      ::::::::   */
/*   ft_strcspn.c                                       :+:      :+:    :+:   */
/*                                                    +:+ +:+         +:+     */
/*   By: lkaba <[email protected]>                  +#+  +:+       +#+        */
/*                                                +#+#+#+#+#+   +#+           */
/*   Created: 2019/02/27 01:45:12 by lkaba             #+#    #+#             */
/*   Updated: 2019/11/24 21:58:04 by lkaba            ###   ########.fr       */
/*                                                                            */
/* ************************************************************************** */

#include "libft.h"

/*
** Length of the initial segment of str containing no character from
** charset, mirroring strcspn(3).  The lookup table is indexed through
** unsigned char so that bytes above 0x7F cannot yield a negative index
** when char is signed; 256 entries therefore suffice.  The loop leaves
** count equal to the segment length in every case, so it is returned
** directly.
*/

size_t	ft_strcspn(const char *str, const char *charset)
{
	uint32_t	i;
	uint32_t	count;
	uint8_t		tab[256];

	ft_bzero(tab, 256);
	i = -1;
	while (charset[++i])
		tab[(unsigned char)charset[i]] = 1;
	count = 0;
	i = 0;
	while (str[i] && !tab[(unsigned char)str[i++]])
		count++;
	return (count);
}
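/*
** Usage sketch (not part of libft): ft_strcspn mirrors strcspn(3), so the
** two calls below both evaluate to 5 -- the length of "hello" before the
** first character from ", ".
*/
#include <string.h>

static int	example_strcspn_check(void)
{
	const char	*s = "hello, world";

	return (ft_strcspn(s, ", ") == strcspn(s, ", "));
}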
89014.c
/* ISC license. */ #include <string.h> #include <limits.h> #include <skalibs/types.h> #include <skalibs/strerr2.h> #include <skalibs/sgetopt.h> #include <skalibs/tai.h> #include <skalibs/env.h> #include <skalibs/djbunix.h> #include <skalibs/genalloc.h> #include <s6/s6-fdholder.h> #define USAGE "s6-fdholder-getdump [ -t timeout ] socket prog..." #define dieusage() strerr_dieusage(100, USAGE) int main (int argc, char const *const *argv, char const *const *envp) { s6_fdholder_t a = S6_FDHOLDER_ZERO ; genalloc dump = GENALLOC_ZERO ; tain_t deadline, halfinfinite ; PROG = "s6-fdholder-getdump" ; { unsigned int t = 0 ; subgetopt_t l = SUBGETOPT_ZERO ; for (;;) { int opt = subgetopt_r(argc, argv, "t:", &l) ; if (opt == -1) break ; switch (opt) { case 't' : if (!uint0_scan(l.arg, &t)) dieusage() ; break ; default : dieusage() ; } } argc -= l.ind ; argv += l.ind ; if (t) tain_from_millisecs(&deadline, t) ; else deadline = tain_infinite_relative ; } if (argc < 2) dieusage() ; tain_now_set_stopwatch_g() ; tain_add_g(&deadline, &deadline) ; if (!s6_fdholder_start_g(&a, argv[0], &deadline)) strerr_diefu2sys(111, "connect to a fd-holder daemon at ", argv[0]) ; if (!s6_fdholder_getdump_g(&a, &dump, &deadline)) strerr_diefu1sys(1, "get dump") ; s6_fdholder_end(&a) ; tain_half(&halfinfinite, &tain_infinite_relative) ; tain_add_g(&halfinfinite, &halfinfinite) ; { size_t n = genalloc_len(s6_fdholder_fd_t, &dump) ; size_t pos = 0 ; unsigned int i = 0 ; char modifs[7 + UINT_FMT + (25 + TIMESTAMP + 4 * UINT_FMT) * n] ; if (n > UINT_MAX) strerr_dief1x(100, "dump exceeds maximum size") ; memcpy(modifs + pos, "S6_FD#=", 7) ; pos += 7 ; pos += uint_fmt(modifs + pos, n) ; modifs[pos++] = 0 ; for (; i < n ; i++) { s6_fdholder_fd_t *p = genalloc_s(s6_fdholder_fd_t, &dump) + i ; size_t len = strlen(p->id) + 1 ; if (uncoe(p->fd) < 0) strerr_diefu1sys(111, "uncoe") ; memcpy(modifs + pos, "S6_FD_", 6) ; pos += 6 ; pos += uint_fmt(modifs + pos, i) ; modifs[pos++] = '=' ; pos += uint_fmt(modifs + pos, p->fd) ; modifs[pos++] = 0 ; memcpy(modifs + pos, "S6_FDID_", 8) ; pos += 8 ; pos += uint_fmt(modifs + pos, i) ; modifs[pos++] = '=' ; memcpy(modifs + pos, p->id, len) ; pos += len ; memcpy(modifs + pos, "S6_FDLIMIT_", 11) ; pos += 11 ; pos += uint_fmt(modifs + pos, i) ; if (tain_less(&p->limit, &halfinfinite)) { modifs[pos++] = '=' ; pos += timestamp_fmt(modifs + pos, &p->limit) ; } modifs[pos++] = 0 ; } xpathexec_r(argv+1, envp, env_len(envp), modifs, pos) ; } }
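/*
 * Illustrative sketch (not part of the original file): how a child program
 * spawned by s6-fdholder-getdump could walk the environment written above.
 * Only the variable names set in main() (S6_FD#, S6_FD_n) are assumed.
 */
#include <stdio.h>
#include <stdlib.h>

static void example_list_fds (void)
{
  char name[6 + UINT_FMT] ;
  char const *x = getenv("S6_FD#") ;
  unsigned int n = x ? (unsigned int)strtoul(x, 0, 10) : 0 ;
  unsigned int i = 0 ;
  for (; i < n ; i++)
  {
    snprintf(name, sizeof(name), "S6_FD_%u", i) ;
    x = getenv(name) ;
    if (x) printf("slot %u holds fd %s\n", i, x) ;
  }
}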
307310.c
/* * The MIT License (MIT) * * Copyright (c) 2015-2020 Alexander Chumakov * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include "mcode-config.h" #include "cmd-iface.h" #include "main.h" #include "mtick.h" #include "utils.h" #include "hw-wdt.h" #include "system.h" #include "mglobal.h" #include "mparser.h" #include "mstatus.h" #include "mstring.h" #include "scheduler.h" #include "cmd-engine.h" CMD_IMPL("ut", TheUt, "Show uptime", cmd_system_ut, NULL, 0); CMD_IMPL("errno", TheErrno, "Print/reset error code", cmd_system_errno, NULL, 0); CMD_IMPL("sleep", TheSleep, "Sleep for <milli-seconds>", cmd_system_sleep, NULL, 0); CMD_IMPL("reboot", TheReboot, "Reboot system", cmd_system_reboot, NULL, 0); CMD_IMPL("poweroff", ThePoweroff, "Power off system", cmd_system_poweroff, NULL, 0); #ifdef MCODE_ID CMD_IMPL("uid", TheId, "Show device ID", cmd_system_id, NULL, 0); #endif /* MCODE_ID */ bool cmd_system_ut(const TCmdData *data, const char *args, size_t args_len, bool *start_cmd) { #ifdef MCODE_WDT mprintstr(PSTR("Reset reason: 0x")); mprint_uint8(wdt_reset_reason(), false); mprint(MStringNewLine); #endif /* MCODE_WDT */ const uint64_t tickCount = mtick_count(); uint64_t count = tickCount; const uint32_t milliSeconds = count%1000; count = count/1000; const uint32_t seconds = count%60; count = count/60; const uint32_t minutes = count%60; count = count/60; const uint32_t hours = count%24; count = count/24; const uint32_t days = count; mprintstr(PSTR("Uptime: 0x")); mprint_uint64(tickCount, true); mprintstr(PSTR("; days: ")); mprint_uintd(days, 0); mprintstr(PSTR(", hours: ")); mprint_uintd(hours, 0); mprintstr(PSTR(", minutes: ")); mprint_uintd(minutes, 0); mprintstr(PSTR(", seconds: ")); mprint_uintd(seconds, 0); mprintstr(PSTR(", milli-seconds: ")); mprint_uintd(milliSeconds, 0); mprint(MStringNewLine); return true; } bool cmd_system_errno(const TCmdData *data, const char *args, size_t args_len, bool *start_cmd) { mprintstr(PSTR("errno: ")); mprint_uintd(mcode_errno(), 0); mcode_errno_set(ESuccess); mprint(MStringNewLine); return true; } bool cmd_system_sleep(const TCmdData *data, const char *args, size_t args_len, bool *start_cmd) { uint32_t value; TokenType type; const char *token = NULL; type = next_token(&args, &args_len, &token, &value); if (TokenInt != type || value > 10000) { /* Wrong argument or requested to sleep for too long * (more than 10 seconds, need to check if this is ok) */ mcode_errno_set(EArgument); return true; } /* Request to sleep synchronously */ mtick_sleep(value); return true; } 
bool cmd_system_reboot(const TCmdData *data, const char *args, size_t args_len, bool *start_cmd) { reboot(); *start_cmd = false; return true; } bool cmd_system_poweroff(const TCmdData *data, const char *args, size_t args_len, bool *start_cmd) { #ifndef __linux__ scheduler_stop(); #else /* !__linux__ */ main_request_exit(); #endif /* !__linux__ */ *start_cmd = false; return true; } #ifdef MCODE_ID bool cmd_system_id(const TCmdData *data, const char *args, size_t args_len, bool *start_cmd) { mprint_uint32(mcode_id(0), false); mprint_uint32(mcode_id(1), false); mprint_uint32(mcode_id(2), false); mprint(MStringNewLine); return true; } #endif /* MCODE_ID */
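/*
 * Illustrative sketch (not part of the original file above): the "ut" command
 * decomposes a 64-bit millisecond tick counter with successive modulo/divide
 * steps. The same decomposition is shown standalone here so the arithmetic can
 * be checked against a known value; the helper name is hypothetical and is not
 * part of the mcode project.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void decompose_uptime(uint64_t ms)
{
    uint32_t milli = (uint32_t)(ms % 1000); ms /= 1000; /* milliseconds */
    uint32_t sec   = (uint32_t)(ms % 60);   ms /= 60;   /* seconds */
    uint32_t min   = (uint32_t)(ms % 60);   ms /= 60;   /* minutes */
    uint32_t hour  = (uint32_t)(ms % 24);   ms /= 24;   /* hours; quotient is days */
    printf("days: %" PRIu64 ", hours: %" PRIu32 ", minutes: %" PRIu32
           ", seconds: %" PRIu32 ", milliseconds: %" PRIu32 "\n",
           ms, hour, min, sec, milli);
}

int main(void)
{
    /* 90061001 ms = 1 day, 1 hour, 1 minute, 1 second, 1 millisecond */
    decompose_uptime(90061001ULL);
    return 0;
}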
932821.c
// Copyright 2018 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <string.h> #include <stdint.h> #include <limits.h> #include <sys/param.h> #include "esp_attr.h" #include "esp_log.h" #include "rom/cache.h" #include "rom/efuse.h" #include "rom/ets_sys.h" #include "rom/spi_flash.h" #include "rom/crc.h" #include "rom/rtc.h" #include "rom/uart.h" #include "rom/gpio.h" #include "rom/secure_boot.h" #include "soc/soc.h" #include "soc/cpu.h" #include "soc/rtc.h" #include "soc/dport_reg.h" #include "soc/io_mux_reg.h" #include "soc/efuse_reg.h" #include "soc/rtc_cntl_reg.h" #include "soc/timer_group_reg.h" #include "soc/gpio_reg.h" #include "soc/gpio_sig_map.h" #include "sdkconfig.h" #include "esp_image_format.h" #include "esp_secure_boot.h" #include "esp_flash_encrypt.h" #include "esp_flash_partitions.h" #include "bootloader_flash.h" #include "bootloader_random.h" #include "bootloader_config.h" #include "bootloader_common.h" #include "bootloader_utility.h" #include "bootloader_sha.h" #include "esp_efuse.h" static const char* TAG = "boot"; /* Reduce literal size for some generic string literals */ #define MAP_ERR_MSG "Image contains multiple %s segments. Only the last one will be mapped." static bool ota_has_initial_contents; static void load_image(const esp_image_metadata_t* image_data); static void unpack_load_app(const esp_image_metadata_t *data); static void set_cache_and_start_app(uint32_t drom_addr, uint32_t drom_load_addr, uint32_t drom_size, uint32_t irom_addr, uint32_t irom_load_addr, uint32_t irom_size, uint32_t entry_addr); // Read ota_info partition and fill array from two otadata structures. 
static esp_err_t read_otadata(const esp_partition_pos_t *ota_info, esp_ota_select_entry_t *two_otadata) { const esp_ota_select_entry_t *ota_select_map; if (ota_info->offset == 0) { return ESP_ERR_NOT_FOUND; } // partition table has OTA data partition if (ota_info->size < 2 * SPI_SEC_SIZE) { ESP_LOGE(TAG, "ota_info partition size %d is too small (minimum %d bytes)", ota_info->size, sizeof(esp_ota_select_entry_t)); return ESP_FAIL; // can't proceed } ESP_LOGD(TAG, "OTA data offset 0x%x", ota_info->offset); ota_select_map = bootloader_mmap(ota_info->offset, ota_info->size); if (!ota_select_map) { ESP_LOGE(TAG, "bootloader_mmap(0x%x, 0x%x) failed", ota_info->offset, ota_info->size); return ESP_FAIL; // can't proceed } memcpy(&two_otadata[0], ota_select_map, sizeof(esp_ota_select_entry_t)); memcpy(&two_otadata[1], (uint8_t *)ota_select_map + SPI_SEC_SIZE, sizeof(esp_ota_select_entry_t)); bootloader_munmap(ota_select_map); return ESP_OK; } bool bootloader_utility_load_partition_table(bootloader_state_t* bs) { const esp_partition_info_t *partitions; const char *partition_usage; esp_err_t err; int num_partitions; partitions = bootloader_mmap(ESP_PARTITION_TABLE_OFFSET, ESP_PARTITION_TABLE_MAX_LEN); if (!partitions) { ESP_LOGE(TAG, "bootloader_mmap(0x%x, 0x%x) failed", ESP_PARTITION_TABLE_OFFSET, ESP_PARTITION_TABLE_MAX_LEN); return false; } ESP_LOGD(TAG, "mapped partition table 0x%x at 0x%x", ESP_PARTITION_TABLE_OFFSET, (intptr_t)partitions); err = esp_partition_table_verify(partitions, true, &num_partitions); if (err != ESP_OK) { ESP_LOGE(TAG, "Failed to verify partition table"); return false; } ESP_LOGI(TAG, "Partition Table:"); ESP_LOGI(TAG, "## Label Usage Type ST Offset Length"); for(int i = 0; i < num_partitions; i++) { const esp_partition_info_t *partition = &partitions[i]; ESP_LOGD(TAG, "load partition table entry 0x%x", (intptr_t)partition); ESP_LOGD(TAG, "type=%x subtype=%x", partition->type, partition->subtype); partition_usage = "unknown"; /* valid partition table */ switch(partition->type) { case PART_TYPE_APP: /* app partition */ switch(partition->subtype) { case PART_SUBTYPE_FACTORY: /* factory binary */ bs->factory = partition->pos; partition_usage = "factory app"; break; case PART_SUBTYPE_TEST: /* test binary */ bs->test = partition->pos; partition_usage = "test app"; break; default: /* OTA binary */ if ((partition->subtype & ~PART_SUBTYPE_OTA_MASK) == PART_SUBTYPE_OTA_FLAG) { bs->ota[partition->subtype & PART_SUBTYPE_OTA_MASK] = partition->pos; ++bs->app_count; partition_usage = "OTA app"; } else { partition_usage = "Unknown app"; } break; } break; /* PART_TYPE_APP */ case PART_TYPE_DATA: /* data partition */ switch(partition->subtype) { case PART_SUBTYPE_DATA_OTA: /* ota data */ bs->ota_info = partition->pos; partition_usage = "OTA data"; break; case PART_SUBTYPE_DATA_RF: partition_usage = "RF data"; break; case PART_SUBTYPE_DATA_WIFI: partition_usage = "WiFi data"; break; case PART_SUBTYPE_DATA_NVS_KEYS: partition_usage = "NVS keys"; break; case PART_SUBTYPE_DATA_EFUSE_EM: partition_usage = "efuse"; #ifdef CONFIG_EFUSE_SECURE_VERSION_EMULATE esp_efuse_init(partition->pos.offset, partition->pos.size); #endif break; default: partition_usage = "Unknown data"; break; } break; /* PARTITION_USAGE_DATA */ default: /* other partition type */ break; } /* print partition type info */ ESP_LOGI(TAG, "%2d %-16s %-16s %02x %02x %08x %08x", i, partition->label, partition_usage, partition->type, partition->subtype, partition->pos.offset, partition->pos.size); } bootloader_munmap(partitions); 
ESP_LOGI(TAG,"End of partition table"); return true; } /* Given a partition index, return the partition position data from the bootloader_state_t structure */ static esp_partition_pos_t index_to_partition(const bootloader_state_t *bs, int index) { if (index == FACTORY_INDEX) { return bs->factory; } if (index == TEST_APP_INDEX) { return bs->test; } if (index >= 0 && index < MAX_OTA_SLOTS && index < bs->app_count) { return bs->ota[index]; } esp_partition_pos_t invalid = { 0 }; return invalid; } static void log_invalid_app_partition(int index) { const char *not_bootable = " is not bootable"; /* save a few string literal bytes */ switch(index) { case FACTORY_INDEX: ESP_LOGE(TAG, "Factory app partition%s", not_bootable); break; case TEST_APP_INDEX: ESP_LOGE(TAG, "Factory test app partition%s", not_bootable); break; default: ESP_LOGE(TAG, "OTA app partition slot %d%s", index, not_bootable); break; } } static esp_err_t write_otadata(esp_ota_select_entry_t *otadata, uint32_t offset, bool write_encrypted) { esp_err_t err = bootloader_flash_erase_sector(offset / FLASH_SECTOR_SIZE); if (err == ESP_OK) { err = bootloader_flash_write(offset, otadata, sizeof(esp_ota_select_entry_t), write_encrypted); } if (err != ESP_OK) { ESP_LOGE(TAG, "Error in write_otadata operation. err = 0x%x", err); } return err; } static bool check_anti_rollback(const esp_partition_pos_t *partition) { #ifdef CONFIG_APP_ANTI_ROLLBACK esp_app_desc_t app_desc; esp_err_t err = bootloader_common_get_partition_description(partition, &app_desc); return err == ESP_OK && esp_efuse_check_secure_version(app_desc.secure_version) == true; #else return true; #endif } #ifdef CONFIG_APP_ANTI_ROLLBACK static void update_anti_rollback(const esp_partition_pos_t *partition) { esp_app_desc_t app_desc; esp_err_t err = bootloader_common_get_partition_description(partition, &app_desc); if (err == ESP_OK) { esp_efuse_update_secure_version(app_desc.secure_version); } } static int get_active_otadata_with_check_anti_rollback(const bootloader_state_t *bs, esp_ota_select_entry_t *two_otadata) { uint32_t ota_seq; uint32_t ota_slot; bool valid_otadata[2]; valid_otadata[0] = bootloader_common_ota_select_valid(&two_otadata[0]); valid_otadata[1] = bootloader_common_ota_select_valid(&two_otadata[1]); bool sec_ver_valid_otadata[2] = { 0 }; for (int i = 0; i < 2; ++i) { if (valid_otadata[i] == true) { ota_seq = two_otadata[i].ota_seq - 1; // Raw OTA sequence number. May be more than # of OTA slots ota_slot = ota_seq % bs->app_count; // Actual OTA partition selection if (check_anti_rollback(&bs->ota[ota_slot]) == false) { // invalid. This otadata[i] will not be selected as active. ESP_LOGD(TAG, "OTA slot %d has an app with secure_version, this version is smaller than in the device. 
This OTA slot will not be selected.", ota_slot); } else { sec_ver_valid_otadata[i] = true; } } } return bootloader_common_select_otadata(two_otadata, sec_ver_valid_otadata, true); } #endif int bootloader_utility_get_selected_boot_partition(const bootloader_state_t *bs) { esp_ota_select_entry_t otadata[2]; int boot_index = FACTORY_INDEX; if (bs->ota_info.offset == 0) { return FACTORY_INDEX; } if (read_otadata(&bs->ota_info, otadata) != ESP_OK) { return INVALID_INDEX; } ota_has_initial_contents = false; ESP_LOGD(TAG, "otadata[0]: sequence values 0x%08x", otadata[0].ota_seq); ESP_LOGD(TAG, "otadata[1]: sequence values 0x%08x", otadata[1].ota_seq); #ifdef CONFIG_APP_ROLLBACK_ENABLE bool write_encrypted = esp_flash_encryption_enabled(); for (int i = 0; i < 2; ++i) { if (otadata[i].ota_state == ESP_OTA_IMG_PENDING_VERIFY) { ESP_LOGD(TAG, "otadata[%d] is marking as ABORTED", i); otadata[i].ota_state = ESP_OTA_IMG_ABORTED; write_otadata(&otadata[i], bs->ota_info.offset + FLASH_SECTOR_SIZE * i, write_encrypted); } } #endif #ifndef CONFIG_APP_ANTI_ROLLBACK if ((bootloader_common_ota_select_invalid(&otadata[0]) && bootloader_common_ota_select_invalid(&otadata[1])) || bs->app_count == 0) { ESP_LOGD(TAG, "OTA sequence numbers both empty (all-0xFF) or partition table does not have bootable ota_apps (app_count=%d)", bs->app_count); if (bs->factory.offset != 0) { ESP_LOGI(TAG, "Defaulting to factory image"); boot_index = FACTORY_INDEX; } else { ESP_LOGI(TAG, "No factory image, trying OTA 0"); boot_index = 0; // Try to boot from ota_0. if ((otadata[0].ota_seq == UINT32_MAX || otadata[0].crc != bootloader_common_ota_select_crc(&otadata[0])) && (otadata[1].ota_seq == UINT32_MAX || otadata[1].crc != bootloader_common_ota_select_crc(&otadata[1]))) { // Factory is not found and both otadata are initial(0xFFFFFFFF) or incorrect crc. // will set correct ota_seq. ota_has_initial_contents = true; } } } else { int active_otadata = bootloader_common_get_active_otadata(otadata); #else ESP_LOGI(TAG, "Enabled a check secure version of app for anti rollback"); ESP_LOGI(TAG, "Secure version (from eFuse) = %d", esp_efuse_read_secure_version()); // When CONFIG_APP_ANTI_ROLLBACK is enabled factory partition should not be in partition table, only two ota_app are there. if ((otadata[0].ota_seq == UINT32_MAX || otadata[0].crc != bootloader_common_ota_select_crc(&otadata[0])) && (otadata[1].ota_seq == UINT32_MAX || otadata[1].crc != bootloader_common_ota_select_crc(&otadata[1]))) { ESP_LOGI(TAG, "otadata[0..1] in initial state"); // both otadata are initial(0xFFFFFFFF) or incorrect crc. // will set correct ota_seq. ota_has_initial_contents = true; } else { int active_otadata = get_active_otadata_with_check_anti_rollback(bs, otadata); #endif if (active_otadata != -1) { ESP_LOGD(TAG, "Active otadata[%d]", active_otadata); uint32_t ota_seq = otadata[active_otadata].ota_seq - 1; // Raw OTA sequence number. 
May be more than # of OTA slots boot_index = ota_seq % bs->app_count; // Actual OTA partition selection ESP_LOGD(TAG, "Mapping seq %d -> OTA slot %d", ota_seq, boot_index); #ifdef CONFIG_APP_ROLLBACK_ENABLE if (otadata[active_otadata].ota_state == ESP_OTA_IMG_NEW) { ESP_LOGI(TAG, "otadata[%d] is selected as new and marked PENDING_VERIFY state", active_otadata); otadata[active_otadata].ota_state = ESP_OTA_IMG_PENDING_VERIFY; write_otadata(&otadata[active_otadata], bs->ota_info.offset + FLASH_SECTOR_SIZE * active_otadata, write_encrypted); } else if (otadata[active_otadata].ota_state == ESP_OTA_IMG_VALID) { #ifdef CONFIG_BOOTLOADER_OTA_NO_FORCE_ROLLBACK /* Firmware image is valid, check if, passive partition should be completely erased */ int passive_otadata = !active_otadata; if (otadata[passive_otadata].crc == bootloader_common_ota_select_crc(&otadata[passive_otadata])) { esp_partition_pos_t part; part = index_to_partition(bs, ((otadata[passive_otadata].ota_seq - 1) % bs->app_count)); ESP_LOGW(TAG, "Deleting passive firmware @0x%x len:0x%x", part.offset, part.size); /* Delete contents of other partition and its entry from `otadata` */ esp_err_t ret = bootloader_flash_erase_range(part.offset, part.size); if (ret != ESP_OK) { ESP_LOGE(TAG, "flash partition erase failed"); } memset(&otadata[passive_otadata], 0xff, sizeof(esp_ota_select_entry_t)); write_otadata(&otadata[passive_otadata], bs->ota_info.offset + FLASH_SECTOR_SIZE * passive_otadata, write_encrypted); } #endif // CONFIG_BOOTLOADER_OTA_NO_FORCE_ROLLBACK } ESP_LOGI(TAG, "ota rollback check done"); #endif // CONFIG_APP_ROLLBACK_ENABLE #ifdef CONFIG_APP_ANTI_ROLLBACK if(otadata[active_otadata].ota_state == ESP_OTA_IMG_VALID) { update_anti_rollback(&bs->ota[boot_index]); } #endif // CONFIG_APP_ANTI_ROLLBACK } else if (bs->factory.offset != 0) { ESP_LOGE(TAG, "ota data partition invalid, falling back to factory"); boot_index = FACTORY_INDEX; } else { ESP_LOGE(TAG, "ota data partition invalid and no factory, will try all partitions"); boot_index = FACTORY_INDEX; } } return boot_index; } /* Return true if a partition has a valid app image that was successfully loaded */ static bool try_load_partition(const esp_partition_pos_t *partition, esp_image_metadata_t *data) { if (partition->size == 0) { ESP_LOGD(TAG, "Can't boot from zero-length partition"); return false; } #ifdef BOOTLOADER_BUILD if (bootloader_load_image(partition, data) == ESP_OK) { ESP_LOGI(TAG, "Loaded app from partition at offset 0x%x", partition->offset); return true; } #endif return false; } // ota_has_initial_contents flag is set if factory does not present in partition table and // otadata has initial content(0xFFFFFFFF), then set actual ota_seq. 
static void set_actual_ota_seq(const bootloader_state_t *bs, int index) { if (index > FACTORY_INDEX && ota_has_initial_contents == true) { esp_ota_select_entry_t otadata; memset(&otadata, 0xFF, sizeof(otadata)); otadata.ota_seq = index + 1; otadata.ota_state = ESP_OTA_IMG_VALID; otadata.crc = bootloader_common_ota_select_crc(&otadata); bool write_encrypted = esp_flash_encryption_enabled(); write_otadata(&otadata, bs->ota_info.offset + FLASH_SECTOR_SIZE * 0, write_encrypted); ESP_LOGI(TAG, "Set actual ota_seq=%d in otadata[0]", otadata.ota_seq); #ifdef CONFIG_APP_ANTI_ROLLBACK update_anti_rollback(&bs->ota[index]); #endif } } #define TRY_LOG_FORMAT "Trying partition index %d offs 0x%x size 0x%x" void bootloader_utility_load_boot_image(const bootloader_state_t *bs, int start_index) { int index = start_index; esp_partition_pos_t part; esp_image_metadata_t image_data; if(start_index == TEST_APP_INDEX) { if (try_load_partition(&bs->test, &image_data)) { load_image(&image_data); } else { ESP_LOGE(TAG, "No bootable test partition in the partition table"); bootloader_reset(); } } /* work backwards from start_index, down to the factory app */ for(index = start_index; index >= FACTORY_INDEX; index--) { part = index_to_partition(bs, index); if (part.size == 0) { continue; } ESP_LOGD(TAG, TRY_LOG_FORMAT, index, part.offset, part.size); if (check_anti_rollback(&part) && try_load_partition(&part, &image_data)) { set_actual_ota_seq(bs, index); load_image(&image_data); } log_invalid_app_partition(index); } /* failing that work forwards from start_index, try valid OTA slots */ for(index = start_index + 1; index < bs->app_count; index++) { part = index_to_partition(bs, index); if (part.size == 0) { continue; } ESP_LOGD(TAG, TRY_LOG_FORMAT, index, part.offset, part.size); if (check_anti_rollback(&part) && try_load_partition(&part, &image_data)) { set_actual_ota_seq(bs, index); load_image(&image_data); } log_invalid_app_partition(index); } if (try_load_partition(&bs->test, &image_data)) { ESP_LOGW(TAG, "Falling back to test app as only bootable partition"); load_image(&image_data); } ESP_LOGE(TAG, "No bootable app partitions in the partition table"); bzero(&image_data, sizeof(esp_image_metadata_t)); bootloader_reset(); } // Copy loaded segments to RAM, set up caches for mapped segments, and start application. static void load_image(const esp_image_metadata_t* image_data) { /** * Rough steps for a first boot, when encryption and secure boot are both disabled: * 1) Generate secure boot key and write to EFUSE. * 2) Write plaintext digest based on plaintext bootloader * 3) Generate flash encryption key and write to EFUSE. * 4) Encrypt flash in-place including bootloader, then digest, * then app partitions and other encrypted partitions * 5) Burn EFUSE to enable flash encryption (FLASH_CRYPT_CNT) * 6) Burn EFUSE to enable secure boot (ABS_DONE_0) * * If power failure happens during Step 1, probably the next boot will continue from Step 2. * There is some small chance that EFUSEs will be part-way through being written so will be * somehow corrupted here. Thankfully this window of time is very small, but if that's the * case, one has to use the espefuse tool to manually set the remaining bits and enable R/W * protection. Once the relevant EFUSE bits are set and R/W protected, Step 1 will be skipped * successfully on further reboots. * * If power failure happens during Step 2, Step 1 will be skipped and Step 2 repeated: * the digest will get re-written on the next boot. 
* * If power failure happens during Step 3, it's possible that EFUSE was partially written * with the generated flash encryption key, though the time window for that would again * be very small. On reboot, Step 1 will be skipped and Step 2 repeated, though, Step 3 * may fail due to the above mentioned reason, in which case, one has to use the espefuse * tool to manually set the remaining bits and enable R/W protection. Once the relevant EFUSE * bits are set and R/W protected, Step 3 will be skipped successfully on further reboots. * * If power failure happens after start of 4 and before end of 5, the next boot will fail * (bootloader header is encrypted and flash encryption isn't enabled yet, so it looks like * noise to the ROM bootloader). The check in the ROM is pretty basic so if the first byte of * ciphertext happens to be the magic byte E9 then it may try to boot, but it will definitely * crash (no chance that the remaining ciphertext will look like a valid bootloader image). * Only solution is to reflash with all plaintext and the whole process starts again: skips * Step 1, repeats Step 2, skips Step 3, etc. * * If power failure happens after 5 but before 6, the device will reboot with flash * encryption on and will regenerate an encrypted digest in Step 2. This should still * be valid as the input data for the digest is read via flash cache (so will be decrypted) * and the code in secure_boot_generate() tells bootloader_flash_write() to encrypt the data * on write if flash encryption is enabled. Steps 3 - 5 are skipped (encryption already on), * then Step 6 enables secure boot. */ #if defined(CONFIG_SECURE_BOOT_ENABLED) || defined(CONFIG_FLASH_ENCRYPTION_ENABLED) esp_err_t err; #endif #ifdef CONFIG_SECURE_BOOT_ENABLED /* Steps 1 & 2 (see above for full description): * 1) Generate secure boot EFUSE key * 2) Compute digest of plaintext bootloader */ err = esp_secure_boot_generate_digest(); if (err != ESP_OK) { ESP_LOGE(TAG, "Bootloader digest generation for secure boot failed (%d).", err); return; } #endif #ifdef CONFIG_FLASH_ENCRYPTION_ENABLED /* Steps 3, 4 & 5 (see above for full description): * 3) Generate flash encryption EFUSE key * 4) Encrypt flash contents * 5) Burn EFUSE to enable flash encryption */ ESP_LOGI(TAG, "Checking flash encryption..."); bool flash_encryption_enabled = esp_flash_encryption_enabled(); err = esp_flash_encrypt_check_and_update(); if (err != ESP_OK) { ESP_LOGE(TAG, "Flash encryption check failed (%d).", err); return; } #endif #ifdef CONFIG_SECURE_BOOT_ENABLED /* Step 6 (see above for full description): * 6) Burn EFUSE to enable secure boot */ ESP_LOGI(TAG, "Checking secure boot..."); err = esp_secure_boot_permanently_enable(); if (err != ESP_OK) { ESP_LOGE(TAG, "FAILED TO ENABLE SECURE BOOT (%d).", err); /* Panic here as secure boot is not properly enabled due to one of the reasons in above function */ abort(); } #endif #ifdef CONFIG_FLASH_ENCRYPTION_ENABLED if (!flash_encryption_enabled && esp_flash_encryption_enabled()) { /* Flash encryption was just enabled for the first time, so issue a system reset to ensure flash encryption cache resets properly */ ESP_LOGI(TAG, "Resetting with flash encryption enabled..."); bootloader_reset(); } #endif ESP_LOGI(TAG, "Disabling RNG early entropy source..."); bootloader_random_disable(); // copy loaded segments to RAM, set up caches for mapped segments, and start application unpack_load_app(image_data); } static void unpack_load_app(const esp_image_metadata_t* data) { uint32_t drom_addr = 0; uint32_t drom_load_addr = 
0; uint32_t drom_size = 0; uint32_t irom_addr = 0; uint32_t irom_load_addr = 0; uint32_t irom_size = 0; // Find DROM & IROM addresses, to configure cache mappings for (int i = 0; i < data->image.segment_count; i++) { const esp_image_segment_header_t *header = &data->segments[i]; if (header->load_addr >= SOC_DROM_LOW && header->load_addr < SOC_DROM_HIGH) { if (drom_addr != 0) { ESP_LOGE(TAG, MAP_ERR_MSG, "DROM"); } else { ESP_LOGD(TAG, "Mapping segment %d as %s", i, "DROM"); } drom_addr = data->segment_data[i]; drom_load_addr = header->load_addr; drom_size = header->data_len; } if (header->load_addr >= SOC_IROM_LOW && header->load_addr < SOC_IROM_HIGH) { if (irom_addr != 0) { ESP_LOGE(TAG, MAP_ERR_MSG, "IROM"); } else { ESP_LOGD(TAG, "Mapping segment %d as %s", i, "IROM"); } irom_addr = data->segment_data[i]; irom_load_addr = header->load_addr; irom_size = header->data_len; } } ESP_LOGD(TAG, "calling set_cache_and_start_app"); set_cache_and_start_app(drom_addr, drom_load_addr, drom_size, irom_addr, irom_load_addr, irom_size, data->image.entry_addr); } static void set_cache_and_start_app( uint32_t drom_addr, uint32_t drom_load_addr, uint32_t drom_size, uint32_t irom_addr, uint32_t irom_load_addr, uint32_t irom_size, uint32_t entry_addr) { int rc; ESP_LOGD(TAG, "configure drom and irom and start"); Cache_Read_Disable( 0 ); Cache_Flush( 0 ); /* Clear the MMU entries that are already set up, so the new app only has the mappings it creates. */ for (int i = 0; i < DPORT_FLASH_MMU_TABLE_SIZE; i++) { DPORT_PRO_FLASH_MMU_TABLE[i] = DPORT_FLASH_MMU_TABLE_INVALID_VAL; } uint32_t drom_load_addr_aligned = drom_load_addr & MMU_FLASH_MASK; uint32_t drom_page_count = bootloader_cache_pages_to_map(drom_size, drom_load_addr); ESP_LOGV(TAG, "d mmu set paddr=%08x vaddr=%08x size=%d n=%d", drom_addr & MMU_FLASH_MASK, drom_load_addr_aligned, drom_size, drom_page_count); rc = cache_flash_mmu_set(0, 0, drom_load_addr_aligned, drom_addr & MMU_FLASH_MASK, 64, drom_page_count); ESP_LOGV(TAG, "rc=%d", rc); rc = cache_flash_mmu_set(1, 0, drom_load_addr_aligned, drom_addr & MMU_FLASH_MASK, 64, drom_page_count); ESP_LOGV(TAG, "rc=%d", rc); uint32_t irom_load_addr_aligned = irom_load_addr & MMU_FLASH_MASK; uint32_t irom_page_count = bootloader_cache_pages_to_map(irom_size, irom_load_addr); ESP_LOGV(TAG, "i mmu set paddr=%08x vaddr=%08x size=%d n=%d", irom_addr & MMU_FLASH_MASK, irom_load_addr_aligned, irom_size, irom_page_count); rc = cache_flash_mmu_set(0, 0, irom_load_addr_aligned, irom_addr & MMU_FLASH_MASK, 64, irom_page_count); ESP_LOGV(TAG, "rc=%d", rc); rc = cache_flash_mmu_set(1, 0, irom_load_addr_aligned, irom_addr & MMU_FLASH_MASK, 64, irom_page_count); ESP_LOGV(TAG, "rc=%d", rc); DPORT_REG_CLR_BIT( DPORT_PRO_CACHE_CTRL1_REG, (DPORT_PRO_CACHE_MASK_IRAM0) | (DPORT_PRO_CACHE_MASK_IRAM1 & 0) | (DPORT_PRO_CACHE_MASK_IROM0 & 0) | DPORT_PRO_CACHE_MASK_DROM0 | DPORT_PRO_CACHE_MASK_DRAM1 ); DPORT_REG_CLR_BIT( DPORT_APP_CACHE_CTRL1_REG, (DPORT_APP_CACHE_MASK_IRAM0) | (DPORT_APP_CACHE_MASK_IRAM1 & 0) | (DPORT_APP_CACHE_MASK_IROM0 & 0) | DPORT_APP_CACHE_MASK_DROM0 | DPORT_APP_CACHE_MASK_DRAM1 ); Cache_Read_Enable( 0 ); // Application will need to do Cache_Flush(1) and Cache_Read_Enable(1) ESP_LOGD(TAG, "start: 0x%08x", entry_addr); typedef void (*entry_t)(void) __attribute__((noreturn)); entry_t entry = ((entry_t) entry_addr); // TODO: we have used quite a bit of stack at this point. // use "movsp" instruction to reset stack back to where ROM stack starts. 
(*entry)(); } void bootloader_reset(void) { #ifdef BOOTLOADER_BUILD uart_tx_flush(0); /* Ensure any buffered log output is displayed */ uart_tx_flush(1); ets_delay_us(1000); /* Allow last byte to leave FIFO */ REG_WRITE(RTC_CNTL_OPTIONS0_REG, RTC_CNTL_SW_SYS_RST); while (1) { } /* This line will never be reached, used to keep gcc happy */ #else abort(); /* This function should really not be called from application code */ #endif }
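/*
 * Illustrative sketch (not part of the bootloader file above): how the raw
 * OTA sequence number maps to a boot slot. The bootloader keeps two otadata
 * records, selects the one with a valid CRC and the highest ota_seq, and
 * boots slot (ota_seq - 1) % app_count. This standalone model uses a
 * hypothetical struct; only the selection arithmetic mirrors the code above
 * (bootloader_common_select_otadata handles more edge cases).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t ota_seq;
    bool     crc_ok;   /* stands in for bootloader_common_ota_select_valid() */
} otadata_model_t;

static int select_boot_slot(const otadata_model_t two[2], int app_count)
{
    int active = -1;
    uint32_t best = 0;
    for (int i = 0; i < 2; i++) {
        /* skip erased (all-0xFF) entries and entries with a bad CRC */
        if (two[i].crc_ok && two[i].ota_seq != UINT32_MAX && two[i].ota_seq >= best) {
            best = two[i].ota_seq;
            active = i;
        }
    }
    if (active < 0 || app_count <= 0) {
        return -1;                 /* caller falls back to the factory image */
    }
    return (int)((two[active].ota_seq - 1) % (uint32_t)app_count);
}

int main(void)
{
    otadata_model_t two[2] = { { 5, true }, { 4, true } };
    /* seq 5 with two OTA slots -> (5 - 1) % 2 = slot 0 */
    printf("boot slot: %d\n", select_boot_slot(two, 2));
    return 0;
}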
320456.c
#include <stdio.h>

/* Reads pairs (m, n) until EOF and prints m + n, computed by
 * repeated increment. */
int main(void)
{
    int m, n, i;

    if (freopen("input.txt", "r", stdin) == NULL)
        return 1;                     /* input file missing */

    /* Stop on EOF or malformed input (the original `~scanf` idiom
     * only stopped on EOF and would spin forever on bad input). */
    while (scanf("%d %d", &m, &n) == 2) {
        for (i = 0; i < n; i++)
            m++;                      /* after the loop, m holds m0 + n */
        printf("%d\n", m);
    }
    return 0;
}
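/* Note: the loop above computes m + n by repeated increment; a direct
 * "m += n;" would produce the same output in constant time. */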
160604.c
/** * \file * * \brief SAM D1x Clock Driver * * Copyright (C) 2014-2018 Atmel Corporation. All rights reserved. * * \asf_license_start * * \page License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. The name of Atmel may not be used to endorse or promote products derived * from this software without specific prior written permission. * * 4. This software may only be redistributed and used in connection with an * Atmel microcontroller product. * * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE * EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * \asf_license_stop * */ /* * Support and FAQ: visit <a href="http://www.atmel.com/design-support/">Atmel Support</a> */ #include <clock.h> #include <conf_clocks.h> #include <system.h> /** * \internal * \brief DFLL-specific data container. */ struct _system_clock_dfll_config { uint32_t control; uint32_t val; uint32_t mul; }; /** * \internal * \brief DPLL-specific data container. */ struct _system_clock_dpll_config { uint32_t frequency; }; /** * \internal * \brief XOSC-specific data container. */ struct _system_clock_xosc_config { uint32_t frequency; }; /** * \internal * \brief System clock module data container. */ struct _system_clock_module { volatile struct _system_clock_dfll_config dfll; #ifdef FEATURE_SYSTEM_CLOCK_DPLL volatile struct _system_clock_dpll_config dpll; #endif volatile struct _system_clock_xosc_config xosc; volatile struct _system_clock_xosc_config xosc32k; }; /** * \internal * \brief Internal module instance to cache configuration values. */ static struct _system_clock_module _system_clock_inst = { .dfll = { .control = 0, .val = 0, .mul = 0, }, #ifdef FEATURE_SYSTEM_CLOCK_DPLL .dpll = { .frequency = 0, }, #endif .xosc = { .frequency = 0, }, .xosc32k = { .frequency = 0, }, }; /** * \internal * \brief Wait for sync to the DFLL control registers. */ static inline void _system_dfll_wait_for_sync(void) { while (!(SYSCTRL->PCLKSR.reg & SYSCTRL_PCLKSR_DFLLRDY)) { /* Wait for DFLL sync */ } } /** * \internal * \brief Wait for sync to the OSC32K control registers. 
*/ static inline void _system_osc32k_wait_for_sync(void) { while (!(SYSCTRL->PCLKSR.reg & SYSCTRL_PCLKSR_OSC32KRDY)) { /* Wait for OSC32K sync */ } } static inline void _system_clock_source_dfll_set_config_errata_9905(void) { /* Disable ONDEMAND mode while writing configurations */ SYSCTRL->DFLLCTRL.reg = SYSCTRL_DFLLCTRL_ENABLE; _system_dfll_wait_for_sync(); SYSCTRL->DFLLMUL.reg = _system_clock_inst.dfll.mul; SYSCTRL->DFLLVAL.reg = _system_clock_inst.dfll.val; /* Write full configuration to DFLL control register */ SYSCTRL->DFLLCTRL.reg = 0; _system_dfll_wait_for_sync(); SYSCTRL->DFLLCTRL.reg = _system_clock_inst.dfll.control; } /** * \brief Retrieve the frequency of a clock source. * * Determines the current operating frequency of a given clock source. * * \param[in] clock_source Clock source to get the frequency * * \returns Frequency of the given clock source, in Hz. */ uint32_t system_clock_source_get_hz( const enum system_clock_source clock_source) { switch (clock_source) { case SYSTEM_CLOCK_SOURCE_XOSC: return _system_clock_inst.xosc.frequency; case SYSTEM_CLOCK_SOURCE_OSC8M: return 8000000UL >> SYSCTRL->OSC8M.bit.PRESC; case SYSTEM_CLOCK_SOURCE_OSC32K: return 32768UL; case SYSTEM_CLOCK_SOURCE_ULP32K: return 32768UL; case SYSTEM_CLOCK_SOURCE_XOSC32K: return _system_clock_inst.xosc32k.frequency; case SYSTEM_CLOCK_SOURCE_DFLL: /* Check if the DFLL has been configured */ if (!(_system_clock_inst.dfll.control & SYSCTRL_DFLLCTRL_ENABLE)) return 0; /* Make sure that the DFLL module is ready */ _system_dfll_wait_for_sync(); /* Check if operating in closed loop (USB) mode */ switch(_system_clock_inst.dfll.control & (SYSCTRL_DFLLCTRL_MODE | SYSCTRL_DFLLCTRL_USBCRM)) { case SYSCTRL_DFLLCTRL_MODE: return system_gclk_chan_get_hz(SYSCTRL_GCLK_ID_DFLL48) * (_system_clock_inst.dfll.mul & 0xffff); default: return 48000000UL; } #ifdef FEATURE_SYSTEM_CLOCK_DPLL case SYSTEM_CLOCK_SOURCE_DPLL: if (!(SYSCTRL->DPLLSTATUS.reg & SYSCTRL_DPLLSTATUS_ENABLE)) { return 0; } return _system_clock_inst.dpll.frequency; #endif default: return 0; } } /** * \brief Configure the internal OSC8M oscillator clock source. * * Configures the 8MHz (nominal) internal RC oscillator with the given * configuration settings. * * \param[in] config OSC8M configuration structure containing the new config */ void system_clock_source_osc8m_set_config( struct system_clock_source_osc8m_config *const config) { SYSCTRL_OSC8M_Type temp = SYSCTRL->OSC8M; /* Use temporary struct to reduce register access */ temp.bit.PRESC = config->prescaler; temp.bit.ONDEMAND = config->on_demand; temp.bit.RUNSTDBY = config->run_in_standby; SYSCTRL->OSC8M = temp; } /** * \brief Configure the internal OSC32K oscillator clock source. * * Configures the 32KHz (nominal) internal RC oscillator with the given * configuration settings. * * \param[in] config OSC32K configuration structure containing the new config */ void system_clock_source_osc32k_set_config( struct system_clock_source_osc32k_config *const config) { SYSCTRL_OSC32K_Type temp = SYSCTRL->OSC32K; /* Update settings via a temporary struct to reduce register access */ temp.bit.EN1K = config->enable_1khz_output; temp.bit.EN32K = config->enable_32khz_output; temp.bit.STARTUP = config->startup_time; temp.bit.ONDEMAND = config->on_demand; temp.bit.RUNSTDBY = config->run_in_standby; temp.bit.WRTLOCK = config->write_once; SYSCTRL->OSC32K = temp; } /** * \brief Configure the external oscillator clock source. * * Configures the external oscillator clock source with the given configuration * settings. 
* * \param[in] config External oscillator configuration structure containing * the new config */ void system_clock_source_xosc_set_config( struct system_clock_source_xosc_config *const config) { SYSCTRL_XOSC_Type temp = SYSCTRL->XOSC; temp.bit.STARTUP = config->startup_time; if (config->external_clock == SYSTEM_CLOCK_EXTERNAL_CRYSTAL) { temp.bit.XTALEN = 1; } else { temp.bit.XTALEN = 0; } temp.bit.AMPGC = config->auto_gain_control; /* Set gain */ if (config->frequency <= 2000000) { temp.bit.GAIN = 0; } else if (config->frequency <= 4000000) { temp.bit.GAIN = 1; } else if (config->frequency <= 8000000) { temp.bit.GAIN = 2; } else if (config->frequency <= 16000000) { temp.bit.GAIN = 3; } else if (config->frequency <= 32000000) { temp.bit.GAIN = 4; } temp.bit.ONDEMAND = config->on_demand; temp.bit.RUNSTDBY = config->run_in_standby; /* Store XOSC frequency for internal use */ _system_clock_inst.xosc.frequency = config->frequency; SYSCTRL->XOSC = temp; } /** * \brief Configure the XOSC32K external 32KHz oscillator clock source. * * Configures the external 32KHz oscillator clock source with the given * configuration settings. * * \param[in] config XOSC32K configuration structure containing the new config */ void system_clock_source_xosc32k_set_config( struct system_clock_source_xosc32k_config *const config) { SYSCTRL_XOSC32K_Type temp = SYSCTRL->XOSC32K; temp.bit.STARTUP = config->startup_time; if (config->external_clock == SYSTEM_CLOCK_EXTERNAL_CRYSTAL) { temp.bit.XTALEN = 1; } else { temp.bit.XTALEN = 0; } temp.bit.AAMPEN = config->auto_gain_control; temp.bit.EN1K = config->enable_1khz_output; temp.bit.EN32K = config->enable_32khz_output; temp.bit.ONDEMAND = config->on_demand; temp.bit.RUNSTDBY = config->run_in_standby; temp.bit.WRTLOCK = config->write_once; /* Cache the new frequency in case the user needs to check the current * operating frequency later */ _system_clock_inst.xosc32k.frequency = config->frequency; SYSCTRL->XOSC32K = temp; } /** * \brief Configure the DFLL clock source. * * Configures the Digital Frequency Locked Loop clock source with the given * configuration settings. * * \note The DFLL will be running when this function returns, as the DFLL module * needs to be enabled in order to perform the module configuration. 
* * \param[in] config DFLL configuration structure containing the new config */ void system_clock_source_dfll_set_config( struct system_clock_source_dfll_config *const config) { _system_clock_inst.dfll.val = SYSCTRL_DFLLVAL_COARSE(config->coarse_value) | SYSCTRL_DFLLVAL_FINE(config->fine_value); _system_clock_inst.dfll.control = (uint32_t)config->wakeup_lock | (uint32_t)config->stable_tracking | (uint32_t)config->quick_lock | (uint32_t)config->chill_cycle | ((uint32_t)config->on_demand << SYSCTRL_DFLLCTRL_ONDEMAND_Pos); if (config->loop_mode == SYSTEM_CLOCK_DFLL_LOOP_MODE_CLOSED) { _system_clock_inst.dfll.mul = SYSCTRL_DFLLMUL_CSTEP(config->coarse_max_step) | SYSCTRL_DFLLMUL_FSTEP(config->fine_max_step) | SYSCTRL_DFLLMUL_MUL(config->multiply_factor); /* Enable the closed loop mode */ _system_clock_inst.dfll.control |= config->loop_mode; } if (config->loop_mode == SYSTEM_CLOCK_DFLL_LOOP_MODE_USB_RECOVERY) { _system_clock_inst.dfll.mul = SYSCTRL_DFLLMUL_CSTEP(config->coarse_max_step) | SYSCTRL_DFLLMUL_FSTEP(config->fine_max_step) | SYSCTRL_DFLLMUL_MUL(config->multiply_factor); /* Enable the USB recovery mode */ _system_clock_inst.dfll.control |= config->loop_mode | SYSCTRL_DFLLCTRL_MODE | SYSCTRL_DFLLCTRL_BPLCKC; } } #ifdef FEATURE_SYSTEM_CLOCK_DPLL /** * \brief Configure the DPLL clock source. * * Configures the Digital Phase-Locked Loop clock source with the given * configuration settings. * * \note The DPLL will be running when this function returns, as the DPLL module * needs to be enabled in order to perform the module configuration. * * \param[in] config DPLL configuration structure containing the new config */ void system_clock_source_dpll_set_config( struct system_clock_source_dpll_config *const config) { uint32_t tmpldr; uint8_t tmpldrfrac; uint32_t refclk; refclk = config->reference_frequency; /* Only reference clock REF1 can be divided */ if (config->reference_clock == SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_XOSC) { refclk = refclk / (2 * (config->reference_divider + 1)); } /* Calculate LDRFRAC and LDR */ tmpldr = (config->output_frequency << 4) / refclk; tmpldrfrac = tmpldr & 0x0f; tmpldr = (tmpldr >> 4) - 1; SYSCTRL->DPLLCTRLA.reg = ((uint32_t)config->on_demand << SYSCTRL_DPLLCTRLA_ONDEMAND_Pos) | ((uint32_t)config->run_in_standby << SYSCTRL_DPLLCTRLA_RUNSTDBY_Pos); SYSCTRL->DPLLRATIO.reg = SYSCTRL_DPLLRATIO_LDRFRAC(tmpldrfrac) | SYSCTRL_DPLLRATIO_LDR(tmpldr); SYSCTRL->DPLLCTRLB.reg = SYSCTRL_DPLLCTRLB_DIV(config->reference_divider) | ((uint32_t)config->lock_bypass << SYSCTRL_DPLLCTRLB_LBYPASS_Pos) | SYSCTRL_DPLLCTRLB_LTIME(config->lock_time) | SYSCTRL_DPLLCTRLB_REFCLK(config->reference_clock) | ((uint32_t)config->wake_up_fast << SYSCTRL_DPLLCTRLB_WUF_Pos) | ((uint32_t)config->low_power_enable << SYSCTRL_DPLLCTRLB_LPEN_Pos) | SYSCTRL_DPLLCTRLB_FILTER(config->filter); /* * Fck = Fckrx * (LDR + 1 + LDRFRAC / 16) */ _system_clock_inst.dpll.frequency = (refclk * (((tmpldr + 1) << 4) + tmpldrfrac)) >> 4; } #endif /** * \brief Writes the calibration values for a given oscillator clock source. * * Writes an oscillator calibration value to the given oscillator control * registers. The acceptable ranges are: * * For OSC32K: * - 7 bits (maximum value 128) * For OSC8MHZ: * - 8 bits (maximum value 255) * For OSCULP: * - 5 bits (maximum value 32) * * \note The frequency range parameter applies only when configuring the 8MHz * oscillator and will be ignored for the other oscillators. 
* * \param[in] clock_source Clock source to calibrate * \param[in] calibration_value Calibration value to write * \param[in] freq_range Frequency range (8MHz oscillator only) * * \retval STATUS_OK The calibration value was written * successfully. * \retval STATUS_ERR_INVALID_ARG The setting is not valid for selected clock * source. */ enum status_code system_clock_source_write_calibration( const enum system_clock_source clock_source, const uint16_t calibration_value, const uint8_t freq_range) { switch (clock_source) { case SYSTEM_CLOCK_SOURCE_OSC8M: if (calibration_value > 0xfff || freq_range > 4) { return STATUS_ERR_INVALID_ARG; } SYSCTRL->OSC8M.bit.CALIB = calibration_value; SYSCTRL->OSC8M.bit.FRANGE = freq_range; break; case SYSTEM_CLOCK_SOURCE_OSC32K: if (calibration_value > 128) { return STATUS_ERR_INVALID_ARG; } _system_osc32k_wait_for_sync(); SYSCTRL->OSC32K.bit.CALIB = calibration_value; break; case SYSTEM_CLOCK_SOURCE_ULP32K: if (calibration_value > 32) { return STATUS_ERR_INVALID_ARG; } SYSCTRL->OSCULP32K.bit.CALIB = calibration_value; break; default: Assert(false); return STATUS_ERR_INVALID_ARG; break; } return STATUS_OK; } /** * \brief Enables a clock source. * * Enables a clock source which has been previously configured. * * \param[in] clock_source Clock source to enable * * \retval STATUS_OK Clock source was enabled successfully and * is ready * \retval STATUS_ERR_INVALID_ARG The clock source is not available on this * device */ enum status_code system_clock_source_enable( const enum system_clock_source clock_source) { switch (clock_source) { case SYSTEM_CLOCK_SOURCE_OSC8M: SYSCTRL->OSC8M.reg |= SYSCTRL_OSC8M_ENABLE; return STATUS_OK; case SYSTEM_CLOCK_SOURCE_OSC32K: SYSCTRL->OSC32K.reg |= SYSCTRL_OSC32K_ENABLE; break; case SYSTEM_CLOCK_SOURCE_XOSC: SYSCTRL->XOSC.reg |= SYSCTRL_XOSC_ENABLE; break; case SYSTEM_CLOCK_SOURCE_XOSC32K: SYSCTRL->XOSC32K.reg |= SYSCTRL_XOSC32K_ENABLE; break; case SYSTEM_CLOCK_SOURCE_DFLL: _system_clock_inst.dfll.control |= SYSCTRL_DFLLCTRL_ENABLE; _system_clock_source_dfll_set_config_errata_9905(); break; #ifdef FEATURE_SYSTEM_CLOCK_DPLL case SYSTEM_CLOCK_SOURCE_DPLL: SYSCTRL->DPLLCTRLA.reg |= SYSCTRL_DPLLCTRLA_ENABLE; break; #endif case SYSTEM_CLOCK_SOURCE_ULP32K: /* Always enabled */ return STATUS_OK; default: Assert(false); return STATUS_ERR_INVALID_ARG; } return STATUS_OK; } /** * \brief Disables a clock source. * * Disables a clock source that was previously enabled. 
* * \param[in] clock_source Clock source to disable * * \retval STATUS_OK Clock source was disabled successfully * \retval STATUS_ERR_INVALID_ARG An invalid or unavailable clock source was * given */ enum status_code system_clock_source_disable( const enum system_clock_source clock_source) { switch (clock_source) { case SYSTEM_CLOCK_SOURCE_OSC8M: SYSCTRL->OSC8M.reg &= ~SYSCTRL_OSC8M_ENABLE; break; case SYSTEM_CLOCK_SOURCE_OSC32K: SYSCTRL->OSC32K.reg &= ~SYSCTRL_OSC32K_ENABLE; break; case SYSTEM_CLOCK_SOURCE_XOSC: SYSCTRL->XOSC.reg &= ~SYSCTRL_XOSC_ENABLE; break; case SYSTEM_CLOCK_SOURCE_XOSC32K: SYSCTRL->XOSC32K.reg &= ~SYSCTRL_XOSC32K_ENABLE; break; case SYSTEM_CLOCK_SOURCE_DFLL: _system_clock_inst.dfll.control &= ~SYSCTRL_DFLLCTRL_ENABLE; SYSCTRL->DFLLCTRL.reg = _system_clock_inst.dfll.control; break; #ifdef FEATURE_SYSTEM_CLOCK_DPLL case SYSTEM_CLOCK_SOURCE_DPLL: SYSCTRL->DPLLCTRLA.reg &= ~SYSCTRL_DPLLCTRLA_ENABLE; break; #endif case SYSTEM_CLOCK_SOURCE_ULP32K: /* Not possible to disable */ default: Assert(false); return STATUS_ERR_INVALID_ARG; } return STATUS_OK; } /** * \brief Checks if a clock source is ready. * * Checks if a given clock source is ready to be used. * * \param[in] clock_source Clock source to check if ready * * \returns Ready state of the given clock source. * * \retval true Clock source is enabled and ready * \retval false Clock source is disabled or not yet ready */ bool system_clock_source_is_ready( const enum system_clock_source clock_source) { uint32_t mask = 0; switch (clock_source) { case SYSTEM_CLOCK_SOURCE_OSC8M: mask = SYSCTRL_PCLKSR_OSC8MRDY; break; case SYSTEM_CLOCK_SOURCE_OSC32K: mask = SYSCTRL_PCLKSR_OSC32KRDY; break; case SYSTEM_CLOCK_SOURCE_XOSC: mask = SYSCTRL_PCLKSR_XOSCRDY; break; case SYSTEM_CLOCK_SOURCE_XOSC32K: mask = SYSCTRL_PCLKSR_XOSC32KRDY; break; case SYSTEM_CLOCK_SOURCE_DFLL: if (CONF_CLOCK_DFLL_LOOP_MODE == SYSTEM_CLOCK_DFLL_LOOP_MODE_CLOSED) { mask = (SYSCTRL_PCLKSR_DFLLRDY | SYSCTRL_PCLKSR_DFLLLCKF | SYSCTRL_PCLKSR_DFLLLCKC); } else { mask = SYSCTRL_PCLKSR_DFLLRDY; } break; #ifdef FEATURE_SYSTEM_CLOCK_DPLL case SYSTEM_CLOCK_SOURCE_DPLL: return ((SYSCTRL->DPLLSTATUS.reg & (SYSCTRL_DPLLSTATUS_CLKRDY | SYSCTRL_DPLLSTATUS_LOCK)) == (SYSCTRL_DPLLSTATUS_CLKRDY | SYSCTRL_DPLLSTATUS_LOCK)); #endif case SYSTEM_CLOCK_SOURCE_ULP32K: /* Not possible to disable */ return true; default: return false; } return ((SYSCTRL->PCLKSR.reg & mask) == mask); } /* Include some checks for conf_clocks.h validation */ #include "clock_config_check.h" #if !defined(__DOXYGEN__) /** \internal * * Configures a Generic Clock Generator with the configuration from \c conf_clocks.h. */ # define _CONF_CLOCK_GCLK_CONFIG(n, unused) \ if (CONF_CLOCK_GCLK_##n##_ENABLE == true) { \ struct system_gclk_gen_config gclk_conf; \ system_gclk_gen_get_config_defaults(&gclk_conf); \ gclk_conf.source_clock = CONF_CLOCK_GCLK_##n##_CLOCK_SOURCE; \ gclk_conf.division_factor = CONF_CLOCK_GCLK_##n##_PRESCALER; \ gclk_conf.run_in_standby = CONF_CLOCK_GCLK_##n##_RUN_IN_STANDBY; \ gclk_conf.output_enable = CONF_CLOCK_GCLK_##n##_OUTPUT_ENABLE; \ system_gclk_gen_set_config(GCLK_GENERATOR_##n, &gclk_conf); \ system_gclk_gen_enable(GCLK_GENERATOR_##n); \ } /** \internal * * Configures a Generic Clock Generator with the configuration from \c conf_clocks.h, * provided that it is not the main Generic Clock Generator channel. 
*/ # define _CONF_CLOCK_GCLK_CONFIG_NONMAIN(n, unused) \ if (n > 0) { _CONF_CLOCK_GCLK_CONFIG(n, unused); } #endif /** \internal * * Switch all peripheral clock to a not enabled general clock * to save power. */ static void _switch_peripheral_gclk(void) { uint32_t gclk_id; struct system_gclk_chan_config gclk_conf; #if CONF_CLOCK_GCLK_1_ENABLE == false gclk_conf.source_generator = GCLK_GENERATOR_1; #elif CONF_CLOCK_GCLK_2_ENABLE == false gclk_conf.source_generator = GCLK_GENERATOR_2; #elif CONF_CLOCK_GCLK_3_ENABLE == false gclk_conf.source_generator = GCLK_GENERATOR_3; #elif CONF_CLOCK_GCLK_4_ENABLE == false gclk_conf.source_generator = GCLK_GENERATOR_4; #elif CONF_CLOCK_GCLK_5_ENABLE == false gclk_conf.source_generator = GCLK_GENERATOR_5; #else gclk_conf.source_generator = GCLK_GENERATOR_5; #endif for (gclk_id = 0; gclk_id < GCLK_NUM; gclk_id++) { system_gclk_chan_set_config(gclk_id, &gclk_conf); } } /** * \brief Initialize clock system based on the configuration in conf_clocks.h. * * This function will apply the settings in conf_clocks.h when run from the user * application. All clock sources and GCLK generators are running when this function * returns. * * \note OSC8M is always enabled and if user selects other clocks for GCLK generators, * the OSC8M default enable can be disabled after system_clock_init. Make sure the * clock switch successfully before disabling OSC8M. */ void system_clock_init(void) { /* Various bits in the INTFLAG register can be set to one at startup. This will ensure that these bits are cleared */ SYSCTRL->INTFLAG.reg = SYSCTRL_INTFLAG_BOD33RDY | SYSCTRL_INTFLAG_BOD33DET | SYSCTRL_INTFLAG_DFLLRDY; system_flash_set_waitstates(CONF_CLOCK_FLASH_WAIT_STATES); /* Switch all peripheral clock to a not enabled general clock to save power. 
*/ _switch_peripheral_gclk(); /* XOSC */ #if CONF_CLOCK_XOSC_ENABLE == true struct system_clock_source_xosc_config xosc_conf; system_clock_source_xosc_get_config_defaults(&xosc_conf); xosc_conf.external_clock = CONF_CLOCK_XOSC_EXTERNAL_CRYSTAL; xosc_conf.startup_time = CONF_CLOCK_XOSC_STARTUP_TIME; xosc_conf.frequency = CONF_CLOCK_XOSC_EXTERNAL_FREQUENCY; xosc_conf.run_in_standby = CONF_CLOCK_XOSC_RUN_IN_STANDBY; system_clock_source_xosc_set_config(&xosc_conf); system_clock_source_enable(SYSTEM_CLOCK_SOURCE_XOSC); while(!system_clock_source_is_ready(SYSTEM_CLOCK_SOURCE_XOSC)); if (CONF_CLOCK_XOSC_ON_DEMAND || CONF_CLOCK_XOSC_AUTO_GAIN_CONTROL) { SYSCTRL->XOSC.reg |= (CONF_CLOCK_XOSC_ON_DEMAND << SYSCTRL_XOSC_ONDEMAND_Pos) | (CONF_CLOCK_XOSC_AUTO_GAIN_CONTROL << SYSCTRL_XOSC_AMPGC_Pos); } #endif /* XOSC32K */ #if CONF_CLOCK_XOSC32K_ENABLE == true struct system_clock_source_xosc32k_config xosc32k_conf; system_clock_source_xosc32k_get_config_defaults(&xosc32k_conf); xosc32k_conf.frequency = 32768UL; xosc32k_conf.external_clock = CONF_CLOCK_XOSC32K_EXTERNAL_CRYSTAL; xosc32k_conf.startup_time = CONF_CLOCK_XOSC32K_STARTUP_TIME; xosc32k_conf.auto_gain_control = CONF_CLOCK_XOSC32K_AUTO_AMPLITUDE_CONTROL; xosc32k_conf.enable_1khz_output = CONF_CLOCK_XOSC32K_ENABLE_1KHZ_OUPUT; xosc32k_conf.enable_32khz_output = CONF_CLOCK_XOSC32K_ENABLE_32KHZ_OUTPUT; xosc32k_conf.on_demand = false; xosc32k_conf.run_in_standby = CONF_CLOCK_XOSC32K_RUN_IN_STANDBY; system_clock_source_xosc32k_set_config(&xosc32k_conf); system_clock_source_enable(SYSTEM_CLOCK_SOURCE_XOSC32K); while(!system_clock_source_is_ready(SYSTEM_CLOCK_SOURCE_XOSC32K)); if (CONF_CLOCK_XOSC32K_ON_DEMAND) { SYSCTRL->XOSC32K.bit.ONDEMAND = 1; } #endif /* OSCK32K */ #if CONF_CLOCK_OSC32K_ENABLE == true SYSCTRL->OSC32K.bit.CALIB = (*(uint32_t *)FUSES_OSC32K_ADDR >> FUSES_OSC32K_Pos); struct system_clock_source_osc32k_config osc32k_conf; system_clock_source_osc32k_get_config_defaults(&osc32k_conf); osc32k_conf.startup_time = CONF_CLOCK_OSC32K_STARTUP_TIME; osc32k_conf.enable_1khz_output = CONF_CLOCK_OSC32K_ENABLE_1KHZ_OUTPUT; osc32k_conf.enable_32khz_output = CONF_CLOCK_OSC32K_ENABLE_32KHZ_OUTPUT; osc32k_conf.on_demand = CONF_CLOCK_OSC32K_ON_DEMAND; osc32k_conf.run_in_standby = CONF_CLOCK_OSC32K_RUN_IN_STANDBY; system_clock_source_osc32k_set_config(&osc32k_conf); system_clock_source_enable(SYSTEM_CLOCK_SOURCE_OSC32K); #endif /* DFLL Config (Open and Closed Loop) */ #if CONF_CLOCK_DFLL_ENABLE == true struct system_clock_source_dfll_config dfll_conf; system_clock_source_dfll_get_config_defaults(&dfll_conf); dfll_conf.loop_mode = CONF_CLOCK_DFLL_LOOP_MODE; dfll_conf.on_demand = false; /* Using DFLL48M COARSE CAL value from NVM Software Calibration Area Mapping in DFLL.COARSE helps to output a frequency close to 48 MHz.*/ #define NVM_DFLL_COARSE_POS 58 /* DFLL48M Coarse calibration value bit position.*/ #define NVM_DFLL_COARSE_SIZE 6 /* DFLL48M Coarse calibration value bit size.*/ uint32_t coarse =( *((uint32_t *)(NVMCTRL_OTP4) + (NVM_DFLL_COARSE_POS / 32)) >> (NVM_DFLL_COARSE_POS % 32)) & ((1 << NVM_DFLL_COARSE_SIZE) - 1); /* In some revision chip, the coarse calibration value is not correct. 
*/ if (coarse == 0x3f) { coarse = 0x1f; } dfll_conf.coarse_value = coarse; if (CONF_CLOCK_DFLL_LOOP_MODE == SYSTEM_CLOCK_DFLL_LOOP_MODE_OPEN) { dfll_conf.fine_value = CONF_CLOCK_DFLL_FINE_VALUE; } # if CONF_CLOCK_DFLL_QUICK_LOCK == true dfll_conf.quick_lock = SYSTEM_CLOCK_DFLL_QUICK_LOCK_ENABLE; # else dfll_conf.quick_lock = SYSTEM_CLOCK_DFLL_QUICK_LOCK_DISABLE; # endif # if CONF_CLOCK_DFLL_TRACK_AFTER_FINE_LOCK == true dfll_conf.stable_tracking = SYSTEM_CLOCK_DFLL_STABLE_TRACKING_TRACK_AFTER_LOCK; # else dfll_conf.stable_tracking = SYSTEM_CLOCK_DFLL_STABLE_TRACKING_FIX_AFTER_LOCK; # endif # if CONF_CLOCK_DFLL_KEEP_LOCK_ON_WAKEUP == true dfll_conf.wakeup_lock = SYSTEM_CLOCK_DFLL_WAKEUP_LOCK_KEEP; # else dfll_conf.wakeup_lock = SYSTEM_CLOCK_DFLL_WAKEUP_LOCK_LOSE; # endif # if CONF_CLOCK_DFLL_ENABLE_CHILL_CYCLE == true dfll_conf.chill_cycle = SYSTEM_CLOCK_DFLL_CHILL_CYCLE_ENABLE; # else dfll_conf.chill_cycle = SYSTEM_CLOCK_DFLL_CHILL_CYCLE_DISABLE; # endif if (CONF_CLOCK_DFLL_LOOP_MODE == SYSTEM_CLOCK_DFLL_LOOP_MODE_CLOSED) { dfll_conf.multiply_factor = CONF_CLOCK_DFLL_MULTIPLY_FACTOR; } dfll_conf.coarse_max_step = CONF_CLOCK_DFLL_MAX_COARSE_STEP_SIZE; dfll_conf.fine_max_step = CONF_CLOCK_DFLL_MAX_FINE_STEP_SIZE; if (CONF_CLOCK_DFLL_LOOP_MODE == SYSTEM_CLOCK_DFLL_LOOP_MODE_USB_RECOVERY) { dfll_conf.fine_max_step = 10; dfll_conf.fine_value = 0x1ff; dfll_conf.quick_lock = SYSTEM_CLOCK_DFLL_QUICK_LOCK_ENABLE; dfll_conf.stable_tracking = SYSTEM_CLOCK_DFLL_STABLE_TRACKING_TRACK_AFTER_LOCK; dfll_conf.wakeup_lock = SYSTEM_CLOCK_DFLL_WAKEUP_LOCK_KEEP; dfll_conf.chill_cycle = SYSTEM_CLOCK_DFLL_CHILL_CYCLE_DISABLE; dfll_conf.multiply_factor = 48000; } system_clock_source_dfll_set_config(&dfll_conf); #endif /* OSC8M */ struct system_clock_source_osc8m_config osc8m_conf; system_clock_source_osc8m_get_config_defaults(&osc8m_conf); osc8m_conf.prescaler = CONF_CLOCK_OSC8M_PRESCALER; osc8m_conf.on_demand = CONF_CLOCK_OSC8M_ON_DEMAND; osc8m_conf.run_in_standby = CONF_CLOCK_OSC8M_RUN_IN_STANDBY; system_clock_source_osc8m_set_config(&osc8m_conf); system_clock_source_enable(SYSTEM_CLOCK_SOURCE_OSC8M); /* GCLK */ #if CONF_CLOCK_CONFIGURE_GCLK == true system_gclk_init(); /* Configure all GCLK generators except for the main generator, which * is configured later after all other clock systems are set up */ MREPEAT(GCLK_GEN_NUM, _CONF_CLOCK_GCLK_CONFIG_NONMAIN, ~); # if CONF_CLOCK_DFLL_ENABLE == true /* Enable DFLL reference clock if in closed loop mode */ if (CONF_CLOCK_DFLL_LOOP_MODE == SYSTEM_CLOCK_DFLL_LOOP_MODE_CLOSED) { struct system_gclk_chan_config dfll_gclk_chan_conf; system_gclk_chan_get_config_defaults(&dfll_gclk_chan_conf); dfll_gclk_chan_conf.source_generator = CONF_CLOCK_DFLL_SOURCE_GCLK_GENERATOR; system_gclk_chan_set_config(SYSCTRL_GCLK_ID_DFLL48, &dfll_gclk_chan_conf); system_gclk_chan_enable(SYSCTRL_GCLK_ID_DFLL48); } # endif # if CONF_CLOCK_DPLL_ENABLE == true /* Enable DPLL internal lock timer and reference clock */ struct system_gclk_chan_config dpll_gclk_chan_conf; system_gclk_chan_get_config_defaults(&dpll_gclk_chan_conf); if (CONF_CLOCK_DPLL_LOCK_TIME != SYSTEM_CLOCK_SOURCE_DPLL_LOCK_TIME_DEFAULT) { dpll_gclk_chan_conf.source_generator = CONF_CLOCK_DPLL_LOCK_GCLK_GENERATOR; system_gclk_chan_set_config(SYSCTRL_GCLK_ID_FDPLL32K, &dpll_gclk_chan_conf); system_gclk_chan_enable(SYSCTRL_GCLK_ID_FDPLL32K); } if (CONF_CLOCK_DPLL_REFERENCE_CLOCK == SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_GCLK) { dpll_gclk_chan_conf.source_generator = CONF_CLOCK_DPLL_REFERENCE_GCLK_GENERATOR; 
system_gclk_chan_set_config(SYSCTRL_GCLK_ID_FDPLL, &dpll_gclk_chan_conf); system_gclk_chan_enable(SYSCTRL_GCLK_ID_FDPLL); } # endif #endif /* DFLL Enable (Open and Closed Loop) */ #if CONF_CLOCK_DFLL_ENABLE == true system_clock_source_enable(SYSTEM_CLOCK_SOURCE_DFLL); while(!system_clock_source_is_ready(SYSTEM_CLOCK_SOURCE_DFLL)); if (CONF_CLOCK_DFLL_ON_DEMAND) { SYSCTRL->DFLLCTRL.bit.ONDEMAND = 1; } #endif /* DPLL */ #ifdef FEATURE_SYSTEM_CLOCK_DPLL # if (CONF_CLOCK_DPLL_ENABLE == true) /* Enable DPLL reference clock */ if (CONF_CLOCK_DPLL_REFERENCE_CLOCK == SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_XOSC32K) { /* XOSC32K should have been enabled for DPLL_REF0 */ Assert(CONF_CLOCK_XOSC32K_ENABLE); } else if (CONF_CLOCK_DPLL_REFERENCE_CLOCK == SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_XOSC) { /* XOSC should have been enabled for DPLL_REF1 */ Assert(CONF_CLOCK_XOSC_ENABLE); } else if (CONF_CLOCK_DPLL_REFERENCE_CLOCK == SYSTEM_CLOCK_SOURCE_DPLL_REFERENCE_CLOCK_GCLK) { /* GCLK should have been enabled */ Assert(CONF_CLOCK_CONFIGURE_GCLK); } else { Assert(false); } struct system_clock_source_dpll_config dpll_config; system_clock_source_dpll_get_config_defaults(&dpll_config); dpll_config.on_demand = false; dpll_config.run_in_standby = CONF_CLOCK_DPLL_RUN_IN_STANDBY; dpll_config.lock_bypass = CONF_CLOCK_DPLL_LOCK_BYPASS; dpll_config.wake_up_fast = CONF_CLOCK_DPLL_WAKE_UP_FAST; dpll_config.low_power_enable = CONF_CLOCK_DPLL_LOW_POWER_ENABLE; dpll_config.filter = CONF_CLOCK_DPLL_FILTER; dpll_config.lock_time = CONF_CLOCK_DPLL_LOCK_TIME; dpll_config.reference_clock = CONF_CLOCK_DPLL_REFERENCE_CLOCK; dpll_config.reference_frequency = CONF_CLOCK_DPLL_REFERENCE_FREQUENCY; dpll_config.reference_divider = CONF_CLOCK_DPLL_REFERENCE_DIVIDER; dpll_config.output_frequency = CONF_CLOCK_DPLL_OUTPUT_FREQUENCY; system_clock_source_dpll_set_config(&dpll_config); system_clock_source_enable(SYSTEM_CLOCK_SOURCE_DPLL); while(!system_clock_source_is_ready(SYSTEM_CLOCK_SOURCE_DPLL)); if (CONF_CLOCK_DPLL_ON_DEMAND) { SYSCTRL->DPLLCTRLA.bit.ONDEMAND = 1; } # endif #endif /* CPU and BUS clocks */ system_cpu_clock_set_divider(CONF_CLOCK_CPU_DIVIDER); system_apb_clock_set_divider(SYSTEM_CLOCK_APB_APBA, CONF_CLOCK_APBA_DIVIDER); system_apb_clock_set_divider(SYSTEM_CLOCK_APB_APBB, CONF_CLOCK_APBB_DIVIDER); system_apb_clock_set_divider(SYSTEM_CLOCK_APB_APBC, CONF_CLOCK_APBC_DIVIDER); /* GCLK 0 */ #if CONF_CLOCK_CONFIGURE_GCLK == true /* Configure the main GCLK last as it might depend on other generators */ _CONF_CLOCK_GCLK_CONFIG(0, ~); #endif }
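/*
 * Illustrative sketch (not part of the ASF driver above): the DPLL ratio
 * registers encode a 4-bit fixed-point fraction, so
 * Fck = Fref * (LDR + 1 + LDRFRAC/16). This standalone function repeats the
 * LDR/LDRFRAC derivation from system_clock_source_dpll_set_config() so the
 * rounding behaviour can be checked numerically; the names are hypothetical.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t dpll_ratio(uint32_t fout, uint32_t fref,
                           uint32_t *ldr, uint8_t *ldrfrac)
{
    /* Frequency ratio in 1/16 steps (the driver does this in 32 bits,
     * which is safe for fout below ~268 MHz). */
    uint32_t tmp = (uint32_t)(((uint64_t)fout << 4) / fref);
    *ldrfrac = tmp & 0x0f;           /* fractional part, n/16 */
    *ldr     = (tmp >> 4) - 1;       /* integer part, stored minus one */
    /* Achieved output frequency with these register values */
    return (uint32_t)(((uint64_t)fref * (((*ldr + 1) << 4) + *ldrfrac)) >> 4);
}

int main(void)
{
    uint32_t ldr, fck;
    uint8_t frac;
    /* 48 MHz from a 32.768 kHz reference: LDR = 1463, LDRFRAC = 13,
     * achieved frequency 47998976 Hz (about 0.002% low). */
    fck = dpll_ratio(48000000u, 32768u, &ldr, &frac);
    printf("LDR=%" PRIu32 " LDRFRAC=%u Fck=%" PRIu32 " Hz\n",
           ldr, (unsigned)frac, fck);
    return 0;
}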
147142.c
/********************************************************************** * gost89.c * * Copyright (c) 2005-2006 Cryptocom LTD * * This file is distributed under the same license as OpenSSL * * * * Implementation of GOST 28147-89 encryption algorithm * * No OpenSSL libraries required to compile and use * * this code * **********************************************************************/ #include <string.h> #include "gost89.h" /* Substitution blocks from RFC 4357 Note: our implementation of gost 28147-89 algorithm uses S-box matrix rotated 90 degrees counterclockwise, relative to examples given in RFC. */ /* Substitution blocks from test examples for GOST R 34.11-94*/ gost_subst_block GostR3411_94_TestParamSet = { {0X1,0XF,0XD,0X0,0X5,0X7,0XA,0X4,0X9,0X2,0X3,0XE,0X6,0XB,0X8,0XC}, {0XD,0XB,0X4,0X1,0X3,0XF,0X5,0X9,0X0,0XA,0XE,0X7,0X6,0X8,0X2,0XC}, {0X4,0XB,0XA,0X0,0X7,0X2,0X1,0XD,0X3,0X6,0X8,0X5,0X9,0XC,0XF,0XE}, {0X6,0XC,0X7,0X1,0X5,0XF,0XD,0X8,0X4,0XA,0X9,0XE,0X0,0X3,0XB,0X2}, {0X7,0XD,0XA,0X1,0X0,0X8,0X9,0XF,0XE,0X4,0X6,0XC,0XB,0X2,0X5,0X3}, {0X5,0X8,0X1,0XD,0XA,0X3,0X4,0X2,0XE,0XF,0XC,0X7,0X6,0X0,0X9,0XB}, {0XE,0XB,0X4,0XC,0X6,0XD,0XF,0XA,0X2,0X3,0X8,0X1,0X0,0X7,0X5,0X9}, {0X4,0XA,0X9,0X2,0XD,0X8,0X0,0XE,0X6,0XB,0X1,0XC,0X7,0XF,0X5,0X3} }; /* Substitution blocks for hash function 1.2.643.2.9.1.6.1 */ gost_subst_block GostR3411_94_CryptoProParamSet= { {0x1,0x3,0xA,0x9,0x5,0xB,0x4,0xF,0x8,0x6,0x7,0xE,0xD,0x0,0x2,0xC}, {0xD,0xE,0x4,0x1,0x7,0x0,0x5,0xA,0x3,0xC,0x8,0xF,0x6,0x2,0x9,0xB}, {0x7,0x6,0x2,0x4,0xD,0x9,0xF,0x0,0xA,0x1,0x5,0xB,0x8,0xE,0xC,0x3}, {0x7,0x6,0x4,0xB,0x9,0xC,0x2,0xA,0x1,0x8,0x0,0xE,0xF,0xD,0x3,0x5}, {0x4,0xA,0x7,0xC,0x0,0xF,0x2,0x8,0xE,0x1,0x6,0x5,0xD,0xB,0x9,0x3}, {0x7,0xF,0xC,0xE,0x9,0x4,0x1,0x0,0x3,0xB,0x5,0x2,0x6,0xA,0x8,0xD}, {0x5,0xF,0x4,0x0,0x2,0xD,0xB,0x9,0x1,0x7,0x6,0x3,0xC,0xE,0xA,0x8}, {0xA,0x4,0x5,0x6,0x8,0x1,0x3,0x7,0xD,0xC,0xE,0x0,0x9,0x2,0xB,0xF} } ; /* Test paramset from GOST 28147 */ gost_subst_block Gost28147_TestParamSet = { {0xC,0x6,0x5,0x2,0xB,0x0,0x9,0xD,0x3,0xE,0x7,0xA,0xF,0x4,0x1,0x8}, {0x9,0xB,0xC,0x0,0x3,0x6,0x7,0x5,0x4,0x8,0xE,0xF,0x1,0xA,0x2,0xD}, {0x8,0xF,0x6,0xB,0x1,0x9,0xC,0x5,0xD,0x3,0x7,0xA,0x0,0xE,0x2,0x4}, {0x3,0xE,0x5,0x9,0x6,0x8,0x0,0xD,0xA,0xB,0x7,0xC,0x2,0x1,0xF,0x4}, {0xE,0x9,0xB,0x2,0x5,0xF,0x7,0x1,0x0,0xD,0xC,0x6,0xA,0x4,0x3,0x8}, {0xD,0x8,0xE,0xC,0x7,0x3,0x9,0xA,0x1,0x5,0x2,0x4,0x6,0xF,0x0,0xB}, {0xC,0x9,0xF,0xE,0x8,0x1,0x3,0xA,0x2,0x7,0x4,0xD,0x6,0x0,0xB,0x5}, {0x4,0x2,0xF,0x5,0x9,0x1,0x0,0x8,0xE,0x3,0xB,0xC,0xD,0x7,0xA,0x6} }; /* 1.2.643.2.2.31.1 */ gost_subst_block Gost28147_CryptoProParamSetA= { {0xB,0xA,0xF,0x5,0x0,0xC,0xE,0x8,0x6,0x2,0x3,0x9,0x1,0x7,0xD,0x4}, {0x1,0xD,0x2,0x9,0x7,0xA,0x6,0x0,0x8,0xC,0x4,0x5,0xF,0x3,0xB,0xE}, {0x3,0xA,0xD,0xC,0x1,0x2,0x0,0xB,0x7,0x5,0x9,0x4,0x8,0xF,0xE,0x6}, {0xB,0x5,0x1,0x9,0x8,0xD,0xF,0x0,0xE,0x4,0x2,0x3,0xC,0x7,0xA,0x6}, {0xE,0x7,0xA,0xC,0xD,0x1,0x3,0x9,0x0,0x2,0xB,0x4,0xF,0x8,0x5,0x6}, {0xE,0x4,0x6,0x2,0xB,0x3,0xD,0x8,0xC,0xF,0x5,0xA,0x0,0x7,0x1,0x9}, {0x3,0x7,0xE,0x9,0x8,0xA,0xF,0x0,0x5,0x2,0x6,0xC,0xB,0x4,0xD,0x1}, {0x9,0x6,0x3,0x2,0x8,0xB,0x1,0x7,0xA,0x4,0xE,0xF,0xC,0x0,0xD,0x5} }; /* 1.2.643.2.2.31.2 */ gost_subst_block Gost28147_CryptoProParamSetB= { {0x0,0x4,0xB,0xE,0x8,0x3,0x7,0x1,0xA,0x2,0x9,0x6,0xF,0xD,0x5,0xC}, {0x5,0x2,0xA,0xB,0x9,0x1,0xC,0x3,0x7,0x4,0xD,0x0,0x6,0xF,0x8,0xE}, {0x8,0x3,0x2,0x6,0x4,0xD,0xE,0xB,0xC,0x1,0x7,0xF,0xA,0x0,0x9,0x5}, {0x2,0x7,0xC,0xF,0x9,0x5,0xA,0xB,0x1,0x4,0x0,0xD,0x6,0x8,0xE,0x3}, {0x7,0x5,0x0,0xD,0xB,0x6,0x1,0x2,0x3,0xA,0xC,0xF,0x4,0xE,0x9,0x8}, 
{0xE,0xC,0x0,0xA,0x9,0x2,0xD,0xB,0x7,0x5,0x8,0xF,0x3,0x6,0x1,0x4}, {0x0,0x1,0x2,0xA,0x4,0xD,0x5,0xC,0x9,0x7,0x3,0xF,0xB,0x8,0x6,0xE}, {0x8,0x4,0xB,0x1,0x3,0x5,0x0,0x9,0x2,0xE,0xA,0xC,0xD,0x6,0x7,0xF} }; /* 1.2.643.2.2.31.3 */ gost_subst_block Gost28147_CryptoProParamSetC= { {0x7,0x4,0x0,0x5,0xA,0x2,0xF,0xE,0xC,0x6,0x1,0xB,0xD,0x9,0x3,0x8}, {0xA,0x9,0x6,0x8,0xD,0xE,0x2,0x0,0xF,0x3,0x5,0xB,0x4,0x1,0xC,0x7}, {0xC,0x9,0xB,0x1,0x8,0xE,0x2,0x4,0x7,0x3,0x6,0x5,0xA,0x0,0xF,0xD}, {0x8,0xD,0xB,0x0,0x4,0x5,0x1,0x2,0x9,0x3,0xC,0xE,0x6,0xF,0xA,0x7}, {0x3,0x6,0x0,0x1,0x5,0xD,0xA,0x8,0xB,0x2,0x9,0x7,0xE,0xF,0xC,0x4}, {0x8,0x2,0x5,0x0,0x4,0x9,0xF,0xA,0x3,0x7,0xC,0xD,0x6,0xE,0x1,0xB}, {0x0,0x1,0x7,0xD,0xB,0x4,0x5,0x2,0x8,0xE,0xF,0xC,0x9,0xA,0x6,0x3}, {0x1,0xB,0xC,0x2,0x9,0xD,0x0,0xF,0x4,0x5,0x8,0xE,0xA,0x7,0x6,0x3} }; /* 1.2.643.2.2.31.4 */ gost_subst_block Gost28147_CryptoProParamSetD= { {0x1,0xA,0x6,0x8,0xF,0xB,0x0,0x4,0xC,0x3,0x5,0x9,0x7,0xD,0x2,0xE}, {0x3,0x0,0x6,0xF,0x1,0xE,0x9,0x2,0xD,0x8,0xC,0x4,0xB,0xA,0x5,0x7}, {0x8,0x0,0xF,0x3,0x2,0x5,0xE,0xB,0x1,0xA,0x4,0x7,0xC,0x9,0xD,0x6}, {0x0,0xC,0x8,0x9,0xD,0x2,0xA,0xB,0x7,0x3,0x6,0x5,0x4,0xE,0xF,0x1}, {0x1,0x5,0xE,0xC,0xA,0x7,0x0,0xD,0x6,0x2,0xB,0x4,0x9,0x3,0xF,0x8}, {0x1,0xC,0xB,0x0,0xF,0xE,0x6,0x5,0xA,0xD,0x4,0x8,0x9,0x3,0x7,0x2}, {0xB,0x6,0x3,0x4,0xC,0xF,0xE,0x2,0x7,0xD,0x8,0x0,0x5,0xA,0x9,0x1}, {0xF,0xC,0x2,0xA,0x6,0x4,0x5,0x0,0x7,0x9,0xE,0xD,0x1,0xB,0x8,0x3} }; const byte CryptoProKeyMeshingKey[]={ 0x69, 0x00, 0x72, 0x22, 0x64, 0xC9, 0x04, 0x23, 0x8D, 0x3A, 0xDB, 0x96, 0x46, 0xE9, 0x2A, 0xC4, 0x18, 0xFE, 0xAC, 0x94, 0x00, 0xED, 0x07, 0x12, 0xC0, 0x86, 0xDC, 0xC2, 0xEF, 0x4C, 0xA9, 0x2B }; /* Initialization of gost_ctx subst blocks*/ static void kboxinit(gost_ctx *c, const gost_subst_block *b) { int i; for (i = 0; i < 256; i++) { c->k87[i] = (b->k8[i>>4] <<4 | b->k7 [i &15])<<24; c->k65[i] = (b->k6[i>>4] << 4 | b->k5 [i &15])<<16; c->k43[i] = (b->k4[i>>4] <<4 | b->k3 [i &15])<<8; c->k21[i] = b->k2[i>>4] <<4 | b->k1 [i &15]; } } /* Part of GOST 28147 algorithm moved into separate function */ static word32 f(gost_ctx *c,word32 x) { x = c->k87[x>>24 & 255] | c->k65[x>>16 & 255]| c->k43[x>> 8 & 255] | c->k21[x & 255]; /* Rotate left 11 bits */ return x<<11 | x>>(32-11); } /* Low-level encryption routine - encrypts one 64 bit block*/ void gostcrypt(gost_ctx *c, const byte *in, byte *out) { register word32 n1, n2; /* As named in the GOST */ n1 = in[0]|(in[1]<<8)|(in[2]<<16)|(in[3]<<24); n2 = in[4]|(in[5]<<8)|(in[6]<<16)|(in[7]<<24); /* Instead of swapping halves, swap names each round */ n2 ^= f(c,n1+c->k[0]); n1 ^= f(c,n2+c->k[1]); n2 ^= f(c,n1+c->k[2]); n1 ^= f(c,n2+c->k[3]); n2 ^= f(c,n1+c->k[4]); n1 ^= f(c,n2+c->k[5]); n2 ^= f(c,n1+c->k[6]); n1 ^= f(c,n2+c->k[7]); n2 ^= f(c,n1+c->k[0]); n1 ^= f(c,n2+c->k[1]); n2 ^= f(c,n1+c->k[2]); n1 ^= f(c,n2+c->k[3]); n2 ^= f(c,n1+c->k[4]); n1 ^= f(c,n2+c->k[5]); n2 ^= f(c,n1+c->k[6]); n1 ^= f(c,n2+c->k[7]); n2 ^= f(c,n1+c->k[0]); n1 ^= f(c,n2+c->k[1]); n2 ^= f(c,n1+c->k[2]); n1 ^= f(c,n2+c->k[3]); n2 ^= f(c,n1+c->k[4]); n1 ^= f(c,n2+c->k[5]); n2 ^= f(c,n1+c->k[6]); n1 ^= f(c,n2+c->k[7]); n2 ^= f(c,n1+c->k[7]); n1 ^= f(c,n2+c->k[6]); n2 ^= f(c,n1+c->k[5]); n1 ^= f(c,n2+c->k[4]); n2 ^= f(c,n1+c->k[3]); n1 ^= f(c,n2+c->k[2]); n2 ^= f(c,n1+c->k[1]); n1 ^= f(c,n2+c->k[0]); out[0] = (byte)(n2&0xff); out[1] = (byte)((n2>>8)&0xff); out[2] = (byte)((n2>>16)&0xff); out[3]=(byte)(n2>>24); out[4] = (byte)(n1&0xff); out[5] = (byte)((n1>>8)&0xff); out[6] = (byte)((n1>>16)&0xff); out[7] = (byte)(n1>>24); } /* Low-level decryption 
routine. Decrypts one 64-bit block */
void gostdecrypt(gost_ctx *c, const byte *in, byte *out)
{
    register word32 n1, n2; /* As named in the GOST */
    n1 = in[0]|(in[1]<<8)|(in[2]<<16)|(in[3]<<24);
    n2 = in[4]|(in[5]<<8)|(in[6]<<16)|(in[7]<<24);

    n2 ^= f(c,n1+c->k[0]); n1 ^= f(c,n2+c->k[1]);
    n2 ^= f(c,n1+c->k[2]); n1 ^= f(c,n2+c->k[3]);
    n2 ^= f(c,n1+c->k[4]); n1 ^= f(c,n2+c->k[5]);
    n2 ^= f(c,n1+c->k[6]); n1 ^= f(c,n2+c->k[7]);

    n2 ^= f(c,n1+c->k[7]); n1 ^= f(c,n2+c->k[6]);
    n2 ^= f(c,n1+c->k[5]); n1 ^= f(c,n2+c->k[4]);
    n2 ^= f(c,n1+c->k[3]); n1 ^= f(c,n2+c->k[2]);
    n2 ^= f(c,n1+c->k[1]); n1 ^= f(c,n2+c->k[0]);

    n2 ^= f(c,n1+c->k[7]); n1 ^= f(c,n2+c->k[6]);
    n2 ^= f(c,n1+c->k[5]); n1 ^= f(c,n2+c->k[4]);
    n2 ^= f(c,n1+c->k[3]); n1 ^= f(c,n2+c->k[2]);
    n2 ^= f(c,n1+c->k[1]); n1 ^= f(c,n2+c->k[0]);

    n2 ^= f(c,n1+c->k[7]); n1 ^= f(c,n2+c->k[6]);
    n2 ^= f(c,n1+c->k[5]); n1 ^= f(c,n2+c->k[4]);
    n2 ^= f(c,n1+c->k[3]); n1 ^= f(c,n2+c->k[2]);
    n2 ^= f(c,n1+c->k[1]); n1 ^= f(c,n2+c->k[0]);

    out[0] = (byte)(n2&0xff); out[1] = (byte)((n2>>8)&0xff);
    out[2] = (byte)((n2>>16)&0xff); out[3] = (byte)(n2>>24);
    out[4] = (byte)(n1&0xff); out[5] = (byte)((n1>>8)&0xff);
    out[6] = (byte)((n1>>16)&0xff); out[7] = (byte)(n1>>24);
}

/* Encrypts several blocks in ECB mode */
void gost_enc(gost_ctx *c, const byte *clear, byte *cipher, int blocks)
{
    int i;
    for(i=0;i<blocks;i++)
    {
        gostcrypt(c,clear,cipher);
        clear+=8;
        cipher+=8;
    }
}

/* Decrypts several blocks in ECB mode */
void gost_dec(gost_ctx *c, const byte *cipher, byte *clear, int blocks)
{
    int i;
    for(i=0;i<blocks;i++)
    {
        gostdecrypt(c,cipher,clear);
        clear+=8;
        cipher+=8;
    }
}

/* Encrypts several full blocks in CFB mode using an 8-byte IV */
void gost_enc_cfb(gost_ctx *ctx, const byte *iv, const byte *clear,
    byte *cipher, int blocks)
{
    byte cur_iv[8];
    byte gamma[8];
    int i,j;
    const byte *in;
    byte *out;
    memcpy(cur_iv,iv,8);
    for(i=0,in=clear,out=cipher;i<blocks;i++,in+=8,out+=8)
    {
        gostcrypt(ctx,cur_iv,gamma);
        for (j=0;j<8;j++)
        {
            cur_iv[j]=out[j]=in[j]^gamma[j];
        }
    }
}

/* Decrypts several full blocks in CFB mode using an 8-byte IV */
void gost_dec_cfb(gost_ctx *ctx, const byte *iv, const byte *cipher,
    byte *clear, int blocks)
{
    byte cur_iv[8];
    byte gamma[8];
    int i,j;
    const byte *in;
    byte *out;
    memcpy(cur_iv,iv,8);
    for(i=0,in=cipher,out=clear;i<blocks;i++,in+=8,out+=8)
    {
        gostcrypt(ctx,cur_iv,gamma);
        for (j=0;j<8;j++)
        {
            out[j]=(cur_iv[j]=in[j])^gamma[j];
        }
    }
}

/* Encrypts one block using specified key */
void gost_enc_with_key(gost_ctx *c, byte *key, byte *inblock, byte *outblock)
{
    gost_key(c,key);
    gostcrypt(c,inblock,outblock);
}

/* Set 256 bit key into context */
void gost_key(gost_ctx *c, const byte *k)
{
    int i,j;
    for(i=0,j=0;i<8;i++,j+=4)
    {
        c->k[i]=k[j]|(k[j+1]<<8)|(k[j+2]<<16)|(k[j+3]<<24);
    }
}

/* Retrieve 256-bit key from context */
void gost_get_key(gost_ctx *c, byte *k)
{
    int i,j;
    for(i=0,j=0;i<8;i++,j+=4)
    {
        k[j]=(byte)(c->k[i]&0xFF);
        k[j+1]=(byte)((c->k[i]>>8)&0xFF);
        k[j+2]=(byte)((c->k[i]>>16)&0xFF);
        k[j+3]=(byte)((c->k[i]>>24)&0xFF);
    }
}

/* Initialize context. Provides default value for subst_block */
void gost_init(gost_ctx *c, const gost_subst_block *b)
{
    if(!b)
    {
        b=&GostR3411_94_TestParamSet;
    }
    kboxinit(c,b);
}

/* Cleans up key from context */
void gost_destroy(gost_ctx *c)
{
    int i;
    for(i=0;i<8;i++) c->k[i]=0;
}

/* Compute GOST 28147 MAC block
 *
 * Parameters
 * gost_ctx *c - context initialized with substitution blocks and key
 * buffer - 8-byte MAC state buffer
 * block - 8-byte block to process.
 */
void mac_block(gost_ctx *c, byte *buffer, const byte *block)
{
    register word32 n1, n2; /* As named in the GOST */
    int i;
    for (i=0; i<8; i++)
    {
        buffer[i]^=block[i];
    }
    n1 = buffer[0]|(buffer[1]<<8)|(buffer[2]<<16)|(buffer[3]<<24);
    n2 = buffer[4]|(buffer[5]<<8)|(buffer[6]<<16)|(buffer[7]<<24);
    /* Instead of swapping halves, swap names each round */
    n2 ^= f(c,n1+c->k[0]); n1 ^= f(c,n2+c->k[1]);
    n2 ^= f(c,n1+c->k[2]); n1 ^= f(c,n2+c->k[3]);
    n2 ^= f(c,n1+c->k[4]); n1 ^= f(c,n2+c->k[5]);
    n2 ^= f(c,n1+c->k[6]); n1 ^= f(c,n2+c->k[7]);

    n2 ^= f(c,n1+c->k[0]); n1 ^= f(c,n2+c->k[1]);
    n2 ^= f(c,n1+c->k[2]); n1 ^= f(c,n2+c->k[3]);
    n2 ^= f(c,n1+c->k[4]); n1 ^= f(c,n2+c->k[5]);
    n2 ^= f(c,n1+c->k[6]); n1 ^= f(c,n2+c->k[7]);

    buffer[0] = (byte)(n1&0xff); buffer[1] = (byte)((n1>>8)&0xff);
    buffer[2] = (byte)((n1>>16)&0xff); buffer[3] = (byte)(n1>>24);
    buffer[4] = (byte)(n2&0xff); buffer[5] = (byte)((n2>>8)&0xff);
    buffer[6] = (byte)((n2>>16)&0xff); buffer[7] = (byte)(n2>>24);
}

/* Get MAC with specified number of bits from MAC state buffer */
void get_mac(byte *buffer, int nbits, byte *out)
{
    int nbytes = nbits >> 3;
    int rembits = nbits & 7;
    int mask = rembits ? ((1 << rembits) - 1) : 0;
    int i;
    for (i=0;i<nbytes;i++) out[i]=buffer[i];
    if (rembits) out[i]=buffer[i]&mask;
}

/* Compute MAC of specified length (in bits) from data.
 * Context should be initialized with key and subst blocks */
int gost_mac(gost_ctx *ctx, int mac_len, const unsigned char *data,
    unsigned int data_len, unsigned char *mac)
{
    byte buffer[8]={0,0,0,0,0,0,0,0};
    byte buf2[8];
    unsigned int i;
    for (i=0;i+8<=data_len;i+=8)
        mac_block(ctx,buffer,data+i);
    if (i<data_len)
    {
        memset(buf2,0,8);
        memcpy(buf2,data+i,data_len-i);
        mac_block(ctx,buffer,buf2);
    }
    get_mac(buffer,mac_len,mac);
    return 1;
}

/* Compute MAC with non-zero IV. Used in some RFC 4357 algorithms */
int gost_mac_iv(gost_ctx *ctx, int mac_len, const unsigned char *iv,
    const unsigned char *data, unsigned int data_len, unsigned char *mac)
{
    byte buffer[8];
    byte buf2[8];
    unsigned int i;
    memcpy (buffer,iv,8);
    for (i=0;i+8<=data_len;i+=8)
        mac_block(ctx,buffer,data+i);
    if (i<data_len)
    {
        memset(buf2,0,8);
        memcpy(buf2,data+i,data_len-i);
        mac_block(ctx,buffer,buf2);
    }
    get_mac(buffer,mac_len,mac);
    return 1;
}

/* Implements key meshing algorithm by modifying ctx and IV in place */
void cryptopro_key_meshing(gost_ctx *ctx, unsigned char *iv)
{
    unsigned char newkey[32],newiv[8];
    /* "Decrypt" the current key with the static key meshing key */
    gost_dec(ctx,CryptoProKeyMeshingKey,newkey,4);
    /* set new key */
    gost_key(ctx,newkey);
    /* Encrypt iv with new key */
    gostcrypt(ctx,iv,newiv);
    memcpy(iv,newiv,8);
}
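/* A minimal usage sketch for the API above, assuming only the functions
 * defined in this file and the byte/gost_ctx types from gost89.h. It keys a
 * context with the default (test) S-boxes, encrypts one 64-bit block, and
 * checks that decryption round-trips. The GOST89_USAGE_SKETCH guard is
 * hypothetical and keeps the sketch out of a normal build. */
#ifdef GOST89_USAGE_SKETCH
#include <assert.h>

static void gost89_roundtrip_example(void)
{
    gost_ctx ctx;
    byte key[32] = {0};                 /* 256-bit key; all-zero for the sketch */
    byte plain[8] = {1,2,3,4,5,6,7,8};  /* one 64-bit block */
    byte enc[8], dec[8];

    gost_init(&ctx, NULL);          /* NULL selects GostR3411_94_TestParamSet */
    gost_key(&ctx, key);
    gostcrypt(&ctx, plain, enc);    /* encrypt one block */
    gostdecrypt(&ctx, enc, dec);    /* decrypt it again */
    assert(memcmp(plain, dec, 8) == 0);
    gost_destroy(&ctx);             /* wipe the key schedule */
}
#endif /* GOST89_USAGE_SKETCH */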
648649.c
/* * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "osTest.h" #include "It_los_swtmr.h" #if (LOSCFG_BASE_CORE_SWTMR_ALIGN == 1) static VOID Case1(UINT32 arg) { ICUNIT_ASSERT_EQUAL_VOID(arg, 0xffff, arg); g_uwsTick1 = LOS_TickCountGet(); g_testCount++; return; } static VOID Case2(UINT32 arg) { ICUNIT_ASSERT_EQUAL_VOID(arg, 0xffff, arg); g_uwsTick2 = LOS_TickCountGet(); g_testCount++; return; } static UINT32 Testcase(VOID) { UINT32 ret; UINT32 swtmrId1; UINT32 swtmrId2; g_testCount = 0; // 20, Timeout interval of a periodic software timer. ret = LOS_SwtmrCreate(20, LOS_SWTMR_MODE_PERIOD, Case1, &swtmrId1, 0xffff, OS_SWTMR_ROUSES_ALLOW, OS_SWTMR_ALIGN_INSENSITIVE); ICUNIT_ASSERT_EQUAL(ret, LOS_OK, ret); // 40, Timeout interval of a periodic software timer. ret = LOS_SwtmrCreate(40, LOS_SWTMR_MODE_PERIOD, Case2, &swtmrId2, 0xffff, OS_SWTMR_ROUSES_ALLOW, OS_SWTMR_ALIGN_INSENSITIVE); ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT); ret = LOS_SwtmrStart(swtmrId1); ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT); ret = LOS_SwtmrStart(swtmrId2); ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT); ret = LOS_TaskDelay(20); // 20, set delay time. ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT); // 2, Here, assert that g_testCount is equal to this . ICUNIT_GOTO_EQUAL(g_testCount, 2, g_testCount, EXIT); ret = LOS_SwtmrDelete(swtmrId1); ICUNIT_GOTO_EQUAL(ret, LOS_OK, ret, EXIT); ret = LOS_SwtmrDelete(swtmrId2); ICUNIT_ASSERT_EQUAL(ret, LOS_OK, ret); ret = LOS_TaskDelay(20); // 20, set delay time. ICUNIT_ASSERT_EQUAL(ret, LOS_OK, ret); // 2, Here, assert that g_testCount is equal to this . ICUNIT_ASSERT_EQUAL(g_testCount, 2, g_testCount); return LOS_OK; EXIT: LOS_SwtmrDelete(swtmrId1); LOS_SwtmrDelete(swtmrId2); return LOS_OK; } VOID ItLosSwtmrAlign018() // IT_Layer_ModuleORFeature_No { TEST_ADD_CASE("ItLosSwtmrAlign018", Testcase, TEST_LOS, TEST_SWTMR, TEST_LEVEL1, TEST_FUNCTION); } #endif
9502.c
#include <flut/flut.h> #include "../../../src/front-end/parser/parse.h" #include "../../../src/front-end/program.h" #include "../../../src/front-end/binding/resolve.h" #include "../../../src/front-end/symtable.h" #include "../../../src/front-end/types/context.h" #include "tests.h" void zenit_test_resolve_block(void) { const char *source = "{" "\n" " var a = 1;" "\n" " {" "\n" " var b = 2;" "\n" " {" "\n" " var c = 3;" "\n" " }" "\n" " }" "\n" "}" "\n" ; const char *program_dump = "(program " "(scope global " "(scope block %L1:C1_block " "(symbol a uint8)" " " "(scope block %L3:C4_block " "(symbol b uint8)" " " "(scope block %L5:C8_block " "(symbol c uint8)" ")" ")" ")" ")" ")" ; zenit_test_resolve_run(source, program_dump, false); }
894016.c
/****************************************************************************
 * libs/libc/modlib/modlib_load.c
 *
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.  The
 * ASF licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the
 * License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 ****************************************************************************/

/****************************************************************************
 * Included Files
 ****************************************************************************/

#include <nuttx/config.h>

#include <sys/types.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <debug.h>

#include <nuttx/lib/modlib.h>

#include "libc.h"
#include "modlib/modlib.h"

/****************************************************************************
 * Pre-processor Definitions
 ****************************************************************************/

#define ELF_ALIGN_MASK   ((1 << CONFIG_MODLIB_ALIGN_LOG2) - 1)
#define ELF_ALIGNUP(a)   (((unsigned long)(a) + ELF_ALIGN_MASK) & ~ELF_ALIGN_MASK)
#define ELF_ALIGNDOWN(a) ((unsigned long)(a) & ~ELF_ALIGN_MASK)

#ifndef MAX
# define MAX(x,y) ((x) > (y) ? (x) : (y))
#endif

#ifndef MIN
# define MIN(x,y) ((x) < (y) ? (x) : (y))
#endif

/****************************************************************************
 * Private Functions
 ****************************************************************************/

/****************************************************************************
 * Name: modlib_elfsize
 *
 * Description:
 *   Calculate total memory allocation for the ELF file.
 *
 * Returned Value:
 *   None.  The computed sizes are saved in loadinfo->textsize and
 *   loadinfo->datasize.
 *
 ****************************************************************************/

static void modlib_elfsize(struct mod_loadinfo_s *loadinfo)
{
  size_t textsize;
  size_t datasize;
  int i;

  /* Accumulate the size of each section that is marked SHF_ALLOC */

  textsize = 0;
  datasize = 0;

  for (i = 0; i < loadinfo->ehdr.e_shnum; i++)
    {
      FAR Elf_Shdr *shdr = &loadinfo->shdr[i];

      /* SHF_ALLOC indicates that the section requires memory during
       * execution.
       */

      if ((shdr->sh_flags & SHF_ALLOC) != 0)
        {
          /* SHF_WRITE indicates that the section address space is write-
           * able
           */

          if ((shdr->sh_flags & SHF_WRITE) != 0)
            {
              datasize += ELF_ALIGNUP(shdr->sh_size);
            }
          else
            {
              textsize += ELF_ALIGNUP(shdr->sh_size);
            }
        }
    }

  /* Save the allocation size */

  loadinfo->textsize = textsize;
  loadinfo->datasize = datasize;
}

/****************************************************************************
 * Name: modlib_loadfile
 *
 * Description:
 *   Read the section data into memory. Section addresses in the shdr[] are
 *   updated to point to the corresponding position in the memory.
 *
 * Returned Value:
 *   0 (OK) is returned on success and a negated errno is returned on
 *   failure.
* ****************************************************************************/ static inline int modlib_loadfile(FAR struct mod_loadinfo_s *loadinfo) { FAR uint8_t *text; FAR uint8_t *data; FAR uint8_t **pptr; int ret; int i; /* Read each section into memory that is marked SHF_ALLOC + SHT_NOBITS */ binfo("Loaded sections:\n"); text = (FAR uint8_t *)loadinfo->textalloc; data = (FAR uint8_t *)loadinfo->datastart; for (i = 0; i < loadinfo->ehdr.e_shnum; i++) { FAR Elf_Shdr *shdr = &loadinfo->shdr[i]; /* SHF_ALLOC indicates that the section requires memory during * execution */ if ((shdr->sh_flags & SHF_ALLOC) == 0) { continue; } /* SHF_WRITE indicates that the section address space is write- * able */ if ((shdr->sh_flags & SHF_WRITE) != 0) { pptr = &data; } else { pptr = &text; } /* SHT_NOBITS indicates that there is no data in the file for the * section. */ if (shdr->sh_type != SHT_NOBITS) { /* Read the section data from sh_offset to the memory region */ ret = modlib_read(loadinfo, *pptr, shdr->sh_size, shdr->sh_offset); if (ret < 0) { berr("ERROR: Failed to read section %d: %d\n", i, ret); return ret; } } /* If there is no data in an allocated section, then the allocated * section must be cleared. */ else { memset(*pptr, 0, shdr->sh_size); } /* Update sh_addr to point to copy in memory */ binfo("%d. %08lx->%08lx\n", i, (unsigned long)shdr->sh_addr, (unsigned long)*pptr); shdr->sh_addr = (uintptr_t)*pptr; /* Setup the memory pointer for the next time through the loop */ *pptr += ELF_ALIGNUP(shdr->sh_size); } return OK; } /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: modlib_load * * Description: * Loads the binary into memory, allocating memory, performing relocations * and initializing the data and bss segments. * * Returned Value: * 0 (OK) is returned on success and a negated errno is returned on * failure. * ****************************************************************************/ int modlib_load(FAR struct mod_loadinfo_s *loadinfo) { int ret; binfo("loadinfo: %p\n", loadinfo); DEBUGASSERT(loadinfo && loadinfo->filfd >= 0); /* Load section headers into memory */ ret = modlib_loadshdrs(loadinfo); if (ret < 0) { berr("ERROR: modlib_loadshdrs failed: %d\n", ret); goto errout_with_buffers; } /* Determine total size to allocate */ modlib_elfsize(loadinfo); /* Allocate (and zero) memory for the ELF file. 
*/ /* Allocate memory to hold the ELF image */ #if defined(CONFIG_ARCH_USE_MODULE_TEXT) if (loadinfo->textsize > 0) { loadinfo->textalloc = (uintptr_t) up_module_text_alloc(loadinfo->textsize); if (!loadinfo->textalloc) { berr("ERROR: Failed to allocate memory for the module text\n"); ret = -ENOMEM; goto errout_with_buffers; } } if (loadinfo->datasize > 0) { loadinfo->datastart = (uintptr_t)lib_malloc(loadinfo->datasize); if (!loadinfo->datastart) { berr("ERROR: Failed to allocate memory for the module data\n"); ret = -ENOMEM; goto errout_with_buffers; } } #else loadinfo->textalloc = (uintptr_t)lib_malloc(loadinfo->textsize + loadinfo->datasize); if (!loadinfo->textalloc) { berr("ERROR: Failed to allocate memory for the module\n"); ret = -ENOMEM; goto errout_with_buffers; } loadinfo->datastart = loadinfo->textalloc + loadinfo->textsize; #endif /* Load ELF section data into memory */ ret = modlib_loadfile(loadinfo); if (ret < 0) { berr("ERROR: modlib_loadfile failed: %d\n", ret); goto errout_with_buffers; } return OK; /* Error exits */ errout_with_buffers: modlib_unload(loadinfo); return ret; }
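/* Worked example of the alignment arithmetic used by modlib_elfsize() above,
 * assuming CONFIG_MODLIB_ALIGN_LOG2 == 2 (4-byte alignment; the actual value
 * is configuration-dependent):
 *
 *   ELF_ALIGN_MASK   = (1 << 2) - 1 = 0x3
 *   ELF_ALIGNUP(5)   = (5 + 3) & ~3 = 8
 *   ELF_ALIGNUP(8)   = (8 + 3) & ~3 = 8   (already aligned)
 *   ELF_ALIGNDOWN(5) = 5 & ~3       = 4
 *
 * So two writable SHF_ALLOC sections of 5 and 9 bytes contribute
 * datasize = 8 + 12 = 20 bytes to the allocation.
 */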
51358.c
/**
 * Copyright (c) 2017-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 */
//*****************************************************************************
// HEADER FILES
//*****************************************************************************
#include "inc/devices/ina226.h"

#include "devices/i2c/threaded_int.h"
#include "inc/common/byteorder.h"
#include "inc/common/global_header.h"
#include "helpers/memory.h"

/*****************************************************************************
 * REGISTER DEFINITIONS
 *****************************************************************************/
#define INA_CONFIGURATION_REG 0x00
#define INA_SHUNTVOLTAGE_REG 0x01
#define INA_BUSVOLTAGE_REG 0x02
#define INA_POWER_REG 0x03
#define INA_CURRENT_REG 0x04
#define INA_CALIBRATION_REG 0x05
#define INA_MASKENABLE_REG 0x06
#define INA_ALERTLIMIT_REG 0x07
#define INA_MANUFACTUREID_REG 0xFE
#define INA_DIEID_REG 0xFF

/* INA226 Device Info */
#define INA226_MANFACTURE_ID 0x5449
#define INA226_DEVICE_ID 0x2260
#define INA226_DEV_VERSION 0x00

/* Configuration Register Bits */
#define INA_CFG_RESET (1 << 15)

/*
 * Conversion of current into Shunt Voltage Register contents and vice versa.
 *
 * First calculate the Current Register Value from the given Current Value
 * ui16rfINARegValue = ui16rfINACurrentLimit/(INA226_CURRENT_LSB);
 * Calculate Shunt Voltage Alert Limit Register Value
 * ui16rfINARegValue = (ui16rfINARegValue * 2048)/INA226_CALIBRATION_REG_VALUE;
 */
#define CURRENT_TO_REG(x) \
    ((2048 * (x / INA226_CURRENT_LSB) / INA226_CAL_REG_VALUE))
#define REG_TO_CURRENT(y) \
    ((y * INA226_CURRENT_LSB * INA226_CAL_REG_VALUE) / 2048)

/*****************************************************************************
 * CONSTANTS DEFINITIONS
 *****************************************************************************/
/* INA226 LSB Values */
#define INA226_VSHUNT_LSB 2.5  /* 2.5uV or 2500nV (uV default) */
#define INA226_VBUS_LSB 1.25   /* 1.25mV or 1250uV (mV default) */
#define INA226_CURRENT_LSB 0.1 /* 0.100mA or 100uA (mA default) */
#define INA226_POWER_LSB 2.5   /* 2.5mW or 2500uW (mW default) */

/* Configure the Configuration register with Number of Samples and Conversion
 * Time for Shunt and Bus Voltage.
 * Min(Default):0x4127; Max: 0x4FFF; Average: 0x476F */
#define INA226_CONFIG_REG_VALUE 0x476F

/* Configure Calibration register with shunt resistor value and current LSB.
   Current_LSB = Maximum Expected Current/2^15
   Current_LSB = 2A/2^15 = 0.00006103515625 = 61uA ~ 100uA
   (Maximum Expected Current = 2A)
   Calibration Register(CAL) = 0.00512/(Current_LSB*RSHUNT)
   CAL = 0.00512/(100uA*2mOhm) = 25600 = 0x6400. (RSHUNT = 2mOhm) */
#define INA226_CAL_REG_VALUE 0x6400

#define INA226_MASKEN_REG_VALUE 0x8001

/*****************************************************************************
 ** FUNCTION NAME : read_ina_reg
 **
 ** DESCRIPTION : Read a 16 bit value from INA226 register.
 **
 ** ARGUMENTS : i2c device, Register address and value
 ** to be read.
** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus read_ina_reg(const INA226_Dev *dev, uint8_t regAddress, uint16_t *regValue) { ReturnStatus status = RETURN_NOTOK; I2C_Handle inaHandle = i2c_get_handle(dev->cfg.dev.bus); if (!inaHandle) { LOGGER_ERROR("INASENSOR:ERROR:: Failed to get I2C Bus for INA sensor " "0x%x on bus 0x%x.\n", dev->cfg.dev.slave_addr, dev->cfg.dev.bus); } else { status = i2c_reg_read(inaHandle, dev->cfg.dev.slave_addr, regAddress, regValue, 2); *regValue = betoh16(*regValue); } return status; } /***************************************************************************** ** FUNCTION NAME : write_ina_reg ** ** DESCRIPTION : Write 16 bit value to INA226 register. ** ** ARGUMENTS : i2c device, Register address and value ** to be written. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus write_ina_reg(const INA226_Dev *dev, uint8_t regAddress, uint16_t regValue) { ReturnStatus status = RETURN_NOTOK; I2C_Handle inaHandle = i2c_get_handle(dev->cfg.dev.bus); if (!inaHandle) { LOGGER_ERROR("INASENSOR:ERROR:: Failed to get I2C Bus for INA sensor " "0x%x on bus 0x%x.\n", dev->cfg.dev.slave_addr, dev->cfg.dev.bus); } else { regValue = htobe16(regValue); status = i2c_reg_write(inaHandle, dev->cfg.dev.slave_addr, regAddress, regValue, 2); } return status; } /***************************************************************************** ** FUNCTION NAME : curr_sens_get_dev_id ** ** DESCRIPTION : Read the device id of Current sensor. ** ** ARGUMENTS : i2c device and pointer to device Id. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus ina226_getDevId(INA226_Dev *dev, uint16_t *devID) { return read_ina_reg(dev, INA_DIEID_REG, devID); } /***************************************************************************** ** FUNCTION NAME : curr_sens_get_mfg_id ** ** DESCRIPTION : Read the mfg id of Current sensor. ** ** ARGUMENTS : i2c device and out-pointer to manufacturing ID. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus ina226_getMfgId(INA226_Dev *dev, uint16_t *mfgID) { return read_ina_reg(dev, INA_MANUFACTUREID_REG, mfgID); } /***************************************************************************** ** FUNCTION NAME : curr_sens_set_cfg_reg ** ** DESCRIPTION : Write the value to Current sensor configuration ** register. ** ** ARGUMENTS : i2c device and new value of configuration register. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus _set_cfg_reg(INA226_Dev *dev, uint16_t regValue) { return write_ina_reg(dev, INA_CONFIGURATION_REG, regValue); } /***************************************************************************** ** FUNCTION NAME : curr_sens_set_cal_reg ** ** DESCRIPTION : Write the value to Current sensor calibration register. ** ** ARGUMENTS : i2c device and new value of calibration register. 
** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus _set_cal_reg(INA226_Dev *dev, uint16_t regValue) { return write_ina_reg(dev, INA_CALIBRATION_REG, regValue); } /***************************************************************************** ** FUNCTION NAME : curr_sens_get_curr_limit ** ** DESCRIPTION : Read the value of Current sensor alert limit register. ** ** ARGUMENTS : i2c device and out-pointer to current limit. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ ReturnStatus ina226_readCurrentLim(INA226_Dev *dev, uint16_t *currLimit) { uint16_t regValue = 0x0000; ReturnStatus status = read_ina_reg(dev, INA_ALERTLIMIT_REG, &regValue); if (status == RETURN_OK) { *currLimit = REG_TO_CURRENT(regValue); LOGGER_DEBUG("INASENSOR:INFO:: INA sensor 0x%x on bus 0x%x is " "reporting current limit of %d mA.\n", dev->cfg.dev.slave_addr, dev->cfg.dev.bus, *currLimit); } return status; } /***************************************************************************** ** FUNCTION NAME : curr_sens_set_curr_limit ** ** DESCRIPTION : Write the value to Current sensor alert limit register. ** ** ARGUMENTS : i2c device and new current limit. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ ReturnStatus ina226_setCurrentLim(INA226_Dev *dev, uint16_t currLimit) { uint16_t regValue = CURRENT_TO_REG(currLimit); return write_ina_reg(dev, INA_ALERTLIMIT_REG, regValue); } /***************************************************************************** ** FUNCTION NAME : curr_sens_read_alert_reg ** ** DESCRIPTION : Read the value to Current sensor mask/enable register. ** ** ARGUMENTS : i2c device and out-pointer to enable and mask bits. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus _read_alert_reg(INA226_Dev *dev, uint16_t *regValue) { return read_ina_reg(dev, INA_MASKENABLE_REG, regValue); } /***************************************************************************** ** FUNCTION NAME : curr_sens_enable_alert ** ** DESCRIPTION : Write the value to Current sensor mask/enable register. ** ** ARGUMENTS : i2c device and alert to be enabled. ** ** RETURN TYPE : Success or failure ** *****************************************************************************/ static ReturnStatus _enable_alert(INA226_Dev *dev, uint16_t regValue) { return write_ina_reg(dev, INA_MASKENABLE_REG, regValue); } /***************************************************************************** ** FUNCTION NAME : curr_sens_get_bus_volt_value ** ** DESCRIPTION : Read the value of Current sensor bus voltage value. ** ** ARGUMENTS : i2c device and out-pointer to bus voltage value. 
**
 ** RETURN TYPE : Success or failure
 **
 *****************************************************************************/
ReturnStatus ina226_readBusVoltage(INA226_Dev *dev, uint16_t *busVoltValue)
{
    uint16_t regValue;
    ReturnStatus status = read_ina_reg(dev, INA_BUSVOLTAGE_REG, &regValue);
    if (status == RETURN_OK) {
        *busVoltValue = regValue * INA226_VBUS_LSB;
        LOGGER_DEBUG("INASENSOR:INFO:: INA sensor 0x%x on bus 0x%x is "
                     "reporting bus voltage value of %d mV.\n",
                     dev->cfg.dev.slave_addr, dev->cfg.dev.bus, *busVoltValue);
    }
    return status;
}

/*****************************************************************************
 ** FUNCTION NAME : curr_sens_get_shunt_volt_value
 **
 ** DESCRIPTION : Read the value of Current sensor shunt voltage value.
 **
 ** ARGUMENTS : i2c device and out-pointer to shunt voltage.
 **
 ** RETURN TYPE : Success or failure
 **
 *****************************************************************************/
ReturnStatus ina226_readShuntVoltage(INA226_Dev *dev, uint16_t *shuntVoltValue)
{
    uint16_t regValue;
    ReturnStatus status = read_ina_reg(dev, INA_SHUNTVOLTAGE_REG, &regValue);
    if (status == RETURN_OK) {
        *shuntVoltValue = regValue * INA226_VSHUNT_LSB;
        LOGGER_DEBUG("INASENSOR:INFO:: INA sensor 0x%x on bus 0x%x is "
                     "reporting shunt voltage value of %d uV.\n",
                     dev->cfg.dev.slave_addr, dev->cfg.dev.bus,
                     *shuntVoltValue);
    }
    return status;
}

/*****************************************************************************
 ** FUNCTION NAME : curr_sens_get_curr_value
 **
 ** DESCRIPTION : Read the value of Current sensor current value.
 **
 ** ARGUMENTS : i2c device and out-pointer to current value.
 **
 ** RETURN TYPE : Success or failure
 **
 *****************************************************************************/
ReturnStatus ina226_readCurrent(INA226_Dev *dev, uint16_t *currValue)
{
    uint16_t regValue;
    ReturnStatus status = read_ina_reg(dev, INA_CURRENT_REG, &regValue);
    if (status == RETURN_OK) {
        *currValue = regValue * INA226_CURRENT_LSB;
        LOGGER_DEBUG("INASENSOR:INFO:: INA sensor 0x%x on bus 0x%x "
                     "is reporting current value of %d mA.\n",
                     dev->cfg.dev.slave_addr, dev->cfg.dev.bus, *currValue);
    }
    return status;
}

/*****************************************************************************
 ** FUNCTION NAME : curr_sens_get_power_value
 **
 ** DESCRIPTION : Read the value of Current sensor power value.
 **
 ** ARGUMENTS : i2c device and out-pointer to power value.
 **
 ** RETURN TYPE : Success or failure
 **
 *****************************************************************************/
ReturnStatus ina226_readPower(INA226_Dev *dev, uint16_t *powValue)
{
    uint16_t regValue;
    ReturnStatus status = read_ina_reg(dev, INA_POWER_REG, &regValue);
    if (status == RETURN_OK) {
        *powValue = regValue * INA226_POWER_LSB;
        LOGGER_DEBUG("INASENSOR:INFO:: INA sensor 0x%x on bus 0x%x is "
                     "reporting power value of %d mW.\n",
                     dev->cfg.dev.slave_addr, dev->cfg.dev.bus, *powValue);
    }
    return status;
}

/*****************************************************************************
 * Internal IRQ handler - reads in triggered interrupts and dispatches CBs
 *****************************************************************************/
static void _ina226_isr(void *context)
{
    INA226_Dev *dev = context;

    /* Read the alert mask register (will clear the alert bit if set) */
    /* TODO: this seems to be a strange bug in the sensor - sometimes it returns
     * 0xFF as the lo-byte. If this occurs, we need to re-read it.
* NOTE: 0x1F is a perfectly legal value, but bits 5-9 are RFU and * normally zero */ uint16_t alert_mask = 0xFFFF; while (LOBYTE(alert_mask) == 0xFF) { if (_read_alert_reg(dev, &alert_mask) != RETURN_OK) { LOGGER_DEBUG("INA226:ERROR:: INT mask read failed\n"); return; } } if (!dev->obj.alert_cb) { return; } if (alert_mask & INA_MSK_AFF) { /* This alert was caused by a fault */ /* Theory of operation: After reading the alert, we change the alert * mask to look at the complement alert. For example, after getting a * bus over-voltage alert, we switch the mask to tell us when it's * under-voltage and thus back in operating limits so that it's less * likely that we'll miss alerts on the shared line, although not * guaranteed :( */ /* The device can only monitor one metric at a time, if multiple flags * are set, it monitors the highest order bit, so check the config * in order, from MSB to LSB */ uint16_t value; uint16_t new_mask = alert_mask & (~INA_ALERT_EN_MASK); INA226_Event evt; uint16_t alert_lim; ina226_readCurrentLim(dev, &alert_lim); if (alert_mask & INA_MSK_SOL) { if (dev->obj.evt_to_monitor == INA226_EVT_COL || dev->obj.evt_to_monitor == INA226_EVT_CUL) { if (ina226_readCurrent(dev, &value) != RETURN_OK) { value = UINT16_MAX; } alert_lim -= INA_HYSTERESIS; evt = INA226_EVT_COL; } else { if (ina226_readShuntVoltage(dev, &value) != RETURN_OK) { value = UINT16_MAX; } evt = INA226_EVT_SOL; } new_mask |= INA_MSK_SUL; } else if (alert_mask & INA_MSK_SUL) { if (dev->obj.evt_to_monitor == INA226_EVT_CUL || dev->obj.evt_to_monitor == INA226_EVT_COL) { if (ina226_readCurrent(dev, &value) != RETURN_OK) { value = UINT16_MAX; } alert_lim += INA_HYSTERESIS; evt = INA226_EVT_CUL; } else { if (ina226_readShuntVoltage(dev, &value) != RETURN_OK) { value = UINT16_MAX; } evt = INA226_EVT_SUL; } new_mask |= INA_MSK_SOL; } else if (alert_mask & INA_MSK_BOL) { if (ina226_readBusVoltage(dev, &value) != RETURN_OK) { value = UINT16_MAX; } evt = INA226_EVT_BOL; new_mask |= INA_MSK_BUL; } else if (alert_mask & INA_MSK_BUL) { if (ina226_readBusVoltage(dev, &value) != RETURN_OK) { value = UINT16_MAX; } evt = INA226_EVT_BUL; new_mask |= INA_MSK_BOL; } else if (alert_mask & INA_MSK_POL) { if (ina226_readPower(dev, &value) != RETURN_OK) { value = UINT16_MAX; } evt = INA226_EVT_POL; /* TODO: there isn't a PUL alert, not sure what to do here. 
We * don't currently use this alert, but it would be nice to have a * complete driver */ new_mask |= INA_MSK_POL; } else { LOGGER_ERROR("INA226:Unknown alert type\n"); return; } /* Set a new limit in order to account for hysteresis */ /* TODO: make this work for all alert types (this is a hack) */ ina226_setCurrentLim(dev, alert_lim); /* Invert the alert type we're looking for */ if (_enable_alert(dev, new_mask) != RETURN_OK) { /* TODO [HACK]: this sometimes reports failures at random times, so * this is a hacked together retry to keep things stable*/ _enable_alert(dev, new_mask); } dev->obj.alert_cb(evt, value, dev->obj.cb_context); } /* TODO: Conversion ready not handled */ } /***************************************************************************** *****************************************************************************/ ReturnStatus ina226_init(INA226_Dev *dev) { ReturnStatus status; dev->obj = (INA226_Obj){}; /* Perform a device reset to be safe */ status = _set_cfg_reg(dev, INA_CFG_RESET); if (status != RETURN_OK) { return status; } /* Configure the Configuration register with number of samples and * conversion time for shunt and bus voltage */ status = _set_cfg_reg(dev, INA226_CONFIG_REG_VALUE); if (status != RETURN_OK) { return status; } /* Configure the Calibration register with shunt resistor value and * current LSB */ status = _set_cal_reg(dev, INA226_CAL_REG_VALUE); if (status != RETURN_OK) { return status; } /* Make sure we're talking to the right device */ // if (ina226_probe(dev) != POST_DEV_FOUND) { // return RETURN_NOTOK; // } if (dev->cfg.pin_alert) { const uint32_t pin_evt_cfg = OCGPIO_CFG_INPUT | OCGPIO_CFG_INT_FALLING; if (OcGpio_configure(dev->cfg.pin_alert, pin_evt_cfg) < OCGPIO_SUCCESS) { return RETURN_NOTOK; } /* Use a threaded interrupt to handle IRQ */ ThreadedInt_Init(dev->cfg.pin_alert, _ina226_isr, (void *)dev); } return RETURN_OK; } /***************************************************************************** *****************************************************************************/ void ina226_setAlertHandler(INA226_Dev *dev, INA226_CallbackFn alert_cb, void *cb_context) { dev->obj.alert_cb = alert_cb; dev->obj.cb_context = cb_context; } /***************************************************************************** *****************************************************************************/ ReturnStatus ina226_enableAlert(INA226_Dev *dev, INA226_Event evt) { /* TODO: perhaps caching the mask is better? If we have an active alert, * we'll inadvertently clear it here */ /* TODO: this isn't thread safe, but does it need to be? 
*/ uint16_t alert_mask; ReturnStatus res = _read_alert_reg(dev, &alert_mask); if (res != RETURN_OK) { return res; } alert_mask &= (~INA_ALERT_EN_MASK); /* Wipe out previous alert EN bits */ //alert_mask |= (INA_MSK_LEN); /* Enable latch mode (never miss an alert) */ dev->obj.evt_to_monitor = evt; switch (evt) { case INA226_EVT_COL: alert_mask |= INA_MSK_SOL; break; case INA226_EVT_CUL: alert_mask |= INA_MSK_SUL; break; default: alert_mask |= evt; } return _enable_alert(dev, alert_mask); } /***************************************************************************** *****************************************************************************/ ePostCode ina226_probe(INA226_Dev *dev, POSTData *postData) { uint16_t devId = 0x00; uint16_t manfId = 0x0000; if (ina226_getDevId(dev, &devId) != RETURN_OK) { return POST_DEV_MISSING; } if (devId != INA226_DEVICE_ID) { return POST_DEV_ID_MISMATCH; } if (ina226_getMfgId(dev, &manfId) != RETURN_OK) { return POST_DEV_MISSING; } if (manfId != INA226_MANFACTURE_ID) { return POST_DEV_ID_MISMATCH; } post_update_POSTData(postData, dev->cfg.dev.bus, dev->cfg.dev.slave_addr, manfId, devId); return POST_DEV_FOUND; }
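/* A minimal usage sketch for the driver above. The INA226_CallbackFn
 * signature is inferred from how alert_cb is invoked in _ina226_isr(), and
 * the INA226_USAGE_SKETCH guard is hypothetical; only functions defined in
 * this file are called. */
#ifdef INA226_USAGE_SKETCH
static void example_alert_cb(INA226_Event evt, uint16_t value, void *context)
{
    (void)context;
    LOGGER_DEBUG("INA226 alert: event %d, value %d\n", evt, value);
}

static void ina226_usage_example(INA226_Dev *dev)
{
    uint16_t busVolt_mV;

    if (ina226_init(dev) != RETURN_OK) {       /* reset + configure + calibrate */
        return;
    }
    ina226_setAlertHandler(dev, example_alert_cb, NULL);
    ina226_setCurrentLim(dev, 1500);           /* alert threshold: 1500 mA */
    ina226_enableAlert(dev, INA226_EVT_COL);   /* current-over-limit alert */

    if (ina226_readBusVoltage(dev, &busVolt_mV) == RETURN_OK) {
        LOGGER_DEBUG("Bus voltage: %d mV\n", busVolt_mV);
    }
}
#endif /* INA226_USAGE_SKETCH */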
229423.c
/************************************************************************* * * file: main.c * * ======================================================================= * This file is obsolete, having been replaced by the Tcl interface. * Scott Wallace updated when testing Soar 8 without Tcl. * ======================================================================= * * Copyright 1995-2003 Carnegie Mellon University, * University of Michigan, * University of Southern California/Information * Sciences Institute. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE SOAR CONSORTIUM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE SOAR CONSORTIUM OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing official * policies, either expressed or implied, of Carnegie Mellon University, the * University of Michigan, the University of Southern California/Information * Sciences Institute, or the Soar consortium. * ======================================================================= */ /* =================================================================== Main file for Soar 6 =================================================================== */ #include "soarkernel.h" /* =================================================================== Main Function =================================================================== */ int main () { #ifdef THINK_C /* Increase the application stack by 16K. * This is done by decreasing the heap. */ SetApplLimit(GetApplLimit() - 16384); MaxApplZone(); #endif /* THINK_C */ init_soar(); repeatedly_read_and_dispatch_commands (); return terminate_soar(); }
526328.c
#include <std.h>
inherit "/std/monster";

create() {
    ::create();
    set_name("keep lieutenant");
    set_id(({"lieutenant","klieutenant","orc","orc guard"}));
    set("short","Keep Lieutenant");
    set("long","This is a Lieutenant of Keep Blacktongue. Obviously an orc "
        "because of his extreme smell and stupid look. Dressed in well-worn "
        "leather armor, he looks like he's seen better days too. He is larger "
        "than the normal Keep guards and looks a little smarter too.");
    set_race("orc");
    set_gender("male");
    set_level(7);
    set_body_type("human");
    set_class("fighter");
    set("aggressive", 0);
    set_alignment(6);
    set_size(2);
    set_hd(7,1);
    set_stats("strength",19);
    set_stats("intelligence",12);
    set_stats("wisdom",5);
    set_stats("charisma",4);
    set_stats("dexterity", 13);
    set_stats("constitution",15);
    set_wielding_limbs(({"right hand","left hand"}));
    new("/d/common/obj/weapon/longsword")->move(this_object());
    command("wield longsword");
    set_overall_ac(2);
    set_hp(60);
    add_money("silver", random(50));
    add_money("copper", 5);
}
180342.c
/** * Copyright (c) 2016 Peter Cannici * Licensed under the MIT (X11) license. See LICENSE. */ #include "test.h" #include "rope.h" #define SETUP \ TEST_GET_ALLOCATOR(mctx); \ sfmt_t rng; \ sfmt_init_gen_rand(&rng, 126155); \ rope *r = rope_new((yu_allocator *)&mctx, &rng); #define TEARDOWN \ rope_free(r); \ yu_alloc_ctx_free(&mctx); #define LIST_ROPE_TESTS(X) \ X(create_str, "Initializing a rope with a string should create a rope with the same contents") \ X(unicode_str, "Code point counts should accurately be reported for UTF-8 strings") \ TEST(create_str) const char s[] = "Do you have any idea how stupid we are? Don't underestimate us!"; rope *r2 = rope_new_with_utf8((yu_allocator *)&mctx, &rng, (const u8 *)s); PT_ASSERT_EQ(rope_byte_count(r2), strlen(s)); PT_ASSERT_EQ(rope_char_count(r2), strlen(s)); char out_s[sizeof s]; PT_ASSERT_EQ(rope_write_cstr(r2, (u8 *)out_s), strlen(s) + 1); PT_ASSERT_STR_EQ(out_s, s); rope_free(r2); END(create_str) TEST(unicode_str) // File must be encoded as UTF-8 for this test to work. const char s[] = "年年有余"; rope *r2 = rope_new_with_utf8((yu_allocator *)&mctx, &rng, (const u8 *)s); PT_ASSERT_EQ(rope_byte_count(r2), strlen(s)); PT_ASSERT_EQ(rope_char_count(r2), 4u); END(unicode_str) SUITE(rope, LIST_ROPE_TESTS)
481897.c
/*
Copyright (c) 2003 Jesper Svennevid

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/

/*
 * $Id: mnemonics_data.c,v 1.6 2003/06/22 11:57:27 jesper Exp $
 * $Log: mnemonics_data.c,v $
 * Revision 1.6  2003/06/22 11:57:27  jesper
 * fixed msvc-problems (hopefully), updated relocs, started on 'section'
 *
 * Revision 1.5  2003/06/19 14:55:30  jesper
 * code generation update
 *
 * Revision 1.4  2003/06/19 06:49:45  jesper
 * added new interface for instruction-pushing, currently incomplete
 *
 * Revision 1.3  2003/06/18 20:23:19  jesper
 * added logging to all files
 *
 * $EndLog$
 */

#include <stdio.h>
#include <ctype.h>

#include "types.h"
#include "error.h"
#include "mangle.h"
#include "mnemonics.h"
#include "codespace.h"

// TODO: add support for storing strings
AFUNC(asm_dc)
{
    unsigned short i;

    for(i = 0; i < mangle->num_args;i++)
    {
        if(!(AM_INVALID(i,AM_IMMEDIATE)||AM_INVALID(i,AM_STRING)))
            fail(ECODE_INVALID_ADDRESSING_MODE);

        switch(mangle->opsize)
        {
            case OS_BYTE:
            {
                switch(mangle->args[i]->mode)
                {
                    case AM_STRING:
                    {
                        const char* s = mangle->args[i]->string;
                        int in_string = 0;
                        while(*s)
                        {
                            if(in_string&1)
                            {
                                if('\\' == *s)
                                {
                                    s++;
                                    if(!*s)
                                        fail(EPARSE_MISPLACED_ESCAPING);

                                    switch(*s)
                                    {
                                        case '0': cs_push_data(1,0,1,NULL); break;
                                        case 't': cs_push_data(1,9,1,NULL); break;
                                        case 'r': cs_push_data(1,13,1,NULL); break;
                                        case 'n': cs_push_data(1,10,1,NULL); break;
                                        case '"': cs_push_data(1,'"',1,NULL); break;
                                        default: cs_push_data(1,(*s)&0xff,1,NULL);
                                    }
                                }
                                else
                                {
                                    if('"' == *s)
                                        in_string = 2;
                                    else
                                        cs_push_data(1,(*s)&0xff,1,NULL);
                                }
                            }
                            else
                            {
                                if(!isspace(*s))
                                {
                                    if(!in_string)
                                    {
                                        if('"' == *s)
                                            in_string = 1;
                                        else
                                            fail(EPARSE_INVALID_STRING);
                                    }
                                    else
                                        fail(EPARSE_INVALID_STRING);
                                }
                            }
                            s++;
                        }
                    }
                    break;

                    case AM_IMMEDIATE:
                    {
                        if(mangle->args[i]->value.symbol)
                            fail(ECODE_INVALID_ADDRESSING_MODE);

                        if(mangle->args[i]->value.uint > 255)
                            fail(EPARSE_VALUE_OUT_OF_BOUNDS);

                        cs_push_data(1,mangle->args[i]->value.uint&0xff,1,NULL);
                    }
                    break;
                }
            }
            break;

            case OS_WORD:
            {
                if(AM_INVALID(i,AM_STRING))
                    fail(ECODE_INVALID_ADDRESSING_MODE);

                // TODO: should we allow absolute addresses < 64k?
if(mangle->args[i]->value.symbol) fail(ECODE_INVALID_ADDRESSING_MODE); if(mangle->args[i]->value.uint > 65535) fail(EPARSE_VALUE_OUT_OF_BOUNDS); cs_push_data(2,mangle->args[i]->value.uint&0xffff,1,NULL); } break; case OS_LONG: { if(AM_INVALID(i,AM_STRING)) cs_push_data(4,0,1,cs_create_symbol(mangle->args[i]->string,NULL,SF_REFERENCE)); else cs_push_data(4,mangle->args[i]->value.uint,1,mangle->args[i]->value.symbol); } break; } } return 0; } AFUNC(asm_ds) { return 1; } AFUNC(asm_equ) { return 1; } AFUNC(asm_blk) { return 1; } AFUNC(asm_store) { if(OS_VALID(OS_DEFAULT)) fail(ECODE_INVALID_INSTRUCTION_SIZE); cs_push_section(); return 1; } AFUNC(asm_restore) { if(OS_VALID(OS_DEFAULT)) fail(ECODE_INVALID_INSTRUCTION_SIZE); cs_pop_section(); return 1; }
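/* A standalone restatement of the escape-decoding rule that asm_dc applies
 * to quoted strings above: \0, \t, \r, \n and \" map to their byte values,
 * and any other escaped character is emitted verbatim. The function name and
 * the MNEMONICS_ESCAPE_SKETCH guard are illustrative only; they are not part
 * of the assembler's API. */
#ifdef MNEMONICS_ESCAPE_SKETCH
static int decode_escape(char c)
{
    switch(c)
    {
        case '0': return 0;    /* NUL */
        case 't': return 9;    /* TAB */
        case 'r': return 13;   /* CR */
        case 'n': return 10;   /* LF */
        case '"': return '"';  /* literal quote */
        default:  return (unsigned char)c;  /* pass through unchanged */
    }
}
#endif /* MNEMONICS_ESCAPE_SKETCH */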
409793.c
/* * Arm SCP/MCP Software * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <mod_css_clock.h> #include <mod_pik_clock.h> #include <mod_sid.h> #include <mod_system_pll.h> #include <fwk_assert.h> #include <fwk_element.h> #include <fwk_id.h> #include <fwk_macros.h> #include <fwk_module.h> #include <fwk_module_idx.h> #include <fwk_status.h> #include <stdbool.h> #include <stddef.h> static const struct mod_css_clock_rate rate_table_cpu_group_big[] = { { .rate = 2700 * FWK_MHZ, .pll_rate = 2700 * FWK_MHZ, .clock_source = MOD_PIK_CLOCK_CLUSCLK_SOURCE_PLL1, .clock_div_type = MOD_PIK_CLOCK_MSCLOCK_DIVIDER_DIV_EXT, .clock_div = 1, .clock_mod_numerator = 1, .clock_mod_denominator = 1, }, }; static const struct mod_css_clock_rate rate_table_cpu_group_little[] = { { .rate = 2200 * FWK_MHZ, .pll_rate = 2200 * FWK_MHZ, .clock_source = MOD_PIK_CLOCK_CLUSCLK_SOURCE_PLL0, .clock_div_type = MOD_PIK_CLOCK_MSCLOCK_DIVIDER_DIV_EXT, .clock_div = 1, .clock_mod_numerator = 1, .clock_mod_denominator = 1, }, }; static const struct mod_css_clock_rate rate_table_gpu[] = { { .rate = 800 * FWK_MHZ, .pll_rate = 800 * FWK_MHZ, .clock_source = MOD_PIK_CLOCK_MSCLOCK_SOURCE_PRIVPLLCLK, .clock_div_type = MOD_PIK_CLOCK_MSCLOCK_DIVIDER_DIV_EXT, .clock_div = 1, }, }; static const fwk_id_t member_table_cpu_big_cfg_a[] = { FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 11), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 12), }; static const fwk_id_t member_table_cpu_little_cfg_a[] = { FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 5), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 6), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 7), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 8), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 9), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 10), }; static const fwk_id_t member_table_cpu_big_cfg_b[] = { FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 9), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 10), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 11), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 12), }; static const fwk_id_t member_table_cpu_little_cfg_b[] = { FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 5), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 6), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 7), FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 8), }; static const fwk_id_t member_table_gpu[] = { FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_PIK_CLOCK, 13), }; static const struct fwk_element css_clock_element_table[] = { { .name = "CPU_GROUP_BIG", .data = &((struct mod_css_clock_dev_config) { .rate_table = rate_table_cpu_group_big, .rate_count = sizeof(rate_table_cpu_group_big) / sizeof(struct mod_css_clock_rate), .clock_switching_source = MOD_PIK_CLOCK_CLUSCLK_SOURCE_SYSREFCLK, .pll_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SYSTEM_PLL, 1), .pll_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_SYSTEM_PLL, MOD_SYSTEM_PLL_API_TYPE_DEFAULT), .member_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_PIK_CLOCK, MOD_PIK_CLOCK_API_TYPE_CSS), .initial_rate = 2700 * FWK_MHZ, .modulation_supported = true, }), }, { .name = "CPU_GROUP_LITTLE", .data = &((struct mod_css_clock_dev_config) { .rate_table = rate_table_cpu_group_little, .rate_count = sizeof(rate_table_cpu_group_little) / sizeof(struct mod_css_clock_rate), .clock_switching_source = MOD_PIK_CLOCK_CLUSCLK_SOURCE_SYSREFCLK, .pll_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SYSTEM_PLL, 0), .pll_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_SYSTEM_PLL, MOD_SYSTEM_PLL_API_TYPE_DEFAULT), .member_api_id = 
FWK_ID_API_INIT(FWK_MODULE_IDX_PIK_CLOCK, MOD_PIK_CLOCK_API_TYPE_CSS), .initial_rate = 2200 * FWK_MHZ, .modulation_supported = true, }), }, { .name = "GPU", .data = &((struct mod_css_clock_dev_config) { .rate_table = rate_table_gpu, .rate_count = sizeof(rate_table_gpu) / sizeof(struct mod_css_clock_rate), .clock_switching_source = MOD_PIK_CLOCK_CLUSCLK_SOURCE_SYSREFCLK, .pll_id = FWK_ID_ELEMENT_INIT(FWK_MODULE_IDX_SYSTEM_PLL, 2), .pll_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_SYSTEM_PLL, MOD_SYSTEM_PLL_API_TYPE_DEFAULT), .member_table = member_table_gpu, .member_count = FWK_ARRAY_SIZE(member_table_gpu), .member_api_id = FWK_ID_API_INIT(FWK_MODULE_IDX_PIK_CLOCK, MOD_PIK_CLOCK_API_TYPE_CSS), .initial_rate = 800 * FWK_MHZ, .modulation_supported = false, }), }, { 0 }, /* Termination description. */ }; static const struct fwk_element *css_clock_get_element_table (fwk_id_t module_id) { int status; struct mod_css_clock_dev_config *config_big; struct mod_css_clock_dev_config *config_little; const struct mod_sid_info *system_info; status = mod_sid_get_system_info(&system_info); fwk_assert(status == FWK_SUCCESS); config_big = (struct mod_css_clock_dev_config *) css_clock_element_table[0].data; config_little = (struct mod_css_clock_dev_config *) css_clock_element_table[1].data; switch (system_info->config_number) { case 1: case 5: case 7: case 8: config_big->member_table = member_table_cpu_big_cfg_a; config_big->member_count = FWK_ARRAY_SIZE(member_table_cpu_big_cfg_a); config_little->member_table = member_table_cpu_little_cfg_a; config_little->member_count = FWK_ARRAY_SIZE(member_table_cpu_little_cfg_a); break; case 2: case 3: case 4: case 6: config_big->member_table = member_table_cpu_big_cfg_b; config_big->member_count = FWK_ARRAY_SIZE(member_table_cpu_big_cfg_b); config_little->member_table = member_table_cpu_little_cfg_b; config_little->member_count = FWK_ARRAY_SIZE(member_table_cpu_little_cfg_b); break; default: return NULL; } return css_clock_element_table; } const struct fwk_module_config config_css_clock = { .elements = FWK_MODULE_DYNAMIC_ELEMENTS(css_clock_get_element_table), };
306098.c
/* * Copyright (c) 2018 Intel Corporation. * * SPDX-License-Identifier: Apache-2.0 */ #include <zephyr.h> #include <tc_util.h> #include <ztest.h> #include <kernel.h> #include <ksched.h> #include <kernel_structs.h> #if CONFIG_MP_NUM_CPUS < 2 #error SMP test requires at least two CPUs! #endif #define T2_STACK_SIZE (2048 + CONFIG_TEST_EXTRA_STACK_SIZE) #define STACK_SIZE (384 + CONFIG_TEST_EXTRA_STACK_SIZE) #define DELAY_US 50000 #define TIMEOUT 1000 #define EQUAL_PRIORITY 1 #define TIME_SLICE_MS 500 #define THREAD_DELAY 1 #define SLEEP_MS_LONG 15000 struct k_thread t2; K_THREAD_STACK_DEFINE(t2_stack, T2_STACK_SIZE); volatile int t2_count; volatile int sync_count = -1; static int main_thread_id; static int child_thread_id; volatile int rv; K_SEM_DEFINE(cpuid_sema, 0, 1); K_SEM_DEFINE(sema, 0, 1); static struct k_mutex smutex; static struct k_sem smp_sem; #define THREADS_NUM CONFIG_MP_NUM_CPUS struct thread_info { k_tid_t tid; int executed; int priority; int cpu_id; }; static ZTEST_BMEM volatile struct thread_info tinfo[THREADS_NUM]; static struct k_thread tthread[THREADS_NUM]; static K_THREAD_STACK_ARRAY_DEFINE(tstack, THREADS_NUM, STACK_SIZE); static volatile int thread_started[THREADS_NUM - 1]; static struct k_poll_signal tsignal[THREADS_NUM]; static struct k_poll_event tevent[THREADS_NUM]; static int curr_cpu(void) { unsigned int k = arch_irq_lock(); int ret = arch_curr_cpu()->id; arch_irq_unlock(k); return ret; } /** * @brief Tests for SMP * @defgroup kernel_smp_tests SMP Tests * @ingroup all_tests * @{ * @} */ /** * @defgroup kernel_smp_integration_tests SMP Tests * @ingroup all_tests * @{ * @} */ /** * @defgroup kernel_smp_module_tests SMP Tests * @ingroup all_tests * @{ * @} */ static void t2_fn(void *a, void *b, void *c) { ARG_UNUSED(a); ARG_UNUSED(b); ARG_UNUSED(c); t2_count = 0; /* This thread simply increments a counter while spinning on * the CPU. The idea is that it will always be iterating * faster than the other thread so long as it is fairly * scheduled (and it's designed to NOT be fairly schedulable * without a separate CPU!), so the main thread can always * check its progress. */ while (1) { k_busy_wait(DELAY_US); t2_count++; } } /** * @brief Verify SMP with 2 cooperative threads * * @ingroup kernel_smp_tests * * @details Multi processing is verified by checking whether * 2 cooperative threads run simultaneously at different cores */ void test_smp_coop_threads(void) { int i, ok = 1; k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, t2_fn, NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); /* Wait for the other thread (on a separate CPU) to actually * start running. We want synchrony to be as perfect as * possible. 
*/ t2_count = -1; while (t2_count == -1) { } for (i = 0; i < 10; i++) { /* Wait slightly longer than the other thread so our * count will always be lower */ k_busy_wait(DELAY_US + (DELAY_US / 8)); if (t2_count <= i) { ok = 0; break; } } k_thread_abort(tid); zassert_true(ok, "SMP test failed"); } static void child_fn(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); int parent_cpu_id = POINTER_TO_INT(p1); zassert_true(parent_cpu_id != curr_cpu(), "Parent isn't on other core"); sync_count++; k_sem_give(&cpuid_sema); } /** * @brief Verify CPU IDs of threads in SMP * * @ingroup kernel_smp_tests * * @details Verify whether thread running on other core is * parent thread from child thread */ void test_cpu_id_threads(void) { /* Make sure idle thread runs on each core */ k_sleep(K_MSEC(1000)); int parent_cpu_id = curr_cpu(); k_tid_t tid = k_thread_create(&t2, t2_stack, T2_STACK_SIZE, child_fn, INT_TO_POINTER(parent_cpu_id), NULL, NULL, K_PRIO_PREEMPT(2), 0, K_NO_WAIT); while (sync_count == -1) { } k_sem_take(&cpuid_sema, K_FOREVER); k_thread_abort(tid); } static void thread_entry(void *p1, void *p2, void *p3) { ARG_UNUSED(p2); ARG_UNUSED(p3); int thread_num = POINTER_TO_INT(p1); int count = 0; tinfo[thread_num].executed = 1; tinfo[thread_num].cpu_id = curr_cpu(); while (count++ < 5) { k_busy_wait(DELAY_US); } } static void spin_for_threads_exit(void) { for (int i = 0; i < THREADS_NUM - 1; i++) { volatile uint8_t *p = &tinfo[i].tid->base.thread_state; while (!(*p & _THREAD_DEAD)) { } } k_busy_wait(DELAY_US); } static void spawn_threads(int prio, int thread_num, int equal_prio, k_thread_entry_t thread_entry, int delay) { int i; /* Spawn threads of priority higher than * the previously created thread */ for (i = 0; i < thread_num; i++) { if (equal_prio) { tinfo[i].priority = prio; } else { /* Increase priority for each thread */ tinfo[i].priority = prio - 1; prio = tinfo[i].priority; } tinfo[i].tid = k_thread_create(&tthread[i], tstack[i], STACK_SIZE, thread_entry, INT_TO_POINTER(i), NULL, NULL, tinfo[i].priority, 0, K_MSEC(delay)); if (delay) { /* Increase delay for each thread */ delay = delay + 10; } } } static void abort_threads(int num) { for (int i = 0; i < num; i++) { k_thread_abort(tinfo[i].tid); } } static void cleanup_resources(void) { for (int i = 0; i < THREADS_NUM; i++) { tinfo[i].tid = 0; tinfo[i].executed = 0; tinfo[i].priority = 0; } } /** * @brief Test cooperative threads non-preemption * * @ingroup kernel_smp_tests * * @details Spawn cooperative threads equal to number of cores * supported. Main thread will already be running on 1 core. * Check if the last thread created preempts any threads * already running. */ void test_coop_resched_threads(void) { /* Spawn threads equal to number of cores, * since we don't give up current CPU, last thread * will not get scheduled */ spawn_threads(K_PRIO_COOP(10), THREADS_NUM, !EQUAL_PRIORITY, &thread_entry, THREAD_DELAY); /* Wait for some time to let other core's thread run */ k_busy_wait(DELAY_US); /* Reassure that cooperative thread's are not preempted * by checking last thread's execution * status. 
We know that all threads got rescheduled on
	 * other cores except the last one
	 */
	for (int i = 0; i < THREADS_NUM - 1; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "cooperative thread %d didn't run", i);
	}
	zassert_true(tinfo[THREADS_NUM - 1].executed == 0,
		     "cooperative thread is preempted");

	/* Abort threads created */
	abort_threads(THREADS_NUM);
	cleanup_resources();
}

/**
 * @brief Test preemptibility of preemptive threads
 *
 * @ingroup kernel_smp_tests
 *
 * @details Create preemptive threads and let them run
 * on other cores, then verify that a thread is preempted
 * when another thread of higher priority is spawned
 */
void test_preempt_resched_threads(void)
{
	/* Spawn threads equal to number of cores,
	 * lower priority threads should
	 * be preempted by higher ones
	 */
	spawn_threads(K_PRIO_PREEMPT(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, THREAD_DELAY);

	spin_for_threads_exit();

	for (int i = 0; i < THREADS_NUM; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "preemptive thread %d didn't run", i);
	}

	/* Abort threads created */
	abort_threads(THREADS_NUM);
	cleanup_resources();
}

/**
 * @brief Validate behavior of a thread when it yields
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn cooperative threads equal to the number
 * of cores, so the last thread is pending, then call
 * yield() from the main thread. Afterwards, all threads
 * must have executed
 */
void test_yield_threads(void)
{
	/* Spawn threads equal to the number
	 * of cores, so the last thread would be
	 * pending.
	 */
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, !THREAD_DELAY);

	k_yield();
	k_busy_wait(DELAY_US);

	for (int i = 0; i < THREADS_NUM; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "thread %d did not execute", i);
	}

	abort_threads(THREADS_NUM);
	cleanup_resources();
}

/**
 * @brief Test behavior of threads when they sleep
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn cooperative threads and call
 * sleep() from the main thread. After the timeout, all
 * threads have to have been scheduled.
 */
void test_sleep_threads(void)
{
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM, !EQUAL_PRIORITY,
		      &thread_entry, !THREAD_DELAY);

	k_msleep(TIMEOUT);

	for (int i = 0; i < THREADS_NUM; i++) {
		zassert_true(tinfo[i].executed == 1,
			     "thread %d did not execute", i);
	}

	abort_threads(THREADS_NUM);
	cleanup_resources();
}

static void thread_wakeup_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int thread_num = POINTER_TO_INT(p1);

	thread_started[thread_num] = 1;

	k_msleep(DELAY_US * 1000);

	tinfo[thread_num].executed = 1;
}

static void wakeup_on_start_thread(int tnum)
{
	int threads_started = 0, i;

	/* For each thread, spin waiting for it to first flag that
	 * it's going to sleep, and then that it's actually blocked
	 */
	for (i = 0; i < tnum; i++) {
		while (thread_started[i] == 0) {
		}
		while (!z_is_thread_prevented_from_running(tinfo[i].tid)) {
		}
	}

	for (i = 0; i < tnum; i++) {
		if (thread_started[i] == 1 && threads_started <= tnum) {
			threads_started++;
			k_wakeup(tinfo[i].tid);
		}
	}
	zassert_equal(threads_started, tnum, "not all threads have started");
}

static void check_wokeup_threads(int tnum)
{
	int threads_woke_up = 0, i;

	/* k_wakeup() isn't synchronous, give the other CPU time to
	 * schedule them
	 */
	k_busy_wait(200000);

	for (i = 0; i < tnum; i++) {
		if (tinfo[i].executed == 1 && threads_woke_up <= tnum) {
			threads_woke_up++;
		}
	}
	zassert_equal(threads_woke_up, tnum, "threads did not wake up");
}

/**
 * @brief Test behavior of wakeup() in the SMP case
 *
 * @ingroup kernel_smp_tests
 *
 * @details Spawn a number of threads equal to the number of
 * remaining cores and let them sleep for a while.
Call
 * wakeup() on those threads from the parent thread and check
 * that they are all running
 */
void test_wakeup_threads(void)
{
	/* Spawn threads to run on all remaining cores */
	spawn_threads(K_PRIO_COOP(10), THREADS_NUM - 1, !EQUAL_PRIORITY,
		      &thread_wakeup_entry, !THREAD_DELAY);

	/* Check if all the threads have started, then call wakeup */
	wakeup_on_start_thread(THREADS_NUM - 1);

	/* Count threads which are woken up */
	check_wokeup_threads(THREADS_NUM - 1);

	/* Abort all threads and cleanup */
	abort_threads(THREADS_NUM - 1);
	cleanup_resources();
}

/* a thread for testing getting the current cpu */
static void thread_get_cpu_entry(void *p1, void *p2, void *p3)
{
	int bsp_id = *(int *)p1;
	int cpu_id = -1;

	/* get the current cpu number for the running thread */
	_cpu_t *curr_cpu = arch_curr_cpu();

	/**TESTPOINT: call arch_curr_cpu() to get cpu struct */
	zassert_true(curr_cpu != NULL,
		     "test failed to get current cpu.");

	cpu_id = curr_cpu->id;

	zassert_true(bsp_id != cpu_id,
		     "should not be the same with our BSP");

	/* loop forever to ensure we keep running on this CPU */
	while (1) {
		k_busy_wait(DELAY_US);
	}
}

/**
 * @brief Test getting a pointer to the current CPU
 *
 * @ingroup kernel_smp_module_tests
 *
 * @details
 * Test Objective:
 * - To verify the architecture layer provides a mechanism to return a pointer
 *   to the current kernel CPU record of the running CPU.
 *   We call arch_curr_cpu() and get its member, both in the main and the
 *   spawned thread separately, and compare them. They shall be different in
 *   an SMP environment.
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 *   dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - CONFIG_SMP=y, and the HW platform must support SMP.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# In the main thread, call arch_curr_cpu() to get its member "id", then
 *    store it into a variable thread_id.
 * -# Spawn a thread t2, and pass the stored thread_id to it, then call
 *    k_busy_wait() for 50us so the thread runs without being swapped out.
 * -# In thread t2, call arch_curr_cpu() to get a pointer to the current cpu
 *    data. Then check that it is not NULL.
 * -# Store the member id via the pointer to the current cpu data into the
 *    variable cpu_id.
 * -# Check that cpu_id is not equal to the bsp_id that we passed into the
 *    thread.
 * -# Call k_busy_wait() and loop forever.
 * -# In the main thread, terminate the thread t2 before exiting.
 *
 * Expected Test Result:
 * - The pointer to the current cpu data that we got from the function call
 *   is correct.
 *
 * Pass/Fail Criteria:
 * - Successful if the checks of steps 3 and 5 all pass.
 * - Failure if any of the checks of steps 3 and 5 fails.
 *
 * Assumptions and Constraints:
 * - This test is for platforms that support SMP; in our current scenario,
 *   only x86_64, arc and xtensa are supported.
 *
 * @see arch_curr_cpu()
 */
void test_get_cpu(void)
{
	k_tid_t thread_id;

	/* get the current cpu number */
	int cpu_id = arch_curr_cpu()->id;

	thread_id = k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
				    (k_thread_entry_t)thread_get_cpu_entry,
				    &cpu_id, NULL, NULL,
				    K_PRIO_COOP(2),
				    K_INHERIT_PERMS, K_NO_WAIT);

	k_busy_wait(DELAY_US);

	k_thread_abort(thread_id);
}

#ifdef CONFIG_TRACE_SCHED_IPI
/* global variable for testing send IPI */
static volatile int sched_ipi_has_called;

void z_trace_sched_ipi(void)
{
	sched_ipi_has_called++;
}
#endif

/**
 * @brief Test interprocessor interrupts
 *
 * @ingroup kernel_smp_integration_tests
 *
 * @details
 * Test Objective:
 * - To verify the architecture layer provides a mechanism to issue an
 *   interprocessor interrupt to all other CPUs in the system, which calls
 *   the scheduler IPI.
 * We simply add a hook in z_sched_ipi() in order to check that it has been
 * called on another CPU besides the caller when arch_sched_ipi() is
 * called.
 *
 * Testing techniques:
 * - Interface testing, function and black box testing,
 *   dynamic analysis and testing
 *
 * Prerequisite Conditions:
 * - CONFIG_SMP=y, and the HW platform must support SMP.
 * - CONFIG_TRACE_SCHED_IPI=y was set.
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# In the main thread, set the global variable sched_ipi_has_called to
 *    zero.
 * -# Call arch_sched_ipi(), then sleep for 100ms.
 * -# In the z_sched_ipi() handler, increment sched_ipi_has_called.
 * -# In the main thread, check that sched_ipi_has_called is not equal to
 *    zero.
 * -# Repeat steps 1 to 4 three times.
 *
 * Expected Test Result:
 * - The IPI handler has been invoked on another CPU after each
 *   arch_sched_ipi() call.
 *
 * Pass/Fail Criteria:
 * - Successful if the checks of step 4 all pass.
 * - Failure if any check of step 4 fails.
 *
 * Assumptions and Constraints:
 * - This test is for platforms that support SMP; in our current scenario,
 *   only x86_64 and arc are supported.
 *
 * @see arch_sched_ipi()
 */
void test_smp_ipi(void)
{
#ifndef CONFIG_TRACE_SCHED_IPI
	ztest_test_skip();
#endif

	TC_PRINT("cpu num=%d", CONFIG_MP_NUM_CPUS);

	for (int i = 0; i < 3 ; i++) {
		/* issue a sched ipi to tell other CPU to run thread */
		sched_ipi_has_called = 0;
		arch_sched_ipi();

		/* Need to wait longer than we think, loaded CI
		 * systems need to wait for host scheduling to run the
		 * other CPU's thread.
		 */
		k_msleep(100);

		/**TESTPOINT: check if enter our IPI interrupt handler */
		zassert_true(sched_ipi_has_called != 0,
			     "did not receive IPI.(%d)",
			     sched_ipi_has_called);
	}
}

void k_sys_fatal_error_handler(unsigned int reason, const z_arch_esf_t *pEsf)
{
	static int trigger;

	if (reason != K_ERR_KERNEL_OOPS) {
		printk("wrong error reason\n");
		k_fatal_halt(reason);
	}

	if (trigger == 0) {
		child_thread_id = curr_cpu();
		trigger++;
	} else {
		main_thread_id = curr_cpu();

		/* Verify that the fatal error happened on a different core */
		zassert_true(main_thread_id != child_thread_id,
			     "fatal on the same core");
	}
}

void entry_oops(void *p1, void *p2, void *p3)
{
	k_oops();
	TC_ERROR("SHOULD NEVER SEE THIS\n");
}

/**
 * @brief Test that fatal errors can be triggered on different cores
 * @details When CONFIG_SMP is enabled, on some multiprocessor
 * platforms, exceptions can be triggered on different cores at
 * the same time.
 *
 * @ingroup kernel_common_tests
 */
void test_fatal_on_smp(void)
{
	/* Create a child thread and trigger a crash */
	k_thread_create(&t2, t2_stack, T2_STACK_SIZE, entry_oops,
			NULL, NULL, NULL,
			K_PRIO_PREEMPT(2), 0, K_NO_WAIT);

	/* hold the cpu and wait for the thread to trigger the exception */
	k_busy_wait(2000);

	/* Manually trigger the crash in the main thread */
	entry_oops(NULL, NULL, NULL);

	/* should not be here */
	ztest_test_fail();
}

static void workq_handler(struct k_work *work)
{
	child_thread_id = curr_cpu();
}

/**
 * @brief Test that the system workq runs on a different core
 * @details When CONFIG_SMP is enabled, work queue items can run
 * on a different core.
 *
 * @ingroup kernel_common_tests
 */
void test_workq_on_smp(void)
{
	static struct k_work work;

	k_work_init(&work, workq_handler);

	/* submit the work item to the system workq */
	k_work_submit(&work);

	/* Wait for some time to let the other core's thread run */
	k_busy_wait(DELAY_US);

	/* check that the work item has finished */
	zassert_equal(k_work_busy_get(&work), 0, NULL);

	main_thread_id = curr_cpu();

	/* Verify that the ztest thread and the system workq run on
	 * different cores
	 */
	zassert_true(main_thread_id != child_thread_id,
		     "system workq runs on the same core");
}

static void t1_mutex_lock(void *p1, void *p2, void *p3)
{
	/* t1 will get the mutex first */
	k_mutex_lock((struct k_mutex *)p1, K_FOREVER);
	k_msleep(2);
	k_mutex_unlock((struct k_mutex *)p1);
}

static void t2_mutex_lock(void *p1, void *p2, void *p3)
{
	zassert_equal(_current->base.global_lock_count, 0,
		      "thread global lock cnt %d is incorrect",
		      _current->base.global_lock_count);

	k_mutex_lock((struct k_mutex *)p1, K_FOREVER);

	zassert_equal(_current->base.global_lock_count, 0,
		      "thread global lock cnt %d is incorrect",
		      _current->base.global_lock_count);

	k_mutex_unlock((struct k_mutex *)p1);

	/**TESTPOINT: z_smp_release_global_lock() was called during the
	 * context switch, but global_lock_count has not been decremented
	 * because no irq_lock() was called.
	 */
	zassert_equal(_current->base.global_lock_count, 0,
		      "thread global lock cnt %d is incorrect",
		      _current->base.global_lock_count);
}

/**
 * @brief Test the scenario where a thread releases the global lock
 *
 * @ingroup kernel_smp_tests
 *
 * @details Validate the scenario that causes the internal SMP API
 * z_smp_release_global_lock() to be called.
 */
void test_smp_release_global_lock(void)
{
	k_mutex_init(&smutex);

	tinfo[0].tid =
		k_thread_create(&tthread[0], tstack[0], STACK_SIZE,
				(k_thread_entry_t)t1_mutex_lock,
				&smutex, NULL, NULL,
				K_PRIO_PREEMPT(5),
				K_INHERIT_PERMS, K_NO_WAIT);

	tinfo[1].tid =
		k_thread_create(&tthread[1], tstack[1], STACK_SIZE,
				(k_thread_entry_t)t2_mutex_lock,
				&smutex, NULL, NULL,
				K_PRIO_PREEMPT(3),
				K_INHERIT_PERMS, K_MSEC(1));

	/* Hold one of the cpus to ensure the context switch we want
	 * can happen on another cpu.
	 */
	k_busy_wait(20000);

	k_thread_join(tinfo[1].tid, K_FOREVER);
	k_thread_join(tinfo[0].tid, K_FOREVER);
	cleanup_resources();
}

#define LOOP_COUNT 20000

enum sync_t {
	LOCK_IRQ,
	LOCK_SEM,
	LOCK_MUTEX
};

static int global_cnt;
static struct k_mutex smp_mutex;

static void (*sync_lock)(void *);
static void (*sync_unlock)(void *);

static void sync_lock_dummy(void *k)
{
	/* no sync lock used */
}

static void sync_lock_irq(void *k)
{
	*((unsigned int *)k) = irq_lock();
}

static void sync_unlock_irq(void *k)
{
	irq_unlock(*(unsigned int *)k);
}

static void sync_lock_sem(void *k)
{
	k_sem_take(&smp_sem, K_FOREVER);
}

static void sync_unlock_sem(void *k)
{
	k_sem_give(&smp_sem);
}

static void sync_lock_mutex(void *k)
{
	k_mutex_lock(&smp_mutex, K_FOREVER);
}

static void sync_unlock_mutex(void *k)
{
	k_mutex_unlock(&smp_mutex);
}

static void sync_init(int lock_type)
{
	switch (lock_type) {
	case LOCK_IRQ:
		sync_lock = sync_lock_irq;
		sync_unlock = sync_unlock_irq;
		break;
	case LOCK_SEM:
		sync_lock = sync_lock_sem;
		sync_unlock = sync_unlock_sem;
		k_sem_init(&smp_sem, 1, 3);
		break;
	case LOCK_MUTEX:
		sync_lock = sync_lock_mutex;
		sync_unlock = sync_unlock_mutex;
		k_mutex_init(&smp_mutex);
		break;
	default:
		sync_lock = sync_unlock = sync_lock_dummy;
	}
}

static void inc_global_cnt(void *a, void *b, void *c)
{
	int key;

	for (int i = 0; i < LOOP_COUNT; i++) {
		sync_lock(&key);

		global_cnt++;
		global_cnt--;
		global_cnt++;

		sync_unlock(&key);
	}
}

static int run_concurrency(int type, void *func)
{
	uint32_t start_t, end_t;

	sync_init(type);
	global_cnt = 0;
	start_t = k_cycle_get_32();

	tinfo[0].tid =
		k_thread_create(&tthread[0], tstack[0], STACK_SIZE,
				(k_thread_entry_t)func,
				NULL, NULL, NULL,
				K_PRIO_PREEMPT(1),
				K_INHERIT_PERMS, K_NO_WAIT);

	tinfo[1].tid =
		k_thread_create(&tthread[1], tstack[1], STACK_SIZE,
				(k_thread_entry_t)func,
				NULL, NULL, NULL,
				K_PRIO_PREEMPT(1),
				K_INHERIT_PERMS, K_NO_WAIT);

	k_tid_t tid =
		k_thread_create(&t2, t2_stack, T2_STACK_SIZE,
				(k_thread_entry_t)func,
				NULL, NULL, NULL,
				K_PRIO_PREEMPT(1),
				K_INHERIT_PERMS, K_NO_WAIT);

	k_thread_join(tinfo[0].tid, K_FOREVER);
	k_thread_join(tinfo[1].tid, K_FOREVER);
	k_thread_join(tid, K_FOREVER);
	cleanup_resources();

	end_t = k_cycle_get_32();

	printk("type %d: cnt %d, spend %u ms\n", type, global_cnt,
		k_cyc_to_ms_ceil32(end_t - start_t));

	return global_cnt == (LOOP_COUNT * 3);
}

/**
 * @brief Test whether the concurrency primitives of SMP work
 *
 * @ingroup kernel_smp_tests
 *
 * @details Validate that the global lock and unlock APIs of SMP are
 * thread-safe. We create three threads that increment the global count
 * on different cpus, and each one locks and unlocks LOOP_COUNT times.
 * No deadlock should occur and the total global count should be
 * 3 * LOOP_COUNT.
 *
 * We exercise four kinds of scenario:
 * - No lock used at all
 * - Using the global irq lock
 * - Using a semaphore
 * - Using a mutex
 */
void test_inc_concurrency(void)
{
	/* increasing the global variable with the irq lock */
	zassert_true(run_concurrency(LOCK_IRQ, inc_global_cnt),
		     "total count %d is wrong(i)", global_cnt);

	/* increasing the global variable with a semaphore */
	zassert_true(run_concurrency(LOCK_SEM, inc_global_cnt),
		     "total count %d is wrong(s)", global_cnt);

	/* increasing the global variable with a mutex */
	zassert_true(run_concurrency(LOCK_MUTEX, inc_global_cnt),
		     "total count %d is wrong(M)", global_cnt);
}

/**
 * @brief Torture test for the context switching code
 *
 * @ingroup kernel_smp_tests
 *
 * @details Leverage the polling API to stress test the context switching code.
 * This test will hammer all the CPUs with thread swapping requests.
*/ static void process_events(void *arg0, void *arg1, void *arg2) { uintptr_t id = (uintptr_t) arg0; while (1) { k_poll(&tevent[id], 1, K_FOREVER); if (tevent[id].signal->result != 0x55) { ztest_test_fail(); } tevent[id].signal->signaled = 0; tevent[id].state = K_POLL_STATE_NOT_READY; k_poll_signal_reset(&tsignal[id]); } } static void signal_raise(void *arg0, void *arg1, void *arg2) { while (1) { for (uintptr_t i = 0; i < THREADS_NUM; i++) { k_poll_signal_raise(&tsignal[i], 0x55); } } } void test_smp_switch_torture(void) { for (uintptr_t i = 0; i < THREADS_NUM; i++) { k_poll_signal_init(&tsignal[i]); k_poll_event_init(&tevent[i], K_POLL_TYPE_SIGNAL, K_POLL_MODE_NOTIFY_ONLY, &tsignal[i]); k_thread_create(&tthread[i], tstack[i], STACK_SIZE, (k_thread_entry_t) process_events, (void *) i, NULL, NULL, K_PRIO_PREEMPT(i + 1), K_INHERIT_PERMS, K_NO_WAIT); } k_thread_create(&t2, t2_stack, T2_STACK_SIZE, signal_raise, NULL, NULL, NULL, K_PRIO_COOP(2), 0, K_NO_WAIT); k_sleep(K_MSEC(SLEEP_MS_LONG)); k_thread_abort(&t2); for (uintptr_t i = 0; i < THREADS_NUM; i++) { k_thread_abort(&tthread[i]); } } void test_main(void) { /* Sleep a bit to guarantee that both CPUs enter an idle * thread from which they can exit correctly to run the main * test. */ k_sleep(K_MSEC(10)); ztest_test_suite(smp, ztest_unit_test(test_smp_coop_threads), ztest_unit_test(test_cpu_id_threads), ztest_unit_test(test_coop_resched_threads), ztest_unit_test(test_preempt_resched_threads), ztest_unit_test(test_yield_threads), ztest_unit_test(test_sleep_threads), ztest_unit_test(test_wakeup_threads), ztest_unit_test(test_smp_ipi), ztest_unit_test(test_get_cpu), ztest_unit_test(test_fatal_on_smp), ztest_unit_test(test_workq_on_smp), ztest_unit_test(test_smp_release_global_lock), ztest_unit_test(test_inc_concurrency), ztest_unit_test(test_smp_switch_torture) ); ztest_run_test_suite(smp); }
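/*
 * Editor's note: an illustrative sketch only, not part of the test suite
 * (hence guarded out). The run_concurrency() cases above serialize the
 * read-modify-write on global_cnt with an irq lock, a semaphore or a mutex.
 * On SMP the same effect can be achieved lock-free with Zephyr's atomic API
 * from <sys/atomic.h>; with a plain int, two cores can interleave the
 * ++/--/++ sequence and lose updates, which is exactly what the "no lock"
 * case demonstrates.
 */
#if 0
#include <sys/atomic.h>

static atomic_t atomic_cnt;

static void inc_atomic_cnt(void *a, void *b, void *c)
{
	for (int i = 0; i < LOOP_COUNT; i++) {
		/* each operation is indivisible even across cores */
		atomic_inc(&atomic_cnt);
		atomic_dec(&atomic_cnt);
		atomic_inc(&atomic_cnt);
	}
	/* after three such threads finish:
	 * atomic_get(&atomic_cnt) == 3 * LOOP_COUNT
	 */
}
#endif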
77796.c
/* ---------------------------------------------------------------------------- * Copyright (c) Huawei Technologies Co., Ltd. 2021-2021. All rights reserved. * Description: Membox Test Case * Author: Huawei LiteOS Team * Create: 2021-06-02 * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * 3. Neither the name of the copyright holder nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- */ #include "it_los_membox.h" #ifdef __cplusplus #if __cplusplus extern "C" { #endif /* __cplusplus */ #endif /* __cplusplus */ static UINT32 TestCase(VOID) { char c; VOID *p0 = NULL; MEMBOX_START(); g_blkSize = 0x200; MEMBOX_INIT(); p0 = LOS_MemboxAlloc(g_boxAddr); ICUNIT_GOTO_NOT_EQUAL(p0, NULL, p0, EXIT); memset_s(p0, g_blkSize, 'c', g_blkSize); LOS_MemboxClr(g_boxAddr, NULL); LOS_MemboxClr(NULL, p0); c = *((char*)p0); ICUNIT_GOTO_EQUAL(c, 'c', c, EXIT); LOS_MemboxClr(g_boxAddr, p0); c = *((char*)p0); ICUNIT_GOTO_EQUAL(c, 0, c, EXIT); EXIT: MEMBOX_FREE(); MEMBOX_END(); return LOS_OK; } /** * @ingroup TEST_MEM * @par TestCase_Number * ItLosMembox004 * @par TestCase_TestCase_Type * Function test * @brief LOS_MemboxClr API test. * @par TestCase_Pretreatment_Condition * NA. * @par TestCase_Test_Steps * step1: Set input parameter of LOS_MemboxClr to NULL.\n * step2: Set input parameter of LOS_MemboxClr to correct value. * @par TestCase_Expected_Result * 1.LOS_MemboxClr will not work.\n * 2.LOS_MemboxClr is executed successfully. * @par TestCase_Level * Level 0 * @par TestCase_Automated * true * @par TestCase_Remark * null */ VOID ItLosMembox004(VOID) { TEST_ADD_CASE("ItLosMembox004", TestCase, TEST_LOS, TEST_MEM, TEST_LEVEL0, TEST_FUNCTION); } #ifdef __cplusplus #if __cplusplus } #endif /* __cplusplus */ #endif /* __cplusplus */
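/*
 * Editor's note: an illustrative sketch, not part of the test suite (hence
 * guarded out). It shows the plain membox lifecycle that the MEMBOX_* test
 * macros above wrap: initialize a static pool, allocate a block, clear it,
 * and free it. The pool size and block size here are arbitrary example
 * values.
 */
#if 0
static UINT32 MemboxUsageSketch(VOID)
{
    static UINT8 pool[4096];
    VOID *blk = NULL;

    /* carve the pool into fixed 0x200-byte blocks */
    if (LOS_MemboxInit(pool, sizeof(pool), 0x200) != LOS_OK) {
        return LOS_NOK;
    }

    blk = LOS_MemboxAlloc(pool);
    if (blk == NULL) {
        return LOS_NOK;
    }

    LOS_MemboxClr(pool, blk);       /* zero the block's payload */
    (VOID)LOS_MemboxFree(pool, blk);
    return LOS_OK;
}
#endif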
874550.c
/* * Implementation of the kernel access vector cache (AVC). * * Authors: Stephen Smalley, <[email protected]> * James Morris <[email protected]> * * Update: KaiGai, Kohei <[email protected]> * Replaced the avc_lock spinlock by RCU. * * Copyright (C) 2003 Red Hat, Inc., James Morris <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, * as published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/dcache.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/percpu.h> #include <linux/list.h> #include <net/sock.h> #include <linux/un.h> #include <net/af_unix.h> #include <linux/ip.h> #include <linux/audit.h> #include <linux/ipv6.h> #include <net/ipv6.h> #include "avc.h" #include "avc_ss.h" #include "classmap.h" #define AVC_CACHE_SLOTS 512 #define AVC_DEF_CACHE_THRESHOLD 512 #define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS #define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) #else #define avc_cache_stats_incr(field) do {} while (0) #endif struct avc_entry { u32 ssid; u32 tsid; u16 tclass; struct av_decision avd; struct avc_xperms_node *xp_node; }; struct avc_node { struct avc_entry ae; struct hlist_node list; /* anchored in avc_cache->slots[i] */ struct rcu_head rhead; }; struct avc_xperms_decision_node { struct extended_perms_decision xpd; struct list_head xpd_list; /* list of extended_perms_decision */ }; struct avc_xperms_node { struct extended_perms xp; struct list_head xpd_head; /* list head of extended_perms_decision */ }; struct avc_cache { struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */ spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */ atomic_t lru_hint; /* LRU hint for reclaim scan */ atomic_t active_nodes; u32 latest_notif; /* latest revocation notification */ }; struct avc_callback_node { int (*callback) (u32 event); u32 events; struct avc_callback_node *next; }; /* Exported via selinufs */ unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD; #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 }; #endif static struct avc_cache avc_cache; static struct avc_callback_node *avc_callbacks; static struct kmem_cache *avc_node_cachep; static struct kmem_cache *avc_xperms_data_cachep; static struct kmem_cache *avc_xperms_decision_cachep; static struct kmem_cache *avc_xperms_cachep; static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass) { return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1); } /** * avc_dump_av - Display an access vector in human-readable form. * @tclass: target security class * @av: access vector */ static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av) { const char **perms; int i, perm; if (av == 0) { audit_log_format(ab, " null"); return; } perms = secclass_map[tclass-1].perms; audit_log_format(ab, " {"); i = 0; perm = 1; while (i < (sizeof(av) * 8)) { if ((perm & av) && perms[i]) { audit_log_format(ab, " %s", perms[i]); av &= ~perm; } i++; perm <<= 1; } if (av) audit_log_format(ab, " 0x%x", av); audit_log_format(ab, " }"); } /** * avc_dump_query - Display a SID pair and a class in human-readable form. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class */ static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass) { int rc; char *scontext; u32 scontext_len; rc = security_sid_to_context(ssid, &scontext, &scontext_len); if (rc) audit_log_format(ab, "ssid=%d", ssid); else { audit_log_format(ab, "scontext=%s", scontext); kfree(scontext); } rc = security_sid_to_context(tsid, &scontext, &scontext_len); if (rc) audit_log_format(ab, " tsid=%d", tsid); else { audit_log_format(ab, " tcontext=%s", scontext); kfree(scontext); } BUG_ON(tclass >= ARRAY_SIZE(secclass_map)); audit_log_format(ab, " tclass=%s", secclass_map[tclass-1].name); } /** * avc_init - Initialize the AVC. * * Initialize the access vector cache. */ void __init avc_init(void) { int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { INIT_HLIST_HEAD(&avc_cache.slots[i]); spin_lock_init(&avc_cache.slots_lock[i]); } atomic_set(&avc_cache.active_nodes, 0); atomic_set(&avc_cache.lru_hint, 0); avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node), 0, SLAB_PANIC, NULL); avc_xperms_cachep = kmem_cache_create("avc_xperms_node", sizeof(struct avc_xperms_node), 0, SLAB_PANIC, NULL); avc_xperms_decision_cachep = kmem_cache_create( "avc_xperms_decision_node", sizeof(struct avc_xperms_decision_node), 0, SLAB_PANIC, NULL); avc_xperms_data_cachep = kmem_cache_create("avc_xperms_data", sizeof(struct extended_perms_data), 0, SLAB_PANIC, NULL); audit_log(current->audit_context, GFP_KERNEL, AUDIT_KERNEL, "AVC INITIALIZED\n"); } int avc_get_hash_stats(char *page) { int i, chain_len, max_chain_len, slots_used; struct avc_node *node; struct hlist_head *head; rcu_read_lock(); slots_used = 0; max_chain_len = 0; for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; if (!hlist_empty(head)) { slots_used++; chain_len = 0; hlist_for_each_entry_rcu(node, head, list) chain_len++; if (chain_len > max_chain_len) max_chain_len = chain_len; } } rcu_read_unlock(); return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n" "longest chain: %d\n", atomic_read(&avc_cache.active_nodes), slots_used, AVC_CACHE_SLOTS, max_chain_len); } /* * using a linked list for extended_perms_decision lookup because the list is * always small. i.e. 
less than 5, typically 1 */ static struct extended_perms_decision *avc_xperms_decision_lookup(u8 driver, struct avc_xperms_node *xp_node) { struct avc_xperms_decision_node *xpd_node; list_for_each_entry(xpd_node, &xp_node->xpd_head, xpd_list) { if (xpd_node->xpd.driver == driver) return &xpd_node->xpd; } return NULL; } static inline unsigned int avc_xperms_has_perm(struct extended_perms_decision *xpd, u8 perm, u8 which) { unsigned int rc = 0; if ((which == XPERMS_ALLOWED) && (xpd->used & XPERMS_ALLOWED)) rc = security_xperm_test(xpd->allowed->p, perm); else if ((which == XPERMS_AUDITALLOW) && (xpd->used & XPERMS_AUDITALLOW)) rc = security_xperm_test(xpd->auditallow->p, perm); else if ((which == XPERMS_DONTAUDIT) && (xpd->used & XPERMS_DONTAUDIT)) rc = security_xperm_test(xpd->dontaudit->p, perm); return rc; } static void avc_xperms_allow_perm(struct avc_xperms_node *xp_node, u8 driver, u8 perm) { struct extended_perms_decision *xpd; security_xperm_set(xp_node->xp.drivers.p, driver); xpd = avc_xperms_decision_lookup(driver, xp_node); if (xpd && xpd->allowed) security_xperm_set(xpd->allowed->p, perm); } static void avc_xperms_decision_free(struct avc_xperms_decision_node *xpd_node) { struct extended_perms_decision *xpd; xpd = &xpd_node->xpd; if (xpd->allowed) kmem_cache_free(avc_xperms_data_cachep, xpd->allowed); if (xpd->auditallow) kmem_cache_free(avc_xperms_data_cachep, xpd->auditallow); if (xpd->dontaudit) kmem_cache_free(avc_xperms_data_cachep, xpd->dontaudit); kmem_cache_free(avc_xperms_decision_cachep, xpd_node); } static void avc_xperms_free(struct avc_xperms_node *xp_node) { struct avc_xperms_decision_node *xpd_node, *tmp; if (!xp_node) return; list_for_each_entry_safe(xpd_node, tmp, &xp_node->xpd_head, xpd_list) { list_del(&xpd_node->xpd_list); avc_xperms_decision_free(xpd_node); } kmem_cache_free(avc_xperms_cachep, xp_node); } static void avc_copy_xperms_decision(struct extended_perms_decision *dest, struct extended_perms_decision *src) { dest->driver = src->driver; dest->used = src->used; if (dest->used & XPERMS_ALLOWED) memcpy(dest->allowed->p, src->allowed->p, sizeof(src->allowed->p)); if (dest->used & XPERMS_AUDITALLOW) memcpy(dest->auditallow->p, src->auditallow->p, sizeof(src->auditallow->p)); if (dest->used & XPERMS_DONTAUDIT) memcpy(dest->dontaudit->p, src->dontaudit->p, sizeof(src->dontaudit->p)); } /* * similar to avc_copy_xperms_decision, but only copy decision * information relevant to this perm */ static inline void avc_quick_copy_xperms_decision(u8 perm, struct extended_perms_decision *dest, struct extended_perms_decision *src) { /* * compute index of the u32 of the 256 bits (8 u32s) that contain this * command permission */ u8 i = perm >> 5; dest->used = src->used; if (dest->used & XPERMS_ALLOWED) dest->allowed->p[i] = src->allowed->p[i]; if (dest->used & XPERMS_AUDITALLOW) dest->auditallow->p[i] = src->auditallow->p[i]; if (dest->used & XPERMS_DONTAUDIT) dest->dontaudit->p[i] = src->dontaudit->p[i]; } static struct avc_xperms_decision_node *avc_xperms_decision_alloc(u8 which) { struct avc_xperms_decision_node *xpd_node; struct extended_perms_decision *xpd; xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd_node) return NULL; xpd = &xpd_node->xpd; if (which & XPERMS_ALLOWED) { xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd->allowed) goto error; } if (which & XPERMS_AUDITALLOW) { xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); 
if (!xpd->auditallow) goto error; } if (which & XPERMS_DONTAUDIT) { xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep, GFP_ATOMIC | __GFP_NOMEMALLOC); if (!xpd->dontaudit) goto error; } return xpd_node; error: avc_xperms_decision_free(xpd_node); return NULL; } static int avc_add_xperms_decision(struct avc_node *node, struct extended_perms_decision *src) { struct avc_xperms_decision_node *dest_xpd; node->ae.xp_node->xp.len++; dest_xpd = avc_xperms_decision_alloc(src->used); if (!dest_xpd) return -ENOMEM; avc_copy_xperms_decision(&dest_xpd->xpd, src); list_add(&dest_xpd->xpd_list, &node->ae.xp_node->xpd_head); return 0; } static struct avc_xperms_node *avc_xperms_alloc(void) { struct avc_xperms_node *xp_node; xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); if (!xp_node) return xp_node; INIT_LIST_HEAD(&xp_node->xpd_head); return xp_node; } static int avc_xperms_populate(struct avc_node *node, struct avc_xperms_node *src) { struct avc_xperms_node *dest; struct avc_xperms_decision_node *dest_xpd; struct avc_xperms_decision_node *src_xpd; if (src->xp.len == 0) return 0; dest = avc_xperms_alloc(); if (!dest) return -ENOMEM; memcpy(dest->xp.drivers.p, src->xp.drivers.p, sizeof(dest->xp.drivers.p)); dest->xp.len = src->xp.len; /* for each source xpd allocate a destination xpd and copy */ list_for_each_entry(src_xpd, &src->xpd_head, xpd_list) { dest_xpd = avc_xperms_decision_alloc(src_xpd->xpd.used); if (!dest_xpd) goto error; avc_copy_xperms_decision(&dest_xpd->xpd, &src_xpd->xpd); list_add(&dest_xpd->xpd_list, &dest->xpd_head); } node->ae.xp_node = dest; return 0; error: avc_xperms_free(dest); return -ENOMEM; } static inline u32 avc_xperms_audit_required(u32 requested, struct av_decision *avd, struct extended_perms_decision *xpd, u8 perm, int result, u32 *deniedp) { u32 denied, audited; denied = requested & ~avd->allowed; if (unlikely(denied)) { audited = denied & avd->auditdeny; if (audited && xpd) { if (avc_xperms_has_perm(xpd, perm, XPERMS_DONTAUDIT)) audited &= ~requested; } } else if (result) { audited = denied = requested; } else { audited = requested & avd->auditallow; if (audited && xpd) { if (!avc_xperms_has_perm(xpd, perm, XPERMS_AUDITALLOW)) audited &= ~requested; } } *deniedp = denied; return audited; } static inline int avc_xperms_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct av_decision *avd, struct extended_perms_decision *xpd, u8 perm, int result, struct common_audit_data *ad) { u32 audited, denied; audited = avc_xperms_audit_required( requested, avd, xpd, perm, result, &denied); if (likely(!audited)) return 0; return slow_avc_audit(ssid, tsid, tclass, requested, audited, denied, result, ad, 0); } static void avc_node_free(struct rcu_head *rhead) { struct avc_node *node = container_of(rhead, struct avc_node, rhead); avc_xperms_free(node->ae.xp_node); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); } static void avc_node_delete(struct avc_node *node) { hlist_del_rcu(&node->list); call_rcu(&node->rhead, avc_node_free); atomic_dec(&avc_cache.active_nodes); } static void avc_node_kill(struct avc_node *node) { avc_xperms_free(node->ae.xp_node); kmem_cache_free(avc_node_cachep, node); avc_cache_stats_incr(frees); atomic_dec(&avc_cache.active_nodes); } static void avc_node_replace(struct avc_node *new, struct avc_node *old) { hlist_replace_rcu(&old->list, &new->list); call_rcu(&old->rhead, avc_node_free); atomic_dec(&avc_cache.active_nodes); } static inline int avc_reclaim_node(void) { struct avc_node *node; int 
hvalue, try, ecx; unsigned long flags; struct hlist_head *head; spinlock_t *lock; for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; if (!spin_trylock_irqsave(lock, flags)) continue; rcu_read_lock(); hlist_for_each_entry(node, head, list) { avc_node_delete(node); avc_cache_stats_incr(reclaims); ecx++; if (ecx >= AVC_CACHE_RECLAIM) { rcu_read_unlock(); spin_unlock_irqrestore(lock, flags); goto out; } } rcu_read_unlock(); spin_unlock_irqrestore(lock, flags); } out: return ecx; } static struct avc_node *avc_alloc_node(void) { struct avc_node *node; node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); if (!node) goto out; INIT_HLIST_NODE(&node->list); avc_cache_stats_incr(allocations); if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold) avc_reclaim_node(); out: return node; } static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd) { node->ae.ssid = ssid; node->ae.tsid = tsid; node->ae.tclass = tclass; memcpy(&node->ae.avd, avd, sizeof(node->ae.avd)); } static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node, *ret = NULL; int hvalue; struct hlist_head *head; hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; hlist_for_each_entry_rcu(node, head, list) { if (ssid == node->ae.ssid && tclass == node->ae.tclass && tsid == node->ae.tsid) { ret = node; break; } } return ret; } /** * avc_lookup - Look up an AVC entry. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * * Look up an AVC entry that is valid for the * (@ssid, @tsid), interpreting the permissions * based on @tclass. If a valid AVC entry exists, * then this function returns the avc_node. * Otherwise, this function returns NULL. */ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) { struct avc_node *node; avc_cache_stats_incr(lookups); node = avc_search_node(ssid, tsid, tclass); if (node) return node; avc_cache_stats_incr(misses); return NULL; } static int avc_latest_notif_update(int seqno, int is_insert) { int ret = 0; static DEFINE_SPINLOCK(notif_lock); unsigned long flag; spin_lock_irqsave(&notif_lock, flag); if (is_insert) { if (seqno < avc_cache.latest_notif) { printk(KERN_WARNING "SELinux: avc: seqno %d < latest_notif %d\n", seqno, avc_cache.latest_notif); ret = -EAGAIN; } } else { if (seqno > avc_cache.latest_notif) avc_cache.latest_notif = seqno; } spin_unlock_irqrestore(&notif_lock, flag); return ret; } /** * avc_insert - Insert an AVC entry. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @avd: resulting av decision * @xp_node: resulting extended permissions * * Insert an AVC entry for the SID pair * (@ssid, @tsid) and class @tclass. * The access vectors and the sequence number are * normally provided by the security server in * response to a security_compute_av() call. If the * sequence number @avd->seqno is not less than the latest * revocation notification, then the function copies * the access vectors into a cache entry, returns * avc_node inserted. Otherwise, this function returns NULL. 
*/ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd, struct avc_xperms_node *xp_node) { struct avc_node *pos, *node = NULL; int hvalue; unsigned long flag; if (avc_latest_notif_update(avd->seqno, 1)) goto out; node = avc_alloc_node(); if (node) { struct hlist_head *head; spinlock_t *lock; int rc = 0; hvalue = avc_hash(ssid, tsid, tclass); avc_node_populate(node, ssid, tsid, tclass, avd); rc = avc_xperms_populate(node, xp_node); if (rc) { kmem_cache_free(avc_node_cachep, node); return NULL; } head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); hlist_for_each_entry(pos, head, list) { if (pos->ae.ssid == ssid && pos->ae.tsid == tsid && pos->ae.tclass == tclass) { avc_node_replace(node, pos); goto found; } } hlist_add_head_rcu(&node->list, head); found: spin_unlock_irqrestore(lock, flag); } out: return node; } /** * avc_audit_pre_callback - SELinux specific information * will be called by generic audit code * @ab: the audit buffer * @a: audit_data */ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, "avc: %s ", ad->selinux_audit_data->denied ? "denied" : "granted"); avc_dump_av(ab, ad->selinux_audit_data->tclass, ad->selinux_audit_data->audited); audit_log_format(ab, " for "); } /** * avc_audit_post_callback - SELinux specific information * will be called by generic audit code * @ab: the audit buffer * @a: audit_data */ static void avc_audit_post_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, " "); avc_dump_query(ab, ad->selinux_audit_data->ssid, ad->selinux_audit_data->tsid, ad->selinux_audit_data->tclass); if (ad->selinux_audit_data->denied) { audit_log_format(ab, " permissive=%u", ad->selinux_audit_data->result ? 0 : 1); #ifdef CONFIG_MTK_SELINUX_AEE_WARNING { struct nlmsghdr *nlh; char *selinux_data; if (ab) { nlh = nlmsg_hdr(audit_get_skb(ab)); selinux_data = nlmsg_data(nlh); if (nlh->nlmsg_type != AUDIT_EOE) { if (nlh->nlmsg_type == 1400) mtk_audit_hook(selinux_data); } } } #endif } } /* This is the slow part of avc audit with big stack footprint */ noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested, u32 audited, u32 denied, int result, struct common_audit_data *a, unsigned flags) { struct common_audit_data stack_data; struct selinux_audit_data sad; if (!a) { a = &stack_data; a->type = LSM_AUDIT_DATA_NONE; } /* * When in a RCU walk do the audit on the RCU retry. This is because * the collection of the dname in an inode audit message is not RCU * safe. Note this may drop some audits when the situation changes * during retry. However this is logically just as if the operation * happened a little later. */ if ((a->type == LSM_AUDIT_DATA_INODE) && (flags & MAY_NOT_BLOCK)) return -ECHILD; sad.tclass = tclass; sad.requested = requested; sad.ssid = ssid; sad.tsid = tsid; sad.audited = audited; sad.denied = denied; sad.result = result; a->selinux_audit_data = &sad; common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); return 0; } /** * avc_add_callback - Register a callback for security events. * @callback: callback function * @events: security events * * Register a callback function for events in the set @events. * Returns %0 on success or -%ENOMEM if insufficient memory * exists to add the callback. 
*/ int __init avc_add_callback(int (*callback)(u32 event), u32 events) { struct avc_callback_node *c; int rc = 0; c = kmalloc(sizeof(*c), GFP_KERNEL); if (!c) { rc = -ENOMEM; goto out; } c->callback = callback; c->events = events; c->next = avc_callbacks; avc_callbacks = c; out: return rc; } static inline int avc_sidcmp(u32 x, u32 y) { return (x == y || x == SECSID_WILD || y == SECSID_WILD); } /** * avc_update_node Update an AVC entry * @event : Updating event * @perms : Permission mask bits * @ssid,@tsid,@tclass : identifier of an AVC entry * @seqno : sequence number when decision was made * @xpd: extended_perms_decision to be added to the node * * if a valid AVC entry doesn't exist,this function returns -ENOENT. * if kmalloc() called internal returns NULL, this function returns -ENOMEM. * otherwise, this function updates the AVC entry. The original AVC-entry object * will release later by RCU. */ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid, u32 tsid, u16 tclass, u32 seqno, struct extended_perms_decision *xpd, u32 flags) { int hvalue, rc = 0; unsigned long flag; struct avc_node *pos, *node, *orig = NULL; struct hlist_head *head; spinlock_t *lock; node = avc_alloc_node(); if (!node) { rc = -ENOMEM; goto out; } /* Lock the target slot */ hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); hlist_for_each_entry(pos, head, list) { if (ssid == pos->ae.ssid && tsid == pos->ae.tsid && tclass == pos->ae.tclass && seqno == pos->ae.avd.seqno){ orig = pos; break; } } if (!orig) { rc = -ENOENT; avc_node_kill(node); goto out_unlock; } /* * Copy and replace original node. */ avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd); if (orig->ae.xp_node) { rc = avc_xperms_populate(node, orig->ae.xp_node); if (rc) { kmem_cache_free(avc_node_cachep, node); goto out_unlock; } } switch (event) { case AVC_CALLBACK_GRANT: node->ae.avd.allowed |= perms; if (node->ae.xp_node && (flags & AVC_EXTENDED_PERMS)) avc_xperms_allow_perm(node->ae.xp_node, driver, xperm); break; case AVC_CALLBACK_TRY_REVOKE: case AVC_CALLBACK_REVOKE: node->ae.avd.allowed &= ~perms; break; case AVC_CALLBACK_AUDITALLOW_ENABLE: node->ae.avd.auditallow |= perms; break; case AVC_CALLBACK_AUDITALLOW_DISABLE: node->ae.avd.auditallow &= ~perms; break; case AVC_CALLBACK_AUDITDENY_ENABLE: node->ae.avd.auditdeny |= perms; break; case AVC_CALLBACK_AUDITDENY_DISABLE: node->ae.avd.auditdeny &= ~perms; break; case AVC_CALLBACK_ADD_XPERMS: avc_add_xperms_decision(node, xpd); break; } avc_node_replace(node, orig); out_unlock: spin_unlock_irqrestore(lock, flag); out: return rc; } /** * avc_flush - Flush the cache */ static void avc_flush(void) { struct hlist_head *head; struct avc_node *node; spinlock_t *lock; unsigned long flag; int i; for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; lock = &avc_cache.slots_lock[i]; spin_lock_irqsave(lock, flag); /* * With preemptable RCU, the outer spinlock does not * prevent RCU grace periods from ending. */ rcu_read_lock(); hlist_for_each_entry(node, head, list) avc_node_delete(node); rcu_read_unlock(); spin_unlock_irqrestore(lock, flag); } } /** * avc_ss_reset - Flush the cache and revalidate migrated permissions. 
 * @seqno: policy sequence number
 */
int avc_ss_reset(u32 seqno)
{
	struct avc_callback_node *c;
	int rc = 0, tmprc;

	avc_flush();

	for (c = avc_callbacks; c; c = c->next) {
		if (c->events & AVC_CALLBACK_RESET) {
			tmprc = c->callback(AVC_CALLBACK_RESET);
			/* save the first error encountered for the return
			 * value and continue processing the callbacks
			 */
			if (!rc)
				rc = tmprc;
		}
	}

	avc_latest_notif_update(seqno, 0);
	return rc;
}

/*
 * Slow-path helper function for avc_has_perm_noaudit,
 * when the avc_node lookup fails. We get called with
 * the RCU read lock held, and need to return with it
 * still held, but drop it for the security compute.
 *
 * Don't inline this, since it's the slow-path and just
 * results in a bigger stack frame.
 */
static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
			 u16 tclass, struct av_decision *avd,
			 struct avc_xperms_node *xp_node)
{
	rcu_read_unlock();
	INIT_LIST_HEAD(&xp_node->xpd_head);
	security_compute_av(ssid, tsid, tclass, avd, &xp_node->xp);
	rcu_read_lock();
	return avc_insert(ssid, tsid, tclass, avd, xp_node);
}

static noinline int avc_denied(u32 ssid, u32 tsid,
				u16 tclass, u32 requested,
				u8 driver, u8 xperm, unsigned flags,
				struct av_decision *avd)
{
	if (flags & AVC_STRICT)
		return -EACCES;

	if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
		return -EACCES;

	avc_update_node(AVC_CALLBACK_GRANT, requested, driver, xperm, ssid,
				tsid, tclass, avd->seqno, NULL, flags);
	return 0;
}

/*
 * The avc extended permissions logic adds an additional 256 bits of
 * permissions to an avc node when extended permissions for that node are
 * specified in the avtab. If the additional 256 permissions are not adequate,
 * as is the case with ioctls, then multiple sets may be chained together and
 * the driver field is used to specify which set contains the permission.
*/ int avc_has_extended_perms(u32 ssid, u32 tsid, u16 tclass, u32 requested, u8 driver, u8 xperm, struct common_audit_data *ad) { struct avc_node *node; struct av_decision avd; u32 denied; struct extended_perms_decision local_xpd; struct extended_perms_decision *xpd = NULL; struct extended_perms_data allowed; struct extended_perms_data auditallow; struct extended_perms_data dontaudit; struct avc_xperms_node local_xp_node; struct avc_xperms_node *xp_node; int rc = 0, rc2; xp_node = &local_xp_node; BUG_ON(!requested); rcu_read_lock(); node = avc_lookup(ssid, tsid, tclass); if (unlikely(!node)) { node = avc_compute_av(ssid, tsid, tclass, &avd, xp_node); } else { memcpy(&avd, &node->ae.avd, sizeof(avd)); xp_node = node->ae.xp_node; } /* if extended permissions are not defined, only consider av_decision */ if (!xp_node || !xp_node->xp.len) goto decision; local_xpd.allowed = &allowed; local_xpd.auditallow = &auditallow; local_xpd.dontaudit = &dontaudit; xpd = avc_xperms_decision_lookup(driver, xp_node); if (unlikely(!xpd)) { /* * Compute the extended_perms_decision only if the driver * is flagged */ if (!security_xperm_test(xp_node->xp.drivers.p, driver)) { avd.allowed &= ~requested; goto decision; } rcu_read_unlock(); security_compute_xperms_decision(ssid, tsid, tclass, driver, &local_xpd); rcu_read_lock(); avc_update_node(AVC_CALLBACK_ADD_XPERMS, requested, driver, xperm, ssid, tsid, tclass, avd.seqno, &local_xpd, 0); } else { avc_quick_copy_xperms_decision(xperm, &local_xpd, xpd); } xpd = &local_xpd; if (!avc_xperms_has_perm(xpd, xperm, XPERMS_ALLOWED)) avd.allowed &= ~requested; decision: denied = requested & ~(avd.allowed); if (unlikely(denied)) rc = avc_denied(ssid, tsid, tclass, requested, driver, xperm, AVC_EXTENDED_PERMS, &avd); rcu_read_unlock(); rc2 = avc_xperms_audit(ssid, tsid, tclass, requested, &avd, xpd, xperm, rc, ad); if (rc2) return rc2; return rc; } /** * avc_has_perm_noaudit - Check permissions but perform no auditing. * @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @flags: AVC_STRICT or 0 * @avd: access vector decisions * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions * based on @tclass, and call the security server on a cache miss to obtain * a new decision and add it to the cache. Return a copy of the decisions * in @avd. Return %0 if all @requested permissions are granted, * -%EACCES if any permissions are denied, or another -errno upon * other errors. This function is typically called by avc_has_perm(), * but may also be called directly to separate permission checking from * auditing, e.g. in cases where a lock must be held for the check but * should be released for the auditing. */ inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, unsigned flags, struct av_decision *avd) { struct avc_node *node; struct avc_xperms_node xp_node; int rc = 0; u32 denied; BUG_ON(!requested); rcu_read_lock(); node = avc_lookup(ssid, tsid, tclass); if (unlikely(!node)) node = avc_compute_av(ssid, tsid, tclass, avd, &xp_node); else memcpy(avd, &node->ae.avd, sizeof(*avd)); denied = requested & ~(avd->allowed); if (unlikely(denied)) rc = avc_denied(ssid, tsid, tclass, requested, 0, 0, flags, avd); rcu_read_unlock(); return rc; } /** * avc_has_perm - Check permissions and perform any appropriate auditing. 
* @ssid: source security identifier * @tsid: target security identifier * @tclass: target security class * @requested: requested permissions, interpreted based on @tclass * @auditdata: auxiliary audit data * * Check the AVC to determine whether the @requested permissions are granted * for the SID pair (@ssid, @tsid), interpreting the permissions * based on @tclass, and call the security server on a cache miss to obtain * a new decision and add it to the cache. Audit the granting or denial of * permissions in accordance with the policy. Return %0 if all @requested * permissions are granted, -%EACCES if any permissions are denied, or * another -errno upon other errors. */ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata) { struct av_decision avd; int rc, rc2; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata); if (rc2) return rc2; return rc; } u32 avc_policy_seqno(void) { return avc_cache.latest_notif; } void avc_disable(void) { /* * If you are looking at this because you have realized that we are * not destroying the avc_node_cachep it might be easy to fix, but * I don't know the memory barrier semantics well enough to know. It's * possible that some other task dereferenced security_ops when * it still pointed to selinux operations. If that is the case it's * possible that it is about to use the avc and is about to need the * avc_node_cachep. I know I could wrap the security.c security_ops call * in an rcu_lock, but seriously, it's not worth it. Instead I just flush * the cache and get that memory back. */ if (avc_node_cachep) { avc_flush(); /* kmem_cache_destroy(avc_node_cachep); */ } }
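/*
 * Editor's note: an illustrative, standalone userspace sketch, not kernel
 * code (hence guarded out). It reproduces the avc_hash() calculation above
 * to show how a (ssid, tsid, tclass) triple is folded into one of the 512
 * cache slots: tsid and tclass are shifted so that small, similar IDs still
 * spread across slots, and the final AND works as a cheap modulo because
 * AVC_CACHE_SLOTS is a power of two.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SLOTS 512 /* mirrors AVC_CACHE_SLOTS */

static unsigned int hash_example(uint32_t ssid, uint32_t tsid, uint16_t tclass)
{
	return (ssid ^ (tsid << 2) ^ (tclass << 4)) & (SLOTS - 1);
}

int main(void)
{
	/* neighbouring SIDs land in different buckets */
	for (uint32_t sid = 100; sid < 104; sid++)
		printf("ssid=%u -> slot %u\n", sid, hash_example(sid, 7, 2));
	return 0;
}
#endif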
96158.c
/******************************************************************************* * * DO NOT EDIT THIS FILE! * This file is auto-generated by fltg from * INTERNAL/fltg/xgs/ctr/bcm56996_a0/bcm56996_a0_CTR_EGR_EFLEX_OPERAND_PROFILE_INFO.map.ltl for * bcm56996_a0 * * Tool: $SDK/INTERNAL/fltg/bin/fltg * * Edits to this file will be lost when it is regenerated. * * This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file. * * Copyright 2007-2020 Broadcom Inc. All rights reserved. */ #include <bcmlrd/bcmlrd_internal.h> #include <bcmlrd/chip/bcmlrd_id.h> #include <bcmlrd/chip/bcm56996_a0/bcm56996_a0_lrd_field_data.h> #include <bcmlrd/chip/bcm56996_a0/bcm56996_a0_lrd_ltm_intf.h> #include <bcmlrd/chip/bcm56996_a0/bcm56996_a0_lrd_xfrm_field_desc.h> #include <bcmdrd/chip/bcm56996_a0_enum.h> #include "bcmltd/chip/bcmltd_common_enumpool.h" #include "bcm56996_a0_lrd_enumpool.h" #include <bcmltd/bcmltd_handler.h> /* CTR_EGR_EFLEX_OPERAND_PROFILE_INFO field init */ static const bcmlrd_field_data_t bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map_field_data_mmd[] = { { /* 0 CTR_EGR_EFLEX_OPERAND_PROFILE_ID */ .flags = BCMLRD_FIELD_F_READ_ONLY | BCMLTD_FIELD_F_KEY, .min = &bcm56996_a0_lrd_ifd_u16_0x0, .def = &bcm56996_a0_lrd_ifd_u16_0x0, .max = &bcm56996_a0_lrd_ifd_u16_0xffff, .depth = 0, .width = 16, .edata = NULL, }, { /* 1 PROFILE */ .flags = BCMLRD_FIELD_F_READ_ONLY, .min = &bcm56996_a0_lrd_ifd_u16_0x0, .def = &bcm56996_a0_lrd_ifd_u16_0x0, .max = &bcm56996_a0_lrd_ifd_u16_0x3f, .depth = 0, .width = 6, .edata = NULL, }, }; const bcmlrd_map_field_data_t bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map_field_data = { .fields = 2, .field = bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map_field_data_mmd }; static const bcmlrd_map_table_attr_t bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_infot_attr_entry[] = { { /* 0 */ .key = BCMLRD_MAP_TABLE_ATTRIBUTE_ENTRY_LIMIT, .value = 256, }, { /* 1 */ .key = BCMLRD_MAP_TABLE_ATTRIBUTE_INTERACTIVE, .value = true, }, }; static const bcmlrd_map_attr_t bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_infot_attr_group = { .attributes = 2, .attr = bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_infot_attr_entry, }; static const bcmlrd_map_entry_t bcm56996_a0_lrd_bcmimm_ctr_egr_eflex_operand_profile_info_entry[] = { { /* 0 */ .entry_type = BCMLRD_MAP_ENTRY_TABLE_HANDLER, .desc = { .field_id = 0, .field_idx = 0, .minbit = 0, .maxbit = 0, .entry_idx = 0, .reserved = 0 }, .u = { /* handler: bcm56996_a0_lta_bcmimm_ctr_egr_eflex_operand_profile_info_cth_handler */ .handler_id = BCMLTD_TABLE_BCM56996_A0_LTA_BCMIMM_CTR_EGR_EFLEX_OPERAND_PROFILE_INFO_CTH_HANDLER_ID } }, }; static const bcmlrd_map_group_t bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map_group[] = { { .dest = { .kind = BCMLRD_MAP_CUSTOM, .id = 0, }, .entries = 1, .entry = bcm56996_a0_lrd_bcmimm_ctr_egr_eflex_operand_profile_info_entry }, }; const bcmlrd_map_t bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map = { .src_id = CTR_EGR_EFLEX_OPERAND_PROFILE_INFOt, .field_data = &bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map_field_data, .groups = 1, .group = bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_info_map_group, .table_attr = &bcm56996_a0_lrd_ctr_egr_eflex_operand_profile_infot_attr_group, .entry_ops = BCMLRD_MAP_TABLE_ENTRY_OPERATION_LOOKUP | BCMLRD_MAP_TABLE_ENTRY_OPERATION_TRAVERSE };
853216.c
/***********************************************************************/ /* */ /* Objective Caml */ /* */ /* Xavier Leroy, projet Cristal, INRIA Rocquencourt */ /* */ /* Copyright 1996 Institut National de Recherche en Informatique et */ /* en Automatique. All rights reserved. This file is distributed */ /* under the terms of the GNU Library General Public License, with */ /* the special exception on linking described in file ../LICENSE. */ /* */ /***********************************************************************/ /* $Id: debugger.c,v 1.29 2005/09/22 14:21:50 xleroy Exp $ */ /* Interface with the debugger */ #include <string.h> #include "config.h" #include "debugger.h" #include "fail.h" #include "fix_code.h" #include "instruct.h" #include "intext.h" #include "io.h" #include "misc.h" #include "mlvalues.h" #include "stacks.h" #include "sys.h" int caml_debugger_in_use = 0; uintnat caml_event_count; #if !defined(HAS_SOCKETS) || defined(_WIN32) void caml_debugger_init(void) { } void caml_debugger(enum event_kind event) { } #else #ifdef HAS_UNISTD #include <unistd.h> #endif #include <sys/types.h> #include <sys/wait.h> #include <sys/socket.h> #include <sys/un.h> #include <netinet/in.h> #include <arpa/inet.h> #include <netdb.h> static int sock_domain; /* Socket domain for the debugger */ static union { /* Socket address for the debugger */ struct sockaddr s_gen; struct sockaddr_un s_unix; struct sockaddr_in s_inet; } sock_addr; static int sock_addr_len; /* Length of sock_addr */ static int dbg_socket = -1; /* The socket connected to the debugger */ static struct channel * dbg_in; /* Input channel on the socket */ static struct channel * dbg_out;/* Output channel on the socket */ static void open_connection(void) { dbg_socket = socket(sock_domain, SOCK_STREAM, 0); if (dbg_socket == -1 || connect(dbg_socket, &sock_addr.s_gen, sock_addr_len) == -1) caml_fatal_error("cannot connect to debugger"); dbg_in = caml_open_descriptor_in(dbg_socket); dbg_out = caml_open_descriptor_out(dbg_socket); if (!caml_debugger_in_use) caml_putword(dbg_out, -1); /* first connection */ caml_putword(dbg_out, getpid()); caml_flush(dbg_out); } static void close_connection(void) { caml_close_channel(dbg_in); caml_close_channel(dbg_out); dbg_socket = -1; /* was closed by caml_close_channel */ } void caml_debugger_init(void) { char * address; char * port, * p; struct hostent * host; int n; address = getenv("CAML_DEBUG_SOCKET"); if (address == NULL) return; /* Parse the address */ port = NULL; for (p = address; *p != 0; p++) { if (*p == ':') { *p = 0; port = p+1; break; } } if (port == NULL) { /* Unix domain */ sock_domain = PF_UNIX; sock_addr.s_unix.sun_family = AF_UNIX; strncpy(sock_addr.s_unix.sun_path, address, sizeof(sock_addr.s_unix.sun_path)); sock_addr_len = ((char *)&(sock_addr.s_unix.sun_path) - (char *)&(sock_addr.s_unix)) + strlen(address); } else { /* Internet domain */ sock_domain = PF_INET; for (p = (char *) &sock_addr.s_inet, n = sizeof(sock_addr.s_inet); n > 0; n--) *p++ = 0; sock_addr.s_inet.sin_family = AF_INET; sock_addr.s_inet.sin_addr.s_addr = inet_addr(address); if (sock_addr.s_inet.sin_addr.s_addr == -1) { host = gethostbyname(address); if (host == NULL) caml_fatal_error_arg("Unknown debugging host %s\n", address); memmove(&sock_addr.s_inet.sin_addr, host->h_addr, host->h_length); } sock_addr.s_inet.sin_port = htons(atoi(port)); sock_addr_len = sizeof(sock_addr.s_inet); } open_connection(); caml_debugger_in_use = 1; caml_trap_barrier = caml_stack_high; } static value getval(struct channel *chan) { 
value res; if (caml_really_getblock(chan, (char *) &res, sizeof(res)) == 0) caml_raise_end_of_file(); /* Bad, but consistent with caml_getword */ return res; } static void putval(struct channel *chan, value val) { caml_really_putblock(chan, (char *) &val, sizeof(val)); } static void safe_output_value(struct channel *chan, value val) { struct longjmp_buffer raise_buf, * saved_external_raise; /* Catch exceptions raised by [caml_output_val] */ saved_external_raise = caml_external_raise; if (sigsetjmp(raise_buf.buf, 0) == 0) { caml_external_raise = &raise_buf; caml_output_val(chan, val, Val_unit); } else { /* Send wrong magic number, will cause [caml_input_value] to fail */ caml_really_putblock(chan, "\000\000\000\000", 4); } caml_external_raise = saved_external_raise; } #define Pc(sp) ((code_t)((sp)[0])) #define Env(sp) ((sp)[1]) #define Extra_args(sp) (Long_val(((sp)[2]))) #define Locals(sp) ((sp) + 3) void caml_debugger(enum event_kind event) { int frame_number; value * frame; intnat i, pos; value val; if (dbg_socket == -1) return; /* Not connected to a debugger. */ /* Reset current frame */ frame_number = 0; frame = caml_extern_sp + 1; /* Report the event to the debugger */ switch(event) { case PROGRAM_START: /* Nothing to report */ goto command_loop; case EVENT_COUNT: putch(dbg_out, REP_EVENT); break; case BREAKPOINT: putch(dbg_out, REP_BREAKPOINT); break; case PROGRAM_EXIT: putch(dbg_out, REP_EXITED); break; case TRAP_BARRIER: putch(dbg_out, REP_TRAP); break; case UNCAUGHT_EXC: putch(dbg_out, REP_UNCAUGHT_EXC); break; } caml_putword(dbg_out, caml_event_count); if (event == EVENT_COUNT || event == BREAKPOINT) { caml_putword(dbg_out, caml_stack_high - frame); caml_putword(dbg_out, (Pc(frame) - caml_start_code) * sizeof(opcode_t)); } else { /* No PC and no stack frame associated with other events */ caml_putword(dbg_out, 0); caml_putword(dbg_out, 0); } caml_flush(dbg_out); command_loop: /* Read and execute the commands sent by the debugger */ while(1) { switch(getch(dbg_in)) { case REQ_SET_EVENT: pos = caml_getword(dbg_in); Assert (pos >= 0); Assert (pos < caml_code_size); caml_set_instruction(caml_start_code + pos / sizeof(opcode_t), EVENT); break; case REQ_SET_BREAKPOINT: pos = caml_getword(dbg_in); Assert (pos >= 0); Assert (pos < caml_code_size); caml_set_instruction(caml_start_code + pos / sizeof(opcode_t), BREAK); break; case REQ_RESET_INSTR: pos = caml_getword(dbg_in); Assert (pos >= 0); Assert (pos < caml_code_size); pos = pos / sizeof(opcode_t); caml_set_instruction(caml_start_code + pos, caml_saved_code[pos]); break; case REQ_CHECKPOINT: i = fork(); if (i == 0) { close_connection(); /* Close parent connection. 
*/ open_connection(); /* Open new connection with debugger */ } else { caml_putword(dbg_out, i); caml_flush(dbg_out); } break; case REQ_GO: caml_event_count = caml_getword(dbg_in); return; case REQ_STOP: exit(0); break; case REQ_WAIT: wait(NULL); break; case REQ_INITIAL_FRAME: frame = caml_extern_sp + 1; /* Fall through */ case REQ_GET_FRAME: caml_putword(dbg_out, caml_stack_high - frame); if (frame < caml_stack_high){ caml_putword(dbg_out, (Pc(frame) - caml_start_code) * sizeof(opcode_t)); }else{ caml_putword (dbg_out, 0); } caml_flush(dbg_out); break; case REQ_SET_FRAME: i = caml_getword(dbg_in); frame = caml_stack_high - i; break; case REQ_UP_FRAME: i = caml_getword(dbg_in); if (frame + Extra_args(frame) + i + 3 >= caml_stack_high) { caml_putword(dbg_out, -1); } else { frame += Extra_args(frame) + i + 3; caml_putword(dbg_out, caml_stack_high - frame); caml_putword(dbg_out, (Pc(frame) - caml_start_code) * sizeof(opcode_t)); } caml_flush(dbg_out); break; case REQ_SET_TRAP_BARRIER: i = caml_getword(dbg_in); caml_trap_barrier = caml_stack_high - i; break; case REQ_GET_LOCAL: i = caml_getword(dbg_in); putval(dbg_out, Locals(frame)[i]); caml_flush(dbg_out); break; case REQ_GET_ENVIRONMENT: i = caml_getword(dbg_in); putval(dbg_out, Field(Env(frame), i)); caml_flush(dbg_out); break; case REQ_GET_GLOBAL: i = caml_getword(dbg_in); putval(dbg_out, Field(caml_global_data, i)); caml_flush(dbg_out); break; case REQ_GET_ACCU: putval(dbg_out, *caml_extern_sp); caml_flush(dbg_out); break; case REQ_GET_HEADER: val = getval(dbg_in); caml_putword(dbg_out, Hd_val(val)); caml_flush(dbg_out); break; case REQ_GET_FIELD: val = getval(dbg_in); i = caml_getword(dbg_in); if (Tag_val(val) != Double_array_tag) { putch(dbg_out, 0); putval(dbg_out, Field(val, i)); } else { double d = Double_field(val, i); putch(dbg_out, 1); caml_really_putblock(dbg_out, (char *) &d, 8); } caml_flush(dbg_out); break; case REQ_MARSHAL_OBJ: val = getval(dbg_in); safe_output_value(dbg_out, val); caml_flush(dbg_out); break; case REQ_GET_CLOSURE_CODE: val = getval(dbg_in); caml_putword(dbg_out, (Code_val(val)-caml_start_code) * sizeof(opcode_t)); caml_flush(dbg_out); break; } } } #endif
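/*
 * Editor's note: standalone sketch, not part of the runtime. It shows the
 * same CAML_DEBUG_SOCKET convention that caml_debugger_init() parses above:
 * "host:port" selects an Internet-domain socket, while an address without
 * ':' is taken as a Unix-domain socket path. INADDR_NONE is the portable
 * failure value of inet_addr(), equivalent to the == -1 test above.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static void demo_classify_debug_socket(char *address)
{
  char *colon = strchr(address, ':');
  if (colon == NULL) {
    printf("Unix domain socket at path %s\n", address);
  } else {
    *colon = '\0';
    if (inet_addr(address) == INADDR_NONE)
      printf("Internet socket, host name %s, port %s\n", address, colon + 1);
    else
      printf("Internet socket, numeric address %s, port %s\n",
             address, colon + 1);
  }
}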
266927.c
#include <stdio.h> #include <math.h> #include <omp.h> #include "ofmo-twoint.h" #ifndef false #define false 0 #endif #ifndef true #define true 1 #endif #define HALF 0.5e0 #define ONE 1.e0 #define ZERO 0.e0 #define EPS_PS4 1.e-30 #define EPS_ERI 1.e-15 #define OFMO_EBUF_FULL 1 #define OFMO_EBUF_NOFULL 0 #define EPS_PS_PAIR 1.e-32 #define EPS_CS_PAIR2 1.e-30 #define MAXNPSPAIR 100 extern double* ofmo_integ_getadd_xint( const int mythread ); extern double* ofmo_integ_getadd_yint( const int mythread ); extern double* ofmo_integ_getadd_zint( const int mythread ); extern double* ofmo_integ_getadd_eh( const int mythread ); extern double* ofmo_integ_getadd_eri( const int mythread ); extern void calc_root( const int nroot, const double T, double *U, double *W ); extern int ofmo_integ_add_fock( const int nao, const size_t nstored_eri, const double eri_val[], const short int eri_ind4[], const double D[], double G[] ); extern double* ofmo_getadd_dfact(); static double *DFACT; static void ofmo_hrr_clear_dpss( double *eh ) { int i; // (DS|SS) for ( i=0; i<(0+6); i++ ) eh[i] = 0.e0; // (FS|SS) for ( i=6; i<(6+10); i++ ) eh[i] = 0.e0; } static void ofmo_hrr_coef_dpss( double *eh, double *DINT ) { int i, j, k, l, iao, jao, kao, lao, ix; double coef_a, coef_ab, coef_abc; double *th; th = &eh[16]; ix = 0; for ( i=0, iao=4; i<6; i++, iao++ ) { coef_a = DFACT[iao]; for ( j=0, jao=1; j<3; j++, jao++ ) { coef_ab = coef_a * DFACT[jao]; for ( k=0, kao=0; k<1; k++, kao++ ) { coef_abc = coef_ab * DFACT[kao]; for ( l=0, lao=0; l<1; l++, lao++ ) { DINT[ix] = coef_abc * DFACT[lao] * th[ix]; ix++; } } } } } static void ofmo_hrr_calc_dpss( double *eh, const double BA[3], const double DC[3] ) { // HRR for (XX|XS)-type integral (center AB) // (DP,SS) eh[ 16] = eh[ 6] - BA[0]*eh[ 0]; eh[ 17] = eh[ 9] - BA[1]*eh[ 0]; eh[ 18] = eh[ 10] - BA[2]*eh[ 0]; eh[ 19] = eh[ 11] - BA[0]*eh[ 1]; eh[ 20] = eh[ 7] - BA[1]*eh[ 1]; eh[ 21] = eh[ 14] - BA[2]*eh[ 1]; eh[ 22] = eh[ 12] - BA[0]*eh[ 2]; eh[ 23] = eh[ 15] - BA[1]*eh[ 2]; eh[ 24] = eh[ 8] - BA[2]*eh[ 2]; eh[ 25] = eh[ 9] - BA[0]*eh[ 3]; eh[ 26] = eh[ 11] - BA[1]*eh[ 3]; eh[ 27] = eh[ 13] - BA[2]*eh[ 3]; eh[ 28] = eh[ 10] - BA[0]*eh[ 4]; eh[ 29] = eh[ 13] - BA[1]*eh[ 4]; eh[ 30] = eh[ 12] - BA[2]*eh[ 4]; eh[ 31] = eh[ 13] - BA[0]*eh[ 5]; eh[ 32] = eh[ 14] - BA[1]*eh[ 5]; eh[ 33] = eh[ 15] - BA[2]*eh[ 5]; } static void ofmo_xyzint_dpss( const double *F00, const double *B00, const double *B10, const double *B01, const double *C00, const double *CP00, double *xint, double *yint, double *zint ) { int Lab, Lcd; int m, m3, N, M, ix3, ix2, ix1, ix0, nroot; double C10[2], CP10[2], CP01[2], C01[2]; // (0,0) xint[ 0]=1.e0; yint[ 0]=1.e0; zint[ 0]=F00[0]; xint[ 1]=1.e0; yint[ 1]=1.e0; zint[ 1]=F00[1]; // (1,0) xint[ 2]=C00[ 0]; yint[ 2]=C00[ 1]; zint[ 2]=C00[ 2]*F00[0]; xint[ 3]=C00[ 3]; yint[ 3]=C00[ 4]; zint[ 3]=C00[ 5]*F00[1]; // (N,0) and (N,1) for ( m=0; m<2; m++ ) { C10[m] = 0.e0; CP10[m] = B00[m]; } // (2,0) C10[0] += B10[0]; xint[ 4]=C00[ 0]*xint[ 2]+C10[0]*xint[ 0]; yint[ 4]=C00[ 1]*yint[ 2]+C10[0]*yint[ 0]; zint[ 4]=C00[ 2]*zint[ 2]+C10[0]*zint[ 0]; C10[1] += B10[1]; xint[ 5]=C00[ 3]*xint[ 3]+C10[1]*xint[ 1]; yint[ 5]=C00[ 4]*yint[ 3]+C10[1]*yint[ 1]; zint[ 5]=C00[ 5]*zint[ 3]+C10[1]*zint[ 1]; // (3,0) C10[0] += B10[0]; xint[ 6]=C00[ 0]*xint[ 4]+C10[0]*xint[ 2]; yint[ 6]=C00[ 1]*yint[ 4]+C10[0]*yint[ 2]; zint[ 6]=C00[ 2]*zint[ 4]+C10[0]*zint[ 2]; C10[1] += B10[1]; xint[ 7]=C00[ 3]*xint[ 5]+C10[1]*xint[ 3]; yint[ 7]=C00[ 4]*yint[ 5]+C10[1]*yint[ 3]; zint[ 7]=C00[ 5]*zint[ 
5]+C10[1]*zint[ 3]; } static void ofmo_form_dpss( const double *xint, const double *yint, const double *zint, double *eh ) { // (DS|SS) eh[ 0] += xint[ 4]*yint[ 0]*zint[ 0]; eh[ 0] += xint[ 5]*yint[ 1]*zint[ 1]; eh[ 1] += xint[ 0]*yint[ 4]*zint[ 0]; eh[ 1] += xint[ 1]*yint[ 5]*zint[ 1]; eh[ 2] += xint[ 0]*yint[ 0]*zint[ 4]; eh[ 2] += xint[ 1]*yint[ 1]*zint[ 5]; eh[ 3] += xint[ 2]*yint[ 2]*zint[ 0]; eh[ 3] += xint[ 3]*yint[ 3]*zint[ 1]; eh[ 4] += xint[ 2]*yint[ 0]*zint[ 2]; eh[ 4] += xint[ 3]*yint[ 1]*zint[ 3]; eh[ 5] += xint[ 0]*yint[ 2]*zint[ 2]; eh[ 5] += xint[ 1]*yint[ 3]*zint[ 3]; // (FS|SS) eh[ 6] += xint[ 6]*yint[ 0]*zint[ 0]; eh[ 6] += xint[ 7]*yint[ 1]*zint[ 1]; eh[ 7] += xint[ 0]*yint[ 6]*zint[ 0]; eh[ 7] += xint[ 1]*yint[ 7]*zint[ 1]; eh[ 8] += xint[ 0]*yint[ 0]*zint[ 6]; eh[ 8] += xint[ 1]*yint[ 1]*zint[ 7]; eh[ 9] += xint[ 4]*yint[ 2]*zint[ 0]; eh[ 9] += xint[ 5]*yint[ 3]*zint[ 1]; eh[ 10] += xint[ 4]*yint[ 0]*zint[ 2]; eh[ 10] += xint[ 5]*yint[ 1]*zint[ 3]; eh[ 11] += xint[ 2]*yint[ 4]*zint[ 0]; eh[ 11] += xint[ 3]*yint[ 5]*zint[ 1]; eh[ 12] += xint[ 2]*yint[ 0]*zint[ 4]; eh[ 12] += xint[ 3]*yint[ 1]*zint[ 5]; eh[ 13] += xint[ 2]*yint[ 2]*zint[ 2]; eh[ 13] += xint[ 3]*yint[ 3]*zint[ 3]; eh[ 14] += xint[ 0]*yint[ 4]*zint[ 2]; eh[ 14] += xint[ 1]*yint[ 5]*zint[ 3]; eh[ 15] += xint[ 0]*yint[ 2]*zint[ 4]; eh[ 15] += xint[ 1]*yint[ 3]*zint[ 5]; } void ofmo_twoint_core_rys_dpss( const int mythread, const int *nijps, const double *vzeta, const double *vdkab, const double vxiza[], const double BA[3], const int *nklps, const double *veta, const double *vdkcd, const double *vxizc, const double DC[3], const double AC[3], double *DINT ) { int ijps, klps, i; double cssss, zeta, dkab, xiza, eta, xizc, dk, T; double zeta2, eta2, rz, PA[3], QC[3]; double PQ2, sqrho, rho, PC[3], QP[3]; double C00[6], CP00[6], B00[2], B10[2], B01[2], F00[2]; double rrho, rze, W[13], U[13]; double u2, duminv, dm2inv, dum; int m, m3; double *xint, *yint, *zint, *eh; xint = ofmo_integ_getadd_xint( mythread ); yint = ofmo_integ_getadd_yint( mythread ); zint = ofmo_integ_getadd_zint( mythread ); eh = ofmo_integ_getadd_eh( mythread ); DFACT = ofmo_getadd_dfact(); ofmo_hrr_clear_dpss( eh ); for ( ijps=0; ijps<(*nijps); ijps++ ) { zeta = vzeta[ijps]; dkab = vdkab[ijps]; xiza = vxiza[ijps]; zeta2 = HALF * zeta; for ( i=0; i<3; i++ ) { PC[i] = AC[i] + xiza*BA[i]; PA[i] = xiza * BA[i]; } for ( klps=0; klps<(*nklps); klps++ ) { eta = veta[klps]; dk = dkab * vdkcd[klps]; xizc = vxizc[klps]; eta2 = HALF * eta; PQ2 = ZERO; for ( i=0; i<3; i++ ) { QC[i] = xizc*DC[i]; QP[i] = xizc*DC[i] - PC[i]; PQ2 += QP[i]*QP[i]; } rrho = zeta + eta; rze = zeta * eta; sqrho = sqrt(1.e0/rrho); rho = sqrho * sqrho; rz = rho * zeta; T = rho * PQ2; cssss = sqrho * dk; calc_root( 2, T, U, W ); for ( m=m3=0; m<2; m++, m3+=3 ) { u2 = rho * U[m]; F00[m] = cssss * W[m]; duminv = 1.e0 / ( 1.e0 + rrho * u2 ); dm2inv = 0.5e0 * duminv; B00[m] = dm2inv * rze * u2; B10[m] = dm2inv * ( zeta + rze*u2 ); B01[m] = dm2inv * ( eta + rze*u2 ); dum = zeta * u2 * duminv; for ( i=0; i<3; i++ ) C00[m3+i] = PA[i] + dum * QP[i]; dum = eta * u2 * duminv; for ( i=0; i<3; i++ ) CP00[m3+i] = QC[i] - dum * QP[i]; } ofmo_xyzint_dpss( F00, B00, B10, B01, C00, CP00, xint, yint, zint ); ofmo_form_dpss( xint, yint, zint, eh ); } } ofmo_hrr_calc_dpss( eh, BA, DC ); ofmo_hrr_coef_dpss( eh, DINT ); } int ofmo_twoint_rys_dpss( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int *shel_atm, const int *shel_ini, const 
double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for partially direct SCF const long *pebuf_max_nzeri, long *ebuf_non_zero_eri, double ebuf_val[], short int ebuf_ind4[], int *last_ijcs, int *last_klcs ) { int Lab, Lcd, i, j, k, l, ipat, ix; int I2, IJ, K2, KL; int ijcs, ijcs0, ijcs1; int klcs, klcs0, klcs1, max_klcs; int ijps0, nijps, klps0, nklps; int ics, iat, iao, iao0, jcs, jat, jao, jao0; int kcs, kat, kao, kao0, lcs, lat, lao, lao0; double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3]; double val_ab, val_cd, coe, coe0; double *DINTEG; long nzeri, max_nzeri, nzeri4; int nworkers=*pnworkers, workerid=*pworkerid; int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd; long ebuf_max_nzeri = *pebuf_max_nzeri; int mythread; DFACT = ofmo_getadd_dfact(); mythread = omp_get_thread_num(); DINTEG = ofmo_integ_getadd_eri( mythread ); Lab = La*(La+1)/2+Lb; Lcd = Lc*(Lc+1)/2+Ld; ijcs0 = leading_cs_pair[Lab]; ijcs1 = leading_cs_pair[Lab+1]; klcs0 = leading_cs_pair[Lcd]; klcs1 = leading_cs_pair[Lcd+1]; nzeri = *ebuf_non_zero_eri; max_nzeri = ebuf_max_nzeri - 6*3*1*1; nzeri4 = nzeri*4; if ( nzeri >= max_nzeri ) { *last_ijcs = ijcs0+workerid; *last_klcs = klcs0 - 1; *ebuf_non_zero_eri = nzeri; return OFMO_EBUF_FULL; } for ( ijcs=ijcs0+workerid; ijcs<ijcs1; ijcs+=nworkers ) { val_ab = csp_schwarz[ijcs]; ics = csp_ics[ijcs]; jcs = csp_jcs[ijcs]; ijps0 = csp_leading_ps_pair[ijcs]; nijps = csp_leading_ps_pair[ijcs+1]-ijps0; iat = shel_atm[ics]; jat = shel_atm[jcs]; iao0 = shel_ini[ics]; jao0 = shel_ini[jcs]; A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat]; B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat]; for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i]; max_klcs = ( Lab == Lcd ? ijcs+1 : klcs1 ); for ( klcs=klcs0; klcs<max_klcs; klcs++ ) { val_cd = csp_schwarz[klcs]; if ( val_ab*val_cd < EPS_PS4 ) continue; kcs = csp_ics[klcs]; lcs = csp_jcs[klcs]; klps0 = csp_leading_ps_pair[klcs]; nklps = csp_leading_ps_pair[klcs+1]-klps0; kat = shel_atm[kcs]; lat = shel_atm[lcs]; kao0 = shel_ini[kcs]; lao0 = shel_ini[lcs]; C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat]; D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat]; for ( i=0; i<3; i++ ) { AC[i] = A[i] - C[i]; DC[i] = D[i] - C[i]; } ofmo_twoint_core_rys_dpss( mythread, &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0], &psp_xiza[ijps0], BA, &nklps, &psp_zeta[klps0], &psp_dkps[klps0], &psp_xiza[klps0], DC, AC, DINTEG ); ipat=((Lab != Lcd) || (ics==kcs && jcs>lcs) ? true : false); for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) { I2 = (iao*iao+iao)>>1; for ( j=0, jao=jao0; j<3; j++, jao++ ) { if ( jao>iao ) { ix+=1*1; continue; } IJ = I2 + jao; coe0 = ( iao==jao ? 
HALF : ONE ); for ( k=0, kao=kao0; k<1; k++, kao++ ) { K2 = (kao*kao+kao)>>1; for ( l=0, lao=lao0; l<1; l++, lao++, ix++ ) { if ( lao>kao ) continue; if ( fabs(DINTEG[ix]) > EPS_ERI ) { KL = K2 + lao; if ( IJ >= KL ) { coe = coe0; if ( kao==lao ) coe *= HALF; if ( KL == IJ ) coe *= HALF; ebuf_val[nzeri] = coe*DINTEG[ix]; ebuf_ind4[nzeri4+0] = (short int)iao; ebuf_ind4[nzeri4+1] = (short int)jao; ebuf_ind4[nzeri4+2] = (short int)kao; ebuf_ind4[nzeri4+3] = (short int)lao; nzeri++; nzeri4+=4; } else if ( ipat ) { coe = coe0; if ( kao==lao ) coe*=HALF; ebuf_val[nzeri] = coe*DINTEG[ix]; ebuf_ind4[nzeri4+0] = (short int)kao; ebuf_ind4[nzeri4+1] = (short int)lao; ebuf_ind4[nzeri4+2] = (short int)iao; ebuf_ind4[nzeri4+3] = (short int)jao; nzeri++; nzeri4+=4; } } } } } } if ( nzeri >= max_nzeri ) { *last_ijcs = ijcs; *last_klcs = klcs; *ebuf_non_zero_eri = nzeri; return OFMO_EBUF_FULL; } } } *ebuf_non_zero_eri = nzeri; return OFMO_EBUF_NOFULL; } int ofmo_twoint_direct_rys_dpss( const int *pnworkers, const int *pworkerid, const int *pLa, const int *pLb, const int *pLc, const int *pLd, const int *shel_atm, const int *shel_ini, const double atom_x[], const double atom_y[], const double atom_z[], const int leading_cs_pair[], const double csp_schwarz[], const int csp_ics[], const int csp_jcs[], const int csp_leading_ps_pair[], const double psp_zeta[], const double psp_dkps[], const double psp_xiza[], // for direct SCF const long *petmp_max_nzeri, long *petmp_non_zero_eri, double etmp_val[], short int etmp_ind4[], const int *plast_ijcs, const int *plast_klcs, // density matrix & G-matrix data const int *pnao, const double Ds[], double G[] ) { int nworkers=*pnworkers, workerid=*pworkerid; int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd; int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao; long max_nzeri=*petmp_max_nzeri; long nzeri4, nzeri=*petmp_non_zero_eri; // int Lab, Lcd, i, j, k, l, ipat, ix; int I2, IJ, K2, KL; int ijcs, ijcs0, ijcs1; int klcs, klcs0, klcs1, max_klcs; int ijps0, nijps, klps0, nklps; int ics, iat, iao, iao0, jcs, jat, jao, jao0; int kcs, kat, kao, kao0, lcs, lat, lao, lao0; double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3]; double val_ab, val_cd, coe, coe0; double *DINTEG; int mythread; float eps_eri = ofmo_twoint_eps_eri(0); float eps_ps4 = ofmo_twoint_eps_ps4(0); float eps_sch = ofmo_twoint_eps_sch(0); DFACT = ofmo_getadd_dfact(); mythread = omp_get_thread_num(); DINTEG = ofmo_integ_getadd_eri( mythread ); Lab = La*(La+1)/2+Lb; Lcd = Lc*(Lc+1)/2+Ld; ijcs1 = leading_cs_pair[Lab+1]; klcs0 = leading_cs_pair[Lcd]; klcs1 = leading_cs_pair[Lcd+1]; if ( last_ijcs != -1 ) { ijcs = last_ijcs; klcs = last_klcs+1; } else { ijcs = leading_cs_pair[Lab] + workerid; klcs = klcs0; } max_nzeri -= 6*3*1*1; nzeri4 = nzeri*4; if ( nzeri >= max_nzeri ) { ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G ); nzeri = nzeri4 = 0; } for ( ; ijcs<ijcs1; ijcs+=nworkers ) { val_ab = csp_schwarz[ijcs]; ics = csp_ics[ijcs]; jcs = csp_jcs[ijcs]; ijps0 = csp_leading_ps_pair[ijcs]; nijps = csp_leading_ps_pair[ijcs+1]-ijps0; iat = shel_atm[ics]; jat = shel_atm[jcs]; iao0 = shel_ini[ics]; jao0 = shel_ini[jcs]; A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat]; B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat]; for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i]; max_klcs = ( Lab == Lcd ? 
ijcs+1 : klcs1 ); for ( ; klcs<max_klcs; klcs++ ) { val_cd = csp_schwarz[klcs]; if ( val_ab*val_cd < eps_ps4 ) continue; kcs = csp_ics[klcs]; lcs = csp_jcs[klcs]; if ( val_ab*val_cd*ofmo_twoint_dmax6(ics,jcs,kcs,lcs) < eps_sch ) continue; klps0 = csp_leading_ps_pair[klcs]; nklps = csp_leading_ps_pair[klcs+1]-klps0; kat = shel_atm[kcs]; lat = shel_atm[lcs]; kao0 = shel_ini[kcs]; lao0 = shel_ini[lcs]; C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat]; D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat]; for ( i=0; i<3; i++ ) { AC[i] = A[i] - C[i]; DC[i] = D[i] - C[i]; } ofmo_twoint_core_rys_dpss( mythread, &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0], &psp_xiza[ijps0], BA, &nklps, &psp_zeta[klps0], &psp_dkps[klps0], &psp_xiza[klps0], DC, AC, DINTEG ); ipat=((Lab != Lcd) || (ics==kcs && jcs>lcs) ? true : false); for ( i=0, iao=iao0, ix=0; i<6; i++, iao++ ) { I2 = (iao*iao+iao)>>1; for ( j=0, jao=jao0; j<3; j++, jao++ ) { if ( jao>iao ) { ix+=1*1; continue; } IJ = I2 + jao; coe0 = ( iao==jao ? HALF : ONE ); for ( k=0, kao=kao0; k<1; k++, kao++ ) { K2 = (kao*kao+kao)>>1; for ( l=0, lao=lao0; l<1; l++, lao++, ix++ ) { if ( lao>kao ) continue; if ( fabs(DINTEG[ix]) > eps_eri ) { KL = K2 + lao; if ( IJ >= KL ) { coe = coe0; if ( kao==lao ) coe *= HALF; if ( KL == IJ ) coe *= HALF; etmp_val[nzeri] = coe*DINTEG[ix]; etmp_ind4[nzeri4+0] = (short int)iao; etmp_ind4[nzeri4+1] = (short int)jao; etmp_ind4[nzeri4+2] = (short int)kao; etmp_ind4[nzeri4+3] = (short int)lao; nzeri++; nzeri4+=4; } else if ( ipat ) { coe = coe0; if ( kao==lao ) coe*=HALF; etmp_val[nzeri] = coe*DINTEG[ix]; etmp_ind4[nzeri4+0] = (short int)kao; etmp_ind4[nzeri4+1] = (short int)lao; etmp_ind4[nzeri4+2] = (short int)iao; etmp_ind4[nzeri4+3] = (short int)jao; nzeri++; nzeri4+=4; } } } } } } if ( nzeri >= max_nzeri ) { ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G ); nzeri = nzeri4= 0; } } klcs = klcs0; } *petmp_non_zero_eri = nzeri; return 0; }
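/*
 * Editor's note: illustrative helper, not part of the OFMO source. The
 * index bookkeeping above packs an ordered AO pair (iao >= jao) into the
 * canonical triangular index via I2 = (iao*iao+iao)>>1, i.e. i*(i+1)/2,
 * plus jao. The same packing as a standalone function:
 */
static int demo_pair_index(int i, int j)
{
    /* assumes i >= j >= 0; reproduces IJ = I2 + jao in the loops above */
    return ((i * i + i) >> 1) + j;
}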
605755.c
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power9" } } */
/* { dg-require-effective-target powerpc_p9vector_ok } */
/* { dg-options "-mcpu=power9" } */

#include <altivec.h>

__vector unsigned int
get_significands (__vector float *p)
{
  __vector float source = *p;

  return vec_extract_sig (source);
}

/* { dg-final { scan-assembler "xvxsigsp" } } */
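/* Editor's note: a scalar sketch, not part of the test above, of what
   vec_extract_sig()/xvxsigsp computes for finite inputs: the 23 fraction
   bits of the float, with the implicit leading bit set for normalized
   values. The special encodings (Inf/NaN) are not modeled here.  */

#include <stdint.h>
#include <string.h>

static uint32_t
demo_extract_sig (float f)
{
  uint32_t bits;
  memcpy (&bits, &f, sizeof bits);       /* reinterpret the float as bits */
  uint32_t frac = bits & 0x7fffffu;      /* 23-bit fraction */
  uint32_t exp = (bits >> 23) & 0xffu;   /* biased exponent */
  /* Normal numbers carry an implicit leading 1 above the fraction.  */
  return (exp != 0 && exp != 0xffu) ? (frac | 0x800000u) : frac;
}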
622013.c
/* * Generated by asn1c-0.9.29 (http://lionet.info/asn1c) * From ASN.1 module "NR-RRC-Definitions" * found in "fixed_grammar.asn" * `asn1c -gen-PER -fcompound-names -findirect-choice -no-gen-example` */ #include "QCL-Info.h" /* * This type is implemented using NativeEnumerated, * so here we adjust the DEF accordingly. */ static asn_oer_constraints_t asn_OER_type_referenceSignal_constr_4 CC_NOTUSED = { { 0, 0 }, -1}; static asn_per_constraints_t asn_PER_type_referenceSignal_constr_4 CC_NOTUSED = { { APC_CONSTRAINED, 1, 1, 0, 1 } /* (0..1) */, { APC_UNCONSTRAINED, -1, -1, 0, 0 }, 0, 0 /* No PER value map */ }; static asn_oer_constraints_t asn_OER_type_qcl_Type_constr_7 CC_NOTUSED = { { 0, 0 }, -1}; static asn_per_constraints_t asn_PER_type_qcl_Type_constr_7 CC_NOTUSED = { { APC_CONSTRAINED, 2, 2, 0, 3 } /* (0..3) */, { APC_UNCONSTRAINED, -1, -1, 0, 0 }, 0, 0 /* No PER value map */ }; static asn_TYPE_member_t asn_MBR_referenceSignal_4[] = { { ATF_NOFLAGS, 0, offsetof(struct QCL_Info__referenceSignal, choice.csi_rs), (ASN_TAG_CLASS_CONTEXT | (0 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_NZP_CSI_RS_ResourceId, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "csi-rs" }, { ATF_NOFLAGS, 0, offsetof(struct QCL_Info__referenceSignal, choice.ssb), (ASN_TAG_CLASS_CONTEXT | (1 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_SSB_Index, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "ssb" }, }; static const asn_TYPE_tag2member_t asn_MAP_referenceSignal_tag2el_4[] = { { (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 }, /* csi-rs */ { (ASN_TAG_CLASS_CONTEXT | (1 << 2)), 1, 0, 0 } /* ssb */ }; static asn_CHOICE_specifics_t asn_SPC_referenceSignal_specs_4 = { sizeof(struct QCL_Info__referenceSignal), offsetof(struct QCL_Info__referenceSignal, _asn_ctx), offsetof(struct QCL_Info__referenceSignal, present), sizeof(((struct QCL_Info__referenceSignal *)0)->present), asn_MAP_referenceSignal_tag2el_4, 2, /* Count of tags in the map */ 0, 0, -1 /* Extensions start */ }; static /* Use -fall-defs-global to expose */ asn_TYPE_descriptor_t asn_DEF_referenceSignal_4 = { "referenceSignal", "referenceSignal", &asn_OP_CHOICE, 0, /* No effective tags (pointer) */ 0, /* No effective tags (count) */ 0, /* No tags (pointer) */ 0, /* No tags (count) */ { &asn_OER_type_referenceSignal_constr_4, &asn_PER_type_referenceSignal_constr_4, CHOICE_constraint }, asn_MBR_referenceSignal_4, 2, /* Elements count */ &asn_SPC_referenceSignal_specs_4 /* Additional specs */ }; static const asn_INTEGER_enum_map_t asn_MAP_qcl_Type_value2enum_7[] = { { 0, 5, "typeA" }, { 1, 5, "typeB" }, { 2, 5, "typeC" }, { 3, 5, "typeD" } }; static const unsigned int asn_MAP_qcl_Type_enum2value_7[] = { 0, /* typeA(0) */ 1, /* typeB(1) */ 2, /* typeC(2) */ 3 /* typeD(3) */ }; static const asn_INTEGER_specifics_t asn_SPC_qcl_Type_specs_7 = { asn_MAP_qcl_Type_value2enum_7, /* "tag" => N; sorted by tag */ asn_MAP_qcl_Type_enum2value_7, /* N => "tag"; sorted by N */ 4, /* Number of elements in the maps */ 0, /* Enumeration is not extensible */ 1, /* Strict enumeration */ 0, /* Native long size */ 0 }; static const ber_tlv_tag_t asn_DEF_qcl_Type_tags_7[] = { (ASN_TAG_CLASS_CONTEXT | (3 << 2)), (ASN_TAG_CLASS_UNIVERSAL | (10 << 2)) }; static /* Use -fall-defs-global to expose */ asn_TYPE_descriptor_t asn_DEF_qcl_Type_7 = { "qcl-Type", "qcl-Type", &asn_OP_NativeEnumerated, asn_DEF_qcl_Type_tags_7, sizeof(asn_DEF_qcl_Type_tags_7) /sizeof(asn_DEF_qcl_Type_tags_7[0]) - 1, /* 1 */ asn_DEF_qcl_Type_tags_7, /* Same as above */ sizeof(asn_DEF_qcl_Type_tags_7) 
/sizeof(asn_DEF_qcl_Type_tags_7[0]), /* 2 */ { &asn_OER_type_qcl_Type_constr_7, &asn_PER_type_qcl_Type_constr_7, NativeEnumerated_constraint }, 0, 0, /* Defined elsewhere */ &asn_SPC_qcl_Type_specs_7 /* Additional specs */ }; asn_TYPE_member_t asn_MBR_QCL_Info_1[] = { { ATF_POINTER, 2, offsetof(struct QCL_Info, cell), (ASN_TAG_CLASS_CONTEXT | (0 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_ServCellIndex, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "cell" }, { ATF_POINTER, 1, offsetof(struct QCL_Info, bwp_Id), (ASN_TAG_CLASS_CONTEXT | (1 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_BWP_Id, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "bwp-Id" }, { ATF_NOFLAGS, 0, offsetof(struct QCL_Info, referenceSignal), (ASN_TAG_CLASS_CONTEXT | (2 << 2)), +1, /* EXPLICIT tag at current level */ &asn_DEF_referenceSignal_4, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "referenceSignal" }, { ATF_NOFLAGS, 0, offsetof(struct QCL_Info, qcl_Type), (ASN_TAG_CLASS_CONTEXT | (3 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_qcl_Type_7, 0, { 0, 0, 0 }, 0, 0, /* No default value */ "qcl-Type" }, }; static const int asn_MAP_QCL_Info_oms_1[] = { 0, 1 }; static const ber_tlv_tag_t asn_DEF_QCL_Info_tags_1[] = { (ASN_TAG_CLASS_UNIVERSAL | (16 << 2)) }; static const asn_TYPE_tag2member_t asn_MAP_QCL_Info_tag2el_1[] = { { (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 }, /* cell */ { (ASN_TAG_CLASS_CONTEXT | (1 << 2)), 1, 0, 0 }, /* bwp-Id */ { (ASN_TAG_CLASS_CONTEXT | (2 << 2)), 2, 0, 0 }, /* referenceSignal */ { (ASN_TAG_CLASS_CONTEXT | (3 << 2)), 3, 0, 0 } /* qcl-Type */ }; asn_SEQUENCE_specifics_t asn_SPC_QCL_Info_specs_1 = { sizeof(struct QCL_Info), offsetof(struct QCL_Info, _asn_ctx), asn_MAP_QCL_Info_tag2el_1, 4, /* Count of tags in the map */ asn_MAP_QCL_Info_oms_1, /* Optional members */ 2, 0, /* Root/Additions */ 4, /* First extension addition */ }; asn_TYPE_descriptor_t asn_DEF_QCL_Info = { "QCL-Info", "QCL-Info", &asn_OP_SEQUENCE, asn_DEF_QCL_Info_tags_1, sizeof(asn_DEF_QCL_Info_tags_1) /sizeof(asn_DEF_QCL_Info_tags_1[0]), /* 1 */ asn_DEF_QCL_Info_tags_1, /* Same as above */ sizeof(asn_DEF_QCL_Info_tags_1) /sizeof(asn_DEF_QCL_Info_tags_1[0]), /* 1 */ { 0, 0, SEQUENCE_constraint }, asn_MBR_QCL_Info_1, 4, /* Elements count */ &asn_SPC_QCL_Info_specs_1 /* Additional specs */ };
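/*
 * Editor's note: illustrative sketch only, not asn1c output. The
 * asn_MAP_*_tag2el tables above associate each BER context tag with a
 * member index; this stand-in (with a simplified local struct, since the
 * real asn1c internals are not reproduced here) shows the kind of lookup
 * a decoder performs over such a map.
 */
struct demo_tag2member {
	unsigned tag;   /* context tag number, e.g. 2 for referenceSignal */
	unsigned el_no; /* index into the member array */
};

static int demo_find_member(const struct demo_tag2member *map, int count,
                            unsigned tag)
{
	int i;
	for (i = 0; i < count; i++)
		if (map[i].tag == tag)
			return (int)map[i].el_no; /* element index */
	return -1; /* unknown tag */
}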
163994.c
#include "io.h" int main(void) { long long rd, rs, rt; long long result; rd = 0; rs = 0x1234567845BCFFFF; rt = 0x8765432198529AD2; result = 0x52fbec7035a2ca5c; __asm ("muleq_s.pw.qhr %0, %1, %2\n\t" : "=r"(rd) : "r"(rs), "r"(rt) ); if (result != rd) { printf("1 muleq_s.pw.qhr error\n"); return -1; } rd = 0; rs = 0x1234567845BC8000; rt = 0x8765432198528000; result = 0x52fbec707FFFFFFF; __asm ("muleq_s.pw.qhr %0, %1, %2\n\t" : "=r"(rd) : "r"(rs), "r"(rt) ); if (result != rd) { printf("2 muleq_s.pw.qhr error\n"); return -1; } rd = 0; __asm ("rddsp %0\n\t" : "=r"(rd) ); rd = rd >> 21; rd = rd & 0x1; if (rd != 1) { printf("3 muleq_s.pw.qhr error\n"); return -1; } return 0; }
462241.c
/* * File: MotorControllerMain.c * Author: BB Slug Team * * PID control based off of material found in Embedded Computing and Mechatronics with the PIC32 * Microcontroller Kevin M. Lynch, Nicholas Marchuk, Matthew L. Elwin * * Created on April 12, 2017, 5:12 PM */ /************************************************************************/ /* File Description: */ /************************************************************************/ #include "BB_BOARD.h" #include <plib.h> #include <stdio.h> #include "BB_Motor.h" #include "BB_Encoder.h" #include "BB_LEDS.h" #include "BB_UART.h" #include <math.h> #include "BB_MOTOR_CONTROLLER.h" /* ------------------------------------------------------------ */ /* Definitions */ /* ------------------------------------------------------------ */ #define cntMsDelay 5000 #define thsndDelay 1000 /* ------------------------------------------------------------ */ /* Prototypes */ /* ------------------------------------------------------------ */ void DeviceInit(); void DelayInit(); void DelayMs(unsigned t); void DelayUs(unsigned t); /* ------------------------------------------------------------ */ /* Global Variables */ /* ------------------------------------------------------------ */ volatile float eCountRadians = 0; /* ------------------------------------------------------------ */ /* Main */ /* ------------------------------------------------------------ */ int main() { BB_BOARD_Init(); PORTSetPinsDigitalOut(JC03); // PMOD Pin JC 03 PORTClearBits(JC03); // Interrupt // *NOTE: // Make sure to change both T4_PS_1_64 and the PRESCALE #define //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // STEP 2. configure Timer 1 using internal clock, 1:64 pre-scaler OpenTimer4(T4_ON | T4_SOURCE_INT | T4_PS_1_64, T4_PERIOD); // set up the timer interrupt with a priority of 2 ConfigIntTimer4(T4_INT_ON | T4_INT_PRIOR_2); // enable multi-vector interrupts INTEnableSystemMultiVectoredInt(); // printf("T4_PS_1_256: %x\n", T4_PS_1_256); // printf("T4_PS_1_64: %x\n", T4_PS_1_64); // printf("T4_PS_1_32 %x\n", T4_PS_1_32); // printf("T4_ON %x\n", T4_ON); float oldECR = 0; while (1) { if (eCountRadians != oldECR) { printf("eCountRadians: %f \n", eCountRadians); oldECR = eCountRadians; } } // eprev = 0; // initial "previous error" is zero // eint = 0; // initial error integral is zero // now = 0; // "now" tracks the elapsed time // every dt seconds do { // s = readSensor(); // read sensor value // r = referenceValue(now); // get reference signal for time "now" // e = r - s; // calculate the error // edot = e - eprev; // error difference // eint = eint + e; // error sum // u = Kp*e + Ki*eint + Kd*edot; // calculate the control signal // sendControl(u); // send control signal to the plant // eprev = e; // current error is now prev error for next iteration // now = now + dt; // update the "now" time // } // dt is Ts in code below // for motor 1 // float reference = 0; // the target angle value // float e1prev = 0; // initial "previous error" is zero // float e1int = 0; // initial error integral is zero // float e1dot = 0; // // float now1 = 0; // "now" tracks the elapsed time // float error1 = 0; // the error or difference between the reference and the actual angle // float frequency = 2000; // double u1 = 0; // float Ts1 = 1 / frequency; // float Kp1 = 0.129; // float Ki1 = 0.0735 * Ts1; // float Kd1 = 0.0565 / Ts1; // // double encoder1value = 0; // printf("STARTING\n"); // // // SetMotorSpeed(250, MOTOR_1); // // DelayUs(); // Wait for 5 seconds, to allow for 
turning the wheel // // // then the motor should try to move the wheel back to // // // 'zero' // // MotorsStop(); // // printf("Motor Stopped\n"); // // // int j = 0; // while (1) { // DelayUs(1000); // printf("%d\n", j++); // } // // int i = 10000; // while (i > 0) { // encoder1value = GetEncoderRadians(MOTOR_1); //= tick2Radian*GetEncoderCount(MOTOR_1); // read sensor convert to rad // // // // // *IMPORTANT! - READ THE FOLLOWING NOTE! // // *NOTE: reference will be updated in future to be whatever value is needed at a given time // reference = 0; // get reference signal for time "now" // // error1 = reference - encoder1value; // calculate the error // e1dot = error1 - e1prev; // error difference // e1int = e1int + error1; // error sum // u1 = Kp1 * error1 + Ki1 * e1int + Kd1*e1dot; // calculate the control signal // // // u1 needs to be converted to PWM to voltage // SetMotorSpeed(u1*VOLT_2_PWM, MOTOR_1); // send control signal to the plant to rotate so many degrees // e1prev = error1; // current error is now previous error for next iteration // now1 = now1 + Ts1; // update the "now" time // // DelayUs(500); // half a millisecond -> 2kHz // // i--; // } // // // printf("encoder1value: %e \n", encoder1value); // printf("error1: %f \n", error1); // printf("e1dot: %f \n", e1dot); // printf("e1int: %f\n", e1int); // printf("u1: %e \n", u1); // printf("ENDING\n \n"); while (1); // sit and spin return 0; }
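/*
 * Editor's note: a minimal sketch of the discrete PID step described by
 * the commented-out pseudocode above. The gains and the surrounding
 * sensor/reference/actuator hooks are placeholders, not values taken
 * from this project.
 */
typedef struct {
    float Kp, Ki, Kd; /* controller gains */
    float eprev;      /* previous error, for the derivative term */
    float eint;       /* accumulated error, for the integral term */
} DemoPid;

static float demo_pid_step(DemoPid *pid, float reference, float measured)
{
    float e = reference - measured; /* error */
    float edot = e - pid->eprev;    /* error difference */
    pid->eint += e;                 /* error sum */
    pid->eprev = e;                 /* current error is next step's previous */
    return pid->Kp * e + pid->Ki * pid->eint + pid->Kd * edot;
}
/* Called once per sample period Ts; the result u would then be scaled to
 * a PWM command, e.g. SetMotorSpeed(u * VOLT_2_PWM, MOTOR_1) as above. */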