/* 148841.c */
/*************************************************************************** * Copyright (C) 2005 by Dominic Rath * * [email protected] * * * * Copyright (C) 2007-2010 Øyvind Harboe * * [email protected] * * * * Copyright (C) 2008, Duane Ellis * * [email protected] * * * * Copyright (C) 2008 by Spencer Oliver * * [email protected] * * * * Copyright (C) 2008 by Rick Altherr * * [email protected]> * * * * Copyright (C) 2011 by Broadcom Corporation * * Evan Hunter - [email protected] * * * * Copyright (C) ST-Ericsson SA 2011 * * [email protected] : smp minimum support * * * * Copyright (C) 2011 Andreas Fritiofson * * [email protected] * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see <http://www.gnu.org/licenses/>. * ***************************************************************************/ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <helper/time_support.h> #include <jtag/jtag.h> #include <flash/nor/core.h> #include "target.h" #include "target_type.h" #include "target_request.h" #include "breakpoints.h" #include "register.h" #include "trace.h" #include "image.h" #include "rtos/rtos.h" #include "transport/transport.h" /* default halt wait timeout (ms) */ #define DEFAULT_HALT_TIMEOUT 5000 static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t count, uint8_t *buffer); static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t count, const uint8_t *buffer); static int target_array2mem(Jim_Interp *interp, struct target *target, int argc, Jim_Obj * const *argv); static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj * const *argv); static int target_register_user_commands(struct command_context *cmd_ctx); static int target_get_gdb_fileio_info_default(struct target *target, struct gdb_fileio_info *fileio_info); static int target_gdb_fileio_end_default(struct target *target, int retcode, int fileio_errno, bool ctrl_c); static int target_profiling_default(struct target *target, uint32_t *samples, uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds); /* targets */ extern struct target_type arm7tdmi_target; extern struct target_type arm720t_target; extern struct target_type arm9tdmi_target; extern struct target_type arm920t_target; extern struct target_type arm966e_target; extern struct target_type arm946e_target; extern struct target_type arm926ejs_target; extern struct target_type fa526_target; extern struct target_type feroceon_target; extern struct target_type dragonite_target; extern struct target_type xscale_target; extern struct target_type cortexm_target; extern struct target_type cortexa_target; extern struct target_type cortexr4_target; extern struct target_type arm11_target; extern struct target_type ls1_sap_target; extern struct target_type mips_m4k_target; extern struct target_type avr_target; extern struct target_type dsp563xx_target; extern struct target_type dsp5680xx_target; extern struct 
target_type testee_target; extern struct target_type avr32_ap7k_target; extern struct target_type hla_target; extern struct target_type nds32_v2_target; extern struct target_type nds32_v3_target; extern struct target_type nds32_v3m_target; extern struct target_type or1k_target; extern struct target_type quark_x10xx_target; extern struct target_type quark_d20xx_target; static struct target_type *target_types[] = { &arm7tdmi_target, &arm9tdmi_target, &arm920t_target, &arm720t_target, &arm966e_target, &arm946e_target, &arm926ejs_target, &fa526_target, &feroceon_target, &dragonite_target, &xscale_target, &cortexm_target, &cortexa_target, &cortexr4_target, &arm11_target, &ls1_sap_target, &mips_m4k_target, &avr_target, &dsp563xx_target, &dsp5680xx_target, &testee_target, &avr32_ap7k_target, &hla_target, &nds32_v2_target, &nds32_v3_target, &nds32_v3m_target, &or1k_target, &quark_x10xx_target, &quark_d20xx_target, NULL, }; struct target *all_targets; static struct target_event_callback *target_event_callbacks; static struct target_timer_callback *target_timer_callbacks; LIST_HEAD(target_reset_callback_list); LIST_HEAD(target_trace_callback_list); static const int polling_interval = 100; static const Jim_Nvp nvp_assert[] = { { .name = "assert", NVP_ASSERT }, { .name = "deassert", NVP_DEASSERT }, { .name = "T", NVP_ASSERT }, { .name = "F", NVP_DEASSERT }, { .name = "t", NVP_ASSERT }, { .name = "f", NVP_DEASSERT }, { .name = NULL, .value = -1 } }; static const Jim_Nvp nvp_error_target[] = { { .value = ERROR_TARGET_INVALID, .name = "err-invalid" }, { .value = ERROR_TARGET_INIT_FAILED, .name = "err-init-failed" }, { .value = ERROR_TARGET_TIMEOUT, .name = "err-timeout" }, { .value = ERROR_TARGET_NOT_HALTED, .name = "err-not-halted" }, { .value = ERROR_TARGET_FAILURE, .name = "err-failure" }, { .value = ERROR_TARGET_UNALIGNED_ACCESS , .name = "err-unaligned-access" }, { .value = ERROR_TARGET_DATA_ABORT , .name = "err-data-abort" }, { .value = ERROR_TARGET_RESOURCE_NOT_AVAILABLE , .name = "err-resource-not-available" }, { .value = ERROR_TARGET_TRANSLATION_FAULT , .name = "err-translation-fault" }, { .value = ERROR_TARGET_NOT_RUNNING, .name = "err-not-running" }, { .value = ERROR_TARGET_NOT_EXAMINED, .name = "err-not-examined" }, { .value = -1, .name = NULL } }; static const char *target_strerror_safe(int err) { const Jim_Nvp *n; n = Jim_Nvp_value2name_simple(nvp_error_target, err); if (n->name == NULL) return "unknown"; else return n->name; } static const Jim_Nvp nvp_target_event[] = { { .value = TARGET_EVENT_GDB_HALT, .name = "gdb-halt" }, { .value = TARGET_EVENT_HALTED, .name = "halted" }, { .value = TARGET_EVENT_RESUMED, .name = "resumed" }, { .value = TARGET_EVENT_RESUME_START, .name = "resume-start" }, { .value = TARGET_EVENT_RESUME_END, .name = "resume-end" }, { .name = "gdb-start", .value = TARGET_EVENT_GDB_START }, { .name = "gdb-end", .value = TARGET_EVENT_GDB_END }, { .value = TARGET_EVENT_RESET_START, .name = "reset-start" }, { .value = TARGET_EVENT_RESET_ASSERT_PRE, .name = "reset-assert-pre" }, { .value = TARGET_EVENT_RESET_ASSERT, .name = "reset-assert" }, { .value = TARGET_EVENT_RESET_ASSERT_POST, .name = "reset-assert-post" }, { .value = TARGET_EVENT_RESET_DEASSERT_PRE, .name = "reset-deassert-pre" }, { .value = TARGET_EVENT_RESET_DEASSERT_POST, .name = "reset-deassert-post" }, { .value = TARGET_EVENT_RESET_HALT_PRE, .name = "reset-halt-pre" }, { .value = TARGET_EVENT_RESET_HALT_POST, .name = "reset-halt-post" }, { .value = TARGET_EVENT_RESET_WAIT_PRE, .name = "reset-wait-pre" }, { .value 
= TARGET_EVENT_RESET_WAIT_POST, .name = "reset-wait-post" }, { .value = TARGET_EVENT_RESET_INIT, .name = "reset-init" }, { .value = TARGET_EVENT_RESET_END, .name = "reset-end" }, { .value = TARGET_EVENT_EXAMINE_START, .name = "examine-start" }, { .value = TARGET_EVENT_EXAMINE_END, .name = "examine-end" }, { .value = TARGET_EVENT_DEBUG_HALTED, .name = "debug-halted" }, { .value = TARGET_EVENT_DEBUG_RESUMED, .name = "debug-resumed" }, { .value = TARGET_EVENT_GDB_ATTACH, .name = "gdb-attach" }, { .value = TARGET_EVENT_GDB_DETACH, .name = "gdb-detach" }, { .value = TARGET_EVENT_GDB_FLASH_WRITE_START, .name = "gdb-flash-write-start" }, { .value = TARGET_EVENT_GDB_FLASH_WRITE_END , .name = "gdb-flash-write-end" }, { .value = TARGET_EVENT_GDB_FLASH_ERASE_START, .name = "gdb-flash-erase-start" }, { .value = TARGET_EVENT_GDB_FLASH_ERASE_END , .name = "gdb-flash-erase-end" }, { .value = TARGET_EVENT_TRACE_CONFIG, .name = "trace-config" }, { .name = NULL, .value = -1 } }; static const Jim_Nvp nvp_target_state[] = { { .name = "unknown", .value = TARGET_UNKNOWN }, { .name = "running", .value = TARGET_RUNNING }, { .name = "halted", .value = TARGET_HALTED }, { .name = "reset", .value = TARGET_RESET }, { .name = "debug-running", .value = TARGET_DEBUG_RUNNING }, { .name = NULL, .value = -1 }, }; static const Jim_Nvp nvp_target_debug_reason[] = { { .name = "debug-request" , .value = DBG_REASON_DBGRQ }, { .name = "breakpoint" , .value = DBG_REASON_BREAKPOINT }, { .name = "watchpoint" , .value = DBG_REASON_WATCHPOINT }, { .name = "watchpoint-and-breakpoint", .value = DBG_REASON_WPTANDBKPT }, { .name = "single-step" , .value = DBG_REASON_SINGLESTEP }, { .name = "target-not-halted" , .value = DBG_REASON_NOTHALTED }, { .name = "program-exit" , .value = DBG_REASON_EXIT }, { .name = "undefined" , .value = DBG_REASON_UNDEFINED }, { .name = NULL, .value = -1 }, }; static const Jim_Nvp nvp_target_endian[] = { { .name = "big", .value = TARGET_BIG_ENDIAN }, { .name = "little", .value = TARGET_LITTLE_ENDIAN }, { .name = "be", .value = TARGET_BIG_ENDIAN }, { .name = "le", .value = TARGET_LITTLE_ENDIAN }, { .name = NULL, .value = -1 }, }; static const Jim_Nvp nvp_reset_modes[] = { { .name = "unknown", .value = RESET_UNKNOWN }, { .name = "run" , .value = RESET_RUN }, { .name = "halt" , .value = RESET_HALT }, { .name = "init" , .value = RESET_INIT }, { .name = NULL , .value = -1 }, }; const char *debug_reason_name(struct target *t) { const char *cp; cp = Jim_Nvp_value2name_simple(nvp_target_debug_reason, t->debug_reason)->name; if (!cp) { LOG_ERROR("Invalid debug reason: %d", (int)(t->debug_reason)); cp = "(*BUG*unknown*BUG*)"; } return cp; } const char *target_state_name(struct target *t) { const char *cp; cp = Jim_Nvp_value2name_simple(nvp_target_state, t->state)->name; if (!cp) { LOG_ERROR("Invalid target state: %d", (int)(t->state)); cp = "(*BUG*unknown*BUG*)"; } if (!target_was_examined(t) && t->defer_examine) cp = "examine deferred"; return cp; } const char *target_event_name(enum target_event event) { const char *cp; cp = Jim_Nvp_value2name_simple(nvp_target_event, event)->name; if (!cp) { LOG_ERROR("Invalid target event: %d", (int)(event)); cp = "(*BUG*unknown*BUG*)"; } return cp; } const char *target_reset_mode_name(enum target_reset_mode reset_mode) { const char *cp; cp = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name; if (!cp) { LOG_ERROR("Invalid target reset mode: %d", (int)(reset_mode)); cp = "(*BUG*unknown*BUG*)"; } return cp; } /* determine the number of the new target */ static int 
new_target_number(void) { struct target *t; int x; /* number is 0 based */ x = -1; t = all_targets; while (t) { if (x < t->target_number) x = t->target_number; t = t->next; } return x + 1; } /* read a uint64_t from a buffer in target memory endianness */ uint64_t target_buffer_get_u64(struct target *target, const uint8_t *buffer) { if (target->endianness == TARGET_LITTLE_ENDIAN) return le_to_h_u64(buffer); else return be_to_h_u64(buffer); } /* read a uint32_t from a buffer in target memory endianness */ uint32_t target_buffer_get_u32(struct target *target, const uint8_t *buffer) { if (target->endianness == TARGET_LITTLE_ENDIAN) return le_to_h_u32(buffer); else return be_to_h_u32(buffer); } /* read a uint24_t from a buffer in target memory endianness */ uint32_t target_buffer_get_u24(struct target *target, const uint8_t *buffer) { if (target->endianness == TARGET_LITTLE_ENDIAN) return le_to_h_u24(buffer); else return be_to_h_u24(buffer); } /* read a uint16_t from a buffer in target memory endianness */ uint16_t target_buffer_get_u16(struct target *target, const uint8_t *buffer) { if (target->endianness == TARGET_LITTLE_ENDIAN) return le_to_h_u16(buffer); else return be_to_h_u16(buffer); } /* read a uint8_t from a buffer in target memory endianness */ static uint8_t target_buffer_get_u8(struct target *target, const uint8_t *buffer) { return *buffer & 0x0ff; } /* write a uint64_t to a buffer in target memory endianness */ void target_buffer_set_u64(struct target *target, uint8_t *buffer, uint64_t value) { if (target->endianness == TARGET_LITTLE_ENDIAN) h_u64_to_le(buffer, value); else h_u64_to_be(buffer, value); } /* write a uint32_t to a buffer in target memory endianness */ void target_buffer_set_u32(struct target *target, uint8_t *buffer, uint32_t value) { if (target->endianness == TARGET_LITTLE_ENDIAN) h_u32_to_le(buffer, value); else h_u32_to_be(buffer, value); } /* write a uint24_t to a buffer in target memory endianness */ void target_buffer_set_u24(struct target *target, uint8_t *buffer, uint32_t value) { if (target->endianness == TARGET_LITTLE_ENDIAN) h_u24_to_le(buffer, value); else h_u24_to_be(buffer, value); } /* write a uint16_t to a buffer in target memory endianness */ void target_buffer_set_u16(struct target *target, uint8_t *buffer, uint16_t value) { if (target->endianness == TARGET_LITTLE_ENDIAN) h_u16_to_le(buffer, value); else h_u16_to_be(buffer, value); } /* write a uint8_t to a buffer in target memory endianness */ static void target_buffer_set_u8(struct target *target, uint8_t *buffer, uint8_t value) { *buffer = value; } /* write a uint64_t array to a buffer in target memory endianness */ void target_buffer_get_u64_array(struct target *target, const uint8_t *buffer, uint32_t count, uint64_t *dstbuf) { uint32_t i; for (i = 0; i < count; i++) dstbuf[i] = target_buffer_get_u64(target, &buffer[i * 8]); } /* write a uint32_t array to a buffer in target memory endianness */ void target_buffer_get_u32_array(struct target *target, const uint8_t *buffer, uint32_t count, uint32_t *dstbuf) { uint32_t i; for (i = 0; i < count; i++) dstbuf[i] = target_buffer_get_u32(target, &buffer[i * 4]); } /* write a uint16_t array to a buffer in target memory endianness */ void target_buffer_get_u16_array(struct target *target, const uint8_t *buffer, uint32_t count, uint16_t *dstbuf) { uint32_t i; for (i = 0; i < count; i++) dstbuf[i] = target_buffer_get_u16(target, &buffer[i * 2]); } /* write a uint64_t array to a buffer in target memory endianness */ void target_buffer_set_u64_array(struct 
target *target, uint8_t *buffer, uint32_t count, const uint64_t *srcbuf) { uint32_t i; for (i = 0; i < count; i++) target_buffer_set_u64(target, &buffer[i * 8], srcbuf[i]); } /* write a uint32_t array to a buffer in target memory endianness */ void target_buffer_set_u32_array(struct target *target, uint8_t *buffer, uint32_t count, const uint32_t *srcbuf) { uint32_t i; for (i = 0; i < count; i++) target_buffer_set_u32(target, &buffer[i * 4], srcbuf[i]); } /* write a uint16_t array to a buffer in target memory endianness */ void target_buffer_set_u16_array(struct target *target, uint8_t *buffer, uint32_t count, const uint16_t *srcbuf) { uint32_t i; for (i = 0; i < count; i++) target_buffer_set_u16(target, &buffer[i * 2], srcbuf[i]); } /* return a pointer to a configured target; id is name or number */ struct target *get_target(const char *id) { struct target *target; /* try as tcltarget name */ for (target = all_targets; target; target = target->next) { if (target_name(target) == NULL) continue; if (strcmp(id, target_name(target)) == 0) return target; } /* It's OK to remove this fallback sometime after August 2010 or so */ /* no match, try as number */ unsigned num; if (parse_uint(id, &num) != ERROR_OK) return NULL; for (target = all_targets; target; target = target->next) { if (target->target_number == (int)num) { LOG_WARNING("use '%s' as target identifier, not '%u'", target_name(target), num); return target; } } return NULL; } /* returns a pointer to the n-th configured target */ struct target *get_target_by_num(int num) { struct target *target = all_targets; while (target) { if (target->target_number == num) return target; target = target->next; } return NULL; } struct target *get_current_target(struct command_context *cmd_ctx) { struct target *target = get_target_by_num(cmd_ctx->current_target); if (target == NULL) { LOG_ERROR("BUG: current_target out of bounds"); exit(-1); } return target; } int target_poll(struct target *target) { int retval; /* We can't poll until after examine */ if (!target_was_examined(target)) { /* Fail silently lest we pollute the log */ return ERROR_FAIL; } retval = target->type->poll(target); if (retval != ERROR_OK) return retval; if (target->halt_issued) { if (target->state == TARGET_HALTED) target->halt_issued = false; else { int64_t t = timeval_ms() - target->halt_issued_time; if (t > DEFAULT_HALT_TIMEOUT) { target->halt_issued = false; LOG_INFO("Halt timed out, wake up GDB."); target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT); } } } return ERROR_OK; } int target_halt(struct target *target) { int retval; /* We can't poll until after examine */ if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } retval = target->type->halt(target); if (retval != ERROR_OK) return retval; target->halt_issued = true; target->halt_issued_time = timeval_ms(); return ERROR_OK; } /** * Make the target (re)start executing using its saved execution * context (possibly with some modifications). * * @param target Which target should start executing. * @param current True to use the target's saved program counter instead * of the address parameter * @param address Optionally used as the program counter. * @param handle_breakpoints True iff breakpoints at the resumption PC * should be skipped. (For example, maybe execution was stopped by * such a breakpoint, in which case it would be counterprodutive to * let it re-trigger. 
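 *
 * A minimal usage sketch (hypothetical caller code, not taken from this
 * file; 'target' is assumed to be an examined target): resume at the
 * saved PC, skipping a breakpoint at that address, as a normal run.
 *
 *   if (target_resume(target, 1, 0, 1, 0) != ERROR_OK)
 *       LOG_ERROR("resume failed");
 *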
* @param debug_execution False if all working areas allocated by OpenOCD * should be released and/or restored to their original contents. * (This would for example be true to run some downloaded "helper" * algorithm code, which resides in one such working buffer and uses * another for data storage.) * * @todo Resolve the ambiguity about what the "debug_execution" flag * signifies. For example, Target implementations don't agree on how * it relates to invalidation of the register cache, or to whether * breakpoints and watchpoints should be enabled. (It would seem wrong * to enable breakpoints when running downloaded "helper" algorithms * (debug_execution true), since the breakpoints would be set to match * target firmware being debugged, not the helper algorithm.... and * enabling them could cause such helpers to malfunction (for example, * by overwriting data with a breakpoint instruction. On the other * hand the infrastructure for running such helpers might use this * procedure but rely on hardware breakpoint to detect termination.) */ int target_resume(struct target *target, int current, uint32_t address, int handle_breakpoints, int debug_execution) { int retval; /* We can't poll until after examine */ if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } target_call_event_callbacks(target, TARGET_EVENT_RESUME_START); /* note that resume *must* be asynchronous. The CPU can halt before * we poll. The CPU can even halt at the current PC as a result of * a software breakpoint being inserted by (a bug?) the application. */ retval = target->type->resume(target, current, address, handle_breakpoints, debug_execution); if (retval != ERROR_OK) return retval; target_call_event_callbacks(target, TARGET_EVENT_RESUME_END); return retval; } static int target_process_reset(struct command_context *cmd_ctx, enum target_reset_mode reset_mode) { char buf[100]; int retval; Jim_Nvp *n; n = Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode); if (n->name == NULL) { LOG_ERROR("invalid reset mode"); return ERROR_FAIL; } struct target *target; for (target = all_targets; target; target = target->next) target_call_reset_callbacks(target, reset_mode); /* disable polling during reset to make reset event scripts * more predictable, i.e. dr/irscan & pathmove in events will * not have JTAG operations injected into the middle of a sequence. 
*/ bool save_poll = jtag_poll_get_enabled(); jtag_poll_set_enabled(false); sprintf(buf, "ocd_process_reset %s", n->name); retval = Jim_Eval(cmd_ctx->interp, buf); jtag_poll_set_enabled(save_poll); if (retval != JIM_OK) { Jim_MakeErrorMessage(cmd_ctx->interp); command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(cmd_ctx->interp), NULL)); return ERROR_FAIL; } /* We want any events to be processed before the prompt */ retval = target_call_timer_callbacks_now(); for (target = all_targets; target; target = target->next) { target->type->check_reset(target); target->running_alg = false; } return retval; } static int identity_virt2phys(struct target *target, uint32_t virtual, uint32_t *physical) { *physical = virtual; return ERROR_OK; } static int no_mmu(struct target *target, int *enabled) { *enabled = 0; return ERROR_OK; } static int default_examine(struct target *target) { target_set_examined(target); return ERROR_OK; } /* no check by default */ static int default_check_reset(struct target *target) { return ERROR_OK; } int target_examine_one(struct target *target) { target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_START); int retval = target->type->examine(target); if (retval != ERROR_OK) return retval; target_call_event_callbacks(target, TARGET_EVENT_EXAMINE_END); return ERROR_OK; } static int jtag_enable_callback(enum jtag_event event, void *priv) { struct target *target = priv; if (event != JTAG_TAP_EVENT_ENABLE || !target->tap->enabled) return ERROR_OK; jtag_unregister_event_callback(jtag_enable_callback, target); return target_examine_one(target); } /* Targets that correctly implement init + examine, i.e. * no communication with target during init: * * XScale */ int target_examine(void) { int retval = ERROR_OK; struct target *target; for (target = all_targets; target; target = target->next) { /* defer examination, but don't skip it */ if (!target->tap->enabled) { jtag_register_event_callback(jtag_enable_callback, target); continue; } if (target->defer_examine) continue; retval = target_examine_one(target); if (retval != ERROR_OK) return retval; } return retval; } const char *target_type_name(struct target *target) { return target->type->name; } static int target_soft_reset_halt(struct target *target) { if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (!target->type->soft_reset_halt) { LOG_ERROR("Target %s does not support soft_reset_halt", target_name(target)); return ERROR_FAIL; } return target->type->soft_reset_halt(target); } /** * Downloads a target-specific native code algorithm to the target, * and executes it. * Note that some targets may need to set up, enable, * and tear down a breakpoint (hard or * soft) to detect algorithm * termination, while others may support lower overhead schemes where * soft breakpoints embedded in the algorithm automatically terminate the * algorithm. * * @param target used to run the algorithm * @param arch_info target-specific description of the algorithm. 
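 *
 * Illustrative sketch only: the register name "r0", the value, the
 * addresses and the 1000 ms timeout are assumptions, and it relies on the
 * init_reg_param()/buf_set_u32()/destroy_reg_param() helpers available
 * elsewhere in OpenOCD. It passes one register parameter to an algorithm
 * previously loaded at entry_point.
 *
 *   struct reg_param reg_params[1];
 *   init_reg_param(&reg_params[0], "r0", 32, PARAM_OUT);
 *   buf_set_u32(reg_params[0].value, 0, 32, 0x12345678);
 *   int retval = target_run_algorithm(target, 0, NULL, 1, reg_params,
 *           entry_point, exit_point, 1000, arch_info);
 *   destroy_reg_param(&reg_params[0]);
 *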
*/ int target_run_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_param, uint32_t entry_point, uint32_t exit_point, int timeout_ms, void *arch_info) { int retval = ERROR_FAIL; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); goto done; } if (!target->type->run_algorithm) { LOG_ERROR("Target type '%s' does not support %s", target_type_name(target), __func__); goto done; } target->running_alg = true; retval = target->type->run_algorithm(target, num_mem_params, mem_params, num_reg_params, reg_param, entry_point, exit_point, timeout_ms, arch_info); target->running_alg = false; done: return retval; } /** * Downloads a target-specific native code algorithm to the target, * executes and leaves it running. * * @param target used to run the algorithm * @param arch_info target-specific description of the algorithm. */ int target_start_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t entry_point, uint32_t exit_point, void *arch_info) { int retval = ERROR_FAIL; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); goto done; } if (!target->type->start_algorithm) { LOG_ERROR("Target type '%s' does not support %s", target_type_name(target), __func__); goto done; } if (target->running_alg) { LOG_ERROR("Target is already running an algorithm"); goto done; } target->running_alg = true; retval = target->type->start_algorithm(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, arch_info); done: return retval; } /** * Waits for an algorithm started with target_start_algorithm() to complete. * * @param target used to run the algorithm * @param arch_info target-specific description of the algorithm. */ int target_wait_algorithm(struct target *target, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t exit_point, int timeout_ms, void *arch_info) { int retval = ERROR_FAIL; if (!target->type->wait_algorithm) { LOG_ERROR("Target type '%s' does not support %s", target_type_name(target), __func__); goto done; } if (!target->running_alg) { LOG_ERROR("Target is not running an algorithm"); goto done; } retval = target->type->wait_algorithm(target, num_mem_params, mem_params, num_reg_params, reg_params, exit_point, timeout_ms, arch_info); if (retval != ERROR_TARGET_TIMEOUT) target->running_alg = false; done: return retval; } /** * Executes a target-specific native code algorithm in the target. * It differs from target_run_algorithm in that the algorithm is asynchronous. * Because of this it requires an compliant algorithm: * see contrib/loaders/flash/stm32f1x.S for example. * * @param target used to run the algorithm */ int target_run_flash_async_algorithm(struct target *target, const uint8_t *buffer, uint32_t count, int block_size, int num_mem_params, struct mem_param *mem_params, int num_reg_params, struct reg_param *reg_params, uint32_t buffer_start, uint32_t buffer_size, uint32_t entry_point, uint32_t exit_point, void *arch_info) { int retval; int timeout = 0; const uint8_t *buffer_orig = buffer; /* Set up working area. First word is write pointer, second word is read pointer, * rest is fifo data area. 
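 *
 * Worked illustration (addresses are hypothetical, assuming
 * buffer_start = 0x20000000 and buffer_size = 0x1000):
 *
 *   0x20000000  wp   write pointer, maintained by OpenOCD
 *   0x20000004  rp   read pointer, maintained by the target algorithm
 *   0x20000008  ...  fifo data area, wrapping at 0x20001000
 *
 * wp == rp means the fifo is empty, so the writer never fills the last
 * block completely; the target signals an abort by setting rp to 0.
 *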
*/ uint32_t wp_addr = buffer_start; uint32_t rp_addr = buffer_start + 4; uint32_t fifo_start_addr = buffer_start + 8; uint32_t fifo_end_addr = buffer_start + buffer_size; uint32_t wp = fifo_start_addr; uint32_t rp = fifo_start_addr; /* validate block_size is 2^n */ assert(!block_size || !(block_size & (block_size - 1))); retval = target_write_u32(target, wp_addr, wp); if (retval != ERROR_OK) return retval; retval = target_write_u32(target, rp_addr, rp); if (retval != ERROR_OK) return retval; /* Start up algorithm on target and let it idle while writing the first chunk */ retval = target_start_algorithm(target, num_mem_params, mem_params, num_reg_params, reg_params, entry_point, exit_point, arch_info); if (retval != ERROR_OK) { LOG_ERROR("error starting target flash write algorithm"); return retval; } while (count > 0) { retval = target_read_u32(target, rp_addr, &rp); if (retval != ERROR_OK) { LOG_ERROR("failed to get read pointer"); break; } LOG_DEBUG("offs 0x%zx count 0x%" PRIx32 " wp 0x%" PRIx32 " rp 0x%" PRIx32, (size_t) (buffer - buffer_orig), count, wp, rp); if (rp == 0) { LOG_ERROR("flash write algorithm aborted by target"); retval = ERROR_FLASH_OPERATION_FAILED; break; } if (((rp - fifo_start_addr) & (block_size - 1)) || rp < fifo_start_addr || rp >= fifo_end_addr) { LOG_ERROR("corrupted fifo read pointer 0x%" PRIx32, rp); break; } /* Count the number of bytes available in the fifo without * crossing the wrap around. Make sure to not fill it completely, * because that would make wp == rp and that's the empty condition. */ uint32_t thisrun_bytes; if (rp > wp) thisrun_bytes = rp - wp - block_size; else if (rp > fifo_start_addr) thisrun_bytes = fifo_end_addr - wp; else thisrun_bytes = fifo_end_addr - wp - block_size; if (thisrun_bytes == 0) { /* Throttle polling a bit if transfer is (much) faster than flash * programming. The exact delay shouldn't matter as long as it's * less than buffer size / flash speed. This is very unlikely to * run when using high latency connections such as USB. 
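 *
 * With the 10 ms sleep below and the 500-iteration limit, an idle fifo is
 * tolerated for roughly 5 seconds before the operation is abandoned.
 *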
*/ alive_sleep(10); /* to stop an infinite loop on some targets check and increment a timeout * this issue was observed on a stellaris using the new ICDI interface */ if (timeout++ >= 500) { LOG_ERROR("timeout waiting for algorithm, a target reset is recommended"); return ERROR_FLASH_OPERATION_FAILED; } continue; } /* reset our timeout */ timeout = 0; /* Limit to the amount of data we actually want to write */ if (thisrun_bytes > count * block_size) thisrun_bytes = count * block_size; /* Write data to fifo */ retval = target_write_buffer(target, wp, thisrun_bytes, buffer); if (retval != ERROR_OK) break; /* Update counters and wrap write pointer */ buffer += thisrun_bytes; count -= thisrun_bytes / block_size; wp += thisrun_bytes; if (wp >= fifo_end_addr) wp = fifo_start_addr; /* Store updated write pointer to target */ retval = target_write_u32(target, wp_addr, wp); if (retval != ERROR_OK) break; } if (retval != ERROR_OK) { /* abort flash write algorithm on target */ target_write_u32(target, wp_addr, 0); } int retval2 = target_wait_algorithm(target, num_mem_params, mem_params, num_reg_params, reg_params, exit_point, 10000, arch_info); if (retval2 != ERROR_OK) { LOG_ERROR("error waiting for target flash write algorithm"); retval = retval2; } if (retval == ERROR_OK) { /* check if algorithm set rp = 0 after fifo writer loop finished */ retval = target_read_u32(target, rp_addr, &rp); if (retval == ERROR_OK && rp == 0) { LOG_ERROR("flash write algorithm aborted by target"); retval = ERROR_FLASH_OPERATION_FAILED; } } return retval; } int target_read_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer) { if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (!target->type->read_memory) { LOG_ERROR("Target %s doesn't support read_memory", target_name(target)); return ERROR_FAIL; } return target->type->read_memory(target, address, size, count, buffer); } int target_read_phys_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer) { if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (!target->type->read_phys_memory) { LOG_ERROR("Target %s doesn't support read_phys_memory", target_name(target)); return ERROR_FAIL; } return target->type->read_phys_memory(target, address, size, count, buffer); } int target_write_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer) { if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (!target->type->write_memory) { LOG_ERROR("Target %s doesn't support write_memory", target_name(target)); return ERROR_FAIL; } return target->type->write_memory(target, address, size, count, buffer); } int target_write_phys_memory(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer) { if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (!target->type->write_phys_memory) { LOG_ERROR("Target %s doesn't support write_phys_memory", target_name(target)); return ERROR_FAIL; } return target->type->write_phys_memory(target, address, size, count, buffer); } int target_add_breakpoint(struct target *target, struct breakpoint *breakpoint) { if ((target->state != TARGET_HALTED) && (breakpoint->type != BKPT_HARD)) { LOG_WARNING("target %s is not halted", target_name(target)); return ERROR_TARGET_NOT_HALTED; } return target->type->add_breakpoint(target, 
breakpoint); } int target_add_context_breakpoint(struct target *target, struct breakpoint *breakpoint) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target_name(target)); return ERROR_TARGET_NOT_HALTED; } return target->type->add_context_breakpoint(target, breakpoint); } int target_add_hybrid_breakpoint(struct target *target, struct breakpoint *breakpoint) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target_name(target)); return ERROR_TARGET_NOT_HALTED; } return target->type->add_hybrid_breakpoint(target, breakpoint); } int target_remove_breakpoint(struct target *target, struct breakpoint *breakpoint) { return target->type->remove_breakpoint(target, breakpoint); } int target_add_watchpoint(struct target *target, struct watchpoint *watchpoint) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target_name(target)); return ERROR_TARGET_NOT_HALTED; } return target->type->add_watchpoint(target, watchpoint); } int target_remove_watchpoint(struct target *target, struct watchpoint *watchpoint) { return target->type->remove_watchpoint(target, watchpoint); } int target_hit_watchpoint(struct target *target, struct watchpoint **hit_watchpoint) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target->cmd_name); return ERROR_TARGET_NOT_HALTED; } if (target->type->hit_watchpoint == NULL) { /* For backward compatible, if hit_watchpoint is not implemented, * return ERROR_FAIL such that gdb_server will not take the nonsense * information. */ return ERROR_FAIL; } return target->type->hit_watchpoint(target, hit_watchpoint); } int target_get_gdb_reg_list(struct target *target, struct reg **reg_list[], int *reg_list_size, enum target_register_class reg_class) { return target->type->get_gdb_reg_list(target, reg_list, reg_list_size, reg_class); } int target_step(struct target *target, int current, uint32_t address, int handle_breakpoints) { return target->type->step(target, current, address, handle_breakpoints); } int target_get_gdb_fileio_info(struct target *target, struct gdb_fileio_info *fileio_info) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target->cmd_name); return ERROR_TARGET_NOT_HALTED; } return target->type->get_gdb_fileio_info(target, fileio_info); } int target_gdb_fileio_end(struct target *target, int retcode, int fileio_errno, bool ctrl_c) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target->cmd_name); return ERROR_TARGET_NOT_HALTED; } return target->type->gdb_fileio_end(target, retcode, fileio_errno, ctrl_c); } int target_profiling(struct target *target, uint32_t *samples, uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds) { if (target->state != TARGET_HALTED) { LOG_WARNING("target %s is not halted", target->cmd_name); return ERROR_TARGET_NOT_HALTED; } return target->type->profiling(target, samples, max_num_samples, num_samples, seconds); } /** * Reset the @c examined flag for the given target. * Pure paranoia -- targets are zeroed on allocation. 
*/ static void target_reset_examined(struct target *target) { target->examined = false; } static int handle_target(void *priv); static int target_init_one(struct command_context *cmd_ctx, struct target *target) { target_reset_examined(target); struct target_type *type = target->type; if (type->examine == NULL) type->examine = default_examine; if (type->check_reset == NULL) type->check_reset = default_check_reset; assert(type->init_target != NULL); int retval = type->init_target(cmd_ctx, target); if (ERROR_OK != retval) { LOG_ERROR("target '%s' init failed", target_name(target)); return retval; } /* Sanity-check MMU support ... stub in what we must, to help * implement it in stages, but warn if we need to do so. */ if (type->mmu) { if (type->virt2phys == NULL) { LOG_ERROR("type '%s' is missing virt2phys", type->name); type->virt2phys = identity_virt2phys; } } else { /* Make sure no-MMU targets all behave the same: make no * distinction between physical and virtual addresses, and * ensure that virt2phys() is always an identity mapping. */ if (type->write_phys_memory || type->read_phys_memory || type->virt2phys) LOG_WARNING("type '%s' has bad MMU hooks", type->name); type->mmu = no_mmu; type->write_phys_memory = type->write_memory; type->read_phys_memory = type->read_memory; type->virt2phys = identity_virt2phys; } if (target->type->read_buffer == NULL) target->type->read_buffer = target_read_buffer_default; if (target->type->write_buffer == NULL) target->type->write_buffer = target_write_buffer_default; if (target->type->get_gdb_fileio_info == NULL) target->type->get_gdb_fileio_info = target_get_gdb_fileio_info_default; if (target->type->gdb_fileio_end == NULL) target->type->gdb_fileio_end = target_gdb_fileio_end_default; if (target->type->profiling == NULL) target->type->profiling = target_profiling_default; return ERROR_OK; } static int target_init(struct command_context *cmd_ctx) { struct target *target; int retval; for (target = all_targets; target; target = target->next) { retval = target_init_one(cmd_ctx, target); if (ERROR_OK != retval) return retval; } if (!all_targets) return ERROR_OK; retval = target_register_user_commands(cmd_ctx); if (ERROR_OK != retval) return retval; retval = target_register_timer_callback(&handle_target, polling_interval, 1, cmd_ctx->interp); if (ERROR_OK != retval) return retval; return ERROR_OK; } COMMAND_HANDLER(handle_target_init_command) { int retval; if (CMD_ARGC != 0) return ERROR_COMMAND_SYNTAX_ERROR; static bool target_initialized; if (target_initialized) { LOG_INFO("'target init' has already been called"); return ERROR_OK; } target_initialized = true; retval = command_run_line(CMD_CTX, "init_targets"); if (ERROR_OK != retval) return retval; retval = command_run_line(CMD_CTX, "init_target_events"); if (ERROR_OK != retval) return retval; retval = command_run_line(CMD_CTX, "init_board"); if (ERROR_OK != retval) return retval; LOG_DEBUG("Initializing targets..."); return target_init(CMD_CTX); } int target_register_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv) { struct target_event_callback **callbacks_p = &target_event_callbacks; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; if (*callbacks_p) { while ((*callbacks_p)->next) callbacks_p = &((*callbacks_p)->next); callbacks_p = &((*callbacks_p)->next); } (*callbacks_p) = malloc(sizeof(struct target_event_callback)); (*callbacks_p)->callback = callback; (*callbacks_p)->priv = priv; (*callbacks_p)->next = NULL; return ERROR_OK; } int 
target_register_reset_callback(int (*callback)(struct target *target, enum target_reset_mode reset_mode, void *priv), void *priv) { struct target_reset_callback *entry; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; entry = malloc(sizeof(struct target_reset_callback)); if (entry == NULL) { LOG_ERROR("error allocating buffer for reset callback entry"); return ERROR_COMMAND_SYNTAX_ERROR; } entry->callback = callback; entry->priv = priv; list_add(&entry->list, &target_reset_callback_list); return ERROR_OK; } int target_register_trace_callback(int (*callback)(struct target *target, size_t len, uint8_t *data, void *priv), void *priv) { struct target_trace_callback *entry; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; entry = malloc(sizeof(struct target_trace_callback)); if (entry == NULL) { LOG_ERROR("error allocating buffer for trace callback entry"); return ERROR_COMMAND_SYNTAX_ERROR; } entry->callback = callback; entry->priv = priv; list_add(&entry->list, &target_trace_callback_list); return ERROR_OK; } int target_register_timer_callback(int (*callback)(void *priv), int time_ms, int periodic, void *priv) { struct target_timer_callback **callbacks_p = &target_timer_callbacks; struct timeval now; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; if (*callbacks_p) { while ((*callbacks_p)->next) callbacks_p = &((*callbacks_p)->next); callbacks_p = &((*callbacks_p)->next); } (*callbacks_p) = malloc(sizeof(struct target_timer_callback)); (*callbacks_p)->callback = callback; (*callbacks_p)->periodic = periodic; (*callbacks_p)->time_ms = time_ms; (*callbacks_p)->removed = false; gettimeofday(&now, NULL); (*callbacks_p)->when.tv_usec = now.tv_usec + (time_ms % 1000) * 1000; time_ms -= (time_ms % 1000); (*callbacks_p)->when.tv_sec = now.tv_sec + (time_ms / 1000); if ((*callbacks_p)->when.tv_usec > 1000000) { (*callbacks_p)->when.tv_usec = (*callbacks_p)->when.tv_usec - 1000000; (*callbacks_p)->when.tv_sec += 1; } (*callbacks_p)->priv = priv; (*callbacks_p)->next = NULL; return ERROR_OK; } int target_unregister_event_callback(int (*callback)(struct target *target, enum target_event event, void *priv), void *priv) { struct target_event_callback **p = &target_event_callbacks; struct target_event_callback *c = target_event_callbacks; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; while (c) { struct target_event_callback *next = c->next; if ((c->callback == callback) && (c->priv == priv)) { *p = next; free(c); return ERROR_OK; } else p = &(c->next); c = next; } return ERROR_OK; } int target_unregister_reset_callback(int (*callback)(struct target *target, enum target_reset_mode reset_mode, void *priv), void *priv) { struct target_reset_callback *entry; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; list_for_each_entry(entry, &target_reset_callback_list, list) { if (entry->callback == callback && entry->priv == priv) { list_del(&entry->list); free(entry); break; } } return ERROR_OK; } int target_unregister_trace_callback(int (*callback)(struct target *target, size_t len, uint8_t *data, void *priv), void *priv) { struct target_trace_callback *entry; if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; list_for_each_entry(entry, &target_trace_callback_list, list) { if (entry->callback == callback && entry->priv == priv) { list_del(&entry->list); free(entry); break; } } return ERROR_OK; } int target_unregister_timer_callback(int (*callback)(void *priv), void *priv) { if (callback == NULL) return ERROR_COMMAND_SYNTAX_ERROR; for (struct target_timer_callback *c 
= target_timer_callbacks; c; c = c->next) { if ((c->callback == callback) && (c->priv == priv)) { c->removed = true; return ERROR_OK; } } return ERROR_FAIL; } int target_call_event_callbacks(struct target *target, enum target_event event) { struct target_event_callback *callback = target_event_callbacks; struct target_event_callback *next_callback; if (event == TARGET_EVENT_HALTED) { /* execute early halted first */ target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT); } LOG_DEBUG("target event %i (%s)", event, Jim_Nvp_value2name_simple(nvp_target_event, event)->name); target_handle_event(target, event); while (callback) { next_callback = callback->next; callback->callback(target, event, callback->priv); callback = next_callback; } return ERROR_OK; } int target_call_reset_callbacks(struct target *target, enum target_reset_mode reset_mode) { struct target_reset_callback *callback; LOG_DEBUG("target reset %i (%s)", reset_mode, Jim_Nvp_value2name_simple(nvp_reset_modes, reset_mode)->name); list_for_each_entry(callback, &target_reset_callback_list, list) callback->callback(target, reset_mode, callback->priv); return ERROR_OK; } int target_call_trace_callbacks(struct target *target, size_t len, uint8_t *data) { struct target_trace_callback *callback; list_for_each_entry(callback, &target_trace_callback_list, list) callback->callback(target, len, data, callback->priv); return ERROR_OK; } static int target_timer_callback_periodic_restart( struct target_timer_callback *cb, struct timeval *now) { int time_ms = cb->time_ms; cb->when.tv_usec = now->tv_usec + (time_ms % 1000) * 1000; time_ms -= (time_ms % 1000); cb->when.tv_sec = now->tv_sec + time_ms / 1000; if (cb->when.tv_usec > 1000000) { cb->when.tv_usec = cb->when.tv_usec - 1000000; cb->when.tv_sec += 1; } return ERROR_OK; } static int target_call_timer_callback(struct target_timer_callback *cb, struct timeval *now) { cb->callback(cb->priv); if (cb->periodic) return target_timer_callback_periodic_restart(cb, now); return target_unregister_timer_callback(cb->callback, cb->priv); } static int target_call_timer_callbacks_check_time(int checktime) { static bool callback_processing; /* Do not allow nesting */ if (callback_processing) return ERROR_OK; callback_processing = true; keep_alive(); struct timeval now; gettimeofday(&now, NULL); /* Store an address of the place containing a pointer to the * next item; initially, that's a standalone "root of the * list" variable. */ struct target_timer_callback **callback = &target_timer_callbacks; while (*callback) { if ((*callback)->removed) { struct target_timer_callback *p = *callback; *callback = (*callback)->next; free(p); continue; } bool call_it = (*callback)->callback && ((!checktime && (*callback)->periodic) || now.tv_sec > (*callback)->when.tv_sec || (now.tv_sec == (*callback)->when.tv_sec && now.tv_usec >= (*callback)->when.tv_usec)); if (call_it) target_call_timer_callback(*callback, &now); callback = &(*callback)->next; } callback_processing = false; return ERROR_OK; } int target_call_timer_callbacks(void) { return target_call_timer_callbacks_check_time(1); } /* invoke periodic callbacks immediately */ int target_call_timer_callbacks_now(void) { return target_call_timer_callbacks_check_time(0); } /* Prints the working area layout for debug purposes */ static void print_wa_layout(struct target *target) { struct working_area *c = target->working_areas; while (c) { LOG_DEBUG("%c%c 0x%08"PRIx32"-0x%08"PRIx32" (%"PRIu32" bytes)", c->backup ? 'b' : ' ', c->free ? 
' ' : '*', c->address, c->address + c->size - 1, c->size); c = c->next; } } /* Reduce area to size bytes, create a new free area from the remaining bytes, if any. */ static void target_split_working_area(struct working_area *area, uint32_t size) { assert(area->free); /* Shouldn't split an allocated area */ assert(size <= area->size); /* Caller should guarantee this */ /* Split only if not already the right size */ if (size < area->size) { struct working_area *new_wa = malloc(sizeof(*new_wa)); if (new_wa == NULL) return; new_wa->next = area->next; new_wa->size = area->size - size; new_wa->address = area->address + size; new_wa->backup = NULL; new_wa->user = NULL; new_wa->free = true; area->next = new_wa; area->size = size; /* If backup memory was allocated to this area, it has the wrong size * now so free it and it will be reallocated if/when needed */ if (area->backup) { free(area->backup); area->backup = NULL; } } } /* Merge all adjacent free areas into one */ static void target_merge_working_areas(struct target *target) { struct working_area *c = target->working_areas; while (c && c->next) { assert(c->next->address == c->address + c->size); /* This is an invariant */ /* Find two adjacent free areas */ if (c->free && c->next->free) { /* Merge the last into the first */ c->size += c->next->size; /* Remove the last */ struct working_area *to_be_freed = c->next; c->next = c->next->next; if (to_be_freed->backup) free(to_be_freed->backup); free(to_be_freed); /* If backup memory was allocated to the remaining area, it's has * the wrong size now */ if (c->backup) { free(c->backup); c->backup = NULL; } } else { c = c->next; } } } int target_alloc_working_area_try(struct target *target, uint32_t size, struct working_area **area) { /* Reevaluate working area address based on MMU state*/ if (target->working_areas == NULL) { int retval; int enabled; retval = target->type->mmu(target, &enabled); if (retval != ERROR_OK) return retval; if (!enabled) { if (target->working_area_phys_spec) { LOG_DEBUG("MMU disabled, using physical " "address for working memory 0x%08"PRIx32, target->working_area_phys); target->working_area = target->working_area_phys; } else { LOG_ERROR("No working memory available. " "Specify -work-area-phys to target."); return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; } } else { if (target->working_area_virt_spec) { LOG_DEBUG("MMU enabled, using virtual " "address for working memory 0x%08"PRIx32, target->working_area_virt); target->working_area = target->working_area_virt; } else { LOG_ERROR("No working memory available. 
" "Specify -work-area-virt to target."); return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; } } /* Set up initial working area on first call */ struct working_area *new_wa = malloc(sizeof(*new_wa)); if (new_wa) { new_wa->next = NULL; new_wa->size = target->working_area_size & ~3UL; /* 4-byte align */ new_wa->address = target->working_area; new_wa->backup = NULL; new_wa->user = NULL; new_wa->free = true; } target->working_areas = new_wa; } /* only allocate multiples of 4 byte */ if (size % 4) size = (size + 3) & (~3UL); struct working_area *c = target->working_areas; /* Find the first large enough working area */ while (c) { if (c->free && c->size >= size) break; c = c->next; } if (c == NULL) return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; /* Split the working area into the requested size */ target_split_working_area(c, size); LOG_DEBUG("allocated new working area of %"PRIu32" bytes at address 0x%08"PRIx32, size, c->address); if (target->backup_working_area) { if (c->backup == NULL) { c->backup = malloc(c->size); if (c->backup == NULL) return ERROR_FAIL; } int retval = target_read_memory(target, c->address, 4, c->size / 4, c->backup); if (retval != ERROR_OK) return retval; } /* mark as used, and return the new (reused) area */ c->free = false; *area = c; /* user pointer */ c->user = area; print_wa_layout(target); return ERROR_OK; } int target_alloc_working_area(struct target *target, uint32_t size, struct working_area **area) { int retval; retval = target_alloc_working_area_try(target, size, area); if (retval == ERROR_TARGET_RESOURCE_NOT_AVAILABLE) LOG_WARNING("not enough working area available(requested %"PRIu32")", size); return retval; } static int target_restore_working_area(struct target *target, struct working_area *area) { int retval = ERROR_OK; if (target->backup_working_area && area->backup != NULL) { retval = target_write_memory(target, area->address, 4, area->size / 4, area->backup); if (retval != ERROR_OK) LOG_ERROR("failed to restore %"PRIu32" bytes of working area at address 0x%08"PRIx32, area->size, area->address); } return retval; } /* Restore the area's backup memory, if any, and return the area to the allocation pool */ static int target_free_working_area_restore(struct target *target, struct working_area *area, int restore) { int retval = ERROR_OK; if (area->free) return retval; if (restore) { retval = target_restore_working_area(target, area); /* REVISIT: Perhaps the area should be freed even if restoring fails. */ if (retval != ERROR_OK) return retval; } area->free = true; LOG_DEBUG("freed %"PRIu32" bytes of working area at address 0x%08"PRIx32, area->size, area->address); /* mark user pointer invalid */ /* TODO: Is this really safe? It points to some previous caller's memory. * How could we know that the area pointer is still in that place and not * some other vital data? What's the purpose of this, anyway? 
*/ *area->user = NULL; area->user = NULL; target_merge_working_areas(target); print_wa_layout(target); return retval; } int target_free_working_area(struct target *target, struct working_area *area) { return target_free_working_area_restore(target, area, 1); } void target_quit(void) { struct target_event_callback *pe = target_event_callbacks; while (pe) { struct target_event_callback *t = pe->next; free(pe); pe = t; } target_event_callbacks = NULL; struct target_timer_callback *pt = target_timer_callbacks; while (pt) { struct target_timer_callback *t = pt->next; free(pt); pt = t; } target_timer_callbacks = NULL; for (struct target *target = all_targets; target; target = target->next) { if (target->type->deinit_target) target->type->deinit_target(target); } } /* free resources and restore memory, if restoring memory fails, * free up resources anyway */ static void target_free_all_working_areas_restore(struct target *target, int restore) { struct working_area *c = target->working_areas; LOG_DEBUG("freeing all working areas"); /* Loop through all areas, restoring the allocated ones and marking them as free */ while (c) { if (!c->free) { if (restore) target_restore_working_area(target, c); c->free = true; *c->user = NULL; /* Same as above */ c->user = NULL; } c = c->next; } /* Run a merge pass to combine all areas into one */ target_merge_working_areas(target); print_wa_layout(target); } void target_free_all_working_areas(struct target *target) { target_free_all_working_areas_restore(target, 1); } /* Find the largest number of bytes that can be allocated */ uint32_t target_get_working_area_avail(struct target *target) { struct working_area *c = target->working_areas; uint32_t max_size = 0; if (c == NULL) return target->working_area_size; while (c) { if (c->free && max_size < c->size) max_size = c->size; c = c->next; } return max_size; } int target_arch_state(struct target *target) { int retval; if (target == NULL) { LOG_WARNING("No target has been configured"); return ERROR_OK; } if (target->state != TARGET_HALTED) return ERROR_OK; retval = target->type->arch_state(target); return retval; } static int target_get_gdb_fileio_info_default(struct target *target, struct gdb_fileio_info *fileio_info) { /* If target does not support semi-hosting function, target has no need to provide .get_gdb_fileio_info callback. It just return ERROR_FAIL and gdb_server will return "Txx" as target halted every time. */ return ERROR_FAIL; } static int target_gdb_fileio_end_default(struct target *target, int retcode, int fileio_errno, bool ctrl_c) { return ERROR_OK; } static int target_profiling_default(struct target *target, uint32_t *samples, uint32_t max_num_samples, uint32_t *num_samples, uint32_t seconds) { struct timeval timeout, now; gettimeofday(&timeout, NULL); timeval_add_time(&timeout, seconds, 0); LOG_INFO("Starting profiling. Halting and resuming the" " target as often as we can..."); uint32_t sample_count = 0; /* hopefully it is safe to cache! We want to stop/restart as quickly as possible. */ struct reg *reg = register_get_by_name(target->reg_cache, "pc", 1); int retval = ERROR_OK; for (;;) { target_poll(target); if (target->state == TARGET_HALTED) { uint32_t t = buf_get_u32(reg->value, 0, 32); samples[sample_count++] = t; /* current pc, addr = 0, do not handle breakpoints, not debugging */ retval = target_resume(target, 1, 0, 0, 0); target_poll(target); alive_sleep(10); /* sleep 10ms, i.e. <100 samples/second. */ } else if (target->state == TARGET_RUNNING) { /* We want to quickly sample the PC. 
*/ retval = target_halt(target); } else { LOG_INFO("Target not halted or running"); retval = ERROR_OK; break; } if (retval != ERROR_OK) break; gettimeofday(&now, NULL); if ((sample_count >= max_num_samples) || ((now.tv_sec >= timeout.tv_sec) && (now.tv_usec >= timeout.tv_usec))) { LOG_INFO("Profiling completed. %" PRIu32 " samples.", sample_count); break; } } *num_samples = sample_count; return retval; } /* Single aligned words are guaranteed to use 16 or 32 bit access * mode respectively, otherwise data is handled as quickly as * possible */ int target_write_buffer(struct target *target, uint32_t address, uint32_t size, const uint8_t *buffer) { LOG_DEBUG("writing buffer of %" PRIi32 " byte at 0x%8.8" PRIx32, size, address); if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (size == 0) return ERROR_OK; if ((address + size - 1) < address) { /* GDB can request this when e.g. PC is 0xfffffffc */ LOG_ERROR("address + size wrapped (0x%08" PRIx32 ", 0x%08" PRIx32 ")", address, size); return ERROR_FAIL; } return target->type->write_buffer(target, address, size, buffer); } static int target_write_buffer_default(struct target *target, uint32_t address, uint32_t count, const uint8_t *buffer) { uint32_t size; /* Align up to maximum 4 bytes. The loop condition makes sure the next pass * will have something to do with the size we leave to it. */ for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) { if (address & size) { int retval = target_write_memory(target, address, size, 1, buffer); if (retval != ERROR_OK) return retval; address += size; count -= size; buffer += size; } } /* Write the data with as large access size as possible. */ for (; size > 0; size /= 2) { uint32_t aligned = count - count % size; if (aligned > 0) { int retval = target_write_memory(target, address, size, aligned / size, buffer); if (retval != ERROR_OK) return retval; address += aligned; count -= aligned; buffer += aligned; } } return ERROR_OK; } /* Single aligned words are guaranteed to use 16 or 32 bit access * mode respectively, otherwise data is handled as quickly as * possible */ int target_read_buffer(struct target *target, uint32_t address, uint32_t size, uint8_t *buffer) { LOG_DEBUG("reading buffer of %" PRIi32 " byte at 0x%8.8" PRIx32, size, address); if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (size == 0) return ERROR_OK; if ((address + size - 1) < address) { /* GDB can request this when e.g. PC is 0xfffffffc */ LOG_ERROR("address + size wrapped (0x%08" PRIx32 ", 0x%08" PRIx32 ")", address, size); return ERROR_FAIL; } return target->type->read_buffer(target, address, size, buffer); } static int target_read_buffer_default(struct target *target, uint32_t address, uint32_t count, uint8_t *buffer) { uint32_t size; /* Align up to maximum 4 bytes. The loop condition makes sure the next pass * will have something to do with the size we leave to it. */ for (size = 1; size < 4 && count >= size * 2 + (address & size); size *= 2) { if (address & size) { int retval = target_read_memory(target, address, size, 1, buffer); if (retval != ERROR_OK) return retval; address += size; count -= size; buffer += size; } } /* Read the data with as large access size as possible. 
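 *
 * Worked example (hypothetical request): address = 0x1001, count = 10.
 * The head loop above issues a 1-byte access at 0x1001 and a 2-byte
 * access at 0x1002; the loop below then issues one 4-byte access at
 * 0x1004, one 2-byte access at 0x1008 and one 1-byte access at 0x100a,
 * covering all 10 bytes with the widest accesses the alignment allows.
 *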
*/ for (; size > 0; size /= 2) { uint32_t aligned = count - count % size; if (aligned > 0) { int retval = target_read_memory(target, address, size, aligned / size, buffer); if (retval != ERROR_OK) return retval; address += aligned; count -= aligned; buffer += aligned; } } return ERROR_OK; } int target_checksum_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* crc) { uint8_t *buffer; int retval; uint32_t i; uint32_t checksum = 0; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } retval = target->type->checksum_memory(target, address, size, &checksum); if (retval != ERROR_OK) { buffer = malloc(size); if (buffer == NULL) { LOG_ERROR("error allocating buffer for section (%" PRId32 " bytes)", size); return ERROR_COMMAND_SYNTAX_ERROR; } retval = target_read_buffer(target, address, size, buffer); if (retval != ERROR_OK) { free(buffer); return retval; } /* convert to target endianness */ for (i = 0; i < (size/sizeof(uint32_t)); i++) { uint32_t target_data; target_data = target_buffer_get_u32(target, &buffer[i*sizeof(uint32_t)]); target_buffer_set_u32(target, &buffer[i*sizeof(uint32_t)], target_data); } retval = image_calculate_checksum(buffer, size, &checksum); free(buffer); } *crc = checksum; return retval; } int target_blank_check_memory(struct target *target, uint32_t address, uint32_t size, uint32_t* blank, uint8_t erased_value) { int retval; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } if (target->type->blank_check_memory == 0) return ERROR_TARGET_RESOURCE_NOT_AVAILABLE; retval = target->type->blank_check_memory(target, address, size, blank, erased_value); return retval; } int target_read_u64(struct target *target, uint64_t address, uint64_t *value) { uint8_t value_buf[8]; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } int retval = target_read_memory(target, address, 8, 1, value_buf); if (retval == ERROR_OK) { *value = target_buffer_get_u64(target, value_buf); LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "", address, *value); } else { *value = 0x0; LOG_DEBUG("address: 0x%" PRIx64 " failed", address); } return retval; } int target_read_u32(struct target *target, uint32_t address, uint32_t *value) { uint8_t value_buf[4]; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } int retval = target_read_memory(target, address, 4, 1, value_buf); if (retval == ERROR_OK) { *value = target_buffer_get_u32(target, value_buf); LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "", address, *value); } else { *value = 0x0; LOG_DEBUG("address: 0x%8.8" PRIx32 " failed", address); } return retval; } int target_read_u16(struct target *target, uint32_t address, uint16_t *value) { uint8_t value_buf[2]; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } int retval = target_read_memory(target, address, 2, 1, value_buf); if (retval == ERROR_OK) { *value = target_buffer_get_u16(target, value_buf); LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%4.4" PRIx16, address, *value); } else { *value = 0x0; LOG_DEBUG("address: 0x%8.8" PRIx32 " failed", address); } return retval; } int target_read_u8(struct target *target, uint32_t address, uint8_t *value) { if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } int retval = target_read_memory(target, address, 1, 1, value); if (retval == ERROR_OK) { LOG_DEBUG("address: 
0x%8.8" PRIx32 ", value: 0x%2.2" PRIx8, address, *value); } else { *value = 0x0; LOG_DEBUG("address: 0x%8.8" PRIx32 " failed", address); } return retval; } int target_write_u64(struct target *target, uint64_t address, uint64_t value) { int retval; uint8_t value_buf[8]; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } LOG_DEBUG("address: 0x%" PRIx64 ", value: 0x%16.16" PRIx64 "", address, value); target_buffer_set_u64(target, value_buf, value); retval = target_write_memory(target, address, 8, 1, value_buf); if (retval != ERROR_OK) LOG_DEBUG("failed: %i", retval); return retval; } int target_write_u32(struct target *target, uint32_t address, uint32_t value) { int retval; uint8_t value_buf[4]; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx32 "", address, value); target_buffer_set_u32(target, value_buf, value); retval = target_write_memory(target, address, 4, 1, value_buf); if (retval != ERROR_OK) LOG_DEBUG("failed: %i", retval); return retval; } int target_write_u16(struct target *target, uint32_t address, uint16_t value) { int retval; uint8_t value_buf[2]; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%8.8" PRIx16, address, value); target_buffer_set_u16(target, value_buf, value); retval = target_write_memory(target, address, 2, 1, value_buf); if (retval != ERROR_OK) LOG_DEBUG("failed: %i", retval); return retval; } int target_write_u8(struct target *target, uint32_t address, uint8_t value) { int retval; if (!target_was_examined(target)) { LOG_ERROR("Target not examined yet"); return ERROR_FAIL; } LOG_DEBUG("address: 0x%8.8" PRIx32 ", value: 0x%2.2" PRIx8, address, value); retval = target_write_memory(target, address, 1, 1, &value); if (retval != ERROR_OK) LOG_DEBUG("failed: %i", retval); return retval; } static int find_target(struct command_context *cmd_ctx, const char *name) { struct target *target = get_target(name); if (target == NULL) { LOG_ERROR("Target: %s is unknown, try one of:\n", name); return ERROR_FAIL; } if (!target->tap->enabled) { LOG_USER("Target: TAP %s is disabled, " "can't be the current target\n", target->tap->dotted_name); return ERROR_FAIL; } cmd_ctx->current_target = target->target_number; return ERROR_OK; } COMMAND_HANDLER(handle_targets_command) { int retval = ERROR_OK; if (CMD_ARGC == 1) { retval = find_target(CMD_CTX, CMD_ARGV[0]); if (retval == ERROR_OK) { /* we're done! */ return retval; } } struct target *target = all_targets; command_print(CMD_CTX, " TargetName Type Endian TapName State "); command_print(CMD_CTX, "-- ------------------ ---------- ------ ------------------ ------------"); while (target) { const char *state; char marker = ' '; if (target->tap->enabled) state = target_state_name(target); else state = "tap-disabled"; if (CMD_CTX->current_target == target->target_number) marker = '*'; /* keep columns lined up to match the headers above */ command_print(CMD_CTX, "%2d%c %-18s %-10s %-6s %-18s %s", target->target_number, marker, target_name(target), target_type_name(target), Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness)->name, target->tap->dotted_name, state); target = target->next; } return retval; } /* every 300ms we check for reset & powerdropout and issue a "reset halt" if so. 
*/ static int powerDropout; static int srstAsserted; static int runPowerRestore; static int runPowerDropout; static int runSrstAsserted; static int runSrstDeasserted; static int sense_handler(void) { static int prevSrstAsserted; static int prevPowerdropout; int retval = jtag_power_dropout(&powerDropout); if (retval != ERROR_OK) return retval; int powerRestored; powerRestored = prevPowerdropout && !powerDropout; if (powerRestored) runPowerRestore = 1; int64_t current = timeval_ms(); static int64_t lastPower; bool waitMore = lastPower + 2000 > current; if (powerDropout && !waitMore) { runPowerDropout = 1; lastPower = current; } retval = jtag_srst_asserted(&srstAsserted); if (retval != ERROR_OK) return retval; int srstDeasserted; srstDeasserted = prevSrstAsserted && !srstAsserted; static int64_t lastSrst; waitMore = lastSrst + 2000 > current; if (srstDeasserted && !waitMore) { runSrstDeasserted = 1; lastSrst = current; } if (!prevSrstAsserted && srstAsserted) runSrstAsserted = 1; prevSrstAsserted = srstAsserted; prevPowerdropout = powerDropout; if (srstDeasserted || powerRestored) { /* Other than logging the event we can't do anything here. * Issuing a reset is a particularly bad idea as we might * be inside a reset already. */ } return ERROR_OK; } /* process target state changes */ static int handle_target(void *priv) { Jim_Interp *interp = (Jim_Interp *)priv; int retval = ERROR_OK; if (!is_jtag_poll_safe()) { /* polling is disabled currently */ return ERROR_OK; } /* we do not want to recurse here... */ static int recursive; if (!recursive) { recursive = 1; sense_handler(); /* danger! running these procedures can trigger srst assertions and power dropouts. * We need to avoid an infinite loop/recursion here and we do that by * clearing the flags after running these events. */ int did_something = 0; if (runSrstAsserted) { LOG_INFO("srst asserted detected, running srst_asserted proc."); Jim_Eval(interp, "srst_asserted"); did_something = 1; } if (runSrstDeasserted) { Jim_Eval(interp, "srst_deasserted"); did_something = 1; } if (runPowerDropout) { LOG_INFO("Power dropout detected, running power_dropout proc."); Jim_Eval(interp, "power_dropout"); did_something = 1; } if (runPowerRestore) { Jim_Eval(interp, "power_restore"); did_something = 1; } if (did_something) { /* clear detect flags */ sense_handler(); } /* clear action flags */ runSrstAsserted = 0; runSrstDeasserted = 0; runPowerRestore = 0; runPowerDropout = 0; recursive = 0; } /* Poll targets for state changes unless that's globally disabled. * Skip targets that are currently disabled. */ for (struct target *target = all_targets; is_jtag_poll_safe() && target; target = target->next) { if (!target_was_examined(target)) continue; if (!target->tap->enabled) continue; if (target->backoff.times > target->backoff.count) { /* do not poll this time as we failed previously */ target->backoff.count++; continue; } target->backoff.count = 0; /* only poll target if we've got power and srst isn't asserted */ if (!powerDropout && !srstAsserted) { /* polling may fail silently until the target has been examined */ retval = target_poll(target); if (retval != ERROR_OK) { /* 100ms polling interval. Increase interval between polling up to 5000ms */ if (target->backoff.times * polling_interval < 5000) { target->backoff.times *= 2; target->backoff.times++; } /* Tell GDB to halt the debugger. This allows the user to * run monitor commands to handle the situation. 
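* The request is delivered via the TARGET_EVENT_GDB_HALT callback fired just below.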
*/ target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT); } if (target->backoff.times > 0) { LOG_USER("Polling target %s failed, trying to reexamine", target_name(target)); target_reset_examined(target); retval = target_examine_one(target); /* Target examination could have failed due to unstable connection, * but we set the examined flag anyway to repoll it later */ if (retval != ERROR_OK) { target->examined = true; LOG_USER("Examination failed, GDB will be halted. Polling again in %dms", target->backoff.times * polling_interval); return retval; } } /* Since we succeeded, we reset backoff count */ target->backoff.times = 0; } } return retval; } COMMAND_HANDLER(handle_reg_command) { struct target *target; struct reg *reg = NULL; unsigned count = 0; char *value; LOG_DEBUG("-"); target = get_current_target(CMD_CTX); /* list all available registers for the current target */ if (CMD_ARGC == 0) { struct reg_cache *cache = target->reg_cache; count = 0; while (cache) { unsigned i; command_print(CMD_CTX, "===== %s", cache->name); for (i = 0, reg = cache->reg_list; i < cache->num_regs; i++, reg++, count++) { /* only print cached values if they are valid */ if (reg->valid) { value = buf_to_str(reg->value, reg->size, 16); command_print(CMD_CTX, "(%i) %s (/%" PRIu32 "): 0x%s%s", count, reg->name, reg->size, value, reg->dirty ? " (dirty)" : ""); free(value); } else { command_print(CMD_CTX, "(%i) %s (/%" PRIu32 ")", count, reg->name, reg->size) ; } } cache = cache->next; } return ERROR_OK; } /* access a single register by its ordinal number */ if ((CMD_ARGV[0][0] >= '0') && (CMD_ARGV[0][0] <= '9')) { unsigned num; COMMAND_PARSE_NUMBER(uint, CMD_ARGV[0], num); struct reg_cache *cache = target->reg_cache; count = 0; while (cache) { unsigned i; for (i = 0; i < cache->num_regs; i++) { if (count++ == num) { reg = &cache->reg_list[i]; break; } } if (reg) break; cache = cache->next; } if (!reg) { command_print(CMD_CTX, "%i is out of bounds, the current target " "has only %i registers (0 - %i)", num, count, count - 1); return ERROR_OK; } } else { /* access a single register by its name */ reg = register_get_by_name(target->reg_cache, CMD_ARGV[0], 1); if (!reg) { command_print(CMD_CTX, "register %s not found in current target", CMD_ARGV[0]); return ERROR_OK; } } assert(reg != NULL); /* give clang a hint that we *know* reg is != NULL here */ /* display a register */ if ((CMD_ARGC == 1) || ((CMD_ARGC == 2) && !((CMD_ARGV[1][0] >= '0') && (CMD_ARGV[1][0] <= '9')))) { if ((CMD_ARGC == 2) && (strcmp(CMD_ARGV[1], "force") == 0)) reg->valid = 0; if (reg->valid == 0) reg->type->get(reg); value = buf_to_str(reg->value, reg->size, 16); command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value); free(value); return ERROR_OK; } /* set register value */ if (CMD_ARGC == 2) { uint8_t *buf = malloc(DIV_ROUND_UP(reg->size, 8)); if (buf == NULL) return ERROR_FAIL; str_to_buf(CMD_ARGV[1], strlen(CMD_ARGV[1]), buf, reg->size, 0); reg->type->set(reg, buf); value = buf_to_str(reg->value, reg->size, 16); command_print(CMD_CTX, "%s (/%i): 0x%s", reg->name, (int)(reg->size), value); free(value); free(buf); return ERROR_OK; } return ERROR_COMMAND_SYNTAX_ERROR; } COMMAND_HANDLER(handle_poll_command) { int retval = ERROR_OK; struct target *target = get_current_target(CMD_CTX); if (CMD_ARGC == 0) { command_print(CMD_CTX, "background polling: %s", jtag_poll_get_enabled() ? "on" : "off"); command_print(CMD_CTX, "TAP: %s (%s)", target->tap->dotted_name, target->tap->enabled ? 
"enabled" : "disabled"); if (!target->tap->enabled) return ERROR_OK; retval = target_poll(target); if (retval != ERROR_OK) return retval; retval = target_arch_state(target); if (retval != ERROR_OK) return retval; } else if (CMD_ARGC == 1) { bool enable; COMMAND_PARSE_ON_OFF(CMD_ARGV[0], enable); jtag_poll_set_enabled(enable); } else return ERROR_COMMAND_SYNTAX_ERROR; return retval; } COMMAND_HANDLER(handle_wait_halt_command) { if (CMD_ARGC > 1) return ERROR_COMMAND_SYNTAX_ERROR; unsigned ms = DEFAULT_HALT_TIMEOUT; if (1 == CMD_ARGC) { int retval = parse_uint(CMD_ARGV[0], &ms); if (ERROR_OK != retval) return ERROR_COMMAND_SYNTAX_ERROR; } struct target *target = get_current_target(CMD_CTX); return target_wait_state(target, TARGET_HALTED, ms); } /* wait for target state to change. The trick here is to have a low * latency for short waits and not to suck up all the CPU time * on longer waits. * * After 500ms, keep_alive() is invoked */ int target_wait_state(struct target *target, enum target_state state, int ms) { int retval; int64_t then = 0, cur; bool once = true; for (;;) { retval = target_poll(target); if (retval != ERROR_OK) return retval; if (target->state == state) break; cur = timeval_ms(); if (once) { once = false; then = timeval_ms(); LOG_DEBUG("waiting for target %s...", Jim_Nvp_value2name_simple(nvp_target_state, state)->name); } if (cur-then > 500) keep_alive(); if ((cur-then) > ms) { LOG_ERROR("timed out while waiting for target %s", Jim_Nvp_value2name_simple(nvp_target_state, state)->name); return ERROR_FAIL; } } return ERROR_OK; } COMMAND_HANDLER(handle_halt_command) { LOG_DEBUG("-"); struct target *target = get_current_target(CMD_CTX); int retval = target_halt(target); if (ERROR_OK != retval) return retval; if (CMD_ARGC == 1) { unsigned wait_local; retval = parse_uint(CMD_ARGV[0], &wait_local); if (ERROR_OK != retval) return ERROR_COMMAND_SYNTAX_ERROR; if (!wait_local) return ERROR_OK; } return CALL_COMMAND_HANDLER(handle_wait_halt_command); } COMMAND_HANDLER(handle_soft_reset_halt_command) { struct target *target = get_current_target(CMD_CTX); LOG_USER("requesting target halt and executing a soft reset"); target_soft_reset_halt(target); return ERROR_OK; } COMMAND_HANDLER(handle_reset_command) { if (CMD_ARGC > 1) return ERROR_COMMAND_SYNTAX_ERROR; enum target_reset_mode reset_mode = RESET_RUN; if (CMD_ARGC == 1) { const Jim_Nvp *n; n = Jim_Nvp_name2value_simple(nvp_reset_modes, CMD_ARGV[0]); if ((n->name == NULL) || (n->value == RESET_UNKNOWN)) return ERROR_COMMAND_SYNTAX_ERROR; reset_mode = n->value; } /* reset *all* targets */ return target_process_reset(CMD_CTX, reset_mode); } COMMAND_HANDLER(handle_resume_command) { int current = 1; if (CMD_ARGC > 1) return ERROR_COMMAND_SYNTAX_ERROR; struct target *target = get_current_target(CMD_CTX); /* with no CMD_ARGV, resume from current pc, addr = 0, * with one arguments, addr = CMD_ARGV[0], * handle breakpoints, not debugging */ uint32_t addr = 0; if (CMD_ARGC == 1) { COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); current = 0; } return target_resume(target, current, addr, 1, 0); } COMMAND_HANDLER(handle_step_command) { if (CMD_ARGC > 1) return ERROR_COMMAND_SYNTAX_ERROR; LOG_DEBUG("-"); /* with no CMD_ARGV, step from current pc, addr = 0, * with one argument addr = CMD_ARGV[0], * handle breakpoints, debugging */ uint32_t addr = 0; int current_pc = 1; if (CMD_ARGC == 1) { COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); current_pc = 0; } struct target *target = get_current_target(CMD_CTX); return target->type->step(target, current_pc, 
addr, 1); } static void handle_md_output(struct command_context *cmd_ctx, struct target *target, uint32_t address, unsigned size, unsigned count, const uint8_t *buffer) { const unsigned line_bytecnt = 32; unsigned line_modulo = line_bytecnt / size; char output[line_bytecnt * 4 + 1]; unsigned output_len = 0; const char *value_fmt; switch (size) { case 4: value_fmt = "%8.8x "; break; case 2: value_fmt = "%4.4x "; break; case 1: value_fmt = "%2.2x "; break; default: /* "can't happen", caller checked */ LOG_ERROR("invalid memory read size: %u", size); return; } for (unsigned i = 0; i < count; i++) { if (i % line_modulo == 0) { output_len += snprintf(output + output_len, sizeof(output) - output_len, "0x%8.8x: ", (unsigned)(address + (i*size))); } uint32_t value = 0; const uint8_t *value_ptr = buffer + i * size; switch (size) { case 4: value = target_buffer_get_u32(target, value_ptr); break; case 2: value = target_buffer_get_u16(target, value_ptr); break; case 1: value = *value_ptr; } output_len += snprintf(output + output_len, sizeof(output) - output_len, value_fmt, value); if ((i % line_modulo == line_modulo - 1) || (i == count - 1)) { command_print(cmd_ctx, "%s", output); output_len = 0; } } } COMMAND_HANDLER(handle_md_command) { if (CMD_ARGC < 1) return ERROR_COMMAND_SYNTAX_ERROR; unsigned size = 0; switch (CMD_NAME[2]) { case 'w': size = 4; break; case 'h': size = 2; break; case 'b': size = 1; break; default: return ERROR_COMMAND_SYNTAX_ERROR; } bool physical = strcmp(CMD_ARGV[0], "phys") == 0; int (*fn)(struct target *target, uint32_t address, uint32_t size_value, uint32_t count, uint8_t *buffer); if (physical) { CMD_ARGC--; CMD_ARGV++; fn = target_read_phys_memory; } else fn = target_read_memory; if ((CMD_ARGC < 1) || (CMD_ARGC > 2)) return ERROR_COMMAND_SYNTAX_ERROR; uint32_t address; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address); unsigned count = 1; if (CMD_ARGC == 2) COMMAND_PARSE_NUMBER(uint, CMD_ARGV[1], count); uint8_t *buffer = calloc(count, size); struct target *target = get_current_target(CMD_CTX); int retval = fn(target, address, size, count, buffer); if (ERROR_OK == retval) handle_md_output(CMD_CTX, target, address, size, count, buffer); free(buffer); return retval; } typedef int (*target_write_fn)(struct target *target, uint32_t address, uint32_t size, uint32_t count, const uint8_t *buffer); static int target_fill_mem(struct target *target, uint32_t address, target_write_fn fn, unsigned data_size, /* value */ uint32_t b, /* count */ unsigned c) { /* We have to write in reasonably large chunks to be able * to fill large memory areas with any sane speed */ const unsigned chunk_size = 16384; uint8_t *target_buf = malloc(chunk_size * data_size); if (target_buf == NULL) { LOG_ERROR("Out of memory"); return ERROR_FAIL; } for (unsigned i = 0; i < chunk_size; i++) { switch (data_size) { case 4: target_buffer_set_u32(target, target_buf + i * data_size, b); break; case 2: target_buffer_set_u16(target, target_buf + i * data_size, b); break; case 1: target_buffer_set_u8(target, target_buf + i * data_size, b); break; default: exit(-1); } } int retval = ERROR_OK; for (unsigned x = 0; x < c; x += chunk_size) { unsigned current; current = c - x; if (current > chunk_size) current = chunk_size; retval = fn(target, address + x * data_size, data_size, current, target_buf); if (retval != ERROR_OK) break; /* avoid GDB timeouts */ keep_alive(); } free(target_buf); return retval; } COMMAND_HANDLER(handle_mw_command) { if (CMD_ARGC < 2) return ERROR_COMMAND_SYNTAX_ERROR; bool physical = 
strcmp(CMD_ARGV[0], "phys") == 0; target_write_fn fn; if (physical) { CMD_ARGC--; CMD_ARGV++; fn = target_write_phys_memory; } else fn = target_write_memory; if ((CMD_ARGC < 2) || (CMD_ARGC > 3)) return ERROR_COMMAND_SYNTAX_ERROR; uint32_t address; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], address); uint32_t value; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], value); unsigned count = 1; if (CMD_ARGC == 3) COMMAND_PARSE_NUMBER(uint, CMD_ARGV[2], count); struct target *target = get_current_target(CMD_CTX); unsigned wordsize; switch (CMD_NAME[2]) { case 'w': wordsize = 4; break; case 'h': wordsize = 2; break; case 'b': wordsize = 1; break; default: return ERROR_COMMAND_SYNTAX_ERROR; } return target_fill_mem(target, address, fn, wordsize, value, count); } static COMMAND_HELPER(parse_load_image_command_CMD_ARGV, struct image *image, uint32_t *min_address, uint32_t *max_address) { if (CMD_ARGC < 1 || CMD_ARGC > 5) return ERROR_COMMAND_SYNTAX_ERROR; /* a base address isn't always necessary, * default to 0x0 (i.e. don't relocate) */ if (CMD_ARGC >= 2) { uint32_t addr; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr); image->base_address = addr; image->base_address_set = 1; } else image->base_address_set = 0; image->start_address_set = 0; if (CMD_ARGC >= 4) COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], *min_address); if (CMD_ARGC == 5) { COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], *max_address); /* use size (given) to find max (required) */ *max_address += *min_address; } if (*min_address > *max_address) return ERROR_COMMAND_SYNTAX_ERROR; return ERROR_OK; } COMMAND_HANDLER(handle_load_image_command) { uint8_t *buffer; size_t buf_cnt; uint32_t image_size; uint32_t min_address = 0; uint32_t max_address = 0xffffffff; int i; struct image image; int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV, &image, &min_address, &max_address); if (ERROR_OK != retval) return retval; struct target *target = get_current_target(CMD_CTX); struct duration bench; duration_start(&bench); if (image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL) != ERROR_OK) return ERROR_FAIL; image_size = 0x0; retval = ERROR_OK; for (i = 0; i < image.num_sections; i++) { buffer = malloc(image.sections[i].size); if (buffer == NULL) { command_print(CMD_CTX, "error allocating buffer for section (%d bytes)", (int)(image.sections[i].size)); retval = ERROR_FAIL; break; } retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt); if (retval != ERROR_OK) { free(buffer); break; } uint32_t offset = 0; uint32_t length = buf_cnt; /* DANGER!!! beware of unsigned comparision here!!! 
*/ if ((image.sections[i].base_address + buf_cnt >= min_address) && (image.sections[i].base_address < max_address)) { if (image.sections[i].base_address < min_address) { /* clip addresses below */ offset += min_address-image.sections[i].base_address; length -= offset; } if (image.sections[i].base_address + buf_cnt > max_address) length -= (image.sections[i].base_address + buf_cnt)-max_address; retval = target_write_buffer(target, image.sections[i].base_address + offset, length, buffer + offset); if (retval != ERROR_OK) { free(buffer); break; } image_size += length; command_print(CMD_CTX, "%u bytes written at address 0x%8.8" PRIx32 "", (unsigned int)length, image.sections[i].base_address + offset); } free(buffer); } if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) { command_print(CMD_CTX, "downloaded %" PRIu32 " bytes " "in %fs (%0.3f KiB/s)", image_size, duration_elapsed(&bench), duration_kbps(&bench, image_size)); } image_close(&image); return retval; } COMMAND_HANDLER(handle_dump_image_command) { struct fileio *fileio; uint8_t *buffer; int retval, retvaltemp; uint32_t address, size; struct duration bench; struct target *target = get_current_target(CMD_CTX); if (CMD_ARGC != 3) return ERROR_COMMAND_SYNTAX_ERROR; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], address); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], size); uint32_t buf_size = (size > 4096) ? 4096 : size; buffer = malloc(buf_size); if (!buffer) return ERROR_FAIL; retval = fileio_open(&fileio, CMD_ARGV[0], FILEIO_WRITE, FILEIO_BINARY); if (retval != ERROR_OK) { free(buffer); return retval; } duration_start(&bench); while (size > 0) { size_t size_written; uint32_t this_run_size = (size > buf_size) ? buf_size : size; retval = target_read_buffer(target, address, this_run_size, buffer); if (retval != ERROR_OK) break; retval = fileio_write(fileio, this_run_size, buffer, &size_written); if (retval != ERROR_OK) break; size -= this_run_size; address += this_run_size; } free(buffer); if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) { size_t filesize; retval = fileio_size(fileio, &filesize); if (retval != ERROR_OK) return retval; command_print(CMD_CTX, "dumped %zu bytes in %fs (%0.3f KiB/s)", filesize, duration_elapsed(&bench), duration_kbps(&bench, filesize)); } retvaltemp = fileio_close(fileio); if (retvaltemp != ERROR_OK) return retvaltemp; return retval; } enum verify_mode { IMAGE_TEST = 0, IMAGE_VERIFY = 1, IMAGE_CHECKSUM_ONLY = 2 }; static COMMAND_HELPER(handle_verify_image_command_internal, enum verify_mode verify) { uint8_t *buffer; size_t buf_cnt; uint32_t image_size; int i; int retval; uint32_t checksum = 0; uint32_t mem_checksum = 0; struct image image; struct target *target = get_current_target(CMD_CTX); if (CMD_ARGC < 1) return ERROR_COMMAND_SYNTAX_ERROR; if (!target) { LOG_ERROR("no target selected"); return ERROR_FAIL; } struct duration bench; duration_start(&bench); if (CMD_ARGC >= 2) { uint32_t addr; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], addr); image.base_address = addr; image.base_address_set = 1; } else { image.base_address_set = 0; image.base_address = 0x0; } image.start_address_set = 0; retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC == 3) ? 
CMD_ARGV[2] : NULL); if (retval != ERROR_OK) return retval; image_size = 0x0; int diffs = 0; retval = ERROR_OK; for (i = 0; i < image.num_sections; i++) { buffer = malloc(image.sections[i].size); if (buffer == NULL) { command_print(CMD_CTX, "error allocating buffer for section (%d bytes)", (int)(image.sections[i].size)); break; } retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt); if (retval != ERROR_OK) { free(buffer); break; } if (verify >= IMAGE_VERIFY) { /* calculate checksum of image */ retval = image_calculate_checksum(buffer, buf_cnt, &checksum); if (retval != ERROR_OK) { free(buffer); break; } retval = target_checksum_memory(target, image.sections[i].base_address, buf_cnt, &mem_checksum); if (retval != ERROR_OK) { free(buffer); break; } if ((checksum != mem_checksum) && (verify == IMAGE_CHECKSUM_ONLY)) { LOG_ERROR("checksum mismatch"); free(buffer); retval = ERROR_FAIL; goto done; } if (checksum != mem_checksum) { /* failed crc checksum, fall back to a binary compare */ uint8_t *data; if (diffs == 0) LOG_ERROR("checksum mismatch - attempting binary compare"); data = malloc(buf_cnt); /* Can we use 32bit word accesses? */ int size = 1; int count = buf_cnt; if ((count % 4) == 0) { size *= 4; count /= 4; } retval = target_read_memory(target, image.sections[i].base_address, size, count, data); if (retval == ERROR_OK) { uint32_t t; for (t = 0; t < buf_cnt; t++) { if (data[t] != buffer[t]) { command_print(CMD_CTX, "diff %d address 0x%08x. Was 0x%02x instead of 0x%02x", diffs, (unsigned)(t + image.sections[i].base_address), data[t], buffer[t]); if (diffs++ >= 127) { command_print(CMD_CTX, "More than 128 errors, the rest are not printed."); free(data); free(buffer); goto done; } } keep_alive(); } } free(data); } } else { command_print(CMD_CTX, "address 0x%08" PRIx32 " length 0x%08zx", image.sections[i].base_address, buf_cnt); } free(buffer); image_size += buf_cnt; } if (diffs > 0) command_print(CMD_CTX, "No more differences found."); done: if (diffs > 0) retval = ERROR_FAIL; if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) { command_print(CMD_CTX, "verified %" PRIu32 " bytes " "in %fs (%0.3f KiB/s)", image_size, duration_elapsed(&bench), duration_kbps(&bench, image_size)); } image_close(&image); return retval; } COMMAND_HANDLER(handle_verify_image_checksum_command) { return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_CHECKSUM_ONLY); } COMMAND_HANDLER(handle_verify_image_command) { return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_VERIFY); } COMMAND_HANDLER(handle_test_image_command) { return CALL_COMMAND_HANDLER(handle_verify_image_command_internal, IMAGE_TEST); } static int handle_bp_command_list(struct command_context *cmd_ctx) { struct target *target = get_current_target(cmd_ctx); struct breakpoint *breakpoint = target->breakpoints; while (breakpoint) { if (breakpoint->type == BKPT_SOFT) { char *buf = buf_to_str(breakpoint->orig_instr, breakpoint->length, 16); command_print(cmd_ctx, "IVA breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i, 0x%s", breakpoint->address, breakpoint->length, breakpoint->set, buf); free(buf); } else { if ((breakpoint->address == 0) && (breakpoint->asid != 0)) command_print(cmd_ctx, "Context breakpoint: 0x%8.8" PRIx32 ", 0x%x, %i", breakpoint->asid, breakpoint->length, breakpoint->set); else if ((breakpoint->address != 0) && (breakpoint->asid != 0)) { command_print(cmd_ctx, "Hybrid breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i", breakpoint->address, breakpoint->length, 
breakpoint->set); command_print(cmd_ctx, "\t|--->linked with ContextID: 0x%8.8" PRIx32, breakpoint->asid); } else command_print(cmd_ctx, "Breakpoint(IVA): 0x%8.8" PRIx32 ", 0x%x, %i", breakpoint->address, breakpoint->length, breakpoint->set); } breakpoint = breakpoint->next; } return ERROR_OK; } static int handle_bp_command_set(struct command_context *cmd_ctx, uint32_t addr, uint32_t asid, uint32_t length, int hw) { struct target *target = get_current_target(cmd_ctx); int retval; if (asid == 0) { retval = breakpoint_add(target, addr, length, hw); if (ERROR_OK == retval) command_print(cmd_ctx, "breakpoint set at 0x%8.8" PRIx32 "", addr); else { LOG_ERROR("Failure setting breakpoint, the same address(IVA) is already used"); return retval; } } else if (addr == 0) { if (target->type->add_context_breakpoint == NULL) { LOG_WARNING("Context breakpoint not available"); return ERROR_OK; } retval = context_breakpoint_add(target, asid, length, hw); if (ERROR_OK == retval) command_print(cmd_ctx, "Context breakpoint set at 0x%8.8" PRIx32 "", asid); else { LOG_ERROR("Failure setting breakpoint, the same address(CONTEXTID) is already used"); return retval; } } else { if (target->type->add_hybrid_breakpoint == NULL) { LOG_WARNING("Hybrid breakpoint not available"); return ERROR_OK; } retval = hybrid_breakpoint_add(target, addr, asid, length, hw); if (ERROR_OK == retval) command_print(cmd_ctx, "Hybrid breakpoint set at 0x%8.8" PRIx32 "", asid); else { LOG_ERROR("Failure setting breakpoint, the same address is already used"); return retval; } } return ERROR_OK; } COMMAND_HANDLER(handle_bp_command) { uint32_t addr; uint32_t asid; uint32_t length; int hw = BKPT_SOFT; switch (CMD_ARGC) { case 0: return handle_bp_command_list(CMD_CTX); case 2: asid = 0; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length); return handle_bp_command_set(CMD_CTX, addr, asid, length, hw); case 3: if (strcmp(CMD_ARGV[2], "hw") == 0) { hw = BKPT_HARD; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length); asid = 0; return handle_bp_command_set(CMD_CTX, addr, asid, length, hw); } else if (strcmp(CMD_ARGV[2], "hw_ctx") == 0) { hw = BKPT_HARD; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], asid); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length); addr = 0; return handle_bp_command_set(CMD_CTX, addr, asid, length, hw); } case 4: hw = BKPT_HARD; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], asid); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], length); return handle_bp_command_set(CMD_CTX, addr, asid, length, hw); default: return ERROR_COMMAND_SYNTAX_ERROR; } } COMMAND_HANDLER(handle_rbp_command) { if (CMD_ARGC != 1) return ERROR_COMMAND_SYNTAX_ERROR; uint32_t addr; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); struct target *target = get_current_target(CMD_CTX); breakpoint_remove(target, addr); return ERROR_OK; } COMMAND_HANDLER(handle_wp_command) { struct target *target = get_current_target(CMD_CTX); if (CMD_ARGC == 0) { struct watchpoint *watchpoint = target->watchpoints; while (watchpoint) { command_print(CMD_CTX, "address: 0x%8.8" PRIx32 ", len: 0x%8.8" PRIx32 ", r/w/a: %i, value: 0x%8.8" PRIx32 ", mask: 0x%8.8" PRIx32, watchpoint->address, watchpoint->length, (int)watchpoint->rw, watchpoint->value, watchpoint->mask); watchpoint = watchpoint->next; } return ERROR_OK; } enum watchpoint_rw type = WPT_ACCESS; uint32_t addr = 0; uint32_t length = 0; uint32_t data_value = 0x0; uint32_t data_mask = 0xffffffff; switch (CMD_ARGC) { case 5: 
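/* five arguments: addr length (r|w|a) value mask -- parse the optional mask first, then fall through for the rest */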
COMMAND_PARSE_NUMBER(u32, CMD_ARGV[4], data_mask); /* fall through */ case 4: COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], data_value); /* fall through */ case 3: switch (CMD_ARGV[2][0]) { case 'r': type = WPT_READ; break; case 'w': type = WPT_WRITE; break; case 'a': type = WPT_ACCESS; break; default: LOG_ERROR("invalid watchpoint mode ('%c')", CMD_ARGV[2][0]); return ERROR_COMMAND_SYNTAX_ERROR; } /* fall through */ case 2: COMMAND_PARSE_NUMBER(u32, CMD_ARGV[1], length); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); break; default: return ERROR_COMMAND_SYNTAX_ERROR; } int retval = watchpoint_add(target, addr, length, type, data_value, data_mask); if (ERROR_OK != retval) LOG_ERROR("Failure setting watchpoints"); return retval; } COMMAND_HANDLER(handle_rwp_command) { if (CMD_ARGC != 1) return ERROR_COMMAND_SYNTAX_ERROR; uint32_t addr; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], addr); struct target *target = get_current_target(CMD_CTX); watchpoint_remove(target, addr); return ERROR_OK; } /** * Translate a virtual address to a physical address. * * The low-level target implementation must have logged a detailed error * which is forwarded to telnet/GDB session. */ COMMAND_HANDLER(handle_virt2phys_command) { if (CMD_ARGC != 1) return ERROR_COMMAND_SYNTAX_ERROR; uint32_t va; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], va); uint32_t pa; struct target *target = get_current_target(CMD_CTX); int retval = target->type->virt2phys(target, va, &pa); if (retval == ERROR_OK) command_print(CMD_CTX, "Physical address 0x%08" PRIx32 "", pa); return retval; } static void writeData(FILE *f, const void *data, size_t len) { size_t written = fwrite(data, 1, len, f); if (written != len) LOG_ERROR("failed to write %zu bytes: %s", len, strerror(errno)); } static void writeLong(FILE *f, int l, struct target *target) { uint8_t val[4]; target_buffer_set_u32(target, val, l); writeData(f, val, 4); } static void writeString(FILE *f, char *s) { writeData(f, s, strlen(s)); } typedef unsigned char UNIT[2]; /* unit of profiling */ /* Dump a gmon.out histogram file. */ static void write_gmon(uint32_t *samples, uint32_t sampleNum, const char *filename, bool with_range, uint32_t start_address, uint32_t end_address, struct target *target) { uint32_t i; FILE *f = fopen(filename, "w"); if (f == NULL) return; writeString(f, "gmon"); writeLong(f, 0x00000001, target); /* Version */ writeLong(f, 0, target); /* padding */ writeLong(f, 0, target); /* padding */ writeLong(f, 0, target); /* padding */ uint8_t zero = 0; /* GMON_TAG_TIME_HIST */ writeData(f, &zero, 1); /* figure out bucket size */ uint32_t min; uint32_t max; if (with_range) { min = start_address; max = end_address; } else { min = samples[0]; max = samples[0]; for (i = 0; i < sampleNum; i++) { if (min > samples[i]) min = samples[i]; if (max < samples[i]) max = samples[i]; } /* max should be (largest sample + 1) * Refer to binutils/gprof/hist.c (find_histogram_for_pc) */ max++; } int addressSpace = max - min; assert(addressSpace >= 2); /* FIXME: What is the reasonable number of buckets? * The profiling result will be more accurate if there are enough buckets. */ static const uint32_t maxBuckets = 128 * 1024; /* maximum buckets. 
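* At sizeof(int) == 4 this caps the host-side histogram at roughly 512 KiB.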
*/ uint32_t numBuckets = addressSpace / sizeof(UNIT); if (numBuckets > maxBuckets) numBuckets = maxBuckets; int *buckets = malloc(sizeof(int) * numBuckets); if (buckets == NULL) { fclose(f); return; } memset(buckets, 0, sizeof(int) * numBuckets); for (i = 0; i < sampleNum; i++) { uint32_t address = samples[i]; if ((address < min) || (max <= address)) continue; long long a = address - min; long long b = numBuckets; long long c = addressSpace; int index_t = (a * b) / c; /* danger!!!! int32 overflows */ buckets[index_t]++; } /* append binary memory gmon.out &profile_hist_hdr ((char*)&profile_hist_hdr + sizeof(struct gmon_hist_hdr)) */ writeLong(f, min, target); /* low_pc */ writeLong(f, max, target); /* high_pc */ writeLong(f, numBuckets, target); /* # of buckets */ writeLong(f, 100, target); /* KLUDGE! We lie, ca. 100Hz best case. */ writeString(f, "seconds"); for (i = 0; i < (15-strlen("seconds")); i++) writeData(f, &zero, 1); writeString(f, "s"); /*append binary memory gmon.out profile_hist_data (profile_hist_data + profile_hist_hdr.hist_size) */ char *data = malloc(2 * numBuckets); if (data != NULL) { for (i = 0; i < numBuckets; i++) { int val; val = buckets[i]; if (val > 65535) val = 65535; data[i * 2] = val&0xff; data[i * 2 + 1] = (val >> 8) & 0xff; } free(buckets); writeData(f, data, numBuckets * 2); free(data); } else free(buckets); fclose(f); } /* profiling samples the CPU PC as quickly as OpenOCD is able, * which will be used as a random sampling of PC */ COMMAND_HANDLER(handle_profile_command) { struct target *target = get_current_target(CMD_CTX); if ((CMD_ARGC != 2) && (CMD_ARGC != 4)) return ERROR_COMMAND_SYNTAX_ERROR; const uint32_t MAX_PROFILE_SAMPLE_NUM = 10000; uint32_t offset; uint32_t num_of_samples; int retval = ERROR_OK; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], offset); uint32_t *samples = malloc(sizeof(uint32_t) * MAX_PROFILE_SAMPLE_NUM); if (samples == NULL) { LOG_ERROR("No memory to store samples."); return ERROR_FAIL; } /** * Some cores let us sample the PC without the * annoying halt/resume step; for example, ARMv7 PCSR. * Provide a way to use that more efficient mechanism. 
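* (Cores without such a mechanism are presumably sampled via target_profiling_default(), which repeatedly halts and resumes the target.)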
*/ retval = target_profiling(target, samples, MAX_PROFILE_SAMPLE_NUM, &num_of_samples, offset); if (retval != ERROR_OK) { free(samples); return retval; } assert(num_of_samples <= MAX_PROFILE_SAMPLE_NUM); retval = target_poll(target); if (retval != ERROR_OK) { free(samples); return retval; } if (target->state == TARGET_RUNNING) { retval = target_halt(target); if (retval != ERROR_OK) { free(samples); return retval; } } retval = target_poll(target); if (retval != ERROR_OK) { free(samples); return retval; } uint32_t start_address = 0; uint32_t end_address = 0; bool with_range = false; if (CMD_ARGC == 4) { with_range = true; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[2], start_address); COMMAND_PARSE_NUMBER(u32, CMD_ARGV[3], end_address); } write_gmon(samples, num_of_samples, CMD_ARGV[1], with_range, start_address, end_address, target); command_print(CMD_CTX, "Wrote %s", CMD_ARGV[1]); free(samples); return retval; } static int new_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t val) { char *namebuf; Jim_Obj *nameObjPtr, *valObjPtr; int result; namebuf = alloc_printf("%s(%d)", varname, idx); if (!namebuf) return JIM_ERR; nameObjPtr = Jim_NewStringObj(interp, namebuf, -1); valObjPtr = Jim_NewIntObj(interp, val); if (!nameObjPtr || !valObjPtr) { free(namebuf); return JIM_ERR; } Jim_IncrRefCount(nameObjPtr); Jim_IncrRefCount(valObjPtr); result = Jim_SetVariable(interp, nameObjPtr, valObjPtr); Jim_DecrRefCount(interp, nameObjPtr); Jim_DecrRefCount(interp, valObjPtr); free(namebuf); /* printf("%s(%d) <= 0%08x\n", varname, idx, val); */ return result; } static int jim_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { struct command_context *context; struct target *target; context = current_command_context(interp); assert(context != NULL); target = get_current_target(context); if (target == NULL) { LOG_ERROR("mem2array: no current target"); return JIM_ERR; } return target_mem2array(interp, target, argc - 1, argv + 1); } static int target_mem2array(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv) { long l; uint32_t width; int len; uint32_t addr; uint32_t count; uint32_t v; const char *varname; const char *phys; bool is_phys; int n, e, retval; uint32_t i; /* argv[1] = name of array to receive the data * argv[2] = desired width * argv[3] = memory address * argv[4] = count of times to read */ if (argc < 4 || argc > 5) { Jim_WrongNumArgs(interp, 1, argv, "varname width addr nelems [phys]"); return JIM_ERR; } varname = Jim_GetString(argv[0], &len); /* given "foo" get space for worse case "foo(%d)" .. 
add 20 */ e = Jim_GetLong(interp, argv[1], &l); width = l; if (e != JIM_OK) return e; e = Jim_GetLong(interp, argv[2], &l); addr = l; if (e != JIM_OK) return e; e = Jim_GetLong(interp, argv[3], &l); len = l; if (e != JIM_OK) return e; is_phys = false; if (argc > 4) { phys = Jim_GetString(argv[4], &n); if (!strncmp(phys, "phys", n)) is_phys = true; else return JIM_ERR; } switch (width) { case 8: width = 1; break; case 16: width = 2; break; case 32: width = 4; break; default: Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL); return JIM_ERR; } if (len == 0) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: zero width read?", NULL); return JIM_ERR; } if ((addr + (len * width)) < addr) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: addr + len - wraps to zero?", NULL); return JIM_ERR; } /* absurd transfer size? */ if (len > 65536) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: absurd > 64K item request", NULL); return JIM_ERR; } if ((width == 1) || ((width == 2) && ((addr & 1) == 0)) || ((width == 4) && ((addr & 3) == 0))) { /* all is well */ } else { char buf[100]; Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); sprintf(buf, "mem2array address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads", addr, width); Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL); return JIM_ERR; } /* Transfer loop */ /* index counter */ n = 0; size_t buffersize = 4096; uint8_t *buffer = malloc(buffersize); if (buffer == NULL) return JIM_ERR; /* assume ok */ e = JIM_OK; while (len) { /* Slurp... in buffer size chunks */ count = len; /* in objects.. 
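* i.e. elements of 'width' bytes each; the clamp just below keeps a single pass within the 4 KiB bounce buffer.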
*/ if (count > (buffersize / width)) count = (buffersize / width); if (is_phys) retval = target_read_phys_memory(target, addr, width, count, buffer); else retval = target_read_memory(target, addr, width, count, buffer); if (retval != ERROR_OK) { /* BOO !*/ LOG_ERROR("mem2array: Read @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed", addr, width, count); Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "mem2array: cannot read memory", NULL); e = JIM_ERR; break; } else { v = 0; /* shut up gcc */ for (i = 0; i < count ; i++, n++) { switch (width) { case 4: v = target_buffer_get_u32(target, &buffer[i*width]); break; case 2: v = target_buffer_get_u16(target, &buffer[i*width]); break; case 1: v = buffer[i] & 0x0ff; break; } new_int_array_element(interp, varname, n, v); } len -= count; addr += count * width; } } free(buffer); Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); return e; } static int get_int_array_element(Jim_Interp *interp, const char *varname, int idx, uint32_t *val) { char *namebuf; Jim_Obj *nameObjPtr, *valObjPtr; int result; long l; namebuf = alloc_printf("%s(%d)", varname, idx); if (!namebuf) return JIM_ERR; nameObjPtr = Jim_NewStringObj(interp, namebuf, -1); if (!nameObjPtr) { free(namebuf); return JIM_ERR; } Jim_IncrRefCount(nameObjPtr); valObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_ERRMSG); Jim_DecrRefCount(interp, nameObjPtr); free(namebuf); if (valObjPtr == NULL) return JIM_ERR; result = Jim_GetLong(interp, valObjPtr, &l); /* printf("%s(%d) => 0%08x\n", varname, idx, val); */ *val = l; return result; } static int jim_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { struct command_context *context; struct target *target; context = current_command_context(interp); assert(context != NULL); target = get_current_target(context); if (target == NULL) { LOG_ERROR("array2mem: no current target"); return JIM_ERR; } return target_array2mem(interp, target, argc-1, argv + 1); } static int target_array2mem(Jim_Interp *interp, struct target *target, int argc, Jim_Obj *const *argv) { long l; uint32_t width; int len; uint32_t addr; uint32_t count; uint32_t v; const char *varname; const char *phys; bool is_phys; int n, e, retval; uint32_t i; /* argv[1] = name of array to get the data * argv[2] = desired width * argv[3] = memory address * argv[4] = count to write */ if (argc < 4 || argc > 5) { Jim_WrongNumArgs(interp, 0, argv, "varname width addr nelems [phys]"); return JIM_ERR; } varname = Jim_GetString(argv[0], &len); /* given "foo" get space for worse case "foo(%d)" .. 
add 20 */ e = Jim_GetLong(interp, argv[1], &l); width = l; if (e != JIM_OK) return e; e = Jim_GetLong(interp, argv[2], &l); addr = l; if (e != JIM_OK) return e; e = Jim_GetLong(interp, argv[3], &l); len = l; if (e != JIM_OK) return e; is_phys = false; if (argc > 4) { phys = Jim_GetString(argv[4], &n); if (!strncmp(phys, "phys", n)) is_phys = true; else return JIM_ERR; } switch (width) { case 8: width = 1; break; case 16: width = 2; break; case 32: width = 4; break; default: Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "Invalid width param, must be 8/16/32", NULL); return JIM_ERR; } if (len == 0) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: zero width read?", NULL); return JIM_ERR; } if ((addr + (len * width)) < addr) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: addr + len - wraps to zero?", NULL); return JIM_ERR; } /* absurd transfer size? */ if (len > 65536) { Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: absurd > 64K item request", NULL); return JIM_ERR; } if ((width == 1) || ((width == 2) && ((addr & 1) == 0)) || ((width == 4) && ((addr & 3) == 0))) { /* all is well */ } else { char buf[100]; Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); sprintf(buf, "array2mem address: 0x%08" PRIx32 " is not aligned for %" PRId32 " byte reads", addr, width); Jim_AppendStrings(interp, Jim_GetResult(interp), buf, NULL); return JIM_ERR; } /* Transfer loop */ /* index counter */ n = 0; /* assume ok */ e = JIM_OK; size_t buffersize = 4096; uint8_t *buffer = malloc(buffersize); if (buffer == NULL) return JIM_ERR; while (len) { /* Slurp... in buffer size chunks */ count = len; /* in objects.. */ if (count > (buffersize / width)) count = (buffersize / width); v = 0; /* shut up gcc */ for (i = 0; i < count; i++, n++) { get_int_array_element(interp, varname, n, &v); switch (width) { case 4: target_buffer_set_u32(target, &buffer[i * width], v); break; case 2: target_buffer_set_u16(target, &buffer[i * width], v); break; case 1: buffer[i] = v & 0x0ff; break; } } len -= count; if (is_phys) retval = target_write_phys_memory(target, addr, width, count, buffer); else retval = target_write_memory(target, addr, width, count, buffer); if (retval != ERROR_OK) { /* BOO !*/ LOG_ERROR("array2mem: Write @ 0x%08" PRIx32 ", w=%" PRId32 ", cnt=%" PRId32 ", failed", addr, width, count); Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); Jim_AppendStrings(interp, Jim_GetResult(interp), "array2mem: cannot read memory", NULL); e = JIM_ERR; break; } addr += count * width; } free(buffer); Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); return e; } /* FIX? should we propagate errors here rather than printing them * and continuing? 
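* As written, every handler registered for the event still runs, and a failing body only produces an error message.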
*/ void target_handle_event(struct target *target, enum target_event e) { struct target_event_action *teap; for (teap = target->event_action; teap != NULL; teap = teap->next) { if (teap->event == e) { LOG_DEBUG("target: (%d) %s (%s) event: %d (%s) action: %s", target->target_number, target_name(target), target_type_name(target), e, Jim_Nvp_value2name_simple(nvp_target_event, e)->name, Jim_GetString(teap->body, NULL)); if (Jim_EvalObj(teap->interp, teap->body) != JIM_OK) { Jim_MakeErrorMessage(teap->interp); command_print(NULL, "%s\n", Jim_GetString(Jim_GetResult(teap->interp), NULL)); } } } } /** * Returns true only if the target has a handler for the specified event. */ bool target_has_event_action(struct target *target, enum target_event event) { struct target_event_action *teap; for (teap = target->event_action; teap != NULL; teap = teap->next) { if (teap->event == event) return true; } return false; } enum target_cfg_param { TCFG_TYPE, TCFG_EVENT, TCFG_WORK_AREA_VIRT, TCFG_WORK_AREA_PHYS, TCFG_WORK_AREA_SIZE, TCFG_WORK_AREA_BACKUP, TCFG_ENDIAN, TCFG_COREID, TCFG_CHAIN_POSITION, TCFG_DBGBASE, TCFG_RTOS, TCFG_DEFER_EXAMINE, }; static Jim_Nvp nvp_config_opts[] = { { .name = "-type", .value = TCFG_TYPE }, { .name = "-event", .value = TCFG_EVENT }, { .name = "-work-area-virt", .value = TCFG_WORK_AREA_VIRT }, { .name = "-work-area-phys", .value = TCFG_WORK_AREA_PHYS }, { .name = "-work-area-size", .value = TCFG_WORK_AREA_SIZE }, { .name = "-work-area-backup", .value = TCFG_WORK_AREA_BACKUP }, { .name = "-endian" , .value = TCFG_ENDIAN }, { .name = "-coreid", .value = TCFG_COREID }, { .name = "-chain-position", .value = TCFG_CHAIN_POSITION }, { .name = "-dbgbase", .value = TCFG_DBGBASE }, { .name = "-rtos", .value = TCFG_RTOS }, { .name = "-defer-examine", .value = TCFG_DEFER_EXAMINE }, { .name = NULL, .value = -1 } }; static int target_configure(Jim_GetOptInfo *goi, struct target *target) { Jim_Nvp *n; Jim_Obj *o; jim_wide w; int e; /* parse config or cget options ... */ while (goi->argc > 0) { Jim_SetEmptyResult(goi->interp); /* Jim_GetOpt_Debug(goi); */ if (target->type->target_jim_configure) { /* target defines a configure function */ /* target gets first dibs on parameters */ e = (*(target->type->target_jim_configure))(target, goi); if (e == JIM_OK) { /* more? */ continue; } if (e == JIM_ERR) { /* An error */ return e; } /* otherwise we 'continue' below */ } e = Jim_GetOpt_Nvp(goi, nvp_config_opts, &n); if (e != JIM_OK) { Jim_GetOpt_NvpUnknown(goi, nvp_config_opts, 0); return e; } switch (n->value) { case TCFG_TYPE: /* not setable */ if (goi->isconfigure) { Jim_SetResultFormatted(goi->interp, "not settable: %s", n->name); return JIM_ERR; } else { no_params: if (goi->argc != 0) { Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "NO PARAMS"); return JIM_ERR; } } Jim_SetResultString(goi->interp, target_type_name(target), -1); /* loop for more */ break; case TCFG_EVENT: if (goi->argc == 0) { Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ..."); return JIM_ERR; } e = Jim_GetOpt_Nvp(goi, nvp_target_event, &n); if (e != JIM_OK) { Jim_GetOpt_NvpUnknown(goi, nvp_target_event, 1); return e; } if (goi->isconfigure) { if (goi->argc != 1) { Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name? ?EVENT-BODY?"); return JIM_ERR; } } else { if (goi->argc != 0) { Jim_WrongNumArgs(goi->interp, goi->argc, goi->argv, "-event ?event-name?"); return JIM_ERR; } } { struct target_event_action *teap; teap = target->event_action; /* replace existing? 
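* i.e. look for an action already bound to this event so that a new 'configure' body overwrites it rather than adding a duplicate entry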
*/ while (teap) { if (teap->event == (enum target_event)n->value) break; teap = teap->next; } if (goi->isconfigure) { bool replace = true; if (teap == NULL) { /* create new */ teap = calloc(1, sizeof(*teap)); replace = false; } teap->event = n->value; teap->interp = goi->interp; Jim_GetOpt_Obj(goi, &o); if (teap->body) Jim_DecrRefCount(teap->interp, teap->body); teap->body = Jim_DuplicateObj(goi->interp, o); /* * FIXME: * Tcl/TK - "tk events" have a nice feature. * See the "BIND" command. * We should support that here. * You can specify %X and %Y in the event code. * The idea is: %T - target name. * The idea is: %N - target number * The idea is: %E - event name. */ Jim_IncrRefCount(teap->body); if (!replace) { /* add to head of event list */ teap->next = target->event_action; target->event_action = teap; } Jim_SetEmptyResult(goi->interp); } else { /* get */ if (teap == NULL) Jim_SetEmptyResult(goi->interp); else Jim_SetResult(goi->interp, Jim_DuplicateObj(goi->interp, teap->body)); } } /* loop for more */ break; case TCFG_WORK_AREA_VIRT: if (goi->isconfigure) { target_free_all_working_areas(target); e = Jim_GetOpt_Wide(goi, &w); if (e != JIM_OK) return e; target->working_area_virt = w; target->working_area_virt_spec = true; } else { if (goi->argc != 0) goto no_params; } Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_virt)); /* loop for more */ break; case TCFG_WORK_AREA_PHYS: if (goi->isconfigure) { target_free_all_working_areas(target); e = Jim_GetOpt_Wide(goi, &w); if (e != JIM_OK) return e; target->working_area_phys = w; target->working_area_phys_spec = true; } else { if (goi->argc != 0) goto no_params; } Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_phys)); /* loop for more */ break; case TCFG_WORK_AREA_SIZE: if (goi->isconfigure) { target_free_all_working_areas(target); e = Jim_GetOpt_Wide(goi, &w); if (e != JIM_OK) return e; target->working_area_size = w; } else { if (goi->argc != 0) goto no_params; } Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size)); /* loop for more */ break; case TCFG_WORK_AREA_BACKUP: if (goi->isconfigure) { target_free_all_working_areas(target); e = Jim_GetOpt_Wide(goi, &w); if (e != JIM_OK) return e; /* make this exactly 1 or 0 */ target->backup_working_area = (!!w); } else { if (goi->argc != 0) goto no_params; } Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->backup_working_area)); /* loop for more e*/ break; case TCFG_ENDIAN: if (goi->isconfigure) { e = Jim_GetOpt_Nvp(goi, nvp_target_endian, &n); if (e != JIM_OK) { Jim_GetOpt_NvpUnknown(goi, nvp_target_endian, 1); return e; } target->endianness = n->value; } else { if (goi->argc != 0) goto no_params; } n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness); if (n->name == NULL) { target->endianness = TARGET_LITTLE_ENDIAN; n = Jim_Nvp_value2name_simple(nvp_target_endian, target->endianness); } Jim_SetResultString(goi->interp, n->name, -1); /* loop for more */ break; case TCFG_COREID: if (goi->isconfigure) { e = Jim_GetOpt_Wide(goi, &w); if (e != JIM_OK) return e; target->coreid = (int32_t)w; } else { if (goi->argc != 0) goto no_params; } Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->working_area_size)); /* loop for more */ break; case TCFG_CHAIN_POSITION: if (goi->isconfigure) { Jim_Obj *o_t; struct jtag_tap *tap; target_free_all_working_areas(target); e = Jim_GetOpt_Obj(goi, &o_t); if (e != JIM_OK) return e; tap = jtag_tap_by_jim_obj(goi->interp, o_t); if (tap == NULL) return 
JIM_ERR; /* make this exactly 1 or 0 */ target->tap = tap; } else { if (goi->argc != 0) goto no_params; } Jim_SetResultString(goi->interp, target->tap->dotted_name, -1); /* loop for more e*/ break; case TCFG_DBGBASE: if (goi->isconfigure) { e = Jim_GetOpt_Wide(goi, &w); if (e != JIM_OK) return e; target->dbgbase = (uint32_t)w; target->dbgbase_set = true; } else { if (goi->argc != 0) goto no_params; } Jim_SetResult(goi->interp, Jim_NewIntObj(goi->interp, target->dbgbase)); /* loop for more */ break; case TCFG_RTOS: /* RTOS */ { int result = rtos_create(goi, target); if (result != JIM_OK) return result; } /* loop for more */ break; case TCFG_DEFER_EXAMINE: /* DEFER_EXAMINE */ target->defer_examine = true; /* loop for more */ break; } } /* while (goi->argc) */ /* done - we return */ return JIM_OK; } static int jim_target_configure(Jim_Interp *interp, int argc, Jim_Obj * const *argv) { Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); goi.isconfigure = !strcmp(Jim_GetString(argv[0], NULL), "configure"); if (goi.argc < 1) { Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv, "missing: -option ..."); return JIM_ERR; } struct target *target = Jim_CmdPrivData(goi.interp); return target_configure(&goi, target); } static int jim_target_mw(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { const char *cmd_name = Jim_GetString(argv[0], NULL); Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); if (goi.argc < 2 || goi.argc > 4) { Jim_SetResultFormatted(goi.interp, "usage: %s [phys] <address> <data> [<count>]", cmd_name); return JIM_ERR; } target_write_fn fn; fn = target_write_memory; int e; if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) { /* consume it */ struct Jim_Obj *obj; e = Jim_GetOpt_Obj(&goi, &obj); if (e != JIM_OK) return e; fn = target_write_phys_memory; } jim_wide a; e = Jim_GetOpt_Wide(&goi, &a); if (e != JIM_OK) return e; jim_wide b; e = Jim_GetOpt_Wide(&goi, &b); if (e != JIM_OK) return e; jim_wide c = 1; if (goi.argc == 1) { e = Jim_GetOpt_Wide(&goi, &c); if (e != JIM_OK) return e; } /* all args must be consumed */ if (goi.argc != 0) return JIM_ERR; struct target *target = Jim_CmdPrivData(goi.interp); unsigned data_size; if (strcasecmp(cmd_name, "mww") == 0) data_size = 4; else if (strcasecmp(cmd_name, "mwh") == 0) data_size = 2; else if (strcasecmp(cmd_name, "mwb") == 0) data_size = 1; else { LOG_ERROR("command '%s' unknown: ", cmd_name); return JIM_ERR; } return (target_fill_mem(target, a, fn, data_size, b, c) == ERROR_OK) ? JIM_OK : JIM_ERR; } /** * @brief Reads an array of words/halfwords/bytes from target memory starting at specified address. * * Usage: mdw [phys] <address> [<count>] - for 32 bit reads * mdh [phys] <address> [<count>] - for 16 bit reads * mdb [phys] <address> [<count>] - for 8 bit reads * * Count defaults to 1. * * Calls target_read_memory or target_read_phys_memory depending on * the presence of the "phys" argument * Reads the target memory in blocks of max. 32 bytes, and returns an array of ints formatted * to int representation in base16. * Also outputs read data in a human readable form using command_print * * @param phys if present target_read_phys_memory will be used instead of target_read_memory * @param address address where to start the read. May be specified in decimal or hex using the standard "0x" prefix * @param count optional count parameter to read an array of values. If not specified, defaults to 1. 
* @returns JIM_ERR on error or JIM_OK on success and sets the result string to an array of ascii formatted numbers * on success, with [<count>] number of elements. * * In case of little endian target: * Example1: "mdw 0x00000000" returns "10123456" * Example2: "mdh 0x00000000 1" returns "3456" * Example3: "mdb 0x00000000" returns "56" * Example4: "mdh 0x00000000 2" returns "3456 1012" * Example5: "mdb 0x00000000 3" returns "56 34 12" **/ static int jim_target_md(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { const char *cmd_name = Jim_GetString(argv[0], NULL); Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); if ((goi.argc < 1) || (goi.argc > 3)) { Jim_SetResultFormatted(goi.interp, "usage: %s [phys] <address> [<count>]", cmd_name); return JIM_ERR; } int (*fn)(struct target *target, uint32_t address, uint32_t size, uint32_t count, uint8_t *buffer); fn = target_read_memory; int e; if (strcmp(Jim_GetString(argv[1], NULL), "phys") == 0) { /* consume it */ struct Jim_Obj *obj; e = Jim_GetOpt_Obj(&goi, &obj); if (e != JIM_OK) return e; fn = target_read_phys_memory; } /* Read address parameter */ jim_wide addr; e = Jim_GetOpt_Wide(&goi, &addr); if (e != JIM_OK) return JIM_ERR; /* If next parameter exists, read it out as the count parameter, if not, set it to 1 (default) */ jim_wide count; if (goi.argc == 1) { e = Jim_GetOpt_Wide(&goi, &count); if (e != JIM_OK) return JIM_ERR; } else count = 1; /* all args must be consumed */ if (goi.argc != 0) return JIM_ERR; jim_wide dwidth = 1; /* shut up gcc */ if (strcasecmp(cmd_name, "mdw") == 0) dwidth = 4; else if (strcasecmp(cmd_name, "mdh") == 0) dwidth = 2; else if (strcasecmp(cmd_name, "mdb") == 0) dwidth = 1; else { LOG_ERROR("command '%s' unknown: ", cmd_name); return JIM_ERR; } /* convert count to "bytes" */ int bytes = count * dwidth; struct target *target = Jim_CmdPrivData(goi.interp); uint8_t target_buf[32]; jim_wide x, y, z; while (bytes > 0) { y = (bytes < 16) ? bytes : 16; /* y = min(bytes, 16); */ /* Try to read out next block */ e = fn(target, addr, dwidth, y / dwidth, target_buf); if (e != ERROR_OK) { Jim_SetResultFormatted(interp, "error reading target @ 0x%08lx", (long)addr); return JIM_ERR; } command_print_sameline(NULL, "0x%08x ", (int)(addr)); switch (dwidth) { case 4: for (x = 0; x < 16 && x < y; x += 4) { z = target_buffer_get_u32(target, &(target_buf[x])); command_print_sameline(NULL, "%08x ", (int)(z)); } for (; (x < 16) ; x += 4) command_print_sameline(NULL, " "); break; case 2: for (x = 0; x < 16 && x < y; x += 2) { z = target_buffer_get_u16(target, &(target_buf[x])); command_print_sameline(NULL, "%04x ", (int)(z)); } for (; (x < 16) ; x += 2) command_print_sameline(NULL, " "); break; case 1: default: for (x = 0 ; (x < 16) && (x < y) ; x += 1) { z = target_buffer_get_u8(target, &(target_buf[x])); command_print_sameline(NULL, "%02x ", (int)(z)); } for (; (x < 16) ; x += 1) command_print_sameline(NULL, " "); break; } /* ascii-ify the bytes */ for (x = 0 ; x < y ; x++) { if ((target_buf[x] >= 0x20) && (target_buf[x] <= 0x7e)) { /* good */ } else { /* smack it */ target_buf[x] = '.'; } } /* space pad */ while (x < 16) { target_buf[x] = ' '; x++; } /* terminate */ target_buf[16] = 0; /* print - with a newline */ command_print_sameline(NULL, "%s\n", target_buf); /* NEXT...
*/ bytes -= 16; addr += 16; } return JIM_OK; } static int jim_target_mem2array(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { struct target *target = Jim_CmdPrivData(interp); return target_mem2array(interp, target, argc - 1, argv + 1); } static int jim_target_array2mem(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { struct target *target = Jim_CmdPrivData(interp); return target_array2mem(interp, target, argc - 1, argv + 1); } static int jim_target_tap_disabled(Jim_Interp *interp) { Jim_SetResultFormatted(interp, "[TAP is disabled]"); return JIM_ERR; } static int jim_target_examine(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { bool allow_defer = false; Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); if (goi.argc > 1) { const char *cmd_name = Jim_GetString(argv[0], NULL); Jim_SetResultFormatted(goi.interp, "usage: %s ['allow-defer']", cmd_name); return JIM_ERR; } if (goi.argc > 0 && strcmp(Jim_GetString(argv[1], NULL), "allow-defer") == 0) { /* consume it */ struct Jim_Obj *obj; int e = Jim_GetOpt_Obj(&goi, &obj); if (e != JIM_OK) return e; allow_defer = true; } struct target *target = Jim_CmdPrivData(interp); if (!target->tap->enabled) return jim_target_tap_disabled(interp); if (allow_defer && target->defer_examine) { LOG_INFO("Deferring arp_examine of %s", target_name(target)); LOG_INFO("Use arp_examine command to examine it manually!"); return JIM_OK; } int e = target->type->examine(target); if (e != ERROR_OK) return JIM_ERR; return JIM_OK; } static int jim_target_was_examined(Jim_Interp *interp, int argc, Jim_Obj * const *argv) { struct target *target = Jim_CmdPrivData(interp); Jim_SetResultBool(interp, target_was_examined(target)); return JIM_OK; } static int jim_target_examine_deferred(Jim_Interp *interp, int argc, Jim_Obj * const *argv) { struct target *target = Jim_CmdPrivData(interp); Jim_SetResultBool(interp, target->defer_examine); return JIM_OK; } static int jim_target_halt_gdb(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "[no parameters]"); return JIM_ERR; } struct target *target = Jim_CmdPrivData(interp); if (target_call_event_callbacks(target, TARGET_EVENT_GDB_HALT) != ERROR_OK) return JIM_ERR; return JIM_OK; } static int jim_target_poll(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "[no parameters]"); return JIM_ERR; } struct target *target = Jim_CmdPrivData(interp); if (!target->tap->enabled) return jim_target_tap_disabled(interp); int e; if (!(target_was_examined(target))) e = ERROR_TARGET_NOT_EXAMINED; else e = target->type->poll(target); if (e != ERROR_OK) return JIM_ERR; return JIM_OK; } static int jim_target_reset(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); if (goi.argc != 2) { Jim_WrongNumArgs(interp, 0, argv, "([tT]|[fF]|assert|deassert) BOOL"); return JIM_ERR; } Jim_Nvp *n; int e = Jim_GetOpt_Nvp(&goi, nvp_assert, &n); if (e != JIM_OK) { Jim_GetOpt_NvpUnknown(&goi, nvp_assert, 1); return e; } /* the halt or not param */ jim_wide a; e = Jim_GetOpt_Wide(&goi, &a); if (e != JIM_OK) return e; struct target *target = Jim_CmdPrivData(goi.interp); if (!target->tap->enabled) return jim_target_tap_disabled(interp); if (!target->type->assert_reset || !target->type->deassert_reset) { Jim_SetResultFormatted(interp, "No target-specific reset for %s", target_name(target)); return JIM_ERR; } if (target->defer_examine) 
target_reset_examined(target); /* determine if we should halt or not. */ target->reset_halt = !!a; /* When this happens - all workareas are invalid. */ target_free_all_working_areas_restore(target, 0); /* do the assert */ if (n->value == NVP_ASSERT) e = target->type->assert_reset(target); else e = target->type->deassert_reset(target); return (e == ERROR_OK) ? JIM_OK : JIM_ERR; } static int jim_target_halt(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "[no parameters]"); return JIM_ERR; } struct target *target = Jim_CmdPrivData(interp); if (!target->tap->enabled) return jim_target_tap_disabled(interp); int e = target->type->halt(target); return (e == ERROR_OK) ? JIM_OK : JIM_ERR; } static int jim_target_wait_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); /* params: <name> statename timeoutmsecs */ if (goi.argc != 2) { const char *cmd_name = Jim_GetString(argv[0], NULL); Jim_SetResultFormatted(goi.interp, "%s <state_name> <timeout_in_msec>", cmd_name); return JIM_ERR; } Jim_Nvp *n; int e = Jim_GetOpt_Nvp(&goi, nvp_target_state, &n); if (e != JIM_OK) { Jim_GetOpt_NvpUnknown(&goi, nvp_target_state, 1); return e; } jim_wide a; e = Jim_GetOpt_Wide(&goi, &a); if (e != JIM_OK) return e; struct target *target = Jim_CmdPrivData(interp); if (!target->tap->enabled) return jim_target_tap_disabled(interp); e = target_wait_state(target, n->value, a); if (e != ERROR_OK) { Jim_Obj *eObj = Jim_NewIntObj(interp, e); Jim_SetResultFormatted(goi.interp, "target: %s wait %s fails (%#s) %s", target_name(target), n->name, eObj, target_strerror_safe(e)); Jim_FreeNewObj(interp, eObj); return JIM_ERR; } return JIM_OK; } /* List for human, Events defined for this target. 
* scripts/programs should use 'name cget -event NAME' */ static int jim_target_event_list(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { struct command_context *cmd_ctx = current_command_context(interp); assert(cmd_ctx != NULL); struct target *target = Jim_CmdPrivData(interp); struct target_event_action *teap = target->event_action; command_print(cmd_ctx, "Event actions for target (%d) %s\n", target->target_number, target_name(target)); command_print(cmd_ctx, "%-25s | Body", "Event"); command_print(cmd_ctx, "------------------------- | " "----------------------------------------"); while (teap) { Jim_Nvp *opt = Jim_Nvp_value2name_simple(nvp_target_event, teap->event); command_print(cmd_ctx, "%-25s | %s", opt->name, Jim_GetString(teap->body, NULL)); teap = teap->next; } command_print(cmd_ctx, "***END***"); return JIM_OK; } static int jim_target_current_state(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "[no parameters]"); return JIM_ERR; } struct target *target = Jim_CmdPrivData(interp); Jim_SetResultString(interp, target_state_name(target), -1); return JIM_OK; } static int jim_target_invoke_event(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); if (goi.argc != 1) { const char *cmd_name = Jim_GetString(argv[0], NULL); Jim_SetResultFormatted(goi.interp, "%s <eventname>", cmd_name); return JIM_ERR; } Jim_Nvp *n; int e = Jim_GetOpt_Nvp(&goi, nvp_target_event, &n); if (e != JIM_OK) { Jim_GetOpt_NvpUnknown(&goi, nvp_target_event, 1); return e; } struct target *target = Jim_CmdPrivData(interp); target_handle_event(target, n->value); return JIM_OK; } static const struct command_registration target_instance_command_handlers[] = { { .name = "configure", .mode = COMMAND_CONFIG, .jim_handler = jim_target_configure, .help = "configure a new target for use", .usage = "[target_attribute ...]", }, { .name = "cget", .mode = COMMAND_ANY, .jim_handler = jim_target_configure, .help = "returns the specified target attribute", .usage = "target_attribute", }, { .name = "mww", .mode = COMMAND_EXEC, .jim_handler = jim_target_mw, .help = "Write 32-bit word(s) to target memory", .usage = "address data [count]", }, { .name = "mwh", .mode = COMMAND_EXEC, .jim_handler = jim_target_mw, .help = "Write 16-bit half-word(s) to target memory", .usage = "address data [count]", }, { .name = "mwb", .mode = COMMAND_EXEC, .jim_handler = jim_target_mw, .help = "Write byte(s) to target memory", .usage = "address data [count]", }, { .name = "mdw", .mode = COMMAND_EXEC, .jim_handler = jim_target_md, .help = "Display target memory as 32-bit words", .usage = "address [count]", }, { .name = "mdh", .mode = COMMAND_EXEC, .jim_handler = jim_target_md, .help = "Display target memory as 16-bit half-words", .usage = "address [count]", }, { .name = "mdb", .mode = COMMAND_EXEC, .jim_handler = jim_target_md, .help = "Display target memory as 8-bit bytes", .usage = "address [count]", }, { .name = "array2mem", .mode = COMMAND_EXEC, .jim_handler = jim_target_array2mem, .help = "Writes Tcl array of 8/16/32 bit numbers " "to target memory", .usage = "arrayname bitwidth address count", }, { .name = "mem2array", .mode = COMMAND_EXEC, .jim_handler = jim_target_mem2array, .help = "Loads Tcl array of 8/16/32 bit numbers " "from target memory", .usage = "arrayname bitwidth address count", }, { .name = "eventlist", .mode = COMMAND_EXEC, .jim_handler = jim_target_event_list, .help = "displays a table of events 
defined for this target", }, { .name = "curstate", .mode = COMMAND_EXEC, .jim_handler = jim_target_current_state, .help = "displays the current state of this target", }, { .name = "arp_examine", .mode = COMMAND_EXEC, .jim_handler = jim_target_examine, .help = "used internally for reset processing", .usage = "arp_examine ['allow-defer']", }, { .name = "was_examined", .mode = COMMAND_EXEC, .jim_handler = jim_target_was_examined, .help = "used internally for reset processing", .usage = "was_examined", }, { .name = "examine_deferred", .mode = COMMAND_EXEC, .jim_handler = jim_target_examine_deferred, .help = "used internally for reset processing", .usage = "examine_deferred", }, { .name = "arp_halt_gdb", .mode = COMMAND_EXEC, .jim_handler = jim_target_halt_gdb, .help = "used internally for reset processing to halt GDB", }, { .name = "arp_poll", .mode = COMMAND_EXEC, .jim_handler = jim_target_poll, .help = "used internally for reset processing", }, { .name = "arp_reset", .mode = COMMAND_EXEC, .jim_handler = jim_target_reset, .help = "used internally for reset processing", }, { .name = "arp_halt", .mode = COMMAND_EXEC, .jim_handler = jim_target_halt, .help = "used internally for reset processing", }, { .name = "arp_waitstate", .mode = COMMAND_EXEC, .jim_handler = jim_target_wait_state, .help = "used internally for reset processing", }, { .name = "invoke-event", .mode = COMMAND_EXEC, .jim_handler = jim_target_invoke_event, .help = "invoke handler for specified event", .usage = "event_name", }, COMMAND_REGISTRATION_DONE }; static int target_create(Jim_GetOptInfo *goi) { Jim_Obj *new_cmd; Jim_Cmd *cmd; const char *cp; int e; int x; struct target *target; struct command_context *cmd_ctx; cmd_ctx = current_command_context(goi->interp); assert(cmd_ctx != NULL); if (goi->argc < 3) { Jim_WrongNumArgs(goi->interp, 1, goi->argv, "?name? ?type? ..options..."); return JIM_ERR; } /* COMMAND */ Jim_GetOpt_Obj(goi, &new_cmd); /* does this command exist? */ cmd = Jim_GetCommand(goi->interp, new_cmd, JIM_ERRMSG); if (cmd) { cp = Jim_GetString(new_cmd, NULL); Jim_SetResultFormatted(goi->interp, "Command/target: %s Exists", cp); return JIM_ERR; } /* TYPE */ e = Jim_GetOpt_String(goi, &cp, NULL); if (e != JIM_OK) return e; struct transport *tr = get_current_transport(); if (tr->override_target) { e = tr->override_target(&cp); if (e != ERROR_OK) { LOG_ERROR("The selected transport doesn't support this target"); return JIM_ERR; } LOG_INFO("The selected transport took over low-level target control. 
The results might differ compared to plain JTAG/SWD"); } /* now does target type exist */ for (x = 0 ; target_types[x] ; x++) { if (0 == strcmp(cp, target_types[x]->name)) { /* found */ break; } /* check for deprecated name */ if (target_types[x]->deprecated_name) { if (0 == strcmp(cp, target_types[x]->deprecated_name)) { /* found */ LOG_WARNING("target name is deprecated use: \'%s\'", target_types[x]->name); break; } } } if (target_types[x] == NULL) { Jim_SetResultFormatted(goi->interp, "Unknown target type %s, try one of ", cp); for (x = 0 ; target_types[x] ; x++) { if (target_types[x + 1]) { Jim_AppendStrings(goi->interp, Jim_GetResult(goi->interp), target_types[x]->name, ", ", NULL); } else { Jim_AppendStrings(goi->interp, Jim_GetResult(goi->interp), " or ", target_types[x]->name, NULL); } } return JIM_ERR; } /* Create it */ target = calloc(1, sizeof(struct target)); /* set target number */ target->target_number = new_target_number(); cmd_ctx->current_target = target->target_number; /* allocate memory for each unique target type */ target->type = calloc(1, sizeof(struct target_type)); memcpy(target->type, target_types[x], sizeof(struct target_type)); /* will be set by "-endian" */ target->endianness = TARGET_ENDIAN_UNKNOWN; /* default to first core, override with -coreid */ target->coreid = 0; target->working_area = 0x0; target->working_area_size = 0x0; target->working_areas = NULL; target->backup_working_area = 0; target->state = TARGET_UNKNOWN; target->debug_reason = DBG_REASON_UNDEFINED; target->reg_cache = NULL; target->breakpoints = NULL; target->watchpoints = NULL; target->next = NULL; target->arch_info = NULL; target->display = 1; target->halt_issued = false; /* initialize trace information */ target->trace_info = calloc(1, sizeof(struct trace)); target->dbgmsg = NULL; target->dbg_msg_enabled = 0; target->endianness = TARGET_ENDIAN_UNKNOWN; target->rtos = NULL; target->rtos_auto_detect = false; /* Do the rest as "configure" options */ goi->isconfigure = 1; e = target_configure(goi, target); if (target->tap == NULL) { Jim_SetResultString(goi->interp, "-chain-position required when creating target", -1); e = JIM_ERR; } if (e != JIM_OK) { free(target->type); free(target); return e; } if (target->endianness == TARGET_ENDIAN_UNKNOWN) { /* default endian to little if not specified */ target->endianness = TARGET_LITTLE_ENDIAN; } cp = Jim_GetString(new_cmd, NULL); target->cmd_name = strdup(cp); /* create the target specific commands */ if (target->type->commands) { e = register_commands(cmd_ctx, NULL, target->type->commands); if (ERROR_OK != e) LOG_ERROR("unable to register '%s' commands", cp); } if (target->type->target_create) (*(target->type->target_create))(target, goi->interp); /* append to end of list */ { struct target **tpp; tpp = &(all_targets); while (*tpp) tpp = &((*tpp)->next); *tpp = target; } /* now - create the new target name command */ const struct command_registration target_subcommands[] = { { .chain = target_instance_command_handlers, }, { .chain = target->type->commands, }, COMMAND_REGISTRATION_DONE }; const struct command_registration target_commands[] = { { .name = cp, .mode = COMMAND_ANY, .help = "target command group", .usage = "", .chain = target_subcommands, }, COMMAND_REGISTRATION_DONE }; e = register_commands(cmd_ctx, NULL, target_commands); if (ERROR_OK != e) return JIM_ERR; struct command *c = command_find_in_context(cmd_ctx, cp); assert(c); command_set_handler_data(c, target); return (ERROR_OK == e) ? 
JIM_OK : JIM_ERR; } static int jim_target_current(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "Too many parameters"); return JIM_ERR; } struct command_context *cmd_ctx = current_command_context(interp); assert(cmd_ctx != NULL); Jim_SetResultString(interp, target_name(get_current_target(cmd_ctx)), -1); return JIM_OK; } static int jim_target_types(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "Too many parameters"); return JIM_ERR; } Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0)); for (unsigned x = 0; NULL != target_types[x]; x++) { Jim_ListAppendElement(interp, Jim_GetResult(interp), Jim_NewStringObj(interp, target_types[x]->name, -1)); } return JIM_OK; } static int jim_target_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { if (argc != 1) { Jim_WrongNumArgs(interp, 1, argv, "Too many parameters"); return JIM_ERR; } Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0)); struct target *target = all_targets; while (target) { Jim_ListAppendElement(interp, Jim_GetResult(interp), Jim_NewStringObj(interp, target_name(target), -1)); target = target->next; } return JIM_OK; } static int jim_target_smp(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { int i; const char *targetname; int retval, len; struct target *target = (struct target *) NULL; struct target_list *head, *curr, *new; curr = (struct target_list *) NULL; head = (struct target_list *) NULL; retval = 0; LOG_DEBUG("%d", argc); /* argv[1] = target to associate in smp * argv[2] = target to assoicate in smp * argv[3] ... */ for (i = 1; i < argc; i++) { targetname = Jim_GetString(argv[i], &len); target = get_target(targetname); LOG_DEBUG("%s ", targetname); if (target) { new = malloc(sizeof(struct target_list)); new->target = target; new->next = (struct target_list *)NULL; if (head == (struct target_list *)NULL) { head = new; curr = head; } else { curr->next = new; curr = new; } } } /* now parse the list of cpu and put the target in smp mode*/ curr = head; while (curr != (struct target_list *)NULL) { target = curr->target; target->smp = 1; target->head = head; curr = curr->next; } if (target && target->rtos) retval = rtos_smp_init(head->target); return retval; } static int jim_target_create(Jim_Interp *interp, int argc, Jim_Obj *const *argv) { Jim_GetOptInfo goi; Jim_GetOpt_Setup(&goi, interp, argc - 1, argv + 1); if (goi.argc < 3) { Jim_WrongNumArgs(goi.interp, goi.argc, goi.argv, "<name> <target_type> [<target_options> ...]"); return JIM_ERR; } return target_create(&goi); } static const struct command_registration target_subcommand_handlers[] = { { .name = "init", .mode = COMMAND_CONFIG, .handler = handle_target_init_command, .help = "initialize targets", }, { .name = "create", /* REVISIT this should be COMMAND_CONFIG ... 
*/ .mode = COMMAND_ANY, .jim_handler = jim_target_create, .usage = "name type '-chain-position' name [options ...]", .help = "Creates and selects a new target", }, { .name = "current", .mode = COMMAND_ANY, .jim_handler = jim_target_current, .help = "Returns the currently selected target", }, { .name = "types", .mode = COMMAND_ANY, .jim_handler = jim_target_types, .help = "Returns the available target types as " "a list of strings", }, { .name = "names", .mode = COMMAND_ANY, .jim_handler = jim_target_names, .help = "Returns the names of all targets as a list of strings", }, { .name = "smp", .mode = COMMAND_ANY, .jim_handler = jim_target_smp, .usage = "targetname1 targetname2 ...", .help = "gather several targets into an SMP list" }, COMMAND_REGISTRATION_DONE }; struct FastLoad { uint32_t address; uint8_t *data; int length; }; static int fastload_num; static struct FastLoad *fastload; static void free_fastload(void) { if (fastload != NULL) { int i; for (i = 0; i < fastload_num; i++) { if (fastload[i].data) free(fastload[i].data); } free(fastload); fastload = NULL; } } COMMAND_HANDLER(handle_fast_load_image_command) { uint8_t *buffer; size_t buf_cnt; uint32_t image_size; uint32_t min_address = 0; uint32_t max_address = 0xffffffff; int i; struct image image; int retval = CALL_COMMAND_HANDLER(parse_load_image_command_CMD_ARGV, &image, &min_address, &max_address); if (ERROR_OK != retval) return retval; struct duration bench; duration_start(&bench); retval = image_open(&image, CMD_ARGV[0], (CMD_ARGC >= 3) ? CMD_ARGV[2] : NULL); if (retval != ERROR_OK) return retval; image_size = 0x0; retval = ERROR_OK; fastload_num = image.num_sections; fastload = malloc(sizeof(struct FastLoad)*image.num_sections); if (fastload == NULL) { command_print(CMD_CTX, "out of memory"); image_close(&image); return ERROR_FAIL; } memset(fastload, 0, sizeof(struct FastLoad)*image.num_sections); for (i = 0; i < image.num_sections; i++) { buffer = malloc(image.sections[i].size); if (buffer == NULL) { command_print(CMD_CTX, "error allocating buffer for section (%d bytes)", (int)(image.sections[i].size)); retval = ERROR_FAIL; break; } retval = image_read_section(&image, i, 0x0, image.sections[i].size, buffer, &buf_cnt); if (retval != ERROR_OK) { free(buffer); break; } uint32_t offset = 0; uint32_t length = buf_cnt; /* DANGER!!! beware of unsigned comparison here!!!
*/ if ((image.sections[i].base_address + buf_cnt >= min_address) && (image.sections[i].base_address < max_address)) { if (image.sections[i].base_address < min_address) { /* clip addresses below */ offset += min_address-image.sections[i].base_address; length -= offset; } if (image.sections[i].base_address + buf_cnt > max_address) length -= (image.sections[i].base_address + buf_cnt)-max_address; fastload[i].address = image.sections[i].base_address + offset; fastload[i].data = malloc(length); if (fastload[i].data == NULL) { free(buffer); command_print(CMD_CTX, "error allocating buffer for section (%" PRIu32 " bytes)", length); retval = ERROR_FAIL; break; } memcpy(fastload[i].data, buffer + offset, length); fastload[i].length = length; image_size += length; command_print(CMD_CTX, "%u bytes written at address 0x%8.8x", (unsigned int)length, ((unsigned int)(image.sections[i].base_address + offset))); } free(buffer); } if ((ERROR_OK == retval) && (duration_measure(&bench) == ERROR_OK)) { command_print(CMD_CTX, "Loaded %" PRIu32 " bytes " "in %fs (%0.3f KiB/s)", image_size, duration_elapsed(&bench), duration_kbps(&bench, image_size)); command_print(CMD_CTX, "WARNING: image has not been loaded to target!" "You can issue a 'fast_load' to finish loading."); } image_close(&image); if (retval != ERROR_OK) free_fastload(); return retval; } COMMAND_HANDLER(handle_fast_load_command) { if (CMD_ARGC > 0) return ERROR_COMMAND_SYNTAX_ERROR; if (fastload == NULL) { LOG_ERROR("No image in memory"); return ERROR_FAIL; } int i; int64_t ms = timeval_ms(); int size = 0; int retval = ERROR_OK; for (i = 0; i < fastload_num; i++) { struct target *target = get_current_target(CMD_CTX); command_print(CMD_CTX, "Write to 0x%08x, length 0x%08x", (unsigned int)(fastload[i].address), (unsigned int)(fastload[i].length)); retval = target_write_buffer(target, fastload[i].address, fastload[i].length, fastload[i].data); if (retval != ERROR_OK) break; size += fastload[i].length; } if (retval == ERROR_OK) { int64_t after = timeval_ms(); command_print(CMD_CTX, "Loaded image %f kBytes/s", (float)(size/1024.0)/((float)(after-ms)/1000.0)); } return retval; } static const struct command_registration target_command_handlers[] = { { .name = "targets", .handler = handle_targets_command, .mode = COMMAND_ANY, .help = "change current default target (one parameter) " "or prints table of all targets (no parameters)", .usage = "[target]", }, { .name = "target", .mode = COMMAND_CONFIG, .help = "configure target", .chain = target_subcommand_handlers, }, COMMAND_REGISTRATION_DONE }; int target_register_commands(struct command_context *cmd_ctx) { return register_commands(cmd_ctx, NULL, target_command_handlers); } static bool target_reset_nag = true; bool get_target_reset_nag(void) { return target_reset_nag; } COMMAND_HANDLER(handle_target_reset_nag) { return CALL_COMMAND_HANDLER(handle_command_parse_bool, &target_reset_nag, "Nag after each reset about options to improve " "performance"); } COMMAND_HANDLER(handle_ps_command) { struct target *target = get_current_target(CMD_CTX); char *display; if (target->state != TARGET_HALTED) { LOG_INFO("target not halted !!"); return ERROR_OK; } if ((target->rtos) && (target->rtos->type) && (target->rtos->type->ps_command)) { display = target->rtos->type->ps_command(target); command_print(CMD_CTX, "%s", display); free(display); return ERROR_OK; } else { LOG_INFO("failed"); return ERROR_TARGET_FAILURE; } } static void binprint(struct command_context *cmd_ctx, const char *text, const uint8_t *buf, int size) { if 
(text != NULL) command_print_sameline(cmd_ctx, "%s", text); for (int i = 0; i < size; i++) command_print_sameline(cmd_ctx, " %02x", buf[i]); command_print(cmd_ctx, " "); } COMMAND_HANDLER(handle_test_mem_access_command) { struct target *target = get_current_target(CMD_CTX); uint32_t test_size; int retval = ERROR_OK; if (target->state != TARGET_HALTED) { LOG_INFO("target not halted !!"); return ERROR_FAIL; } if (CMD_ARGC != 1) return ERROR_COMMAND_SYNTAX_ERROR; COMMAND_PARSE_NUMBER(u32, CMD_ARGV[0], test_size); /* Test reads */ size_t num_bytes = test_size + 4; struct working_area *wa = NULL; retval = target_alloc_working_area(target, num_bytes, &wa); if (retval != ERROR_OK) { LOG_ERROR("Not enough working area"); return ERROR_FAIL; } uint8_t *test_pattern = malloc(num_bytes); for (size_t i = 0; i < num_bytes; i++) test_pattern[i] = rand(); retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern); if (retval != ERROR_OK) { LOG_ERROR("Test pattern write failed"); goto out; } for (int host_offset = 0; host_offset <= 1; host_offset++) { for (int size = 1; size <= 4; size *= 2) { for (int offset = 0; offset < 4; offset++) { uint32_t count = test_size / size; size_t host_bufsiz = (count + 2) * size + host_offset; uint8_t *read_ref = malloc(host_bufsiz); uint8_t *read_buf = malloc(host_bufsiz); for (size_t i = 0; i < host_bufsiz; i++) { read_ref[i] = rand(); read_buf[i] = read_ref[i]; } command_print_sameline(CMD_CTX, "Test read %" PRIu32 " x %d @ %d to %saligned buffer: ", count, size, offset, host_offset ? "un" : ""); struct duration bench; duration_start(&bench); retval = target_read_memory(target, wa->address + offset, size, count, read_buf + size + host_offset); duration_measure(&bench); if (retval == ERROR_TARGET_UNALIGNED_ACCESS) { command_print(CMD_CTX, "Unsupported alignment"); goto next; } else if (retval != ERROR_OK) { command_print(CMD_CTX, "Memory read failed"); goto next; } /* replay on host */ memcpy(read_ref + size + host_offset, test_pattern + offset, count * size); /* check result */ int result = memcmp(read_ref, read_buf, host_bufsiz); if (result == 0) { command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)", duration_elapsed(&bench), duration_kbps(&bench, count * size)); } else { command_print(CMD_CTX, "Compare failed"); binprint(CMD_CTX, "ref:", read_ref, host_bufsiz); binprint(CMD_CTX, "buf:", read_buf, host_bufsiz); } next: free(read_ref); free(read_buf); } } } out: free(test_pattern); if (wa != NULL) target_free_working_area(target, wa); /* Test writes */ num_bytes = test_size + 4 + 4 + 4; retval = target_alloc_working_area(target, num_bytes, &wa); if (retval != ERROR_OK) { LOG_ERROR("Not enough working area"); return ERROR_FAIL; } test_pattern = malloc(num_bytes); for (size_t i = 0; i < num_bytes; i++) test_pattern[i] = rand(); for (int host_offset = 0; host_offset <= 1; host_offset++) { for (int size = 1; size <= 4; size *= 2) { for (int offset = 0; offset < 4; offset++) { uint32_t count = test_size / size; size_t host_bufsiz = count * size + host_offset; uint8_t *read_ref = malloc(num_bytes); uint8_t *read_buf = malloc(num_bytes); uint8_t *write_buf = malloc(host_bufsiz); for (size_t i = 0; i < host_bufsiz; i++) write_buf[i] = rand(); command_print_sameline(CMD_CTX, "Test write %" PRIu32 " x %d @ %d from %saligned buffer: ", count, size, offset, host_offset ? 
"un" : ""); retval = target_write_memory(target, wa->address, 1, num_bytes, test_pattern); if (retval != ERROR_OK) { command_print(CMD_CTX, "Test pattern write failed"); goto nextw; } /* replay on host */ memcpy(read_ref, test_pattern, num_bytes); memcpy(read_ref + size + offset, write_buf + host_offset, count * size); struct duration bench; duration_start(&bench); retval = target_write_memory(target, wa->address + size + offset, size, count, write_buf + host_offset); duration_measure(&bench); if (retval == ERROR_TARGET_UNALIGNED_ACCESS) { command_print(CMD_CTX, "Unsupported alignment"); goto nextw; } else if (retval != ERROR_OK) { command_print(CMD_CTX, "Memory write failed"); goto nextw; } /* read back */ retval = target_read_memory(target, wa->address, 1, num_bytes, read_buf); if (retval != ERROR_OK) { command_print(CMD_CTX, "Test pattern write failed"); goto nextw; } /* check result */ int result = memcmp(read_ref, read_buf, num_bytes); if (result == 0) { command_print(CMD_CTX, "Pass in %fs (%0.3f KiB/s)", duration_elapsed(&bench), duration_kbps(&bench, count * size)); } else { command_print(CMD_CTX, "Compare failed"); binprint(CMD_CTX, "ref:", read_ref, num_bytes); binprint(CMD_CTX, "buf:", read_buf, num_bytes); } nextw: free(read_ref); free(read_buf); } } } free(test_pattern); if (wa != NULL) target_free_working_area(target, wa); return retval; } static const struct command_registration target_exec_command_handlers[] = { { .name = "fast_load_image", .handler = handle_fast_load_image_command, .mode = COMMAND_ANY, .help = "Load image into server memory for later use by " "fast_load; primarily for profiling", .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] " "[min_address [max_length]]", }, { .name = "fast_load", .handler = handle_fast_load_command, .mode = COMMAND_EXEC, .help = "loads active fast load image to current target " "- mainly for profiling purposes", .usage = "", }, { .name = "profile", .handler = handle_profile_command, .mode = COMMAND_EXEC, .usage = "seconds filename [start end]", .help = "profiling samples the CPU PC", }, /** @todo don't register virt2phys() unless target supports it */ { .name = "virt2phys", .handler = handle_virt2phys_command, .mode = COMMAND_ANY, .help = "translate a virtual address into a physical address", .usage = "virtual_address", }, { .name = "reg", .handler = handle_reg_command, .mode = COMMAND_EXEC, .help = "display (reread from target with \"force\") or set a register; " "with no arguments, displays all registers and their values", .usage = "[(register_number|register_name) [(value|'force')]]", }, { .name = "poll", .handler = handle_poll_command, .mode = COMMAND_EXEC, .help = "poll target state; or reconfigure background polling", .usage = "['on'|'off']", }, { .name = "wait_halt", .handler = handle_wait_halt_command, .mode = COMMAND_EXEC, .help = "wait up to the specified number of milliseconds " "(default 5000) for a previously requested halt", .usage = "[milliseconds]", }, { .name = "halt", .handler = handle_halt_command, .mode = COMMAND_EXEC, .help = "request target to halt, then wait up to the specified" "number of milliseconds (default 5000) for it to complete", .usage = "[milliseconds]", }, { .name = "resume", .handler = handle_resume_command, .mode = COMMAND_EXEC, .help = "resume target execution from current PC or address", .usage = "[address]", }, { .name = "reset", .handler = handle_reset_command, .mode = COMMAND_EXEC, .usage = "[run|halt|init]", .help = "Reset all targets into the specified mode." 
"Default reset mode is run, if not given.", }, { .name = "soft_reset_halt", .handler = handle_soft_reset_halt_command, .mode = COMMAND_EXEC, .usage = "", .help = "halt the target and do a soft reset", }, { .name = "step", .handler = handle_step_command, .mode = COMMAND_EXEC, .help = "step one instruction from current PC or address", .usage = "[address]", }, { .name = "mdw", .handler = handle_md_command, .mode = COMMAND_EXEC, .help = "display memory words", .usage = "['phys'] address [count]", }, { .name = "mdh", .handler = handle_md_command, .mode = COMMAND_EXEC, .help = "display memory half-words", .usage = "['phys'] address [count]", }, { .name = "mdb", .handler = handle_md_command, .mode = COMMAND_EXEC, .help = "display memory bytes", .usage = "['phys'] address [count]", }, { .name = "mww", .handler = handle_mw_command, .mode = COMMAND_EXEC, .help = "write memory word", .usage = "['phys'] address value [count]", }, { .name = "mwh", .handler = handle_mw_command, .mode = COMMAND_EXEC, .help = "write memory half-word", .usage = "['phys'] address value [count]", }, { .name = "mwb", .handler = handle_mw_command, .mode = COMMAND_EXEC, .help = "write memory byte", .usage = "['phys'] address value [count]", }, { .name = "bp", .handler = handle_bp_command, .mode = COMMAND_EXEC, .help = "list or set hardware or software breakpoint", .usage = "<address> [<asid>]<length> ['hw'|'hw_ctx']", }, { .name = "rbp", .handler = handle_rbp_command, .mode = COMMAND_EXEC, .help = "remove breakpoint", .usage = "address", }, { .name = "wp", .handler = handle_wp_command, .mode = COMMAND_EXEC, .help = "list (no params) or create watchpoints", .usage = "[address length [('r'|'w'|'a') value [mask]]]", }, { .name = "rwp", .handler = handle_rwp_command, .mode = COMMAND_EXEC, .help = "remove watchpoint", .usage = "address", }, { .name = "load_image", .handler = handle_load_image_command, .mode = COMMAND_EXEC, .usage = "filename address ['bin'|'ihex'|'elf'|'s19'] " "[min_address] [max_length]", }, { .name = "dump_image", .handler = handle_dump_image_command, .mode = COMMAND_EXEC, .usage = "filename address size", }, { .name = "verify_image_checksum", .handler = handle_verify_image_checksum_command, .mode = COMMAND_EXEC, .usage = "filename [offset [type]]", }, { .name = "verify_image", .handler = handle_verify_image_command, .mode = COMMAND_EXEC, .usage = "filename [offset [type]]", }, { .name = "test_image", .handler = handle_test_image_command, .mode = COMMAND_EXEC, .usage = "filename [offset [type]]", }, { .name = "mem2array", .mode = COMMAND_EXEC, .jim_handler = jim_mem2array, .help = "read 8/16/32 bit memory and return as a TCL array " "for script processing", .usage = "arrayname bitwidth address count", }, { .name = "array2mem", .mode = COMMAND_EXEC, .jim_handler = jim_array2mem, .help = "convert a TCL array to memory locations " "and write the 8/16/32 bit values", .usage = "arrayname bitwidth address count", }, { .name = "reset_nag", .handler = handle_target_reset_nag, .mode = COMMAND_ANY, .help = "Nag after each reset about options that could have been " "enabled to improve performance. 
", .usage = "['enable'|'disable']", }, { .name = "ps", .handler = handle_ps_command, .mode = COMMAND_EXEC, .help = "list all tasks ", .usage = " ", }, { .name = "test_mem_access", .handler = handle_test_mem_access_command, .mode = COMMAND_EXEC, .help = "Test the target's memory access functions", .usage = "size", }, COMMAND_REGISTRATION_DONE }; static int target_register_user_commands(struct command_context *cmd_ctx) { int retval = ERROR_OK; retval = target_request_register_commands(cmd_ctx); if (retval != ERROR_OK) return retval; retval = trace_register_commands(cmd_ctx); if (retval != ERROR_OK) return retval; return register_commands(cmd_ctx, NULL, target_exec_command_handlers); }
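/*
 * Editor's illustrative sketch (not part of the original source): every
 * command table in this file follows the same pattern of a COMMAND_HANDLER
 * plus a command_registration array handed to register_commands().  The
 * hypothetical "example_info" command below shows that pattern in isolation;
 * the handler name, command name and help text are invented for illustration
 * only.  Wrapped in #if 0 so it has no effect if this file is compiled.
 */
#if 0
COMMAND_HANDLER(handle_example_info_command)
{
	/* Look up the currently selected target and report its state. */
	struct target *target = get_current_target(CMD_CTX);
	command_print(CMD_CTX, "%s: %s",
			target_name(target), target_state_name(target));
	return ERROR_OK;
}

static const struct command_registration example_command_handlers[] = {
	{
		.name = "example_info",
		.handler = handle_example_info_command,
		.mode = COMMAND_EXEC,
		.help = "print the name and state of the current target",
		.usage = "",
	},
	COMMAND_REGISTRATION_DONE
};

/* Registered the same way as target_exec_command_handlers above. */
static int example_register_commands(struct command_context *cmd_ctx)
{
	return register_commands(cmd_ctx, NULL, example_command_handlers);
}
#endif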
449394.c
/* ** 2004 April 6 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file implements an external (disk-based) database using BTrees. ** See the header comment on "btreeInt.h" for additional information. ** Including a description of file format and an overview of operation. */ #include "btreeInt.h" /* ** The header string that appears at the beginning of every ** SQLite database. */ static const char zMagicHeader[] = SQLITE_FILE_HEADER; /* ** Set this global variable to 1 to enable tracing using the TRACE ** macro. */ #if 0 int sqlite3BtreeTrace=1; /* True to enable tracing */ # define TRACE(X) if(sqlite3BtreeTrace){printf X;fflush(stdout);} #else # define TRACE(X) #endif /* ** Extract a 2-byte big-endian integer from an array of unsigned bytes. ** But if the value is zero, make it 65536. ** ** This routine is used to extract the "offset to cell content area" value ** from the header of a btree page. If the page size is 65536 and the page ** is empty, the offset should be 65536, but the 2-byte value stores zero. ** This routine makes the necessary adjustment to 65536. */ #define get2byteNotZero(X) (((((int)get2byte(X))-1)&0xffff)+1) /* ** Values passed as the 5th argument to allocateBtreePage() */ #define BTALLOC_ANY 0 /* Allocate any page */ #define BTALLOC_EXACT 1 /* Allocate exact page if possible */ #define BTALLOC_LE 2 /* Allocate any page <= the parameter */ /* ** Macro IfNotOmitAV(x) returns (x) if SQLITE_OMIT_AUTOVACUUM is not ** defined, or 0 if it is. For example: ** ** bIncrVacuum = IfNotOmitAV(pBtShared->incrVacuum); */ #ifndef SQLITE_OMIT_AUTOVACUUM #define IfNotOmitAV(expr) (expr) #else #define IfNotOmitAV(expr) 0 #endif #ifndef SQLITE_OMIT_SHARED_CACHE /* ** A list of BtShared objects that are eligible for participation ** in shared cache. This variable has file scope during normal builds, ** but the test harness needs to access it so we make it global for ** test builds. ** ** Access to this variable is protected by SQLITE_MUTEX_STATIC_MASTER. */ #ifdef SQLITE_TEST BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; #else static BtShared *SQLITE_WSD sqlite3SharedCacheList = 0; #endif #endif /* SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Enable or disable the shared pager and schema features. ** ** This routine has no effect on existing database connections. ** The shared cache setting effects only future calls to ** sqlite3_open(), sqlite3_open16(), or sqlite3_open_v2(). */ int sqlite3_enable_shared_cache(int enable){ sqlite3GlobalConfig.sharedCacheEnabled = enable; return SQLITE_OK; } #endif #ifdef SQLITE_OMIT_SHARED_CACHE /* ** The functions querySharedCacheTableLock(), setSharedCacheTableLock(), ** and clearAllSharedCacheTableLocks() ** manipulate entries in the BtShared.pLock linked list used to store ** shared-cache table level locks. If the library is compiled with the ** shared-cache feature disabled, then there is only ever one user ** of each BtShared structure and so this locking is not necessary. ** So define the lock related functions as no-ops. 
*/ #define querySharedCacheTableLock(a,b,c) SQLITE_OK #define setSharedCacheTableLock(a,b,c) SQLITE_OK #define clearAllSharedCacheTableLocks(a) #define downgradeAllSharedCacheTableLocks(a) #define hasSharedCacheTableLock(a,b,c,d) 1 #define hasReadConflicts(a, b) 0 #endif /* ** Implementation of the SQLITE_CORRUPT_PAGE() macro. Takes a single ** (MemPage*) as an argument. The (MemPage*) must not be NULL. ** ** If SQLITE_DEBUG is not defined, then this macro is equivalent to ** SQLITE_CORRUPT_BKPT. Or, if SQLITE_DEBUG is set, then the log message ** normally produced as a side-effect of SQLITE_CORRUPT_BKPT is augmented ** with the page number and filename associated with the (MemPage*). */ #ifdef SQLITE_DEBUG int corruptPageError(int lineno, MemPage *p){ char *zMsg; sqlite3BeginBenignMalloc(); zMsg = sqlite3_mprintf("database corruption page %d of %s", (int)p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) ); sqlite3EndBenignMalloc(); if( zMsg ){ sqlite3ReportError(SQLITE_CORRUPT, lineno, zMsg); } sqlite3_free(zMsg); return SQLITE_CORRUPT_BKPT; } # define SQLITE_CORRUPT_PAGE(pMemPage) corruptPageError(__LINE__, pMemPage) #else # define SQLITE_CORRUPT_PAGE(pMemPage) SQLITE_CORRUPT_PGNO(pMemPage->pgno) #endif #ifndef SQLITE_OMIT_SHARED_CACHE #ifdef SQLITE_DEBUG /* **** This function is only used as part of an assert() statement. *** ** ** Check to see if pBtree holds the required locks to read or write to the ** table with root page iRoot. Return 1 if it does and 0 if not. ** ** For example, when writing to a table with root-page iRoot via ** Btree connection pBtree: ** ** assert( hasSharedCacheTableLock(pBtree, iRoot, 0, WRITE_LOCK) ); ** ** When writing to an index that resides in a sharable database, the ** caller should have first obtained a lock specifying the root page of ** the corresponding table. This makes things a bit more complicated, ** as this module treats each table as a separate structure. To determine ** the table corresponding to the index being written, this ** function has to search through the database schema. ** ** Instead of a lock on the table/index rooted at page iRoot, the caller may ** hold a write-lock on the schema table (root page 1). This is also ** acceptable. */ static int hasSharedCacheTableLock( Btree *pBtree, /* Handle that must hold lock */ Pgno iRoot, /* Root page of b-tree */ int isIndex, /* True if iRoot is the root of an index b-tree */ int eLockType /* Required lock type (READ_LOCK or WRITE_LOCK) */ ){ Schema *pSchema = (Schema *)pBtree->pBt->pSchema; Pgno iTab = 0; BtLock *pLock; /* If this database is not shareable, or if the client is reading ** and has the read-uncommitted flag set, then no lock is required. ** Return true immediately. */ if( (pBtree->sharable==0) || (eLockType==READ_LOCK && (pBtree->db->flags & SQLITE_ReadUncommit)) ){ return 1; } /* If the client is reading or writing an index and the schema is ** not loaded, then it is too difficult to actually check to see if ** the correct locks are held. So do not bother - just return true. ** This case does not come up very often anyhow. */ if( isIndex && (!pSchema || (pSchema->schemaFlags&DB_SchemaLoaded)==0) ){ return 1; } /* Figure out the root-page that the lock should be held on. For table ** b-trees, this is just the root page of the b-tree being read or ** written. For index b-trees, it is the root page of the associated ** table. 
*/ if( isIndex ){ HashElem *p; for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){ Index *pIdx = (Index *)sqliteHashData(p); if( pIdx->tnum==(int)iRoot ){ if( iTab ){ /* Two or more indexes share the same root page. There must ** be imposter tables. So just return true. The assert is not ** useful in that case. */ return 1; } iTab = pIdx->pTable->tnum; } } }else{ iTab = iRoot; } /* Search for the required lock. Either a write-lock on root-page iTab, a ** write-lock on the schema table, or (if the client is reading) a ** read-lock on iTab will suffice. Return 1 if any of these are found. */ for(pLock=pBtree->pBt->pLock; pLock; pLock=pLock->pNext){ if( pLock->pBtree==pBtree && (pLock->iTable==iTab || (pLock->eLock==WRITE_LOCK && pLock->iTable==1)) && pLock->eLock>=eLockType ){ return 1; } } /* Failed to find the required lock. */ return 0; } #endif /* SQLITE_DEBUG */ #ifdef SQLITE_DEBUG /* **** This function may be used as part of assert() statements only. **** ** ** Return true if it would be illegal for pBtree to write into the ** table or index rooted at iRoot because other shared connections are ** simultaneously reading that same table or index. ** ** It is illegal for pBtree to write if some other Btree object that ** shares the same BtShared object is currently reading or writing ** the iRoot table. Except, if the other Btree object has the ** read-uncommitted flag set, then it is OK for the other object to ** have a read cursor. ** ** For example, before writing to any part of the table or index ** rooted at page iRoot, one should call: ** ** assert( !hasReadConflicts(pBtree, iRoot) ); */ static int hasReadConflicts(Btree *pBtree, Pgno iRoot){ BtCursor *p; for(p=pBtree->pBt->pCursor; p; p=p->pNext){ if( p->pgnoRoot==iRoot && p->pBtree!=pBtree && 0==(p->pBtree->db->flags & SQLITE_ReadUncommit) ){ return 1; } } return 0; } #endif /* #ifdef SQLITE_DEBUG */ /* ** Query to see if Btree handle p may obtain a lock of type eLock ** (READ_LOCK or WRITE_LOCK) on the table with root-page iTab. Return ** SQLITE_OK if the lock may be obtained (by calling ** setSharedCacheTableLock()), or SQLITE_LOCKED if not. */ static int querySharedCacheTableLock(Btree *p, Pgno iTab, u8 eLock){ BtShared *pBt = p->pBt; BtLock *pIter; assert( sqlite3BtreeHoldsMutex(p) ); assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); assert( p->db!=0 ); assert( !(p->db->flags&SQLITE_ReadUncommit)||eLock==WRITE_LOCK||iTab==1 ); /* If requesting a write-lock, then the Btree must have an open write ** transaction on this file. And, obviously, for this to be so there ** must be an open write transaction on the file itself. */ assert( eLock==READ_LOCK || (p==pBt->pWriter && p->inTrans==TRANS_WRITE) ); assert( eLock==READ_LOCK || pBt->inTransaction==TRANS_WRITE ); /* This routine is a no-op if the shared-cache is not enabled */ if( !p->sharable ){ return SQLITE_OK; } /* If some other connection is holding an exclusive lock, the ** requested lock may not be obtained. */ if( pBt->pWriter!=p && (pBt->btsFlags & BTS_EXCLUSIVE)!=0 ){ sqlite3ConnectionBlocked(p->db, pBt->pWriter->db); return SQLITE_LOCKED_SHAREDCACHE; } for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ /* The condition (pIter->eLock!=eLock) in the following if(...) ** statement is a simplification of: ** ** (eLock==WRITE_LOCK || pIter->eLock==WRITE_LOCK) ** ** since we know that if eLock==WRITE_LOCK, then no other connection ** may hold a WRITE_LOCK on any table in this file (since there can ** only be a single writer). 
*/ assert( pIter->eLock==READ_LOCK || pIter->eLock==WRITE_LOCK ); assert( eLock==READ_LOCK || pIter->pBtree==p || pIter->eLock==READ_LOCK); if( pIter->pBtree!=p && pIter->iTable==iTab && pIter->eLock!=eLock ){ sqlite3ConnectionBlocked(p->db, pIter->pBtree->db); if( eLock==WRITE_LOCK ){ assert( p==pBt->pWriter ); pBt->btsFlags |= BTS_PENDING; } return SQLITE_LOCKED_SHAREDCACHE; } } return SQLITE_OK; } #endif /* !SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Add a lock on the table with root-page iTable to the shared-btree used ** by Btree handle p. Parameter eLock must be either READ_LOCK or ** WRITE_LOCK. ** ** This function assumes the following: ** ** (a) The specified Btree object p is connected to a sharable ** database (one with the BtShared.sharable flag set), and ** ** (b) No other Btree objects hold a lock that conflicts ** with the requested lock (i.e. querySharedCacheTableLock() has ** already been called and returned SQLITE_OK). ** ** SQLITE_OK is returned if the lock is added successfully. SQLITE_NOMEM ** is returned if a malloc attempt fails. */ static int setSharedCacheTableLock(Btree *p, Pgno iTable, u8 eLock){ BtShared *pBt = p->pBt; BtLock *pLock = 0; BtLock *pIter; assert( sqlite3BtreeHoldsMutex(p) ); assert( eLock==READ_LOCK || eLock==WRITE_LOCK ); assert( p->db!=0 ); /* A connection with the read-uncommitted flag set will never try to ** obtain a read-lock using this function. The only read-lock obtained ** by a connection in read-uncommitted mode is on the sqlite_master ** table, and that lock is obtained in BtreeBeginTrans(). */ assert( 0==(p->db->flags&SQLITE_ReadUncommit) || eLock==WRITE_LOCK ); /* This function should only be called on a sharable b-tree after it ** has been determined that no other b-tree holds a conflicting lock. */ assert( p->sharable ); assert( SQLITE_OK==querySharedCacheTableLock(p, iTable, eLock) ); /* First search the list for an existing lock on this table. */ for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ if( pIter->iTable==iTable && pIter->pBtree==p ){ pLock = pIter; break; } } /* If the above search did not find a BtLock struct associating Btree p ** with table iTable, allocate one and link it into the list. */ if( !pLock ){ pLock = (BtLock *)sqlite3MallocZero(sizeof(BtLock)); if( !pLock ){ return SQLITE_NOMEM_BKPT; } pLock->iTable = iTable; pLock->pBtree = p; pLock->pNext = pBt->pLock; pBt->pLock = pLock; } /* Set the BtLock.eLock variable to the maximum of the current lock ** and the requested lock. This means if a write-lock was already held ** and a read-lock requested, we don't incorrectly downgrade the lock. */ assert( WRITE_LOCK>READ_LOCK ); if( eLock>pLock->eLock ){ pLock->eLock = eLock; } return SQLITE_OK; } #endif /* !SQLITE_OMIT_SHARED_CACHE */ #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Release all the table locks (locks obtained via calls to ** the setSharedCacheTableLock() procedure) held by Btree object p. ** ** This function assumes that Btree p has an open read or write ** transaction. If it does not, then the BTS_PENDING flag ** may be incorrectly cleared. 
*/ static void clearAllSharedCacheTableLocks(Btree *p){ BtShared *pBt = p->pBt; BtLock **ppIter = &pBt->pLock; assert( sqlite3BtreeHoldsMutex(p) ); assert( p->sharable || 0==*ppIter ); assert( p->inTrans>0 ); while( *ppIter ){ BtLock *pLock = *ppIter; assert( (pBt->btsFlags & BTS_EXCLUSIVE)==0 || pBt->pWriter==pLock->pBtree ); assert( pLock->pBtree->inTrans>=pLock->eLock ); if( pLock->pBtree==p ){ *ppIter = pLock->pNext; assert( pLock->iTable!=1 || pLock==&p->lock ); if( pLock->iTable!=1 ){ sqlite3_free(pLock); } }else{ ppIter = &pLock->pNext; } } assert( (pBt->btsFlags & BTS_PENDING)==0 || pBt->pWriter ); if( pBt->pWriter==p ){ pBt->pWriter = 0; pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING); }else if( pBt->nTransaction==2 ){ /* This function is called when Btree p is concluding its ** transaction. If there currently exists a writer, and p is not ** that writer, then the number of locks held by connections other ** than the writer must be about to drop to zero. In this case ** set the BTS_PENDING flag to 0. ** ** If there is not currently a writer, then BTS_PENDING must ** be zero already. So this next line is harmless in that case. */ pBt->btsFlags &= ~BTS_PENDING; } } /* ** This function changes all write-locks held by Btree p into read-locks. */ static void downgradeAllSharedCacheTableLocks(Btree *p){ BtShared *pBt = p->pBt; if( pBt->pWriter==p ){ BtLock *pLock; pBt->pWriter = 0; pBt->btsFlags &= ~(BTS_EXCLUSIVE|BTS_PENDING); for(pLock=pBt->pLock; pLock; pLock=pLock->pNext){ assert( pLock->eLock==READ_LOCK || pLock->pBtree==p ); pLock->eLock = READ_LOCK; } } } #endif /* SQLITE_OMIT_SHARED_CACHE */ static void releasePage(MemPage *pPage); /* Forward reference */ static void releasePageOne(MemPage *pPage); /* Forward reference */ static void releasePageNotNull(MemPage *pPage); /* Forward reference */ /* ***** This routine is used inside of assert() only **** ** ** Verify that the cursor holds the mutex on its BtShared */ #ifdef SQLITE_DEBUG static int cursorHoldsMutex(BtCursor *p){ return sqlite3_mutex_held(p->pBt->mutex); } /* Verify that the cursor and the BtShared agree about what is the current ** database connection. This is important in shared-cache mode. If the database ** connection pointers get out-of-sync, it is possible for routines like ** btreeInitPage() to reference a stale connection pointer that references ** a connection that has already closed. This routine is used inside assert() ** statements only and for the purpose of double-checking that the btree code ** does keep the database connection pointers up-to-date. */ static int cursorOwnsBtShared(BtCursor *p){ assert( cursorHoldsMutex(p) ); return (p->pBtree->db==p->pBt->db); } #endif /* ** Invalidate the overflow cache of the cursor passed as the first argument. */ #define invalidateOverflowCache(pCur) (pCur->curFlags &= ~BTCF_ValidOvfl) /* ** Invalidate the overflow page-list cache for all cursors opened ** on the shared btree structure pBt. */ static void invalidateAllOverflowCache(BtShared *pBt){ BtCursor *p; assert( sqlite3_mutex_held(pBt->mutex) ); for(p=pBt->pCursor; p; p=p->pNext){ invalidateOverflowCache(p); } } #ifndef SQLITE_OMIT_INCRBLOB /* ** This function is called before modifying the contents of a table ** to invalidate any incrblob cursors that are open on the ** row or one of the rows being modified. ** ** If argument isClearTable is true, then the entire contents of the ** table is about to be deleted.
In this case invalidate all incrblob ** cursors open on any row within the table with root-page pgnoRoot. ** ** Otherwise, if argument isClearTable is false, then the row with ** rowid iRow is being replaced or deleted. In this case invalidate ** only those incrblob cursors open on that specific row. */ static void invalidateIncrblobCursors( Btree *pBtree, /* The database file to check */ Pgno pgnoRoot, /* The table that might be changing */ i64 iRow, /* The rowid that might be changing */ int isClearTable /* True if all rows are being deleted */ ){ BtCursor *p; if( pBtree->hasIncrblobCur==0 ) return; assert( sqlite3BtreeHoldsMutex(pBtree) ); pBtree->hasIncrblobCur = 0; for(p=pBtree->pBt->pCursor; p; p=p->pNext){ if( (p->curFlags & BTCF_Incrblob)!=0 ){ pBtree->hasIncrblobCur = 1; if( p->pgnoRoot==pgnoRoot && (isClearTable || p->info.nKey==iRow) ){ p->eState = CURSOR_INVALID; } } } } #else /* Stub function when INCRBLOB is omitted */ #define invalidateIncrblobCursors(w,x,y,z) #endif /* SQLITE_OMIT_INCRBLOB */ /* ** Set bit pgno of the BtShared.pHasContent bitvec. This is called ** when a page that previously contained data becomes a free-list leaf ** page. ** ** The BtShared.pHasContent bitvec exists to work around an obscure ** bug caused by the interaction of two useful IO optimizations surrounding ** free-list leaf pages: ** ** 1) When all data is deleted from a page and the page becomes ** a free-list leaf page, the page is not written to the database ** (as free-list leaf pages contain no meaningful data). Sometimes ** such a page is not even journalled (as it will not be modified, ** why bother journalling it?). ** ** 2) When a free-list leaf page is reused, its content is not read ** from the database or written to the journal file (why should it ** be, if it is not at all meaningful?). ** ** By themselves, these optimizations work fine and provide a handy ** performance boost to bulk delete or insert operations. However, if ** a page is moved to the free-list and then reused within the same ** transaction, a problem comes up. If the page is not journalled when ** it is moved to the free-list and it is also not journalled when it ** is extracted from the free-list and reused, then the original data ** may be lost. In the event of a rollback, it may not be possible ** to restore the database to its original configuration. ** ** The solution is the BtShared.pHasContent bitvec. Whenever a page is ** moved to become a free-list leaf page, the corresponding bit is ** set in the bitvec. Whenever a leaf page is extracted from the free-list, ** optimization 2 above is omitted if the corresponding bit is already ** set in BtShared.pHasContent. The contents of the bitvec are cleared ** at the end of every transaction. */ static int btreeSetHasContent(BtShared *pBt, Pgno pgno){ int rc = SQLITE_OK; if( !pBt->pHasContent ){ assert( pgno<=pBt->nPage ); pBt->pHasContent = sqlite3BitvecCreate(pBt->nPage); if( !pBt->pHasContent ){ rc = SQLITE_NOMEM_BKPT; } } if( rc==SQLITE_OK && pgno<=sqlite3BitvecSize(pBt->pHasContent) ){ rc = sqlite3BitvecSet(pBt->pHasContent, pgno); } return rc; } /* ** Query the BtShared.pHasContent vector. ** ** This function is called when a free-list leaf page is removed from the ** free-list for reuse. It returns false if it is safe to retrieve the ** page from the pager layer with the 'no-content' flag set. True otherwise. 
*/ static int btreeGetHasContent(BtShared *pBt, Pgno pgno){ Bitvec *p = pBt->pHasContent; return (p && (pgno>sqlite3BitvecSize(p) || sqlite3BitvecTest(p, pgno))); } /* ** Clear (destroy) the BtShared.pHasContent bitvec. This should be ** invoked at the conclusion of each write-transaction. */ static void btreeClearHasContent(BtShared *pBt){ sqlite3BitvecDestroy(pBt->pHasContent); pBt->pHasContent = 0; } /* ** Release all of the apPage[] pages for a cursor. */ static void btreeReleaseAllCursorPages(BtCursor *pCur){ int i; if( pCur->iPage>=0 ){ for(i=0; i<pCur->iPage; i++){ releasePageNotNull(pCur->apPage[i]); } releasePageNotNull(pCur->pPage); pCur->iPage = -1; } } /* ** The cursor passed as the only argument must point to a valid entry ** when this function is called (i.e. have eState==CURSOR_VALID). This ** function saves the current cursor key in variables pCur->nKey and ** pCur->pKey. SQLITE_OK is returned if successful or an SQLite error ** code otherwise. ** ** If the cursor is open on an intkey table, then the integer key ** (the rowid) is stored in pCur->nKey and pCur->pKey is left set to ** NULL. If the cursor is open on a non-intkey table, then pCur->pKey is ** set to point to a malloced buffer pCur->nKey bytes in size containing ** the key. */ static int saveCursorKey(BtCursor *pCur){ int rc = SQLITE_OK; assert( CURSOR_VALID==pCur->eState ); assert( 0==pCur->pKey ); assert( cursorHoldsMutex(pCur) ); if( pCur->curIntKey ){ /* Only the rowid is required for a table btree */ pCur->nKey = sqlite3BtreeIntegerKey(pCur); }else{ /* For an index btree, save the complete key content. It is possible ** that the current key is corrupt. In that case, it is possible that ** the sqlite3VdbeRecordUnpack() function may overread the buffer by ** up to the size of 1 varint plus 1 8-byte value when the cursor ** position is restored. Hence the 17 bytes of padding allocated ** below. */ void *pKey; pCur->nKey = sqlite3BtreePayloadSize(pCur); pKey = sqlite3Malloc( pCur->nKey + 9 + 8 ); if( pKey ){ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey); if( rc==SQLITE_OK ){ memset(((u8*)pKey)+pCur->nKey, 0, 9+8); pCur->pKey = pKey; }else{ sqlite3_free(pKey); } }else{ rc = SQLITE_NOMEM_BKPT; } } assert( !pCur->curIntKey || !pCur->pKey ); return rc; } /* ** Save the current cursor position in the variables BtCursor.nKey ** and BtCursor.pKey. The cursor's state is set to CURSOR_REQUIRESEEK. ** ** The caller must ensure that the cursor is valid (has eState==CURSOR_VALID) ** prior to calling this routine. */ static int saveCursorPosition(BtCursor *pCur){ int rc; assert( CURSOR_VALID==pCur->eState || CURSOR_SKIPNEXT==pCur->eState ); assert( 0==pCur->pKey ); assert( cursorHoldsMutex(pCur) ); if( pCur->eState==CURSOR_SKIPNEXT ){ pCur->eState = CURSOR_VALID; }else{ pCur->skipNext = 0; } rc = saveCursorKey(pCur); if( rc==SQLITE_OK ){ btreeReleaseAllCursorPages(pCur); pCur->eState = CURSOR_REQUIRESEEK; } pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl|BTCF_AtLast); return rc; } /* Forward reference */ static int SQLITE_NOINLINE saveCursorsOnList(BtCursor*,Pgno,BtCursor*); /* ** Save the positions of all cursors (except pExcept) that are open on ** the table with root-page iRoot. "Saving the cursor position" means that ** the location in the btree is remembered in such a way that it can be ** moved back to the same spot after the btree has been modified. This ** routine is called just before cursor pExcept is used to modify the ** table, for example in BtreeDelete() or BtreeInsert(). 
** ** If there are two or more cursors on the same btree, then all such ** cursors should have their BTCF_Multiple flag set. The btreeCursor() ** routine enforces that rule. This routine only needs to be called in ** the uncommon case when pExpect has the BTCF_Multiple flag set. ** ** If pExpect!=NULL and if no other cursors are found on the same root-page, ** then the BTCF_Multiple flag on pExpect is cleared, to avoid another ** pointless call to this routine. ** ** Implementation note: This routine merely checks to see if any cursors ** need to be saved. It calls out to saveCursorsOnList() in the (unusual) ** event that cursors are in need to being saved. */ static int saveAllCursors(BtShared *pBt, Pgno iRoot, BtCursor *pExcept){ BtCursor *p; assert( sqlite3_mutex_held(pBt->mutex) ); assert( pExcept==0 || pExcept->pBt==pBt ); for(p=pBt->pCursor; p; p=p->pNext){ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ) break; } if( p ) return saveCursorsOnList(p, iRoot, pExcept); if( pExcept ) pExcept->curFlags &= ~BTCF_Multiple; return SQLITE_OK; } /* This helper routine to saveAllCursors does the actual work of saving ** the cursors if and when a cursor is found that actually requires saving. ** The common case is that no cursors need to be saved, so this routine is ** broken out from its caller to avoid unnecessary stack pointer movement. */ static int SQLITE_NOINLINE saveCursorsOnList( BtCursor *p, /* The first cursor that needs saving */ Pgno iRoot, /* Only save cursor with this iRoot. Save all if zero */ BtCursor *pExcept /* Do not save this cursor */ ){ do{ if( p!=pExcept && (0==iRoot || p->pgnoRoot==iRoot) ){ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){ int rc = saveCursorPosition(p); if( SQLITE_OK!=rc ){ return rc; } }else{ testcase( p->iPage>=0 ); btreeReleaseAllCursorPages(p); } } p = p->pNext; }while( p ); return SQLITE_OK; } /* ** Clear the current cursor position. */ void sqlite3BtreeClearCursor(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); sqlite3_free(pCur->pKey); pCur->pKey = 0; pCur->eState = CURSOR_INVALID; } /* ** In this version of BtreeMoveto, pKey is a packed index record ** such as is generated by the OP_MakeRecord opcode. Unpack the ** record and then call BtreeMovetoUnpacked() to do the work. */ static int btreeMoveto( BtCursor *pCur, /* Cursor open on the btree to be searched */ const void *pKey, /* Packed key if the btree is an index */ i64 nKey, /* Integer key for tables. Size of pKey for indices */ int bias, /* Bias search to the high end */ int *pRes /* Write search results here */ ){ int rc; /* Status code */ UnpackedRecord *pIdxKey; /* Unpacked index key */ if( pKey ){ KeyInfo *pKeyInfo = pCur->pKeyInfo; assert( nKey==(i64)(int)nKey ); pIdxKey = sqlite3VdbeAllocUnpackedRecord(pKeyInfo); if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT; sqlite3VdbeRecordUnpack(pKeyInfo, (int)nKey, pKey, pIdxKey); if( pIdxKey->nField==0 || pIdxKey->nField>pKeyInfo->nAllField ){ rc = SQLITE_CORRUPT_BKPT; goto moveto_done; } }else{ pIdxKey = 0; } rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes); moveto_done: if( pIdxKey ){ sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey); } return rc; } /* ** Restore the cursor to the position it was in (or as close to as possible) ** when saveCursorPosition() was called. Note that this call deletes the ** saved position info stored by saveCursorPosition(), so there can be ** at most one effective restoreCursorPosition() call after each ** saveCursorPosition(). 
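*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** the pattern a higher layer can use to pair sqlite3BtreeCursorHasMoved()
** with sqlite3BtreeCursorRestore(); the VDBE cursor-seek logic does
** something along these lines. The helper name and its minimal error
** handling are assumptions made for the example.
*/
static int exampleReseekIfMoved(BtCursor *pCur, int *pRowGone){
  *pRowGone = 0;
  if( sqlite3BtreeCursorHasMoved(pCur) ){
    /* Rebuild the position from the key saved by saveCursorPosition().
    ** *pRowGone is set if the original row vanished and the cursor now
    ** sits on a nearby row instead. */
    return sqlite3BtreeCursorRestore(pCur, pRowGone);
  }
  return SQLITE_OK;             /* Cursor never moved: nothing to do */
}

/*
** btreeRestoreCursorPosition() below does the low-level restore work.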
*/
static int btreeRestoreCursorPosition(BtCursor *pCur){
  int rc;
  int skipNext = 0;
  assert( cursorOwnsBtShared(pCur) );
  assert( pCur->eState>=CURSOR_REQUIRESEEK );
  if( pCur->eState==CURSOR_FAULT ){
    return pCur->skipNext;
  }
  pCur->eState = CURSOR_INVALID;
  if( sqlite3FaultSim(410) ){
    rc = SQLITE_IOERR;
  }else{
    rc = btreeMoveto(pCur, pCur->pKey, pCur->nKey, 0, &skipNext);
  }
  if( rc==SQLITE_OK ){
    sqlite3_free(pCur->pKey);
    pCur->pKey = 0;
    assert( pCur->eState==CURSOR_VALID || pCur->eState==CURSOR_INVALID );
    if( skipNext ) pCur->skipNext = skipNext;
    if( pCur->skipNext && pCur->eState==CURSOR_VALID ){
      pCur->eState = CURSOR_SKIPNEXT;
    }
  }
  return rc;
}

#define restoreCursorPosition(p) \
  (p->eState>=CURSOR_REQUIRESEEK ? \
         btreeRestoreCursorPosition(p) : \
         SQLITE_OK)

/*
** Determine whether or not a cursor has moved from the position where
** it was last placed, or has been invalidated for any other reason.
** Cursors can move when the row they are pointing at is deleted out
** from under them, for example. A cursor might also move if the btree
** is rebalanced.
**
** Calling this routine with a NULL cursor pointer returns false.
**
** Use the separate sqlite3BtreeCursorRestore() routine to restore a cursor
** back to where it ought to be if this routine returns true.
*/
int sqlite3BtreeCursorHasMoved(BtCursor *pCur){
  assert( EIGHT_BYTE_ALIGNMENT(pCur)
       || pCur==sqlite3BtreeFakeValidCursor() );
  assert( offsetof(BtCursor, eState)==0 );
  assert( sizeof(pCur->eState)==1 );
  return CURSOR_VALID != *(u8*)pCur;
}

/*
** Return a pointer to a fake BtCursor object that will always answer
** false to the sqlite3BtreeCursorHasMoved() routine above. The fake
** cursor returned must not be used with any other Btree interface.
*/
BtCursor *sqlite3BtreeFakeValidCursor(void){
  static u8 fakeCursor = CURSOR_VALID;
  assert( offsetof(BtCursor, eState)==0 );
  return (BtCursor*)&fakeCursor;
}

/*
** This routine restores a cursor back to its original position after it
** has been moved by some outside activity (such as a btree rebalance or
** a row having been deleted out from under the cursor).
**
** On success, the *pDifferentRow parameter is false if the cursor is left
** pointing at exactly the same row. *pDifferentRow is true if the row the
** cursor was pointing to has been deleted, forcing the cursor to point to
** some nearby row.
**
** This routine should only be called for a cursor that just returned
** TRUE from sqlite3BtreeCursorHasMoved().
*/
int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow){
  int rc;

  assert( pCur!=0 );
  assert( pCur->eState!=CURSOR_VALID );
  rc = restoreCursorPosition(pCur);
  if( rc ){
    *pDifferentRow = 1;
    return rc;
  }
  if( pCur->eState!=CURSOR_VALID ){
    *pDifferentRow = 1;
  }else{
    *pDifferentRow = 0;
  }
  return SQLITE_OK;
}

#ifdef SQLITE_ENABLE_CURSOR_HINTS
/*
** Provide hints to the cursor. The particular hint given (and the type
** and number of the varargs parameters) is determined by the eHintType
** parameter. See the definitions of the BTREE_HINT_* macros for details.
*/
void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){
  /* Used only by systems that substitute their own storage engine */
}
#endif

/*
** Provide flag hints to the cursor.
*/
void sqlite3BtreeCursorHintFlags(BtCursor *pCur, unsigned x){
  assert( x==BTREE_SEEK_EQ || x==BTREE_BULKLOAD || x==0 );
  pCur->hints = x;
}


#ifndef SQLITE_OMIT_AUTOVACUUM
/*
** Given a page number of a regular database page, return the page
** number for the pointer-map page that contains the entry for the
** input page number.
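**
** Each pointer-map entry is 5 bytes, so a pointer-map page with U usable
** bytes describes the U/5 pages that follow it; the group size used in the
** code below is therefore (U/5)+1, counting the map page itself.
*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** the core arithmetic of ptrmapPageno() with the PENDING_BYTE_PAGE
** adjustment and the pgno<2 guard stripped out. With usableSize==1024 each
** group spans 205 pages, so pages 3..206 map to pointer-map page 2 and
** page 207 starts the next group. The function name is invented for this
** example.
*/
static Pgno examplePtrmapPageno(u32 usableSize, Pgno pgno){
  int nPagesPerMapPage = (usableSize/5)+1;      /* Map page + pages it covers */
  Pgno iPtrMap = (pgno-2)/nPagesPerMapPage;     /* Zero-based group number */
  return (Pgno)(iPtrMap*nPagesPerMapPage) + 2;  /* First page of that group */
}

/*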
** ** Return 0 (not a valid page) for pgno==1 since there is ** no pointer map associated with page 1. The integrity_check logic ** requires that ptrmapPageno(*,1)!=1. */ static Pgno ptrmapPageno(BtShared *pBt, Pgno pgno){ int nPagesPerMapPage; Pgno iPtrMap, ret; assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno<2 ) return 0; nPagesPerMapPage = (pBt->usableSize/5)+1; iPtrMap = (pgno-2)/nPagesPerMapPage; ret = (iPtrMap*nPagesPerMapPage) + 2; if( ret==PENDING_BYTE_PAGE(pBt) ){ ret++; } return ret; } /* ** Write an entry into the pointer map. ** ** This routine updates the pointer map entry for page number 'key' ** so that it maps to type 'eType' and parent page number 'pgno'. ** ** If *pRC is initially non-zero (non-SQLITE_OK) then this routine is ** a no-op. If an error occurs, the appropriate error code is written ** into *pRC. */ static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){ DbPage *pDbPage; /* The pointer map page */ u8 *pPtrmap; /* The pointer map data */ Pgno iPtrmap; /* The pointer map page number */ int offset; /* Offset in pointer map page */ int rc; /* Return code from subfunctions */ if( *pRC ) return; assert( sqlite3_mutex_held(pBt->mutex) ); /* The master-journal page number must never be used as a pointer map page */ assert( 0==PTRMAP_ISPAGE(pBt, PENDING_BYTE_PAGE(pBt)) ); assert( pBt->autoVacuum ); if( key==0 ){ *pRC = SQLITE_CORRUPT_BKPT; return; } iPtrmap = PTRMAP_PAGENO(pBt, key); rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0); if( rc!=SQLITE_OK ){ *pRC = rc; return; } if( ((char*)sqlite3PagerGetExtra(pDbPage))[0]!=0 ){ /* The first byte of the extra data is the MemPage.isInit byte. ** If that byte is set, it means this page is also being used ** as a btree page. */ *pRC = SQLITE_CORRUPT_BKPT; goto ptrmap_exit; } offset = PTRMAP_PTROFFSET(iPtrmap, key); if( offset<0 ){ *pRC = SQLITE_CORRUPT_BKPT; goto ptrmap_exit; } assert( offset <= (int)pBt->usableSize-5 ); pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); *pRC= rc = sqlite3PagerWrite(pDbPage); if( rc==SQLITE_OK ){ pPtrmap[offset] = eType; put4byte(&pPtrmap[offset+1], parent); } } ptrmap_exit: sqlite3PagerUnref(pDbPage); } /* ** Read an entry from the pointer map. ** ** This routine retrieves the pointer map entry for page 'key', writing ** the type and parent page number to *pEType and *pPgno respectively. ** An error code is returned if something goes wrong, otherwise SQLITE_OK. 
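*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** the on-page layout of one pointer-map entry. Entries are packed 5 bytes
** apart, a type byte followed by a big-endian parent page number, which is
** the layout the PTRMAP_PTROFFSET() macro and ptrmapGet() rely on. The
** helper name and the explicit offset arithmetic are assumptions made for
** the example.
*/
static void exampleDecodePtrmapEntry(
  const u8 *aMapData,   /* Raw content of a pointer-map page */
  Pgno iPtrmap,         /* Page number of that pointer-map page */
  Pgno key,             /* Page whose entry is wanted */
  u8 *pEType,           /* OUT: entry type (a PTRMAP_* value) */
  Pgno *pParent         /* OUT: parent page recorded for 'key' */
){
  int offset = 5*(int)(key-iPtrmap-1);       /* 5 bytes per entry */
  *pEType = aMapData[offset];
  *pParent = get4byte(&aMapData[offset+1]);
}

/*
** ptrmapGet() below reads an entry the same way, with error checking added.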
*/ static int ptrmapGet(BtShared *pBt, Pgno key, u8 *pEType, Pgno *pPgno){ DbPage *pDbPage; /* The pointer map page */ int iPtrmap; /* Pointer map page index */ u8 *pPtrmap; /* Pointer map page data */ int offset; /* Offset of entry in pointer map */ int rc; assert( sqlite3_mutex_held(pBt->mutex) ); iPtrmap = PTRMAP_PAGENO(pBt, key); rc = sqlite3PagerGet(pBt->pPager, iPtrmap, &pDbPage, 0); if( rc!=0 ){ return rc; } pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); offset = PTRMAP_PTROFFSET(iPtrmap, key); if( offset<0 ){ sqlite3PagerUnref(pDbPage); return SQLITE_CORRUPT_BKPT; } assert( offset <= (int)pBt->usableSize-5 ); assert( pEType!=0 ); *pEType = pPtrmap[offset]; if( pPgno ) *pPgno = get4byte(&pPtrmap[offset+1]); sqlite3PagerUnref(pDbPage); if( *pEType<1 || *pEType>5 ) return SQLITE_CORRUPT_PGNO(iPtrmap); return SQLITE_OK; } #else /* if defined SQLITE_OMIT_AUTOVACUUM */ #define ptrmapPut(w,x,y,z,rc) #define ptrmapGet(w,x,y,z) SQLITE_OK #define ptrmapPutOvflPtr(x, y, z, rc) #endif /* ** Given a btree page and a cell index (0 means the first cell on ** the page, 1 means the second cell, and so forth) return a pointer ** to the cell content. ** ** findCellPastPtr() does the same except it skips past the initial ** 4-byte child pointer found on interior pages, if there is one. ** ** This routine works only for pages that do not contain overflow cells. */ #define findCell(P,I) \ ((P)->aData + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)]))) #define findCellPastPtr(P,I) \ ((P)->aDataOfst + ((P)->maskPage & get2byteAligned(&(P)->aCellIdx[2*(I)]))) /* ** This is common tail processing for btreeParseCellPtr() and ** btreeParseCellPtrIndex() for the case when the cell does not fit entirely ** on a single B-tree page. Make necessary adjustments to the CellInfo ** structure. */ static SQLITE_NOINLINE void btreeParseCellAdjustSizeForOverflow( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ /* If the payload will not fit completely on the local page, we have ** to decide how much to store locally and how much to spill onto ** overflow pages. The strategy is to minimize the amount of unused ** space on overflow pages while keeping the amount of local storage ** in between minLocal and maxLocal. ** ** Warning: changing the way overflow payload is distributed in any ** way will result in an incompatible file format. */ int minLocal; /* Minimum amount of payload held locally */ int maxLocal; /* Maximum amount of payload held locally */ int surplus; /* Overflow payload available for local storage */ minLocal = pPage->minLocal; maxLocal = pPage->maxLocal; surplus = minLocal + (pInfo->nPayload - minLocal)%(pPage->pBt->usableSize-4); testcase( surplus==maxLocal ); testcase( surplus==maxLocal+1 ); if( surplus <= maxLocal ){ pInfo->nLocal = (u16)surplus; }else{ pInfo->nLocal = (u16)minLocal; } pInfo->nSize = (u16)(&pInfo->pPayload[pInfo->nLocal] - pCell) + 4; } /* ** The following routines are implementations of the MemPage.xParseCell() ** method. ** ** Parse a cell content block and fill in the CellInfo structure. ** ** btreeParseCellPtr() => table btree leaf nodes ** btreeParseCellNoPayload() => table btree internal nodes ** btreeParseCellPtrIndex() => index btree nodes ** ** There is also a wrapper function btreeParseCell() that works for ** all MemPage types and that references the cell by index rather than ** by pointer. 
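*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** the byte-at-a-time varint decode that btreeParseCellPtr() inlines when
** reading the payload-size field of a cell. A set high-order bit means
** "more bytes follow"; each byte contributes 7 bits, most significant
** first, and the loop gives up after 9 bytes. For a well-formed cell this
** is equivalent to "pIter += getVarint32(pIter, nPayload)". The function
** name is invented for this example.
*/
static int exampleDecodeVarint32(const u8 *p, u32 *pVal){
  const u8 *pStart = p;
  u32 v = p[0];
  if( v>=0x80 ){
    const u8 *pEnd = &p[8];
    v &= 0x7f;
    do{
      v = (v<<7) | (*++p & 0x7f);
    }while( (*p)>=0x80 && p<pEnd );
  }
  p++;
  *pVal = v;
  return (int)(p - pStart);     /* Number of bytes consumed */
}

/*
** The xParseCell() implementations follow.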
*/ static void btreeParseCellPtrNoPayload( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->leaf==0 ); assert( pPage->childPtrSize==4 ); #ifndef SQLITE_DEBUG UNUSED_PARAMETER(pPage); #endif pInfo->nSize = 4 + getVarint(&pCell[4], (u64*)&pInfo->nKey); pInfo->nPayload = 0; pInfo->nLocal = 0; pInfo->pPayload = 0; return; } static void btreeParseCellPtr( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ u8 *pIter; /* For scanning through pCell */ u32 nPayload; /* Number of bytes of cell payload */ u64 iKey; /* Extracted Key value */ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->leaf==0 || pPage->leaf==1 ); assert( pPage->intKeyLeaf ); assert( pPage->childPtrSize==0 ); pIter = pCell; /* The next block of code is equivalent to: ** ** pIter += getVarint32(pIter, nPayload); ** ** The code is inlined to avoid a function call. */ nPayload = *pIter; if( nPayload>=0x80 ){ u8 *pEnd = &pIter[8]; nPayload &= 0x7f; do{ nPayload = (nPayload<<7) | (*++pIter & 0x7f); }while( (*pIter)>=0x80 && pIter<pEnd ); } pIter++; /* The next block of code is equivalent to: ** ** pIter += getVarint(pIter, (u64*)&pInfo->nKey); ** ** The code is inlined to avoid a function call. */ iKey = *pIter; if( iKey>=0x80 ){ u8 *pEnd = &pIter[7]; iKey &= 0x7f; while(1){ iKey = (iKey<<7) | (*++pIter & 0x7f); if( (*pIter)<0x80 ) break; if( pIter>=pEnd ){ iKey = (iKey<<8) | *++pIter; break; } } } pIter++; pInfo->nKey = *(i64*)&iKey; pInfo->nPayload = nPayload; pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==pPage->maxLocal+1 ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ pInfo->nSize = nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo); } } static void btreeParseCellPtrIndex( MemPage *pPage, /* Page containing the cell */ u8 *pCell, /* Pointer to the cell text. */ CellInfo *pInfo /* Fill in this structure */ ){ u8 *pIter; /* For scanning through pCell */ u32 nPayload; /* Number of bytes of cell payload */ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->leaf==0 || pPage->leaf==1 ); assert( pPage->intKeyLeaf==0 ); pIter = pCell + pPage->childPtrSize; nPayload = *pIter; if( nPayload>=0x80 ){ u8 *pEnd = &pIter[8]; nPayload &= 0x7f; do{ nPayload = (nPayload<<7) | (*++pIter & 0x7f); }while( *(pIter)>=0x80 && pIter<pEnd ); } pIter++; pInfo->nKey = nPayload; pInfo->nPayload = nPayload; pInfo->pPayload = pIter; testcase( nPayload==pPage->maxLocal ); testcase( nPayload==pPage->maxLocal+1 ); if( nPayload<=pPage->maxLocal ){ /* This is the (easy) common case where the entire payload fits ** on the local page. No overflow is required. */ pInfo->nSize = nPayload + (u16)(pIter - pCell); if( pInfo->nSize<4 ) pInfo->nSize = 4; pInfo->nLocal = (u16)nPayload; }else{ btreeParseCellAdjustSizeForOverflow(pPage, pCell, pInfo); } } static void btreeParseCell( MemPage *pPage, /* Page containing the cell */ int iCell, /* The cell index. First cell is 0 */ CellInfo *pInfo /* Fill in this structure */ ){ pPage->xParseCell(pPage, findCell(pPage, iCell), pInfo); } /* ** The following routines are implementations of the MemPage.xCellSize ** method. 
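*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** the rule used by btreeParseCellAdjustSizeForOverflow() above, and
** repeated in cellSizePtr() below, for deciding how much of an oversized
** payload stays on the b-tree page. The "surplus" value spreads the
** payload so that the overflow pages come out completely full; if that
** still exceeds maxLocal, only minLocal bytes stay local. The function
** name is invented for this example.
*/
static int exampleLocalPayload(
  int minLocal,      /* Minimum bytes of payload held locally */
  int maxLocal,      /* Maximum bytes of payload held locally */
  int nPayload,      /* Total payload size in bytes */
  int usableSize     /* Usable bytes per page; overflow pages hold usableSize-4 */
){
  int surplus = minLocal + (nPayload - minLocal)%(usableSize - 4);
  return surplus<=maxLocal ? surplus : minLocal;
}

/*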
** ** Compute the total number of bytes that a Cell needs in the cell ** data area of the btree-page. The return number includes the cell ** data header and the local payload, but not any overflow page or ** the space used by the cell pointer. ** ** cellSizePtrNoPayload() => table internal nodes ** cellSizePtr() => all index nodes & table leaf nodes */ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ u32 nSize; /* Size value to return */ #ifdef SQLITE_DEBUG /* The value returned by this function should always be the same as ** the (CellInfo.nSize) value found by doing a full parse of the ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of ** this function verifies that this invariant is not violated. */ CellInfo debuginfo; pPage->xParseCell(pPage, pCell, &debuginfo); #endif nSize = *pIter; if( nSize>=0x80 ){ pEnd = &pIter[8]; nSize &= 0x7f; do{ nSize = (nSize<<7) | (*++pIter & 0x7f); }while( *(pIter)>=0x80 && pIter<pEnd ); } pIter++; if( pPage->intKey ){ /* pIter now points at the 64-bit integer key value, a variable length ** integer. The following block moves pIter to point at the first byte ** past the end of the key value. */ pEnd = &pIter[9]; while( (*pIter++)&0x80 && pIter<pEnd ); } testcase( nSize==pPage->maxLocal ); testcase( nSize==pPage->maxLocal+1 ); if( nSize<=pPage->maxLocal ){ nSize += (u32)(pIter - pCell); if( nSize<4 ) nSize = 4; }else{ int minLocal = pPage->minLocal; nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); testcase( nSize==pPage->maxLocal ); testcase( nSize==pPage->maxLocal+1 ); if( nSize>pPage->maxLocal ){ nSize = minLocal; } nSize += 4 + (u16)(pIter - pCell); } assert( nSize==debuginfo.nSize || CORRUPT_DB ); return (u16)nSize; } static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){ u8 *pIter = pCell + 4; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ #ifdef SQLITE_DEBUG /* The value returned by this function should always be the same as ** the (CellInfo.nSize) value found by doing a full parse of the ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of ** this function verifies that this invariant is not violated. */ CellInfo debuginfo; pPage->xParseCell(pPage, pCell, &debuginfo); #else UNUSED_PARAMETER(pPage); #endif assert( pPage->childPtrSize==4 ); pEnd = pIter + 9; while( (*pIter++)&0x80 && pIter<pEnd ); assert( debuginfo.nSize==(u16)(pIter - pCell) || CORRUPT_DB ); return (u16)(pIter - pCell); } #ifdef SQLITE_DEBUG /* This variation on cellSizePtr() is used inside of assert() statements ** only. */ static u16 cellSize(MemPage *pPage, int iCell){ return pPage->xCellSize(pPage, findCell(pPage, iCell)); } #endif #ifndef SQLITE_OMIT_AUTOVACUUM /* ** The cell pCell is currently part of page pSrc but will ultimately be part ** of pPage. (pSrc and pPager are often the same.) If pCell contains a ** pointer to an overflow page, insert an entry into the pointer-map for ** the overflow page that will be valid after pCell has been moved to pPage. 
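*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** where the first overflow page number lives inside a cell that spills.
** When a parsed cell has nLocal<nPayload, the final 4 bytes of its local
** image hold the page number of the first overflow page, which is the
** value ptrmapPutOvflPtr() records in the pointer map. The helper name is
** invented for this example and no corruption checking is attempted.
*/
static Pgno exampleFirstOverflowPage(MemPage *pPg, u8 *pCell){
  CellInfo info;
  pPg->xParseCell(pPg, pCell, &info);
  if( info.nLocal==info.nPayload ) return 0;  /* No overflow chain */
  return get4byte(&pCell[info.nSize-4]);      /* First overflow page number */
}

/*
** ptrmapPutOvflPtr() follows.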
*/ static void ptrmapPutOvflPtr(MemPage *pPage, MemPage *pSrc, u8 *pCell,int *pRC){ CellInfo info; if( *pRC ) return; assert( pCell!=0 ); pPage->xParseCell(pPage, pCell, &info); if( info.nLocal<info.nPayload ){ Pgno ovfl; if( SQLITE_WITHIN(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){ testcase( pSrc!=pPage ); *pRC = SQLITE_CORRUPT_BKPT; return; } ovfl = get4byte(&pCell[info.nSize-4]); ptrmapPut(pPage->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, pRC); } } #endif /* ** Defragment the page given. This routine reorganizes cells within the ** page so that there are no free-blocks on the free-block list. ** ** Parameter nMaxFrag is the maximum amount of fragmented space that may be ** present in the page after this routine returns. ** ** EVIDENCE-OF: R-44582-60138 SQLite may from time to time reorganize a ** b-tree page so that there are no freeblocks or fragment bytes, all ** unused bytes are contained in the unallocated space region, and all ** cells are packed tightly at the end of the page. */ static int defragmentPage(MemPage *pPage, int nMaxFrag){ int i; /* Loop counter */ int pc; /* Address of the i-th cell */ int hdr; /* Offset to the page header */ int size; /* Size of a cell */ int usableSize; /* Number of usable bytes on a page */ int cellOffset; /* Offset to the cell pointer array */ int cbrk; /* Offset to the cell content area */ int nCell; /* Number of cells on the page */ unsigned char *data; /* The page data */ unsigned char *temp; /* Temp area for cell content */ unsigned char *src; /* Source of content */ int iCellFirst; /* First allowable cell index */ int iCellLast; /* Last possible cell index */ assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( pPage->pBt!=0 ); assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE ); assert( pPage->nOverflow==0 ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); temp = 0; src = data = pPage->aData; hdr = pPage->hdrOffset; cellOffset = pPage->cellOffset; nCell = pPage->nCell; assert( nCell==get2byte(&data[hdr+3]) || CORRUPT_DB ); iCellFirst = cellOffset + 2*nCell; usableSize = pPage->pBt->usableSize; /* This block handles pages with two or fewer free blocks and nMaxFrag ** or fewer fragmented bytes. In this case it is faster to move the ** two (or one) blocks of cells using memmove() and add the required ** offsets to each pointer in the cell-pointer array than it is to ** reconstruct the entire page. 
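**
** The header fields consulted here are the ones described elsewhere in
** this file: data[hdr+1] is the offset of the first freeblock, data[hdr+5]
** is the offset of the start of the cell content area, and data[hdr+7] is
** the count of fragmented bytes on the page.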
*/ if( (int)data[hdr+7]<=nMaxFrag ){ int iFree = get2byte(&data[hdr+1]); if( iFree>usableSize-4 ) return SQLITE_CORRUPT_PAGE(pPage); if( iFree ){ int iFree2 = get2byte(&data[iFree]); if( iFree2>usableSize-4 ) return SQLITE_CORRUPT_PAGE(pPage); if( 0==iFree2 || (data[iFree2]==0 && data[iFree2+1]==0) ){ u8 *pEnd = &data[cellOffset + nCell*2]; u8 *pAddr; int sz2 = 0; int sz = get2byte(&data[iFree+2]); int top = get2byte(&data[hdr+5]); if( top>=iFree ){ return SQLITE_CORRUPT_PAGE(pPage); } if( iFree2 ){ if( iFree+sz>iFree2 ) return SQLITE_CORRUPT_PAGE(pPage); sz2 = get2byte(&data[iFree2+2]); if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage); memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz)); sz += sz2; }else if( iFree+sz>usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } cbrk = top+sz; assert( cbrk+(iFree-top) <= usableSize ); memmove(&data[cbrk], &data[top], iFree-top); for(pAddr=&data[cellOffset]; pAddr<pEnd; pAddr+=2){ pc = get2byte(pAddr); if( pc<iFree ){ put2byte(pAddr, pc+sz); } else if( pc<iFree2 ){ put2byte(pAddr, pc+sz2); } } goto defragment_out; } } } cbrk = usableSize; iCellLast = usableSize - 4; for(i=0; i<nCell; i++){ u8 *pAddr; /* The i-th cell pointer */ pAddr = &data[cellOffset + i*2]; pc = get2byte(pAddr); testcase( pc==iCellFirst ); testcase( pc==iCellLast ); /* These conditions have already been verified in btreeInitPage() ** if PRAGMA cell_size_check=ON. */ if( pc<iCellFirst || pc>iCellLast ){ return SQLITE_CORRUPT_PAGE(pPage); } assert( pc>=iCellFirst && pc<=iCellLast ); size = pPage->xCellSize(pPage, &src[pc]); cbrk -= size; if( cbrk<iCellFirst || pc+size>usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } assert( cbrk+size<=usableSize && cbrk>=iCellFirst ); testcase( cbrk+size==usableSize ); testcase( pc+size==usableSize ); put2byte(pAddr, cbrk); if( temp==0 ){ int x; if( cbrk==pc ) continue; temp = sqlite3PagerTempSpace(pPage->pBt->pPager); x = get2byte(&data[hdr+5]); memcpy(&temp[x], &data[x], (cbrk+size) - x); src = temp; } memcpy(&data[cbrk], &src[pc], size); } data[hdr+7] = 0; defragment_out: assert( pPage->nFree>=0 ); if( data[hdr+7]+cbrk-iCellFirst!=pPage->nFree ){ return SQLITE_CORRUPT_PAGE(pPage); } assert( cbrk>=iCellFirst ); put2byte(&data[hdr+5], cbrk); data[hdr+1] = 0; data[hdr+2] = 0; memset(&data[iCellFirst], 0, cbrk-iCellFirst); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); return SQLITE_OK; } /* ** Search the free-list on page pPg for space to store a cell nByte bytes in ** size. If one can be found, return a pointer to the space and remove it ** from the free-list. ** ** If no suitable space can be found on the free-list, return NULL. ** ** This function may detect corruption within pPg. If corruption is ** detected then *pRc is set to SQLITE_CORRUPT and NULL is returned. ** ** Slots on the free list that are between 1 and 3 bytes larger than nByte ** will be ignored if adding the extra space to the fragmentation count ** causes the fragmentation count to exceed 60. 
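*/

/* Illustrative sketch, for exposition only (nothing in this file calls it):
** how the freeblock chain that pageFindSlot() searches is threaded through
** the page. The first two bytes of each freeblock give the offset of the
** next freeblock (zero terminates the list) and the next two give its
** size. This walker assumes a mostly well-formed page; the real code
** layers stricter corruption checks on top. The function name is invented
** for this example.
*/
static int exampleSumFreeblocks(const u8 *aData, int hdr, int usableSize){
  int pc = get2byte(&aData[hdr+1]);      /* Offset of first freeblock, or 0 */
  int nFree = 0;
  while( pc>0 ){
    int next;
    if( pc>usableSize-4 ) break;         /* Freeblock off the page: corrupt */
    nFree += get2byte(&aData[pc+2]);     /* Bytes 2..3: size of this block */
    next = get2byte(&aData[pc]);         /* Bytes 0..1: next freeblock */
    if( next!=0 && next<=pc ) break;     /* Must be ascending: corrupt */
    pc = next;
  }
  return nFree;
}

/*
** pageFindSlot() follows.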
*/ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ const int hdr = pPg->hdrOffset; /* Offset to page header */ u8 * const aData = pPg->aData; /* Page data */ int iAddr = hdr + 1; /* Address of ptr to pc */ int pc = get2byte(&aData[iAddr]); /* Address of a free slot */ int x; /* Excess size of the slot */ int maxPC = pPg->pBt->usableSize - nByte; /* Max address for a usable slot */ int size; /* Size of the free slot */ assert( pc>0 ); while( pc<=maxPC ){ /* EVIDENCE-OF: R-22710-53328 The third and fourth bytes of each ** freeblock form a big-endian integer which is the size of the freeblock ** in bytes, including the 4-byte header. */ size = get2byte(&aData[pc+2]); if( (x = size - nByte)>=0 ){ testcase( x==4 ); testcase( x==3 ); if( x<4 ){ /* EVIDENCE-OF: R-11498-58022 In a well-formed b-tree page, the total ** number of bytes in fragments may not exceed 60. */ if( aData[hdr+7]>57 ) return 0; /* Remove the slot from the free-list. Update the number of ** fragmented bytes within the page. */ memcpy(&aData[iAddr], &aData[pc], 2); aData[hdr+7] += (u8)x; }else if( x+pc > maxPC ){ /* This slot extends off the end of the usable part of the page */ *pRc = SQLITE_CORRUPT_PAGE(pPg); return 0; }else{ /* The slot remains on the free-list. Reduce its size to account ** for the portion used by the new allocation. */ put2byte(&aData[pc+2], x); } return &aData[pc + x]; } iAddr = pc; pc = get2byte(&aData[pc]); if( pc<=iAddr+size ){ if( pc ){ /* The next slot in the chain is not past the end of the current slot */ *pRc = SQLITE_CORRUPT_PAGE(pPg); } return 0; } } if( pc>maxPC+nByte-4 ){ /* The free slot chain extends off the end of the page */ *pRc = SQLITE_CORRUPT_PAGE(pPg); } return 0; } /* ** Allocate nByte bytes of space from within the B-Tree page passed ** as the first argument. Write into *pIdx the index into pPage->aData[] ** of the first byte of allocated space. Return either SQLITE_OK or ** an error code (usually SQLITE_CORRUPT). ** ** The caller guarantees that there is sufficient space to make the ** allocation. This routine might need to defragment in order to bring ** all the space together, however. This routine will avoid using ** the first two bytes past the cell pointer area since presumably this ** allocation is being made in order to insert a new cell, so we will ** also end up needing a new cell pointer. */ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ u8 * const data = pPage->aData; /* Local cache of pPage->aData */ int top; /* First byte of cell content area */ int rc = SQLITE_OK; /* Integer return code */ int gap; /* First byte of gap between cell pointers and cell content */ assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( pPage->pBt ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( nByte>=0 ); /* Minimum cell size is 4 */ assert( pPage->nFree>=nByte ); assert( pPage->nOverflow==0 ); assert( nByte < (int)(pPage->pBt->usableSize-8) ); assert( pPage->cellOffset == hdr + 12 - 4*pPage->leaf ); gap = pPage->cellOffset + 2*pPage->nCell; assert( gap<=65536 ); /* EVIDENCE-OF: R-29356-02391 If the database uses a 65536-byte page size ** and the reserved space is zero (the usual value for reserved space) ** then the cell content offset of an empty page wants to be 65536. ** However, that integer is too large to be stored in a 2-byte unsigned ** integer, so a value of 0 is used in its place. 
*/ top = get2byte(&data[hdr+5]); assert( top<=(int)pPage->pBt->usableSize ); /* by btreeComputeFreeSpace() */ if( gap>top ){ if( top==0 && pPage->pBt->usableSize==65536 ){ top = 65536; }else{ return SQLITE_CORRUPT_PAGE(pPage); } } /* If there is enough space between gap and top for one more cell pointer, ** and if the freelist is not empty, then search the ** freelist looking for a slot big enough to satisfy the request. */ testcase( gap+2==top ); testcase( gap+1==top ); testcase( gap==top ); if( (data[hdr+2] || data[hdr+1]) && gap+2<=top ){ u8 *pSpace = pageFindSlot(pPage, nByte, &rc); if( pSpace ){ assert( pSpace>=data && (pSpace - data)<65536 ); *pIdx = (int)(pSpace - data); return SQLITE_OK; }else if( rc ){ return rc; } } /* The request could not be fulfilled using a freelist slot. Check ** to see if defragmentation is necessary. */ testcase( gap+2+nByte==top ); if( gap+2+nByte>top ){ assert( pPage->nCell>0 || CORRUPT_DB ); assert( pPage->nFree>=0 ); rc = defragmentPage(pPage, MIN(4, pPage->nFree - (2+nByte))); if( rc ) return rc; top = get2byteNotZero(&data[hdr+5]); assert( gap+2+nByte<=top ); } /* Allocate memory from the gap in between the cell pointer array ** and the cell content area. The btreeComputeFreeSpace() call has already ** validated the freelist. Given that the freelist is valid, there ** is no way that the allocation can extend off the end of the page. ** The assert() below verifies the previous sentence. */ top -= nByte; put2byte(&data[hdr+5], top); assert( top+nByte <= (int)pPage->pBt->usableSize ); *pIdx = top; return SQLITE_OK; } /* ** Return a section of the pPage->aData to the freelist. ** The first byte of the new free block is pPage->aData[iStart] ** and the size of the block is iSize bytes. ** ** Adjacent freeblocks are coalesced. ** ** Even though the freeblock list was checked by btreeComputeFreeSpace(), ** that routine will not detect overlap between cells or freeblocks. Nor ** does it detect cells or freeblocks that encrouch into the reserved bytes ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ u16 iPtr; /* Address of ptr to next freeblock */ u16 iFreeBlk; /* Address of the next freeblock */ u8 hdr; /* Page header size. 0 or 100 */ u8 nFrag = 0; /* Reduction in fragmentation */ u16 iOrigSize = iSize; /* Original value of iSize */ u16 x; /* Offset to cell content area */ u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */ unsigned char *data = pPage->aData; /* Page content */ assert( pPage->pBt!=0 ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( CORRUPT_DB || iStart>=pPage->hdrOffset+6+pPage->childPtrSize ); assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( iSize>=4 ); /* Minimum cell size is 4 */ assert( iStart<=pPage->pBt->usableSize-4 ); /* The list of freeblocks must be in ascending order. Find the ** spot on the list where iStart should be inserted. 
*/ hdr = pPage->hdrOffset; iPtr = hdr + 1; if( data[iPtr+1]==0 && data[iPtr]==0 ){ iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */ }else{ while( (iFreeBlk = get2byte(&data[iPtr]))<iStart ){ if( iFreeBlk<iPtr+4 ){ if( iFreeBlk==0 ) break; return SQLITE_CORRUPT_PAGE(pPage); } iPtr = iFreeBlk; } if( iFreeBlk>pPage->pBt->usableSize-4 ){ return SQLITE_CORRUPT_PAGE(pPage); } assert( iFreeBlk>iPtr || iFreeBlk==0 ); /* At this point: ** iFreeBlk: First freeblock after iStart, or zero if none ** iPtr: The address of a pointer to iFreeBlk ** ** Check to see if iFreeBlk should be coalesced onto the end of iStart. */ if( iFreeBlk && iEnd+3>=iFreeBlk ){ nFrag = iFreeBlk - iEnd; if( iEnd>iFreeBlk ) return SQLITE_CORRUPT_PAGE(pPage); iEnd = iFreeBlk + get2byte(&data[iFreeBlk+2]); if( iEnd > pPage->pBt->usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } iSize = iEnd - iStart; iFreeBlk = get2byte(&data[iFreeBlk]); } /* If iPtr is another freeblock (that is, if iPtr is not the freelist ** pointer in the page header) then check to see if iStart should be ** coalesced onto the end of iPtr. */ if( iPtr>hdr+1 ){ int iPtrEnd = iPtr + get2byte(&data[iPtr+2]); if( iPtrEnd+3>=iStart ){ if( iPtrEnd>iStart ) return SQLITE_CORRUPT_PAGE(pPage); nFrag += iStart - iPtrEnd; iSize = iEnd - iPtr; iStart = iPtr; } } if( nFrag>data[hdr+7] ) return SQLITE_CORRUPT_PAGE(pPage); data[hdr+7] -= nFrag; } x = get2byte(&data[hdr+5]); if( iStart<=x ){ /* The new freeblock is at the beginning of the cell content area, ** so just extend the cell content area rather than create another ** freelist entry */ if( iStart<x || iPtr!=hdr+1 ) return SQLITE_CORRUPT_PAGE(pPage); put2byte(&data[hdr+1], iFreeBlk); put2byte(&data[hdr+5], iEnd); }else{ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); } if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){ /* Overwrite deleted information with zeros when the secure_delete ** option is enabled */ memset(&data[iStart], 0, iSize); } put2byte(&data[iStart], iFreeBlk); put2byte(&data[iStart+2], iSize); pPage->nFree += iOrigSize; return SQLITE_OK; } /* ** Decode the flags byte (the first byte of the header) for a page ** and initialize fields of the MemPage structure accordingly. ** ** Only the following combinations are supported. Anything different ** indicates a corrupt database files: ** ** PTF_ZERODATA ** PTF_ZERODATA | PTF_LEAF ** PTF_LEAFDATA | PTF_INTKEY ** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF */ static int decodeFlags(MemPage *pPage, int flagByte){ BtShared *pBt; /* A copy of pPage->pBt */ assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 ); flagByte &= ~PTF_LEAF; pPage->childPtrSize = 4-4*pPage->leaf; pPage->xCellSize = cellSizePtr; pBt = pPage->pBt; if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an ** interior table b-tree page. */ assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a ** leaf table b-tree page. 
*/ assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); pPage->intKey = 1; if( pPage->leaf ){ pPage->intKeyLeaf = 1; pPage->xParseCell = btreeParseCellPtr; }else{ pPage->intKeyLeaf = 0; pPage->xCellSize = cellSizePtrNoPayload; pPage->xParseCell = btreeParseCellPtrNoPayload; } pPage->maxLocal = pBt->maxLeaf; pPage->minLocal = pBt->minLeaf; }else if( flagByte==PTF_ZERODATA ){ /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an ** interior index b-tree page. */ assert( (PTF_ZERODATA)==2 ); /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a ** leaf index b-tree page. */ assert( (PTF_ZERODATA|PTF_LEAF)==10 ); pPage->intKey = 0; pPage->intKeyLeaf = 0; pPage->xParseCell = btreeParseCellPtrIndex; pPage->maxLocal = pBt->maxLocal; pPage->minLocal = pBt->minLocal; }else{ /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is ** an error. */ return SQLITE_CORRUPT_PAGE(pPage); } pPage->max1bytePayload = pBt->max1bytePayload; return SQLITE_OK; } /* ** Compute the amount of freespace on the page. In other words, fill ** in the pPage->nFree field. */ static int btreeComputeFreeSpace(MemPage *pPage){ int pc; /* Address of a freeblock within pPage->aData[] */ u8 hdr; /* Offset to beginning of page header */ u8 *data; /* Equal to pPage->aData */ int usableSize; /* Amount of usable space on each page */ int nFree; /* Number of unused bytes on the page */ int top; /* First byte of the cell content area */ int iCellFirst; /* First allowable cell or freeblock offset */ int iCellLast; /* Last possible cell or freeblock offset */ assert( pPage->pBt!=0 ); assert( pPage->pBt->db!=0 ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) ); assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); assert( pPage->isInit==1 ); assert( pPage->nFree<0 ); usableSize = pPage->pBt->usableSize; hdr = pPage->hdrOffset; data = pPage->aData; /* EVIDENCE-OF: R-58015-48175 The two-byte integer at offset 5 designates ** the start of the cell content area. A zero value for this integer is ** interpreted as 65536. */ top = get2byteNotZero(&data[hdr+5]); iCellFirst = hdr + 8 + pPage->childPtrSize + 2*pPage->nCell; iCellLast = usableSize - 4; /* Compute the total free space on the page ** EVIDENCE-OF: R-23588-34450 The two-byte integer at offset 1 gives the ** start of the first freeblock on the page, or is zero if there are no ** freeblocks. */ pc = get2byte(&data[hdr+1]); nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */ if( pc>0 ){ u32 next, size; if( pc<iCellFirst ){ /* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will ** always be at least one cell before the first freeblock. */ return SQLITE_CORRUPT_PAGE(pPage); } while( 1 ){ if( pc>iCellLast ){ /* Freeblock off the end of the page */ return SQLITE_CORRUPT_PAGE(pPage); } next = get2byte(&data[pc]); size = get2byte(&data[pc+2]); nFree = nFree + size; if( next<=pc+size+3 ) break; pc = next; } if( next>0 ){ /* Freeblock not in ascending order */ return SQLITE_CORRUPT_PAGE(pPage); } if( pc+size>(unsigned int)usableSize ){ /* Last freeblock extends past page end */ return SQLITE_CORRUPT_PAGE(pPage); } } /* At this point, nFree contains the sum of the offset to the start ** of the cell-content area plus the number of free bytes within ** the cell-content area. If this is greater than the usable-size ** of the page, then the page must be corrupted. 
This check also ** serves to verify that the offset to the start of the cell-content ** area, according to the page header, lies within the page. */ if( nFree>usableSize || nFree<iCellFirst ){ return SQLITE_CORRUPT_PAGE(pPage); } pPage->nFree = (u16)(nFree - iCellFirst); return SQLITE_OK; } /* ** Do additional sanity check after btreeInitPage() if ** PRAGMA cell_size_check=ON */ static SQLITE_NOINLINE int btreeCellSizeCheck(MemPage *pPage){ int iCellFirst; /* First allowable cell or freeblock offset */ int iCellLast; /* Last possible cell or freeblock offset */ int i; /* Index into the cell pointer array */ int sz; /* Size of a cell */ int pc; /* Address of a freeblock within pPage->aData[] */ u8 *data; /* Equal to pPage->aData */ int usableSize; /* Maximum usable space on the page */ int cellOffset; /* Start of cell content area */ iCellFirst = pPage->cellOffset + 2*pPage->nCell; usableSize = pPage->pBt->usableSize; iCellLast = usableSize - 4; data = pPage->aData; cellOffset = pPage->cellOffset; if( !pPage->leaf ) iCellLast--; for(i=0; i<pPage->nCell; i++){ pc = get2byteAligned(&data[cellOffset+i*2]); testcase( pc==iCellFirst ); testcase( pc==iCellLast ); if( pc<iCellFirst || pc>iCellLast ){ return SQLITE_CORRUPT_PAGE(pPage); } sz = pPage->xCellSize(pPage, &data[pc]); testcase( pc+sz==usableSize ); if( pc+sz>usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } } return SQLITE_OK; } /* ** Initialize the auxiliary information for a disk block. ** ** Return SQLITE_OK on success. If we see that the page does ** not contain a well-formed database page, then return ** SQLITE_CORRUPT. Note that a return of SQLITE_OK does not ** guarantee that the page is well-formed. It only shows that ** we failed to detect any corruption. */ static int btreeInitPage(MemPage *pPage){ u8 *data; /* Equal to pPage->aData */ BtShared *pBt; /* The main btree structure */ assert( pPage->pBt!=0 ); assert( pPage->pBt->db!=0 ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->pgno==sqlite3PagerPagenumber(pPage->pDbPage) ); assert( pPage == sqlite3PagerGetExtra(pPage->pDbPage) ); assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) ); assert( pPage->isInit==0 ); pBt = pPage->pBt; data = pPage->aData + pPage->hdrOffset; /* EVIDENCE-OF: R-28594-02890 The one-byte flag at offset 0 indicating ** the b-tree page type. */ if( decodeFlags(pPage, data[0]) ){ return SQLITE_CORRUPT_PAGE(pPage); } assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); pPage->maskPage = (u16)(pBt->pageSize - 1); pPage->nOverflow = 0; pPage->cellOffset = pPage->hdrOffset + 8 + pPage->childPtrSize; pPage->aCellIdx = data + pPage->childPtrSize + 8; pPage->aDataEnd = pPage->aData + pBt->usableSize; pPage->aDataOfst = pPage->aData + pPage->childPtrSize; /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the ** number of cells on the page. */ pPage->nCell = get2byte(&data[3]); if( pPage->nCell>MX_CELL(pBt) ){ /* To many cells for a single page. The page must be corrupt */ return SQLITE_CORRUPT_PAGE(pPage); } testcase( pPage->nCell==MX_CELL(pBt) ); /* EVIDENCE-OF: R-24089-57979 If a page contains no cells (which is only ** possible for a root page of a table that contains no rows) then the ** offset to the cell content area will equal the page size minus the ** bytes of reserved space. 
*/ assert( pPage->nCell>0 || get2byteNotZero(&data[5])==(int)pBt->usableSize || CORRUPT_DB ); pPage->nFree = -1; /* Indicate that this value is yet uncomputed */ pPage->isInit = 1; if( pBt->db->flags & SQLITE_CellSizeCk ){ return btreeCellSizeCheck(pPage); } return SQLITE_OK; } /* ** Set up a raw page so that it looks like a database page holding ** no entries. */ static void zeroPage(MemPage *pPage, int flags){ unsigned char *data = pPage->aData; BtShared *pBt = pPage->pBt; u8 hdr = pPage->hdrOffset; u16 first; assert( sqlite3PagerPagenumber(pPage->pDbPage)==pPage->pgno ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); assert( sqlite3PagerGetData(pPage->pDbPage) == data ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( sqlite3_mutex_held(pBt->mutex) ); if( pBt->btsFlags & BTS_FAST_SECURE ){ memset(&data[hdr], 0, pBt->usableSize - hdr); } data[hdr] = (char)flags; first = hdr + ((flags&PTF_LEAF)==0 ? 12 : 8); memset(&data[hdr+1], 0, 4); data[hdr+7] = 0; put2byte(&data[hdr+5], pBt->usableSize); pPage->nFree = (u16)(pBt->usableSize - first); decodeFlags(pPage, flags); pPage->cellOffset = first; pPage->aDataEnd = &data[pBt->usableSize]; pPage->aCellIdx = &data[first]; pPage->aDataOfst = &data[pPage->childPtrSize]; pPage->nOverflow = 0; assert( pBt->pageSize>=512 && pBt->pageSize<=65536 ); pPage->maskPage = (u16)(pBt->pageSize - 1); pPage->nCell = 0; pPage->isInit = 1; } /* ** Convert a DbPage obtained from the pager into a MemPage used by ** the btree layer. */ static MemPage *btreePageFromDbPage(DbPage *pDbPage, Pgno pgno, BtShared *pBt){ MemPage *pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); if( pgno!=pPage->pgno ){ pPage->aData = sqlite3PagerGetData(pDbPage); pPage->pDbPage = pDbPage; pPage->pBt = pBt; pPage->pgno = pgno; pPage->hdrOffset = pgno==1 ? 100 : 0; } assert( pPage->aData==sqlite3PagerGetData(pDbPage) ); return pPage; } /* ** Get a page from the pager. Initialize the MemPage.pBt and ** MemPage.aData elements if needed. See also: btreeGetUnusedPage(). ** ** If the PAGER_GET_NOCONTENT flag is set, it means that we do not care ** about the content of the page at this time. So do not go to the disk ** to fetch the content. Just fill in the content with zeros for now. ** If in the future we call sqlite3PagerWrite() on this page, that ** means we have started to be concerned about content and the disk ** read should occur at that point. */ static int btreeGetPage( BtShared *pBt, /* The btree */ Pgno pgno, /* Number of the page to fetch */ MemPage **ppPage, /* Return the page in this parameter */ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */ ){ int rc; DbPage *pDbPage; assert( flags==0 || flags==PAGER_GET_NOCONTENT || flags==PAGER_GET_READONLY ); assert( sqlite3_mutex_held(pBt->mutex) ); rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, flags); if( rc ) return rc; *ppPage = btreePageFromDbPage(pDbPage, pgno, pBt); return SQLITE_OK; } /* ** Retrieve a page from the pager cache. If the requested page is not ** already in the pager cache return NULL. Initialize the MemPage.pBt and ** MemPage.aData elements if needed. */ static MemPage *btreePageLookup(BtShared *pBt, Pgno pgno){ DbPage *pDbPage; assert( sqlite3_mutex_held(pBt->mutex) ); pDbPage = sqlite3PagerLookup(pBt->pPager, pgno); if( pDbPage ){ return btreePageFromDbPage(pDbPage, pgno, pBt); } return 0; } /* ** Return the size of the database file in pages. If there is any kind of ** error, return ((unsigned int)-1). 
*/ static Pgno btreePagecount(BtShared *pBt){ return pBt->nPage; } u32 sqlite3BtreeLastPage(Btree *p){ assert( sqlite3BtreeHoldsMutex(p) ); assert( ((p->pBt->nPage)&0x80000000)==0 ); return btreePagecount(p->pBt); } /* ** Get a page from the pager and initialize it. ** ** If pCur!=0 then the page is being fetched as part of a moveToChild() ** call. Do additional sanity checking on the page in this case. ** And if the fetch fails, this routine must decrement pCur->iPage. ** ** The page is fetched as read-write unless pCur is not NULL and is ** a read-only cursor. ** ** If an error occurs, then *ppPage is undefined. It ** may remain unchanged, or it may be set to an invalid value. */ static int getAndInitPage( BtShared *pBt, /* The database file */ Pgno pgno, /* Number of the page to get */ MemPage **ppPage, /* Write the page pointer here */ BtCursor *pCur, /* Cursor to receive the page, or NULL */ int bReadOnly /* True for a read-only page */ ){ int rc; DbPage *pDbPage; assert( sqlite3_mutex_held(pBt->mutex) ); assert( pCur==0 || ppPage==&pCur->pPage ); assert( pCur==0 || bReadOnly==pCur->curPagerFlags ); assert( pCur==0 || pCur->iPage>0 ); if( pgno>btreePagecount(pBt) ){ rc = SQLITE_CORRUPT_BKPT; goto getAndInitPage_error1; } rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly); if( rc ){ goto getAndInitPage_error1; } *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); if( (*ppPage)->isInit==0 ){ btreePageFromDbPage(pDbPage, pgno, pBt); rc = btreeInitPage(*ppPage); if( rc!=SQLITE_OK ){ goto getAndInitPage_error2; } } assert( (*ppPage)->pgno==pgno ); assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) ); /* If obtaining a child page for a cursor, we must verify that the page is ** compatible with the root page. */ if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){ rc = SQLITE_CORRUPT_PGNO(pgno); goto getAndInitPage_error2; } return SQLITE_OK; getAndInitPage_error2: releasePage(*ppPage); getAndInitPage_error1: if( pCur ){ pCur->iPage--; pCur->pPage = pCur->apPage[pCur->iPage]; } testcase( pgno==0 ); assert( pgno!=0 || rc==SQLITE_CORRUPT ); return rc; } /* ** Release a MemPage. This should be called once for each prior ** call to btreeGetPage. ** ** Page1 is a special case and must be released using releasePageOne(). */ static void releasePageNotNull(MemPage *pPage){ assert( pPage->aData ); assert( pPage->pBt ); assert( pPage->pDbPage!=0 ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); sqlite3PagerUnrefNotNull(pPage->pDbPage); } static void releasePage(MemPage *pPage){ if( pPage ) releasePageNotNull(pPage); } static void releasePageOne(MemPage *pPage){ assert( pPage!=0 ); assert( pPage->aData ); assert( pPage->pBt ); assert( pPage->pDbPage!=0 ); assert( sqlite3PagerGetExtra(pPage->pDbPage) == (void*)pPage ); assert( sqlite3PagerGetData(pPage->pDbPage)==pPage->aData ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); sqlite3PagerUnrefPageOne(pPage->pDbPage); } /* ** Get an unused page. ** ** This works just like btreeGetPage() with the addition: ** ** * If the page is already in use for some other purpose, immediately ** release it and return an SQLITE_CURRUPT error. 
** * Make sure the isInit flag is clear */ static int btreeGetUnusedPage( BtShared *pBt, /* The btree */ Pgno pgno, /* Number of the page to fetch */ MemPage **ppPage, /* Return the page in this parameter */ int flags /* PAGER_GET_NOCONTENT or PAGER_GET_READONLY */ ){ int rc = btreeGetPage(pBt, pgno, ppPage, flags); if( rc==SQLITE_OK ){ if( sqlite3PagerPageRefcount((*ppPage)->pDbPage)>1 ){ releasePage(*ppPage); *ppPage = 0; return SQLITE_CORRUPT_BKPT; } (*ppPage)->isInit = 0; }else{ *ppPage = 0; } return rc; } /* ** During a rollback, when the pager reloads information into the cache ** so that the cache is restored to its original state at the start of ** the transaction, for each page restored this routine is called. ** ** This routine needs to reset the extra data section at the end of the ** page to agree with the restored data. */ static void pageReinit(DbPage *pData){ MemPage *pPage; pPage = (MemPage *)sqlite3PagerGetExtra(pData); assert( sqlite3PagerPageRefcount(pData)>0 ); if( pPage->isInit ){ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->isInit = 0; if( sqlite3PagerPageRefcount(pData)>1 ){ /* pPage might not be a btree page; it might be an overflow page ** or ptrmap page or a free page. In those cases, the following ** call to btreeInitPage() will likely return SQLITE_CORRUPT. ** But no harm is done by this. And it is very important that ** btreeInitPage() be called on every btree page so we make ** the call for every page that comes in for re-initing. */ btreeInitPage(pPage); } } } /* ** Invoke the busy handler for a btree. */ static int btreeInvokeBusyHandler(void *pArg){ BtShared *pBt = (BtShared*)pArg; assert( pBt->db ); assert( sqlite3_mutex_held(pBt->db->mutex) ); return sqlite3InvokeBusyHandler(&pBt->db->busyHandler, sqlite3PagerFile(pBt->pPager)); } /* ** Open a database file. ** ** zFilename is the name of the database file. If zFilename is NULL ** then an ephemeral database is created. The ephemeral database might ** be exclusively in memory, or it might use a disk-based memory cache. ** Either way, the ephemeral database will be automatically deleted ** when sqlite3BtreeClose() is called. ** ** If zFilename is ":memory:" then an in-memory database is created ** that is automatically destroyed when it is closed. ** ** The "flags" parameter is a bitmask that might contain bits like ** BTREE_OMIT_JOURNAL and/or BTREE_MEMORY. ** ** If the database is already opened in the same database connection ** and we are in shared cache mode, then the open will fail with an ** SQLITE_CONSTRAINT error. We cannot allow two or more BtShared ** objects in the same database connection since doing so will lead ** to problems with locking. */ int sqlite3BtreeOpen( sqlite3_vfs *pVfs, /* VFS to use for this b-tree */ const char *zFilename, /* Name of the file containing the BTree database */ sqlite3 *db, /* Associated database handle */ Btree **ppBtree, /* Pointer to new Btree object written here */ int flags, /* Options */ int vfsFlags /* Flags passed through to sqlite3_vfs.xOpen() */ ){ BtShared *pBt = 0; /* Shared part of btree structure */ Btree *p; /* Handle to return */ sqlite3_mutex *mutexOpen = 0; /* Prevents a race condition. 
Ticket #3537 */ int rc = SQLITE_OK; /* Result code from this function */ u8 nReserve; /* Byte of unused space on each page */ unsigned char zDbHeader[100]; /* Database header content */ /* True if opening an ephemeral, temporary database */ const int isTempDb = zFilename==0 || zFilename[0]==0; /* Set the variable isMemdb to true for an in-memory database, or ** false for a file-based database. */ #ifdef SQLITE_OMIT_MEMORYDB const int isMemdb = 0; #else const int isMemdb = (zFilename && strcmp(zFilename, ":memory:")==0) || (isTempDb && sqlite3TempInMemory(db)) || (vfsFlags & SQLITE_OPEN_MEMORY)!=0; #endif assert( db!=0 ); assert( pVfs!=0 ); assert( sqlite3_mutex_held(db->mutex) ); assert( (flags&0xff)==flags ); /* flags fit in 8 bits */ /* Only a BTREE_SINGLE database can be BTREE_UNORDERED */ assert( (flags & BTREE_UNORDERED)==0 || (flags & BTREE_SINGLE)!=0 ); /* A BTREE_SINGLE database is always a temporary and/or ephemeral */ assert( (flags & BTREE_SINGLE)==0 || isTempDb ); if( isMemdb ){ flags |= BTREE_MEMORY; } if( (vfsFlags & SQLITE_OPEN_MAIN_DB)!=0 && (isMemdb || isTempDb) ){ vfsFlags = (vfsFlags & ~SQLITE_OPEN_MAIN_DB) | SQLITE_OPEN_TEMP_DB; } p = sqlite3MallocZero(sizeof(Btree)); if( !p ){ return SQLITE_NOMEM_BKPT; } p->inTrans = TRANS_NONE; p->db = db; #ifndef SQLITE_OMIT_SHARED_CACHE p->lock.pBtree = p; p->lock.iTable = 1; #endif #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* ** If this Btree is a candidate for shared cache, try to find an ** existing BtShared object that we can share with */ if( isTempDb==0 && (isMemdb==0 || (vfsFlags&SQLITE_OPEN_URI)!=0) ){ if( vfsFlags & SQLITE_OPEN_SHAREDCACHE ){ int nFilename = sqlite3Strlen30(zFilename)+1; int nFullPathname = pVfs->mxPathname+1; char *zFullPathname = sqlite3Malloc(MAX(nFullPathname,nFilename)); MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) p->sharable = 1; if( !zFullPathname ){ sqlite3_free(p); return SQLITE_NOMEM_BKPT; } if( isMemdb ){ memcpy(zFullPathname, zFilename, nFilename); }else{ rc = sqlite3OsFullPathname(pVfs, zFilename, nFullPathname, zFullPathname); if( rc ){ sqlite3_free(zFullPathname); sqlite3_free(p); return rc; } } #if SQLITE_THREADSAFE mutexOpen = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_OPEN); sqlite3_mutex_enter(mutexOpen); mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); sqlite3_mutex_enter(mutexShared); #endif for(pBt=GLOBAL(BtShared*,sqlite3SharedCacheList); pBt; pBt=pBt->pNext){ assert( pBt->nRef>0 ); if( 0==strcmp(zFullPathname, sqlite3PagerFilename(pBt->pPager, 0)) && sqlite3PagerVfs(pBt->pPager)==pVfs ){ int iDb; for(iDb=db->nDb-1; iDb>=0; iDb--){ Btree *pExisting = db->aDb[iDb].pBt; if( pExisting && pExisting->pBt==pBt ){ sqlite3_mutex_leave(mutexShared); sqlite3_mutex_leave(mutexOpen); sqlite3_free(zFullPathname); sqlite3_free(p); return SQLITE_CONSTRAINT; } } p->pBt = pBt; pBt->nRef++; break; } } sqlite3_mutex_leave(mutexShared); sqlite3_free(zFullPathname); } #ifdef SQLITE_DEBUG else{ /* In debug mode, we mark all persistent databases as sharable ** even when they are not. This exercises the locking code and ** gives more opportunity for asserts(sqlite3_mutex_held()) ** statements to find locking problems. */ p->sharable = 1; } #endif } #endif if( pBt==0 ){ /* ** The following asserts make sure that structures used by the btree are ** the right size. This is to guard against size changes that result ** when compiling on a different architecture. 
*/ assert( sizeof(i64)==8 ); assert( sizeof(u64)==8 ); assert( sizeof(u32)==4 ); assert( sizeof(u16)==2 ); assert( sizeof(Pgno)==4 ); pBt = sqlite3MallocZero( sizeof(*pBt) ); if( pBt==0 ){ rc = SQLITE_NOMEM_BKPT; goto btree_open_out; } rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename, sizeof(MemPage), flags, vfsFlags, pageReinit); if( rc==SQLITE_OK ){ sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap); rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader); } if( rc!=SQLITE_OK ){ goto btree_open_out; } pBt->openFlags = (u8)flags; pBt->db = db; sqlite3PagerSetBusyHandler(pBt->pPager, btreeInvokeBusyHandler, pBt); p->pBt = pBt; pBt->pCursor = 0; pBt->pPage1 = 0; if( sqlite3PagerIsreadonly(pBt->pPager) ) pBt->btsFlags |= BTS_READ_ONLY; #if defined(SQLITE_SECURE_DELETE) pBt->btsFlags |= BTS_SECURE_DELETE; #elif defined(SQLITE_FAST_SECURE_DELETE) pBt->btsFlags |= BTS_OVERWRITE; #endif /* EVIDENCE-OF: R-51873-39618 The page size for a database file is ** determined by the 2-byte integer located at an offset of 16 bytes from ** the beginning of the database file. */ pBt->pageSize = (zDbHeader[16]<<8) | (zDbHeader[17]<<16); if( pBt->pageSize<512 || pBt->pageSize>SQLITE_MAX_PAGE_SIZE || ((pBt->pageSize-1)&pBt->pageSize)!=0 ){ pBt->pageSize = 0; #ifndef SQLITE_OMIT_AUTOVACUUM /* If the magic name ":memory:" will create an in-memory database, then ** leave the autoVacuum mode at 0 (do not auto-vacuum), even if ** SQLITE_DEFAULT_AUTOVACUUM is true. On the other hand, if ** SQLITE_OMIT_MEMORYDB has been defined, then ":memory:" is just a ** regular file-name. In this case the auto-vacuum applies as per normal. */ if( zFilename && !isMemdb ){ pBt->autoVacuum = (SQLITE_DEFAULT_AUTOVACUUM ? 1 : 0); pBt->incrVacuum = (SQLITE_DEFAULT_AUTOVACUUM==2 ? 1 : 0); } #endif nReserve = 0; }else{ /* EVIDENCE-OF: R-37497-42412 The size of the reserved region is ** determined by the one-byte unsigned integer found at an offset of 20 ** into the database file header. */ nReserve = zDbHeader[20]; pBt->btsFlags |= BTS_PAGESIZE_FIXED; #ifndef SQLITE_OMIT_AUTOVACUUM pBt->autoVacuum = (get4byte(&zDbHeader[36 + 4*4])?1:0); pBt->incrVacuum = (get4byte(&zDbHeader[36 + 7*4])?1:0); #endif } rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve); if( rc ) goto btree_open_out; pBt->usableSize = pBt->pageSize - nReserve; assert( (pBt->pageSize & 7)==0 ); /* 8-byte alignment of pageSize */ #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* Add the new BtShared object to the linked list sharable BtShareds. */ pBt->nRef = 1; if( p->sharable ){ MUTEX_LOGIC( sqlite3_mutex *mutexShared; ) MUTEX_LOGIC( mutexShared = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER);) if( SQLITE_THREADSAFE && sqlite3GlobalConfig.bCoreMutex ){ pBt->mutex = sqlite3MutexAlloc(SQLITE_MUTEX_FAST); if( pBt->mutex==0 ){ rc = SQLITE_NOMEM_BKPT; goto btree_open_out; } } sqlite3_mutex_enter(mutexShared); pBt->pNext = GLOBAL(BtShared*,sqlite3SharedCacheList); GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt; sqlite3_mutex_leave(mutexShared); } #endif } #if !defined(SQLITE_OMIT_SHARED_CACHE) && !defined(SQLITE_OMIT_DISKIO) /* If the new Btree uses a sharable pBtShared, then link the new ** Btree into the list of all sharable Btrees for the same connection. ** The list is kept in ascending order by pBt address. 
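**
** For example, if the connection already holds sharable b-trees whose
** BtShared addresses are A and C with A<C, and the new p->pBt lies between
** them, the loop below links p after the Btree using A and before the one
** using C, so the list stays sorted by address.  Keeping a canonical,
** address-based order presumably gives every connection the same order in
** which to take the BtShared mutexes, avoiding lock-order inversions.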
*/ if( p->sharable ){ int i; Btree *pSib; for(i=0; i<db->nDb; i++){ if( (pSib = db->aDb[i].pBt)!=0 && pSib->sharable ){ while( pSib->pPrev ){ pSib = pSib->pPrev; } if( (uptr)p->pBt<(uptr)pSib->pBt ){ p->pNext = pSib; p->pPrev = 0; pSib->pPrev = p; }else{ while( pSib->pNext && (uptr)pSib->pNext->pBt<(uptr)p->pBt ){ pSib = pSib->pNext; } p->pNext = pSib->pNext; p->pPrev = pSib; if( p->pNext ){ p->pNext->pPrev = p; } pSib->pNext = p; } break; } } } #endif *ppBtree = p; btree_open_out: if( rc!=SQLITE_OK ){ if( pBt && pBt->pPager ){ sqlite3PagerClose(pBt->pPager, 0); } sqlite3_free(pBt); sqlite3_free(p); *ppBtree = 0; }else{ sqlite3_file *pFile; /* If the B-Tree was successfully opened, set the pager-cache size to the ** default value. Except, when opening on an existing shared pager-cache, ** do not change the pager-cache size. */ if( sqlite3BtreeSchema(p, 0, 0)==0 ){ sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE); } pFile = sqlite3PagerFile(pBt->pPager); if( pFile->pMethods ){ sqlite3OsFileControlHint(pFile, SQLITE_FCNTL_PDB, (void*)&pBt->db); } } if( mutexOpen ){ assert( sqlite3_mutex_held(mutexOpen) ); sqlite3_mutex_leave(mutexOpen); } assert( rc!=SQLITE_OK || sqlite3BtreeConnectionCount(*ppBtree)>0 ); return rc; } /* ** Decrement the BtShared.nRef counter. When it reaches zero, ** remove the BtShared structure from the sharing list. Return ** true if the BtShared.nRef counter reaches zero and return ** false if it is still positive. */ static int removeFromSharingList(BtShared *pBt){ #ifndef SQLITE_OMIT_SHARED_CACHE MUTEX_LOGIC( sqlite3_mutex *pMaster; ) BtShared *pList; int removed = 0; assert( sqlite3_mutex_notheld(pBt->mutex) ); MUTEX_LOGIC( pMaster = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_MASTER); ) sqlite3_mutex_enter(pMaster); pBt->nRef--; if( pBt->nRef<=0 ){ if( GLOBAL(BtShared*,sqlite3SharedCacheList)==pBt ){ GLOBAL(BtShared*,sqlite3SharedCacheList) = pBt->pNext; }else{ pList = GLOBAL(BtShared*,sqlite3SharedCacheList); while( ALWAYS(pList) && pList->pNext!=pBt ){ pList=pList->pNext; } if( ALWAYS(pList) ){ pList->pNext = pBt->pNext; } } if( SQLITE_THREADSAFE ){ sqlite3_mutex_free(pBt->mutex); } removed = 1; } sqlite3_mutex_leave(pMaster); return removed; #else return 1; #endif } /* ** Make sure pBt->pTmpSpace points to an allocation of ** MX_CELL_SIZE(pBt) bytes with a 4-byte prefix for a left-child ** pointer. */ static void allocateTempSpace(BtShared *pBt){ if( !pBt->pTmpSpace ){ pBt->pTmpSpace = sqlite3PageMalloc( pBt->pageSize ); /* One of the uses of pBt->pTmpSpace is to format cells before ** inserting them into a leaf page (function fillInCell()). If ** a cell is less than 4 bytes in size, it is rounded up to 4 bytes ** by the various routines that manipulate binary cells. Which ** can mean that fillInCell() only initializes the first 2 or 3 ** bytes of pTmpSpace, but that the first 4 bytes are copied from ** it into a database page. This is not actually a problem, but it ** does cause a valgrind error when the 1 or 2 bytes of unitialized ** data is passed to system call write(). So to avoid this error, ** zero the first 4 bytes of temp space here. ** ** Also: Provide four bytes of initialized space before the ** beginning of pTmpSpace as an area available to prepend the ** left-child pointer to the beginning of a cell. 
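**
** As an illustrative sketch only (pgnoChild and pCellWithChild are made-up
** names), code that has formatted a cell at pBt->pTmpSpace and needs to
** prepend a left-child page number could do roughly:
**
**     put4byte(pBt->pTmpSpace - 4, pgnoChild);
**     pCellWithChild = pBt->pTmpSpace - 4;  /* cell preceded by child ptr */
**
** which is safe precisely because of the four extra bytes reserved below.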
*/ if( pBt->pTmpSpace ){ memset(pBt->pTmpSpace, 0, 8); pBt->pTmpSpace += 4; } } } /* ** Free the pBt->pTmpSpace allocation */ static void freeTempSpace(BtShared *pBt){ if( pBt->pTmpSpace ){ pBt->pTmpSpace -= 4; sqlite3PageFree(pBt->pTmpSpace); pBt->pTmpSpace = 0; } } /* ** Close an open database and invalidate all cursors. */ int sqlite3BtreeClose(Btree *p){ BtShared *pBt = p->pBt; BtCursor *pCur; /* Close all cursors opened via this handle. */ assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); pCur = pBt->pCursor; while( pCur ){ BtCursor *pTmp = pCur; pCur = pCur->pNext; if( pTmp->pBtree==p ){ sqlite3BtreeCloseCursor(pTmp); } } /* Rollback any active transaction and free the handle structure. ** The call to sqlite3BtreeRollback() drops any table-locks held by ** this handle. */ sqlite3BtreeRollback(p, SQLITE_OK, 0); sqlite3BtreeLeave(p); /* If there are still other outstanding references to the shared-btree ** structure, return now. The remainder of this procedure cleans ** up the shared-btree. */ assert( p->wantToLock==0 && p->locked==0 ); if( !p->sharable || removeFromSharingList(pBt) ){ /* The pBt is no longer on the sharing list, so we can access ** it without having to hold the mutex. ** ** Clean out and delete the BtShared object. */ assert( !pBt->pCursor ); sqlite3PagerClose(pBt->pPager, p->db); if( pBt->xFreeSchema && pBt->pSchema ){ pBt->xFreeSchema(pBt->pSchema); } sqlite3DbFree(0, pBt->pSchema); freeTempSpace(pBt); sqlite3_free(pBt); } #ifndef SQLITE_OMIT_SHARED_CACHE assert( p->wantToLock==0 ); assert( p->locked==0 ); if( p->pPrev ) p->pPrev->pNext = p->pNext; if( p->pNext ) p->pNext->pPrev = p->pPrev; #endif sqlite3_free(p); return SQLITE_OK; } /* ** Change the "soft" limit on the number of pages in the cache. ** Unused and unmodified pages will be recycled when the number of ** pages in the cache exceeds this soft limit. But the size of the ** cache is allowed to grow larger than this limit if it contains ** dirty pages or pages still in active use. */ int sqlite3BtreeSetCacheSize(Btree *p, int mxPage){ BtShared *pBt = p->pBt; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); sqlite3PagerSetCachesize(pBt->pPager, mxPage); sqlite3BtreeLeave(p); return SQLITE_OK; } /* ** Change the "spill" limit on the number of pages in the cache. ** If the number of pages exceeds this limit during a write transaction, ** the pager might attempt to "spill" pages to the journal early in ** order to free up memory. ** ** The value returned is the current spill size. If zero is passed ** as an argument, no changes are made to the spill size setting, so ** using mxPage of 0 is a way to query the current spill size. */ int sqlite3BtreeSetSpillSize(Btree *p, int mxPage){ BtShared *pBt = p->pBt; int res; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); res = sqlite3PagerSetSpillsize(pBt->pPager, mxPage); sqlite3BtreeLeave(p); return res; } #if SQLITE_MAX_MMAP_SIZE>0 /* ** Change the limit on the amount of the database file that may be ** memory mapped. */ int sqlite3BtreeSetMmapLimit(Btree *p, sqlite3_int64 szMmap){ BtShared *pBt = p->pBt; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); sqlite3PagerSetMmapLimit(pBt->pPager, szMmap); sqlite3BtreeLeave(p); return SQLITE_OK; } #endif /* SQLITE_MAX_MMAP_SIZE>0 */ /* ** Change the way data is synced to disk in order to increase or decrease ** how well the database resists damage due to OS crashes and power ** failures. 
** Level 1 is the same as asynchronous (no syncs() occur and
** there is a high probability of damage)  Level 2 is the default.  There
** is a very low but non-zero probability of damage.  Level 3 reduces the
** probability of damage to near zero but with a write performance reduction.
*/
#ifndef SQLITE_OMIT_PAGER_PRAGMAS
int sqlite3BtreeSetPagerFlags(
  Btree *p,              /* The btree to set the safety level on */
  unsigned pgFlags       /* Various PAGER_* flags */
){
  BtShared *pBt = p->pBt;
  assert( sqlite3_mutex_held(p->db->mutex) );
  sqlite3BtreeEnter(p);
  sqlite3PagerSetFlags(pBt->pPager, pgFlags);
  sqlite3BtreeLeave(p);
  return SQLITE_OK;
}
#endif

/*
** Change the default page size and the number of reserved bytes per page.
** Or, if the page size has already been fixed, return SQLITE_READONLY
** without changing anything.
**
** The page size must be a power of 2 between 512 and 65536.  If the page
** size supplied does not meet this constraint then the page size is not
** changed.
**
** Page sizes are constrained to be a power of two so that the region
** of the database file used for locking (beginning at PENDING_BYTE,
** the first byte past the 1GB boundary, 0x40000000) needs to occur
** at the beginning of a page.
**
** If parameter nReserve is less than zero, then the number of reserved
** bytes per page is left unchanged.
**
** If iFix!=0, then the BTS_PAGESIZE_FIXED flag is set so that the page size
** and autovacuum mode can no longer be changed.
*/
int sqlite3BtreeSetPageSize(Btree *p, int pageSize, int nReserve, int iFix){
  int rc = SQLITE_OK;
  BtShared *pBt = p->pBt;
  assert( nReserve>=-1 && nReserve<=255 );
  sqlite3BtreeEnter(p);
#if SQLITE_HAS_CODEC
  if( nReserve>pBt->optimalReserve ) pBt->optimalReserve = (u8)nReserve;
#endif
  if( pBt->btsFlags & BTS_PAGESIZE_FIXED ){
    sqlite3BtreeLeave(p);
    return SQLITE_READONLY;
  }
  if( nReserve<0 ){
    nReserve = pBt->pageSize - pBt->usableSize;
  }
  assert( nReserve>=0 && nReserve<=255 );
  if( pageSize>=512 && pageSize<=SQLITE_MAX_PAGE_SIZE &&
        ((pageSize-1)&pageSize)==0 ){
    assert( (pageSize & 7)==0 );
    assert( !pBt->pCursor );
    pBt->pageSize = (u32)pageSize;
    freeTempSpace(pBt);
  }
  rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, nReserve);
  pBt->usableSize = pBt->pageSize - (u16)nReserve;
  if( iFix ) pBt->btsFlags |= BTS_PAGESIZE_FIXED;
  sqlite3BtreeLeave(p);
  return rc;
}

/*
** Return the currently defined page size
*/
int sqlite3BtreeGetPageSize(Btree *p){
  return p->pBt->pageSize;
}

/*
** This function is similar to sqlite3BtreeGetReserve(), except that it
** may only be called if it is guaranteed that the b-tree mutex is already
** held.
**
** This is useful in one special case in the backup API code where it is
** known that the shared b-tree mutex is held, but the mutex on the
** database handle that owns *p is not.  In this case if sqlite3BtreeEnter()
** were to be called, it might collide with some other operation on the
** database handle that owns *p, causing undefined behavior.
*/
int sqlite3BtreeGetReserveNoMutex(Btree *p){
  int n;
  assert( sqlite3_mutex_held(p->pBt->mutex) );
  n = p->pBt->pageSize - p->pBt->usableSize;
  return n;
}

/*
** Return the number of bytes of space at the end of every page that
** are intentionally left unused.  This is the "reserved" space that is
** sometimes used by extensions.
**
** If SQLITE_HAS_CODEC is defined then the number returned is the
** greater of the current reserved space and the maximum requested
** reserve space.
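**
** Illustrative sketch only: a backup- or VACUUM-style copy that wants the
** destination page geometry to stay compatible with a source b-tree might
** combine this routine with the page-size routines above, where pSrc and
** pDest are hypothetical Btree handles:
**
**     int nRes = sqlite3BtreeGetOptimalReserve(pSrc);
**     rc = sqlite3BtreeSetPageSize(pDest,
**                sqlite3BtreeGetPageSize(pSrc), nRes, 0);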
*/ int sqlite3BtreeGetOptimalReserve(Btree *p){ int n; sqlite3BtreeEnter(p); n = sqlite3BtreeGetReserveNoMutex(p); #ifdef SQLITE_HAS_CODEC if( n<p->pBt->optimalReserve ) n = p->pBt->optimalReserve; #endif sqlite3BtreeLeave(p); return n; } /* ** Set the maximum page count for a database if mxPage is positive. ** No changes are made if mxPage is 0 or negative. ** Regardless of the value of mxPage, return the maximum page count. */ int sqlite3BtreeMaxPageCount(Btree *p, int mxPage){ int n; sqlite3BtreeEnter(p); n = sqlite3PagerMaxPageCount(p->pBt->pPager, mxPage); sqlite3BtreeLeave(p); return n; } /* ** Change the values for the BTS_SECURE_DELETE and BTS_OVERWRITE flags: ** ** newFlag==0 Both BTS_SECURE_DELETE and BTS_OVERWRITE are cleared ** newFlag==1 BTS_SECURE_DELETE set and BTS_OVERWRITE is cleared ** newFlag==2 BTS_SECURE_DELETE cleared and BTS_OVERWRITE is set ** newFlag==(-1) No changes ** ** This routine acts as a query if newFlag is less than zero ** ** With BTS_OVERWRITE set, deleted content is overwritten by zeros, but ** freelist leaf pages are not written back to the database. Thus in-page ** deleted content is cleared, but freelist deleted content is not. ** ** With BTS_SECURE_DELETE, operation is like BTS_OVERWRITE with the addition ** that freelist leaf pages are written back into the database, increasing ** the amount of disk I/O. */ int sqlite3BtreeSecureDelete(Btree *p, int newFlag){ int b; if( p==0 ) return 0; sqlite3BtreeEnter(p); assert( BTS_OVERWRITE==BTS_SECURE_DELETE*2 ); assert( BTS_FAST_SECURE==(BTS_OVERWRITE|BTS_SECURE_DELETE) ); if( newFlag>=0 ){ p->pBt->btsFlags &= ~BTS_FAST_SECURE; p->pBt->btsFlags |= BTS_SECURE_DELETE*newFlag; } b = (p->pBt->btsFlags & BTS_FAST_SECURE)/BTS_SECURE_DELETE; sqlite3BtreeLeave(p); return b; } /* ** Change the 'auto-vacuum' property of the database. If the 'autoVacuum' ** parameter is non-zero, then auto-vacuum mode is enabled. If zero, it ** is disabled. The default value for the auto-vacuum property is ** determined by the SQLITE_DEFAULT_AUTOVACUUM macro. */ int sqlite3BtreeSetAutoVacuum(Btree *p, int autoVacuum){ #ifdef SQLITE_OMIT_AUTOVACUUM return SQLITE_READONLY; #else BtShared *pBt = p->pBt; int rc = SQLITE_OK; u8 av = (u8)autoVacuum; sqlite3BtreeEnter(p); if( (pBt->btsFlags & BTS_PAGESIZE_FIXED)!=0 && (av ?1:0)!=pBt->autoVacuum ){ rc = SQLITE_READONLY; }else{ pBt->autoVacuum = av ?1:0; pBt->incrVacuum = av==2 ?1:0; } sqlite3BtreeLeave(p); return rc; #endif } /* ** Return the value of the 'auto-vacuum' property. If auto-vacuum is ** enabled 1 is returned. Otherwise 0. */ int sqlite3BtreeGetAutoVacuum(Btree *p){ #ifdef SQLITE_OMIT_AUTOVACUUM return BTREE_AUTOVACUUM_NONE; #else int rc; sqlite3BtreeEnter(p); rc = ( (!p->pBt->autoVacuum)?BTREE_AUTOVACUUM_NONE: (!p->pBt->incrVacuum)?BTREE_AUTOVACUUM_FULL: BTREE_AUTOVACUUM_INCR ); sqlite3BtreeLeave(p); return rc; #endif } /* ** If the user has not set the safety-level for this database connection ** using "PRAGMA synchronous", and if the safety-level is not already ** set to the value passed to this function as the second parameter, ** set it so. 
*/ #if SQLITE_DEFAULT_SYNCHRONOUS!=SQLITE_DEFAULT_WAL_SYNCHRONOUS \ && !defined(SQLITE_OMIT_WAL) static void setDefaultSyncFlag(BtShared *pBt, u8 safety_level){ sqlite3 *db; Db *pDb; if( (db=pBt->db)!=0 && (pDb=db->aDb)!=0 ){ while( pDb->pBt==0 || pDb->pBt->pBt!=pBt ){ pDb++; } if( pDb->bSyncSet==0 && pDb->safety_level!=safety_level && pDb!=&db->aDb[1] ){ pDb->safety_level = safety_level; sqlite3PagerSetFlags(pBt->pPager, pDb->safety_level | (db->flags & PAGER_FLAGS_MASK)); } } } #else # define setDefaultSyncFlag(pBt,safety_level) #endif /* Forward declaration */ static int newDatabase(BtShared*); /* ** Get a reference to pPage1 of the database file. This will ** also acquire a readlock on that file. ** ** SQLITE_OK is returned on success. If the file is not a ** well-formed database file, then SQLITE_CORRUPT is returned. ** SQLITE_BUSY is returned if the database is locked. SQLITE_NOMEM ** is returned if we run out of memory. */ static int lockBtree(BtShared *pBt){ int rc; /* Result code from subfunctions */ MemPage *pPage1; /* Page 1 of the database file */ u32 nPage; /* Number of pages in the database */ u32 nPageFile = 0; /* Number of pages in the database file */ u32 nPageHeader; /* Number of pages in the database according to hdr */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( pBt->pPage1==0 ); rc = sqlite3PagerSharedLock(pBt->pPager); if( rc!=SQLITE_OK ) return rc; rc = btreeGetPage(pBt, 1, &pPage1, 0); if( rc!=SQLITE_OK ) return rc; /* Do some checking to help insure the file we opened really is ** a valid database file. */ nPage = nPageHeader = get4byte(28+(u8*)pPage1->aData); sqlite3PagerPagecount(pBt->pPager, (int*)&nPageFile); if( nPage==0 || memcmp(24+(u8*)pPage1->aData, 92+(u8*)pPage1->aData,4)!=0 ){ nPage = nPageFile; } if( (pBt->db->flags & SQLITE_ResetDatabase)!=0 ){ nPage = 0; } if( nPage>0 ){ u32 pageSize; u32 usableSize; u8 *page1 = pPage1->aData; rc = SQLITE_NOTADB; /* EVIDENCE-OF: R-43737-39999 Every valid SQLite database file begins ** with the following 16 bytes (in hex): 53 51 4c 69 74 65 20 66 6f 72 6d ** 61 74 20 33 00. */ if( memcmp(page1, zMagicHeader, 16)!=0 ){ goto page1_init_failed; } #ifdef SQLITE_OMIT_WAL if( page1[18]>1 ){ pBt->btsFlags |= BTS_READ_ONLY; } if( page1[19]>1 ){ goto page1_init_failed; } #else if( page1[18]>2 ){ pBt->btsFlags |= BTS_READ_ONLY; } if( page1[19]>2 ){ goto page1_init_failed; } /* If the write version is set to 2, this database should be accessed ** in WAL mode. If the log is not already open, open it now. Then ** return SQLITE_OK and return without populating BtShared.pPage1. ** The caller detects this and calls this function again. This is ** required as the version of page 1 currently in the page1 buffer ** may not be the latest version - there may be a newer one in the log ** file. */ if( page1[19]==2 && (pBt->btsFlags & BTS_NO_WAL)==0 ){ int isOpen = 0; rc = sqlite3PagerOpenWal(pBt->pPager, &isOpen); if( rc!=SQLITE_OK ){ goto page1_init_failed; }else{ setDefaultSyncFlag(pBt, SQLITE_DEFAULT_WAL_SYNCHRONOUS+1); if( isOpen==0 ){ releasePageOne(pPage1); return SQLITE_OK; } } rc = SQLITE_NOTADB; }else{ setDefaultSyncFlag(pBt, SQLITE_DEFAULT_SYNCHRONOUS+1); } #endif /* EVIDENCE-OF: R-15465-20813 The maximum and minimum embedded payload ** fractions and the leaf payload fraction values must be 64, 32, and 32. ** ** The original design allowed these amounts to vary, but as of ** version 3.6.0, we require them to be fixed. 
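**
** (In the check that follows, the octal escapes \100, \040 and \040
** encode exactly those three values: 64, 32 and 32.)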
*/ if( memcmp(&page1[21], "\100\040\040",3)!=0 ){ goto page1_init_failed; } /* EVIDENCE-OF: R-51873-39618 The page size for a database file is ** determined by the 2-byte integer located at an offset of 16 bytes from ** the beginning of the database file. */ pageSize = (page1[16]<<8) | (page1[17]<<16); /* EVIDENCE-OF: R-25008-21688 The size of a page is a power of two ** between 512 and 65536 inclusive. */ if( ((pageSize-1)&pageSize)!=0 || pageSize>SQLITE_MAX_PAGE_SIZE || pageSize<=256 ){ goto page1_init_failed; } pBt->btsFlags |= BTS_PAGESIZE_FIXED; assert( (pageSize & 7)==0 ); /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte ** integer at offset 20 is the number of bytes of space at the end of ** each page to reserve for extensions. ** ** EVIDENCE-OF: R-37497-42412 The size of the reserved region is ** determined by the one-byte unsigned integer found at an offset of 20 ** into the database file header. */ usableSize = pageSize - page1[20]; if( (u32)pageSize!=pBt->pageSize ){ /* After reading the first page of the database assuming a page size ** of BtShared.pageSize, we have discovered that the page-size is ** actually pageSize. Unlock the database, leave pBt->pPage1 at ** zero and return SQLITE_OK. The caller will call this function ** again with the correct page-size. */ releasePageOne(pPage1); pBt->usableSize = usableSize; pBt->pageSize = pageSize; freeTempSpace(pBt); rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, pageSize-usableSize); return rc; } if( sqlite3WritableSchema(pBt->db)==0 && nPage>nPageFile ){ rc = SQLITE_CORRUPT_BKPT; goto page1_init_failed; } /* EVIDENCE-OF: R-28312-64704 However, the usable size is not allowed to ** be less than 480. In other words, if the page size is 512, then the ** reserved space size cannot exceed 32. */ if( usableSize<480 ){ goto page1_init_failed; } pBt->pageSize = pageSize; pBt->usableSize = usableSize; #ifndef SQLITE_OMIT_AUTOVACUUM pBt->autoVacuum = (get4byte(&page1[36 + 4*4])?1:0); pBt->incrVacuum = (get4byte(&page1[36 + 7*4])?1:0); #endif } /* maxLocal is the maximum amount of payload to store locally for ** a cell. Make sure it is small enough so that at least minFanout ** cells can will fit on one page. We assume a 10-byte page header. ** Besides the payload, the cell must store: ** 2-byte pointer to the cell ** 4-byte child pointer ** 9-byte nKey value ** 4-byte nData value ** 4-byte overflow page pointer ** So a cell consists of a 2-byte pointer, a header which is as much as ** 17 bytes long, 0 to N bytes of payload, and an optional 4 byte overflow ** page pointer. */ pBt->maxLocal = (u16)((pBt->usableSize-12)*64/255 - 23); pBt->minLocal = (u16)((pBt->usableSize-12)*32/255 - 23); pBt->maxLeaf = (u16)(pBt->usableSize - 35); pBt->minLeaf = (u16)((pBt->usableSize-12)*32/255 - 23); if( pBt->maxLocal>127 ){ pBt->max1bytePayload = 127; }else{ pBt->max1bytePayload = (u8)pBt->maxLocal; } assert( pBt->maxLeaf + 23 <= MX_CELL_SIZE(pBt) ); pBt->pPage1 = pPage1; pBt->nPage = nPage; return SQLITE_OK; page1_init_failed: releasePageOne(pPage1); pBt->pPage1 = 0; return rc; } #ifndef NDEBUG /* ** Return the number of cursors open on pBt. This is for use ** in assert() expressions, so it is only compiled if NDEBUG is not ** defined. ** ** Only write cursors are counted if wrOnly is true. If wrOnly is ** false then all cursors are counted. ** ** For the purposes of this routine, a cursor is any cursor that ** is capable of reading or writing to the database. 
Cursors that ** have been tripped into the CURSOR_FAULT state are not counted. */ static int countValidCursors(BtShared *pBt, int wrOnly){ BtCursor *pCur; int r = 0; for(pCur=pBt->pCursor; pCur; pCur=pCur->pNext){ if( (wrOnly==0 || (pCur->curFlags & BTCF_WriteFlag)!=0) && pCur->eState!=CURSOR_FAULT ) r++; } return r; } #endif /* ** If there are no outstanding cursors and we are not in the middle ** of a transaction but there is a read lock on the database, then ** this routine unrefs the first page of the database file which ** has the effect of releasing the read lock. ** ** If there is a transaction in progress, this routine is a no-op. */ static void unlockBtreeIfUnused(BtShared *pBt){ assert( sqlite3_mutex_held(pBt->mutex) ); assert( countValidCursors(pBt,0)==0 || pBt->inTransaction>TRANS_NONE ); if( pBt->inTransaction==TRANS_NONE && pBt->pPage1!=0 ){ MemPage *pPage1 = pBt->pPage1; assert( pPage1->aData ); assert( sqlite3PagerRefcount(pBt->pPager)==1 ); pBt->pPage1 = 0; releasePageOne(pPage1); } } /* ** If pBt points to an empty file then convert that empty file ** into a new empty database by initializing the first page of ** the database. */ static int newDatabase(BtShared *pBt){ MemPage *pP1; unsigned char *data; int rc; assert( sqlite3_mutex_held(pBt->mutex) ); if( pBt->nPage>0 ){ return SQLITE_OK; } pP1 = pBt->pPage1; assert( pP1!=0 ); data = pP1->aData; rc = sqlite3PagerWrite(pP1->pDbPage); if( rc ) return rc; memcpy(data, zMagicHeader, sizeof(zMagicHeader)); assert( sizeof(zMagicHeader)==16 ); data[16] = (u8)((pBt->pageSize>>8)&0xff); data[17] = (u8)((pBt->pageSize>>16)&0xff); data[18] = 1; data[19] = 1; assert( pBt->usableSize<=pBt->pageSize && pBt->usableSize+255>=pBt->pageSize); data[20] = (u8)(pBt->pageSize - pBt->usableSize); data[21] = 64; data[22] = 32; data[23] = 32; memset(&data[24], 0, 100-24); zeroPage(pP1, PTF_INTKEY|PTF_LEAF|PTF_LEAFDATA ); pBt->btsFlags |= BTS_PAGESIZE_FIXED; #ifndef SQLITE_OMIT_AUTOVACUUM assert( pBt->autoVacuum==1 || pBt->autoVacuum==0 ); assert( pBt->incrVacuum==1 || pBt->incrVacuum==0 ); put4byte(&data[36 + 4*4], pBt->autoVacuum); put4byte(&data[36 + 7*4], pBt->incrVacuum); #endif pBt->nPage = 1; data[31] = 1; return SQLITE_OK; } /* ** Initialize the first page of the database file (creating a database ** consisting of a single page and no schema objects). Return SQLITE_OK ** if successful, or an SQLite error code otherwise. */ int sqlite3BtreeNewDb(Btree *p){ int rc; sqlite3BtreeEnter(p); p->pBt->nPage = 0; rc = newDatabase(p->pBt); sqlite3BtreeLeave(p); return rc; } /* ** Attempt to start a new transaction. A write-transaction ** is started if the second argument is nonzero, otherwise a read- ** transaction. If the second argument is 2 or more and exclusive ** transaction is started, meaning that no other process is allowed ** to access the database. A preexisting transaction may not be ** upgraded to exclusive by calling this routine a second time - the ** exclusivity flag only works for a new transaction. ** ** A write-transaction must be started before attempting any ** changes to the database. None of the following routines ** will work unless a transaction is started first: ** ** sqlite3BtreeCreateTable() ** sqlite3BtreeCreateIndex() ** sqlite3BtreeClearTable() ** sqlite3BtreeDropTable() ** sqlite3BtreeInsert() ** sqlite3BtreeDelete() ** sqlite3BtreeUpdateMeta() ** ** If an initial attempt to acquire the lock fails because of lock contention ** and the database was previously unlocked, then invoke the busy handler ** if there is one. 
But if there was previously a read-lock, do not ** invoke the busy handler - just return SQLITE_BUSY. SQLITE_BUSY is ** returned when there is already a read-lock in order to avoid a deadlock. ** ** Suppose there are two processes A and B. A has a read lock and B has ** a reserved lock. B tries to promote to exclusive but is blocked because ** of A's read lock. A tries to promote to reserved but is blocked by B. ** One or the other of the two processes must give way or there can be ** no progress. By returning SQLITE_BUSY and not invoking the busy callback ** when A already has a read lock, we encourage A to give up and let B ** proceed. */ int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){ BtShared *pBt = p->pBt; int rc = SQLITE_OK; sqlite3BtreeEnter(p); btreeIntegrity(p); /* If the btree is already in a write-transaction, or it ** is already in a read-transaction and a read-transaction ** is requested, this is a no-op. */ if( p->inTrans==TRANS_WRITE || (p->inTrans==TRANS_READ && !wrflag) ){ goto trans_begun; } assert( pBt->inTransaction==TRANS_WRITE || IfNotOmitAV(pBt->bDoTruncate)==0 ); if( (p->db->flags & SQLITE_ResetDatabase) && sqlite3PagerIsreadonly(pBt->pPager)==0 ){ pBt->btsFlags &= ~BTS_READ_ONLY; } /* Write transactions are not possible on a read-only database */ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 && wrflag ){ rc = SQLITE_READONLY; goto trans_begun; } #ifndef SQLITE_OMIT_SHARED_CACHE { sqlite3 *pBlock = 0; /* If another database handle has already opened a write transaction ** on this shared-btree structure and a second write transaction is ** requested, return SQLITE_LOCKED. */ if( (wrflag && pBt->inTransaction==TRANS_WRITE) || (pBt->btsFlags & BTS_PENDING)!=0 ){ pBlock = pBt->pWriter->db; }else if( wrflag>1 ){ BtLock *pIter; for(pIter=pBt->pLock; pIter; pIter=pIter->pNext){ if( pIter->pBtree!=p ){ pBlock = pIter->pBtree->db; break; } } } if( pBlock ){ sqlite3ConnectionBlocked(p->db, pBlock); rc = SQLITE_LOCKED_SHAREDCACHE; goto trans_begun; } } #endif /* Any read-only or read-write transaction implies a read-lock on ** page 1. So if some other shared-cache client already has a write-lock ** on page 1, the transaction cannot be opened. */ rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); if( SQLITE_OK!=rc ) goto trans_begun; pBt->btsFlags &= ~BTS_INITIALLY_EMPTY; if( pBt->nPage==0 ) pBt->btsFlags |= BTS_INITIALLY_EMPTY; do { /* Call lockBtree() until either pBt->pPage1 is populated or ** lockBtree() returns something other than SQLITE_OK. lockBtree() ** may return SQLITE_OK but leave pBt->pPage1 set to 0 if after ** reading page 1 it discovers that the page-size of the database ** file is not pBt->pageSize. In this case lockBtree() will update ** pBt->pageSize to the page-size of the file on disk. */ while( pBt->pPage1==0 && SQLITE_OK==(rc = lockBtree(pBt)) ); if( rc==SQLITE_OK && wrflag ){ if( (pBt->btsFlags & BTS_READ_ONLY)!=0 ){ rc = SQLITE_READONLY; }else{ rc = sqlite3PagerBegin(pBt->pPager,wrflag>1,sqlite3TempInMemory(p->db)); if( rc==SQLITE_OK ){ rc = newDatabase(pBt); }else if( rc==SQLITE_BUSY_SNAPSHOT && pBt->inTransaction==TRANS_NONE ){ /* if there was no transaction opened when this function was ** called and SQLITE_BUSY_SNAPSHOT is returned, change the error ** code to SQLITE_BUSY. 
*/ rc = SQLITE_BUSY; } } } if( rc!=SQLITE_OK ){ unlockBtreeIfUnused(pBt); } }while( (rc&0xFF)==SQLITE_BUSY && pBt->inTransaction==TRANS_NONE && btreeInvokeBusyHandler(pBt) ); sqlite3PagerResetLockTimeout(pBt->pPager); if( rc==SQLITE_OK ){ if( p->inTrans==TRANS_NONE ){ pBt->nTransaction++; #ifndef SQLITE_OMIT_SHARED_CACHE if( p->sharable ){ assert( p->lock.pBtree==p && p->lock.iTable==1 ); p->lock.eLock = READ_LOCK; p->lock.pNext = pBt->pLock; pBt->pLock = &p->lock; } #endif } p->inTrans = (wrflag?TRANS_WRITE:TRANS_READ); if( p->inTrans>pBt->inTransaction ){ pBt->inTransaction = p->inTrans; } if( wrflag ){ MemPage *pPage1 = pBt->pPage1; #ifndef SQLITE_OMIT_SHARED_CACHE assert( !pBt->pWriter ); pBt->pWriter = p; pBt->btsFlags &= ~BTS_EXCLUSIVE; if( wrflag>1 ) pBt->btsFlags |= BTS_EXCLUSIVE; #endif /* If the db-size header field is incorrect (as it may be if an old ** client has been writing the database file), update it now. Doing ** this sooner rather than later means the database size can safely ** re-read the database size from page 1 if a savepoint or transaction ** rollback occurs within the transaction. */ if( pBt->nPage!=get4byte(&pPage1->aData[28]) ){ rc = sqlite3PagerWrite(pPage1->pDbPage); if( rc==SQLITE_OK ){ put4byte(&pPage1->aData[28], pBt->nPage); } } } } trans_begun: if( rc==SQLITE_OK ){ if( pSchemaVersion ){ *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]); } if( wrflag ){ /* This call makes sure that the pager has the correct number of ** open savepoints. If the second parameter is greater than 0 and ** the sub-journal is not already open, then it will be opened here. */ rc = sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint); } } btreeIntegrity(p); sqlite3BtreeLeave(p); return rc; } #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Set the pointer-map entries for all children of page pPage. Also, if ** pPage contains cells that point to overflow pages, set the pointer ** map entries for the overflow pages as well. */ static int setChildPtrmaps(MemPage *pPage){ int i; /* Counter variable */ int nCell; /* Number of cells in page pPage */ int rc; /* Return code */ BtShared *pBt = pPage->pBt; Pgno pgno = pPage->pgno; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); rc = pPage->isInit ? SQLITE_OK : btreeInitPage(pPage); if( rc!=SQLITE_OK ) return rc; nCell = pPage->nCell; for(i=0; i<nCell; i++){ u8 *pCell = findCell(pPage, i); ptrmapPutOvflPtr(pPage, pPage, pCell, &rc); if( !pPage->leaf ){ Pgno childPgno = get4byte(pCell); ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc); } } if( !pPage->leaf ){ Pgno childPgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc); } return rc; } /* ** Somewhere on pPage is a pointer to page iFrom. Modify this pointer so ** that it points to iTo. Parameter eType describes the type of pointer to ** be modified, as follows: ** ** PTRMAP_BTREE: pPage is a btree-page. The pointer points at a child ** page of pPage. ** ** PTRMAP_OVERFLOW1: pPage is a btree-page. The pointer points at an overflow ** page pointed to by one of the cells on pPage. ** ** PTRMAP_OVERFLOW2: pPage is an overflow-page. The pointer points at the next ** overflow page in the list. */ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); if( eType==PTRMAP_OVERFLOW2 ){ /* The pointer is always the first 4 bytes of the page in this case. 
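    ** An overflow page begins with the 4-byte big-endian page number of the
    ** next overflow page in its chain (zero if it is the last page of the
    ** chain), followed by payload content, which is why only the first four
    ** bytes need to be rewritten here.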
*/ if( get4byte(pPage->aData)!=iFrom ){ return SQLITE_CORRUPT_PAGE(pPage); } put4byte(pPage->aData, iTo); }else{ int i; int nCell; int rc; rc = pPage->isInit ? SQLITE_OK : btreeInitPage(pPage); if( rc ) return rc; nCell = pPage->nCell; for(i=0; i<nCell; i++){ u8 *pCell = findCell(pPage, i); if( eType==PTRMAP_OVERFLOW1 ){ CellInfo info; pPage->xParseCell(pPage, pCell, &info); if( info.nLocal<info.nPayload ){ if( pCell+info.nSize > pPage->aData+pPage->pBt->usableSize ){ return SQLITE_CORRUPT_PAGE(pPage); } if( iFrom==get4byte(pCell+info.nSize-4) ){ put4byte(pCell+info.nSize-4, iTo); break; } } }else{ if( get4byte(pCell)==iFrom ){ put4byte(pCell, iTo); break; } } } if( i==nCell ){ if( eType!=PTRMAP_BTREE || get4byte(&pPage->aData[pPage->hdrOffset+8])!=iFrom ){ return SQLITE_CORRUPT_PAGE(pPage); } put4byte(&pPage->aData[pPage->hdrOffset+8], iTo); } } return SQLITE_OK; } /* ** Move the open database page pDbPage to location iFreePage in the ** database. The pDbPage reference remains valid. ** ** The isCommit flag indicates that there is no need to remember that ** the journal needs to be sync()ed before database page pDbPage->pgno ** can be written to. The caller has already promised not to write to that ** page. */ static int relocatePage( BtShared *pBt, /* Btree */ MemPage *pDbPage, /* Open page to move */ u8 eType, /* Pointer map 'type' entry for pDbPage */ Pgno iPtrPage, /* Pointer map 'page-no' entry for pDbPage */ Pgno iFreePage, /* The location to move pDbPage to */ int isCommit /* isCommit flag passed to sqlite3PagerMovepage */ ){ MemPage *pPtrPage; /* The page that contains a pointer to pDbPage */ Pgno iDbPage = pDbPage->pgno; Pager *pPager = pBt->pPager; int rc; assert( eType==PTRMAP_OVERFLOW2 || eType==PTRMAP_OVERFLOW1 || eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ); assert( sqlite3_mutex_held(pBt->mutex) ); assert( pDbPage->pBt==pBt ); if( iDbPage<3 ) return SQLITE_CORRUPT_BKPT; /* Move page iDbPage from its current location to page number iFreePage */ TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", iDbPage, iFreePage, iPtrPage, eType)); rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); if( rc!=SQLITE_OK ){ return rc; } pDbPage->pgno = iFreePage; /* If pDbPage was a btree-page, then it may have child pages and/or cells ** that point to overflow pages. The pointer map entries for all these ** pages need to be changed. ** ** If pDbPage is an overflow page, then the first 4 bytes may store a ** pointer to a subsequent overflow page. If this is the case, then ** the pointer map needs to be updated for the subsequent overflow page. */ if( eType==PTRMAP_BTREE || eType==PTRMAP_ROOTPAGE ){ rc = setChildPtrmaps(pDbPage); if( rc!=SQLITE_OK ){ return rc; } }else{ Pgno nextOvfl = get4byte(pDbPage->aData); if( nextOvfl!=0 ){ ptrmapPut(pBt, nextOvfl, PTRMAP_OVERFLOW2, iFreePage, &rc); if( rc!=SQLITE_OK ){ return rc; } } } /* Fix the database pointer on page iPtrPage that pointed at iDbPage so ** that it points at iFreePage. Also fix the pointer map entry for ** iPtrPage. */ if( eType!=PTRMAP_ROOTPAGE ){ rc = btreeGetPage(pBt, iPtrPage, &pPtrPage, 0); if( rc!=SQLITE_OK ){ return rc; } rc = sqlite3PagerWrite(pPtrPage->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pPtrPage); return rc; } rc = modifyPagePointer(pPtrPage, iDbPage, iFreePage, eType); releasePage(pPtrPage); if( rc==SQLITE_OK ){ ptrmapPut(pBt, iFreePage, eType, iPtrPage, &rc); } } return rc; } /* Forward declaration required by incrVacuumStep(). 
*/ static int allocateBtreePage(BtShared *, MemPage **, Pgno *, Pgno, u8); /* ** Perform a single step of an incremental-vacuum. If successful, return ** SQLITE_OK. If there is no work to do (and therefore no point in ** calling this function again), return SQLITE_DONE. Or, if an error ** occurs, return some other error code. ** ** More specifically, this function attempts to re-organize the database so ** that the last page of the file currently in use is no longer in use. ** ** Parameter nFin is the number of pages that this database would contain ** were this function called until it returns SQLITE_DONE. ** ** If the bCommit parameter is non-zero, this function assumes that the ** caller will keep calling incrVacuumStep() until it returns SQLITE_DONE ** or an error. bCommit is passed true for an auto-vacuum-on-commit ** operation, or false for an incremental vacuum. */ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){ Pgno nFreeList; /* Number of pages still on the free-list */ int rc; assert( sqlite3_mutex_held(pBt->mutex) ); assert( iLastPg>nFin ); if( !PTRMAP_ISPAGE(pBt, iLastPg) && iLastPg!=PENDING_BYTE_PAGE(pBt) ){ u8 eType; Pgno iPtrPage; nFreeList = get4byte(&pBt->pPage1->aData[36]); if( nFreeList==0 ){ return SQLITE_DONE; } rc = ptrmapGet(pBt, iLastPg, &eType, &iPtrPage); if( rc!=SQLITE_OK ){ return rc; } if( eType==PTRMAP_ROOTPAGE ){ return SQLITE_CORRUPT_BKPT; } if( eType==PTRMAP_FREEPAGE ){ if( bCommit==0 ){ /* Remove the page from the files free-list. This is not required ** if bCommit is non-zero. In that case, the free-list will be ** truncated to zero after this function returns, so it doesn't ** matter if it still contains some garbage entries. */ Pgno iFreePg; MemPage *pFreePg; rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iLastPg, BTALLOC_EXACT); if( rc!=SQLITE_OK ){ return rc; } assert( iFreePg==iLastPg ); releasePage(pFreePg); } } else { Pgno iFreePg; /* Index of free page to move pLastPg to */ MemPage *pLastPg; u8 eMode = BTALLOC_ANY; /* Mode parameter for allocateBtreePage() */ Pgno iNear = 0; /* nearby parameter for allocateBtreePage() */ rc = btreeGetPage(pBt, iLastPg, &pLastPg, 0); if( rc!=SQLITE_OK ){ return rc; } /* If bCommit is zero, this loop runs exactly once and page pLastPg ** is swapped with the first free page pulled off the free list. ** ** On the other hand, if bCommit is greater than zero, then keep ** looping until a free-page located within the first nFin pages ** of the file is found. */ if( bCommit==0 ){ eMode = BTALLOC_LE; iNear = nFin; } do { MemPage *pFreePg; rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode); if( rc!=SQLITE_OK ){ releasePage(pLastPg); return rc; } releasePage(pFreePg); }while( bCommit && iFreePg>nFin ); assert( iFreePg<iLastPg ); rc = relocatePage(pBt, pLastPg, eType, iPtrPage, iFreePg, bCommit); releasePage(pLastPg); if( rc!=SQLITE_OK ){ return rc; } } } if( bCommit==0 ){ do { iLastPg--; }while( iLastPg==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, iLastPg) ); pBt->bDoTruncate = 1; pBt->nPage = iLastPg; } return SQLITE_OK; } /* ** The database opened by the first argument is an auto-vacuum database ** nOrig pages in size containing nFree free pages. Return the expected ** size of the database in pages following an auto-vacuum operation. 
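**
** (Each pointer-map entry is 5 bytes: a 1-byte entry type followed by a
** 4-byte parent page number.  That is why nEntry below is computed as
** usableSize/5.)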
*/ static Pgno finalDbSize(BtShared *pBt, Pgno nOrig, Pgno nFree){ int nEntry; /* Number of entries on one ptrmap page */ Pgno nPtrmap; /* Number of PtrMap pages to be freed */ Pgno nFin; /* Return value */ nEntry = pBt->usableSize/5; nPtrmap = (nFree-nOrig+PTRMAP_PAGENO(pBt, nOrig)+nEntry)/nEntry; nFin = nOrig - nFree - nPtrmap; if( nOrig>PENDING_BYTE_PAGE(pBt) && nFin<PENDING_BYTE_PAGE(pBt) ){ nFin--; } while( PTRMAP_ISPAGE(pBt, nFin) || nFin==PENDING_BYTE_PAGE(pBt) ){ nFin--; } return nFin; } /* ** A write-transaction must be opened before calling this function. ** It performs a single unit of work towards an incremental vacuum. ** ** If the incremental vacuum is finished after this function has run, ** SQLITE_DONE is returned. If it is not finished, but no error occurred, ** SQLITE_OK is returned. Otherwise an SQLite error code. */ int sqlite3BtreeIncrVacuum(Btree *p){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( pBt->inTransaction==TRANS_WRITE && p->inTrans==TRANS_WRITE ); if( !pBt->autoVacuum ){ rc = SQLITE_DONE; }else{ Pgno nOrig = btreePagecount(pBt); Pgno nFree = get4byte(&pBt->pPage1->aData[36]); Pgno nFin = finalDbSize(pBt, nOrig, nFree); if( nOrig<nFin ){ rc = SQLITE_CORRUPT_BKPT; }else if( nFree>0 ){ rc = saveAllCursors(pBt, 0, 0); if( rc==SQLITE_OK ){ invalidateAllOverflowCache(pBt); rc = incrVacuumStep(pBt, nFin, nOrig, 0); } if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); put4byte(&pBt->pPage1->aData[28], pBt->nPage); } }else{ rc = SQLITE_DONE; } } sqlite3BtreeLeave(p); return rc; } /* ** This routine is called prior to sqlite3PagerCommit when a transaction ** is committed for an auto-vacuum database. ** ** If SQLITE_OK is returned, then *pnTrunc is set to the number of pages ** the database file should be truncated to during the commit process. ** i.e. the database has been reorganized so that only the first *pnTrunc ** pages are in use. */ static int autoVacuumCommit(BtShared *pBt){ int rc = SQLITE_OK; Pager *pPager = pBt->pPager; VVA_ONLY( int nRef = sqlite3PagerRefcount(pPager); ) assert( sqlite3_mutex_held(pBt->mutex) ); invalidateAllOverflowCache(pBt); assert(pBt->autoVacuum); if( !pBt->incrVacuum ){ Pgno nFin; /* Number of pages in database after autovacuuming */ Pgno nFree; /* Number of pages on the freelist initially */ Pgno iFree; /* The next page to be freed */ Pgno nOrig; /* Database size before freeing */ nOrig = btreePagecount(pBt); if( PTRMAP_ISPAGE(pBt, nOrig) || nOrig==PENDING_BYTE_PAGE(pBt) ){ /* It is not possible to create a database for which the final page ** is either a pointer-map page or the pending-byte page. If one ** is encountered, this indicates corruption. */ return SQLITE_CORRUPT_BKPT; } nFree = get4byte(&pBt->pPage1->aData[36]); nFin = finalDbSize(pBt, nOrig, nFree); if( nFin>nOrig ) return SQLITE_CORRUPT_BKPT; if( nFin<nOrig ){ rc = saveAllCursors(pBt, 0, 0); } for(iFree=nOrig; iFree>nFin && rc==SQLITE_OK; iFree--){ rc = incrVacuumStep(pBt, nFin, iFree, 1); } if( (rc==SQLITE_DONE || rc==SQLITE_OK) && nFree>0 ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); put4byte(&pBt->pPage1->aData[32], 0); put4byte(&pBt->pPage1->aData[36], 0); put4byte(&pBt->pPage1->aData[28], nFin); pBt->bDoTruncate = 1; pBt->nPage = nFin; } if( rc!=SQLITE_OK ){ sqlite3PagerRollback(pPager); } } assert( nRef>=sqlite3PagerRefcount(pPager) ); return rc; } #else /* ifndef SQLITE_OMIT_AUTOVACUUM */ # define setChildPtrmaps(x) SQLITE_OK #endif /* ** This routine does the first phase of a two-phase commit. 
This routine ** causes a rollback journal to be created (if it does not already exist) ** and populated with enough information so that if a power loss occurs ** the database can be restored to its original state by playing back ** the journal. Then the contents of the journal are flushed out to ** the disk. After the journal is safely on oxide, the changes to the ** database are written into the database file and flushed to oxide. ** At the end of this call, the rollback journal still exists on the ** disk and we are still holding all locks, so the transaction has not ** committed. See sqlite3BtreeCommitPhaseTwo() for the second phase of the ** commit process. ** ** This call is a no-op if no write-transaction is currently active on pBt. ** ** Otherwise, sync the database file for the btree pBt. zMaster points to ** the name of a master journal file that should be written into the ** individual journal file, or is NULL, indicating no master journal file ** (single database transaction). ** ** When this is called, the master journal should already have been ** created, populated with this journal pointer and synced to disk. ** ** Once this is routine has returned, the only thing required to commit ** the write-transaction for this database file is to delete the journal. */ int sqlite3BtreeCommitPhaseOne(Btree *p, const char *zMaster){ int rc = SQLITE_OK; if( p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ rc = autoVacuumCommit(pBt); if( rc!=SQLITE_OK ){ sqlite3BtreeLeave(p); return rc; } } if( pBt->bDoTruncate ){ sqlite3PagerTruncateImage(pBt->pPager, pBt->nPage); } #endif rc = sqlite3PagerCommitPhaseOne(pBt->pPager, zMaster, 0); sqlite3BtreeLeave(p); } return rc; } /* ** This function is called from both BtreeCommitPhaseTwo() and BtreeRollback() ** at the conclusion of a transaction. */ static void btreeEndTransaction(Btree *p){ BtShared *pBt = p->pBt; sqlite3 *db = p->db; assert( sqlite3BtreeHoldsMutex(p) ); #ifndef SQLITE_OMIT_AUTOVACUUM pBt->bDoTruncate = 0; #endif if( p->inTrans>TRANS_NONE && db->nVdbeRead>1 ){ /* If there are other active statements that belong to this database ** handle, downgrade to a read-only transaction. The other statements ** may still be reading from the database. */ downgradeAllSharedCacheTableLocks(p); p->inTrans = TRANS_READ; }else{ /* If the handle had any kind of transaction open, decrement the ** transaction count of the shared btree. If the transaction count ** reaches 0, set the shared state to TRANS_NONE. The unlockBtreeIfUnused() ** call below will unlock the pager. */ if( p->inTrans!=TRANS_NONE ){ clearAllSharedCacheTableLocks(p); pBt->nTransaction--; if( 0==pBt->nTransaction ){ pBt->inTransaction = TRANS_NONE; } } /* Set the current transaction state to TRANS_NONE and unlock the ** pager if this call closed the only read or write transaction. */ p->inTrans = TRANS_NONE; unlockBtreeIfUnused(pBt); } btreeIntegrity(p); } /* ** Commit the transaction currently in progress. ** ** This routine implements the second phase of a 2-phase commit. The ** sqlite3BtreeCommitPhaseOne() routine does the first phase and should ** be invoked prior to calling this routine. The sqlite3BtreeCommitPhaseOne() ** routine did all the work of writing information out to disk and flushing the ** contents so that they are written onto the disk platter. 
** All this routine has to do is delete or truncate or zero the header in
** the rollback journal (which causes the transaction to commit) and
** drop locks.
**
** Normally, if an error occurs while the pager layer is attempting to
** finalize the underlying journal file, this function returns an error and
** the upper layer will attempt a rollback. However, if the second argument
** is non-zero then this b-tree transaction is part of a multi-file
** transaction. In this case, the transaction has already been committed
** (by deleting a master journal file) and the caller will ignore this
** function's return code. So, even if an error occurs in the pager layer,
** reset the b-tree object's internal state to indicate that the write
** transaction has been closed. This is quite safe, as the pager will have
** transitioned to the error state.
**
** This will release the write lock on the database file.  If there
** are no active cursors, it also releases the read lock.
*/
int sqlite3BtreeCommitPhaseTwo(Btree *p, int bCleanup){

  if( p->inTrans==TRANS_NONE ) return SQLITE_OK;
  sqlite3BtreeEnter(p);
  btreeIntegrity(p);

  /* If the handle has a write-transaction open, commit the shared-btree's
  ** transaction and set the shared state to TRANS_READ.
  */
  if( p->inTrans==TRANS_WRITE ){
    int rc;
    BtShared *pBt = p->pBt;
    assert( pBt->inTransaction==TRANS_WRITE );
    assert( pBt->nTransaction>0 );
    rc = sqlite3PagerCommitPhaseTwo(pBt->pPager);
    if( rc!=SQLITE_OK && bCleanup==0 ){
      sqlite3BtreeLeave(p);
      return rc;
    }
    p->iDataVersion--;  /* Compensate for pPager->iDataVersion++; */
    pBt->inTransaction = TRANS_READ;
    btreeClearHasContent(pBt);
  }

  btreeEndTransaction(p);
  sqlite3BtreeLeave(p);
  return SQLITE_OK;
}

/*
** Do both phases of a commit.
*/
int sqlite3BtreeCommit(Btree *p){
  int rc;
  sqlite3BtreeEnter(p);
  rc = sqlite3BtreeCommitPhaseOne(p, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3BtreeCommitPhaseTwo(p, 0);
  }
  sqlite3BtreeLeave(p);
  return rc;
}

/*
** This routine sets the state to CURSOR_FAULT and the error
** code to errCode for every cursor on any BtShared that pBtree
** references.  Or if the writeOnly flag is set to 1, then only
** trip write cursors and leave read cursors unchanged.
**
** Every cursor is a candidate to be tripped, including cursors
** that belong to other database connections that happen to be
** sharing the cache with pBtree.
**
** This routine gets called when a rollback occurs. If the writeOnly
** flag is true, then only write-cursors need be tripped - read-only
** cursors save their current positions so that they may continue
** following the rollback. Or, if writeOnly is false, all cursors are
** tripped. In general, writeOnly is false if the transaction being
** rolled back modified the database schema. In this case b-tree root
** pages may be moved or deleted from the database altogether, making
** it unsafe for read cursors to continue.
**
** If the writeOnly flag is true and an error is encountered while
** saving the current position of a read-only cursor, all cursors,
** including all read-cursors, are tripped.
**
** SQLITE_OK is returned if successful, or if an error occurs while
** saving a cursor position, an SQLite error code.
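**
** For example, sqlite3BtreeRollback() below passes its tripCode and
** writeOnly arguments through to this routine, so an ordinary rollback that
** did not touch the schema trips only the write cursors, while a
** schema-changing rollback trips every cursor.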
*/ int sqlite3BtreeTripAllCursors(Btree *pBtree, int errCode, int writeOnly){ BtCursor *p; int rc = SQLITE_OK; assert( (writeOnly==0 || writeOnly==1) && BTCF_WriteFlag==1 ); if( pBtree ){ sqlite3BtreeEnter(pBtree); for(p=pBtree->pBt->pCursor; p; p=p->pNext){ if( writeOnly && (p->curFlags & BTCF_WriteFlag)==0 ){ if( p->eState==CURSOR_VALID || p->eState==CURSOR_SKIPNEXT ){ rc = saveCursorPosition(p); if( rc!=SQLITE_OK ){ (void)sqlite3BtreeTripAllCursors(pBtree, rc, 0); break; } } }else{ sqlite3BtreeClearCursor(p); p->eState = CURSOR_FAULT; p->skipNext = errCode; } btreeReleaseAllCursorPages(p); } sqlite3BtreeLeave(pBtree); } return rc; } /* ** Set the pBt->nPage field correctly, according to the current ** state of the database. Assume pBt->pPage1 is valid. */ static void btreeSetNPage(BtShared *pBt, MemPage *pPage1){ int nPage = get4byte(&pPage1->aData[28]); testcase( nPage==0 ); if( nPage==0 ) sqlite3PagerPagecount(pBt->pPager, &nPage); testcase( pBt->nPage!=nPage ); pBt->nPage = nPage; } /* ** Rollback the transaction in progress. ** ** If tripCode is not SQLITE_OK then cursors will be invalidated (tripped). ** Only write cursors are tripped if writeOnly is true but all cursors are ** tripped if writeOnly is false. Any attempt to use ** a tripped cursor will result in an error. ** ** This will release the write lock on the database file. If there ** are no active cursors, it also releases the read lock. */ int sqlite3BtreeRollback(Btree *p, int tripCode, int writeOnly){ int rc; BtShared *pBt = p->pBt; MemPage *pPage1; assert( writeOnly==1 || writeOnly==0 ); assert( tripCode==SQLITE_ABORT_ROLLBACK || tripCode==SQLITE_OK ); sqlite3BtreeEnter(p); if( tripCode==SQLITE_OK ){ rc = tripCode = saveAllCursors(pBt, 0, 0); if( rc ) writeOnly = 0; }else{ rc = SQLITE_OK; } if( tripCode ){ int rc2 = sqlite3BtreeTripAllCursors(p, tripCode, writeOnly); assert( rc==SQLITE_OK || (writeOnly==0 && rc2==SQLITE_OK) ); if( rc2!=SQLITE_OK ) rc = rc2; } btreeIntegrity(p); if( p->inTrans==TRANS_WRITE ){ int rc2; assert( TRANS_WRITE==pBt->inTransaction ); rc2 = sqlite3PagerRollback(pBt->pPager); if( rc2!=SQLITE_OK ){ rc = rc2; } /* The rollback may have destroyed the pPage1->aData value. So ** call btreeGetPage() on page 1 again to make ** sure pPage1->aData is set correctly. */ if( btreeGetPage(pBt, 1, &pPage1, 0)==SQLITE_OK ){ btreeSetNPage(pBt, pPage1); releasePageOne(pPage1); } assert( countValidCursors(pBt, 1)==0 ); pBt->inTransaction = TRANS_READ; btreeClearHasContent(pBt); } btreeEndTransaction(p); sqlite3BtreeLeave(p); return rc; } /* ** Start a statement subtransaction. The subtransaction can be rolled ** back independently of the main transaction. You must start a transaction ** before starting a subtransaction. The subtransaction is ended automatically ** if the main transaction commits or rolls back. ** ** Statement subtransactions are used around individual SQL statements ** that are contained within a BEGIN...COMMIT block. If a constraint ** error occurs within the statement, the effect of that one statement ** can be rolled back without having to rollback the entire transaction. ** ** A statement sub-transaction is implemented as an anonymous savepoint. The ** value passed as the second parameter is the total number of savepoints, ** including the new anonymous savepoint, open on the B-Tree. i.e. if there ** are no active savepoints and no other statement-transactions open, ** iStatement is 1. This anonymous savepoint can be released or rolled back ** using the sqlite3BtreeSavepoint() function. 
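**
** Illustrative call sequence only (the savepoint index depends on the
** caller's own bookkeeping): if the statement transaction was opened with
** sqlite3BtreeBeginStmt(p, n), it is typically undone and discarded with
**
**     sqlite3BtreeSavepoint(p, SAVEPOINT_ROLLBACK, n-1);
**     sqlite3BtreeSavepoint(p, SAVEPOINT_RELEASE, n-1);
**
** or simply released with the SAVEPOINT_RELEASE call alone if the
** statement succeeded.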
*/ int sqlite3BtreeBeginStmt(Btree *p, int iStatement){ int rc; BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( p->inTrans==TRANS_WRITE ); assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( iStatement>0 ); assert( iStatement>p->db->nSavepoint ); assert( pBt->inTransaction==TRANS_WRITE ); /* At the pager level, a statement transaction is a savepoint with ** an index greater than all savepoints created explicitly using ** SQL statements. It is illegal to open, release or rollback any ** such savepoints while the statement transaction savepoint is active. */ rc = sqlite3PagerOpenSavepoint(pBt->pPager, iStatement); sqlite3BtreeLeave(p); return rc; } /* ** The second argument to this function, op, is always SAVEPOINT_ROLLBACK ** or SAVEPOINT_RELEASE. This function either releases or rolls back the ** savepoint identified by parameter iSavepoint, depending on the value ** of op. ** ** Normally, iSavepoint is greater than or equal to zero. However, if op is ** SAVEPOINT_ROLLBACK, then iSavepoint may also be -1. In this case the ** contents of the entire transaction are rolled back. This is different ** from a normal transaction rollback, as no locks are released and the ** transaction remains open. */ int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){ int rc = SQLITE_OK; if( p && p->inTrans==TRANS_WRITE ){ BtShared *pBt = p->pBt; assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK ); assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) ); sqlite3BtreeEnter(p); if( op==SAVEPOINT_ROLLBACK ){ rc = saveAllCursors(pBt, 0, 0); } if( rc==SQLITE_OK ){ rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint); } if( rc==SQLITE_OK ){ if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){ pBt->nPage = 0; } rc = newDatabase(pBt); btreeSetNPage(pBt, pBt->pPage1); /* pBt->nPage might be zero if the database was corrupt when ** the transaction was started. Otherwise, it must be at least 1. */ assert( CORRUPT_DB || pBt->nPage>0 ); } sqlite3BtreeLeave(p); } return rc; } /* ** Create a new cursor for the BTree whose root is on the page ** iTable. If a read-only cursor is requested, it is assumed that ** the caller already has at least a read-only transaction open ** on the database already. If a write-cursor is requested, then ** the caller is assumed to have an open write transaction. ** ** If the BTREE_WRCSR bit of wrFlag is clear, then the cursor can only ** be used for reading. If the BTREE_WRCSR bit is set, then the cursor ** can be used for reading or for writing if other conditions for writing ** are also met. These are the conditions that must be met in order ** for writing to be allowed: ** ** 1: The cursor must have been opened with wrFlag containing BTREE_WRCSR ** ** 2: Other database connections that share the same pager cache ** but which are not in the READ_UNCOMMITTED state may not have ** cursors open with wrFlag==0 on the same table. Otherwise ** the changes made by this write cursor would be visible to ** the read cursors in the other database connection. ** ** 3: The database must be writable (not on read-only media) ** ** 4: There must be an active transaction. ** ** The BTREE_FORDELETE bit of wrFlag may optionally be set if BTREE_WRCSR ** is set. If FORDELETE is set, that is a hint to the implementation that ** this cursor will only be used to seek to and delete entries of an index ** as part of a larger DELETE statement. The FORDELETE hint is not used by ** this implementation. 
But in a hypothetical alternative storage engine ** in which index entries are automatically deleted when corresponding table ** rows are deleted, the FORDELETE flag is a hint that all SEEK and DELETE ** operations on this cursor can be no-ops and all READ operations can ** return a null row (2-bytes: 0x01 0x00). ** ** No checking is done to make sure that page iTable really is the ** root page of a b-tree. If it is not, then the cursor acquired ** will not work correctly. ** ** It is assumed that the sqlite3BtreeCursorZero() has been called ** on pCur to initialize the memory space prior to invoking this routine. */ static int btreeCursor( Btree *p, /* The btree */ int iTable, /* Root page of table to open */ int wrFlag, /* 1 to write. 0 read-only */ struct KeyInfo *pKeyInfo, /* First arg to comparison function */ BtCursor *pCur /* Space for new cursor */ ){ BtShared *pBt = p->pBt; /* Shared b-tree handle */ BtCursor *pX; /* Looping over other all cursors */ assert( sqlite3BtreeHoldsMutex(p) ); assert( wrFlag==0 || wrFlag==BTREE_WRCSR || wrFlag==(BTREE_WRCSR|BTREE_FORDELETE) ); /* The following assert statements verify that if this is a sharable ** b-tree database, the connection is holding the required table locks, ** and that no other connection has any open cursor that conflicts with ** this lock. */ assert( hasSharedCacheTableLock(p, iTable, pKeyInfo!=0, (wrFlag?2:1)) ); assert( wrFlag==0 || !hasReadConflicts(p, iTable) ); /* Assert that the caller has opened the required transaction. */ assert( p->inTrans>TRANS_NONE ); assert( wrFlag==0 || p->inTrans==TRANS_WRITE ); assert( pBt->pPage1 && pBt->pPage1->aData ); assert( wrFlag==0 || (pBt->btsFlags & BTS_READ_ONLY)==0 ); if( wrFlag ){ allocateTempSpace(pBt); if( pBt->pTmpSpace==0 ) return SQLITE_NOMEM_BKPT; } if( iTable==1 && btreePagecount(pBt)==0 ){ assert( wrFlag==0 ); iTable = 0; } /* Now that no other errors can occur, finish filling in the BtCursor ** variables and link the cursor into the BtShared list. */ pCur->pgnoRoot = (Pgno)iTable; pCur->iPage = -1; pCur->pKeyInfo = pKeyInfo; pCur->pBtree = p; pCur->pBt = pBt; pCur->curFlags = wrFlag ? BTCF_WriteFlag : 0; pCur->curPagerFlags = wrFlag ? 0 : PAGER_GET_READONLY; /* If there are two or more cursors on the same btree, then all such ** cursors *must* have the BTCF_Multiple flag set. */ for(pX=pBt->pCursor; pX; pX=pX->pNext){ if( pX->pgnoRoot==(Pgno)iTable ){ pX->curFlags |= BTCF_Multiple; pCur->curFlags |= BTCF_Multiple; } } pCur->pNext = pBt->pCursor; pBt->pCursor = pCur; pCur->eState = CURSOR_INVALID; return SQLITE_OK; } int sqlite3BtreeCursor( Btree *p, /* The btree */ int iTable, /* Root page of table to open */ int wrFlag, /* 1 to write. 0 read-only */ struct KeyInfo *pKeyInfo, /* First arg to xCompare() */ BtCursor *pCur /* Write new cursor here */ ){ int rc; if( iTable<1 ){ rc = SQLITE_CORRUPT_BKPT; }else{ sqlite3BtreeEnter(p); rc = btreeCursor(p, iTable, wrFlag, pKeyInfo, pCur); sqlite3BtreeLeave(p); } return rc; } /* ** Return the size of a BtCursor object in bytes. ** ** This interfaces is needed so that users of cursors can preallocate ** sufficient storage to hold a cursor. The BtCursor object is opaque ** to users so they cannot do the sizeof() themselves - they must call ** this routine. */ int sqlite3BtreeCursorSize(void){ return ROUND8(sizeof(BtCursor)); } /* ** Initialize memory that will be converted into a BtCursor object. ** ** The simple approach here would be to memset() the entire object ** to zero. 
But it turns out that the apPage[] and aiIdx[] arrays ** do not need to be zeroed and they are large, so we can save a lot ** of run-time by skipping the initialization of those elements. */ void sqlite3BtreeCursorZero(BtCursor *p){ memset(p, 0, offsetof(BtCursor, BTCURSOR_FIRST_UNINIT)); } /* ** Close a cursor. The read lock on the database file is released ** when the last cursor is closed. */ int sqlite3BtreeCloseCursor(BtCursor *pCur){ Btree *pBtree = pCur->pBtree; if( pBtree ){ BtShared *pBt = pCur->pBt; sqlite3BtreeEnter(pBtree); assert( pBt->pCursor!=0 ); if( pBt->pCursor==pCur ){ pBt->pCursor = pCur->pNext; }else{ BtCursor *pPrev = pBt->pCursor; do{ if( pPrev->pNext==pCur ){ pPrev->pNext = pCur->pNext; break; } pPrev = pPrev->pNext; }while( ALWAYS(pPrev) ); } btreeReleaseAllCursorPages(pCur); unlockBtreeIfUnused(pBt); sqlite3_free(pCur->aOverflow); sqlite3_free(pCur->pKey); sqlite3BtreeLeave(pBtree); pCur->pBtree = 0; } return SQLITE_OK; } /* ** Make sure the BtCursor* given in the argument has a valid ** BtCursor.info structure. If it is not already valid, call ** btreeParseCell() to fill it in. ** ** BtCursor.info is a cache of the information in the current cell. ** Using this cache reduces the number of calls to btreeParseCell(). */ #ifndef NDEBUG static int cellInfoEqual(CellInfo *a, CellInfo *b){ if( a->nKey!=b->nKey ) return 0; if( a->pPayload!=b->pPayload ) return 0; if( a->nPayload!=b->nPayload ) return 0; if( a->nLocal!=b->nLocal ) return 0; if( a->nSize!=b->nSize ) return 0; return 1; } static void assertCellInfo(BtCursor *pCur){ CellInfo info; memset(&info, 0, sizeof(info)); btreeParseCell(pCur->pPage, pCur->ix, &info); assert( CORRUPT_DB || cellInfoEqual(&info, &pCur->info) ); } #else #define assertCellInfo(x) #endif static SQLITE_NOINLINE void getCellInfo(BtCursor *pCur){ if( pCur->info.nSize==0 ){ pCur->curFlags |= BTCF_ValidNKey; btreeParseCell(pCur->pPage,pCur->ix,&pCur->info); }else{ assertCellInfo(pCur); } } #ifndef NDEBUG /* The next routine used only within assert() statements */ /* ** Return true if the given BtCursor is valid. A valid cursor is one ** that is currently pointing to a row in a (non-empty) table. ** This is a verification routine is used only within assert() statements. */ int sqlite3BtreeCursorIsValid(BtCursor *pCur){ return pCur && pCur->eState==CURSOR_VALID; } #endif /* NDEBUG */ int sqlite3BtreeCursorIsValidNN(BtCursor *pCur){ assert( pCur!=0 ); return pCur->eState==CURSOR_VALID; } /* ** Return the value of the integer key or "rowid" for a table btree. ** This routine is only valid for a cursor that is pointing into a ** ordinary table btree. If the cursor points to an index btree or ** is invalid, the result of this routine is undefined. */ i64 sqlite3BtreeIntegerKey(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->curIntKey ); getCellInfo(pCur); return pCur->info.nKey; } #ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC /* ** Return the offset into the database file for the start of the ** payload to which the cursor is pointing. */ i64 sqlite3BtreeOffset(BtCursor *pCur){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); getCellInfo(pCur); return (i64)pCur->pBt->pageSize*((i64)pCur->pPage->pgno - 1) + (i64)(pCur->info.pPayload - pCur->pPage->aData); } #endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */ /* ** Return the number of bytes of payload for the entry that pCur is ** currently pointing to. For table btrees, this will be the amount ** of data. 
For index btrees, this will be the size of the key.
**
** The caller must guarantee that the cursor is pointing to a non-NULL
** valid entry.  In other words, the calling procedure must guarantee
** that the cursor has Cursor.eState==CURSOR_VALID.
*/
u32 sqlite3BtreePayloadSize(BtCursor *pCur){
  assert( cursorHoldsMutex(pCur) );
  assert( pCur->eState==CURSOR_VALID );
  getCellInfo(pCur);
  return pCur->info.nPayload;
}

/*
** Return an upper bound on the size of any record for the table
** that the cursor is pointing into.
**
** This is an optimization.  Everything will still work if this
** routine always returns 2147483647 (which is the largest record
** that SQLite can handle) or more.  But returning a smaller value might
** prevent large memory allocations when trying to interpret a
** corrupt database.
**
** The current implementation merely returns the size of the underlying
** database file.
*/
sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor *pCur){
  assert( cursorHoldsMutex(pCur) );
  assert( pCur->eState==CURSOR_VALID );
  return pCur->pBt->pageSize * (sqlite3_int64)pCur->pBt->nPage;
}

/*
** Given the page number of an overflow page in the database (parameter
** ovfl), this function finds the page number of the next page in the
** linked list of overflow pages. If possible, it uses the auto-vacuum
** pointer-map data instead of reading the content of page ovfl to do so.
**
** If an error occurs an SQLite error code is returned. Otherwise:
**
** The page number of the next overflow page in the linked list is
** written to *pPgnoNext. If page ovfl is the last page in its linked
** list, *pPgnoNext is set to zero.
**
** If ppPage is not NULL, and a reference to the MemPage object corresponding
** to page number ovfl was obtained, then *ppPage is set to point to that
** reference. It is the responsibility of the caller to call releasePage()
** on *ppPage to free the reference. If no reference was obtained (because
** the pointer-map was used to obtain the value for *pPgnoNext), then
** *ppPage is set to zero.
*/
static int getOverflowPage(
  BtShared *pBt,               /* The database file */
  Pgno ovfl,                   /* Current overflow page number */
  MemPage **ppPage,            /* OUT: MemPage handle (may be NULL) */
  Pgno *pPgnoNext              /* OUT: Next overflow page number */
){
  Pgno next = 0;
  MemPage *pPage = 0;
  int rc = SQLITE_OK;

  assert( sqlite3_mutex_held(pBt->mutex) );
  assert(pPgnoNext);

#ifndef SQLITE_OMIT_AUTOVACUUM
  /* Try to find the next page in the overflow list using the
  ** autovacuum pointer-map pages. Guess that the next page in
  ** the overflow list is page number (ovfl+1). If that guess turns
  ** out to be wrong, fall back to loading the data of page
  ** number ovfl to determine the next page number.
  */
  if( pBt->autoVacuum ){
    Pgno pgno;
    Pgno iGuess = ovfl+1;
    u8 eType;

    while( PTRMAP_ISPAGE(pBt, iGuess) || iGuess==PENDING_BYTE_PAGE(pBt) ){
      iGuess++;
    }

    if( iGuess<=btreePagecount(pBt) ){
      rc = ptrmapGet(pBt, iGuess, &eType, &pgno);
      if( rc==SQLITE_OK && eType==PTRMAP_OVERFLOW2 && pgno==ovfl ){
        next = iGuess;
        rc = SQLITE_DONE;
      }
    }
  }
#endif

  assert( next==0 || rc==SQLITE_DONE );
  if( rc==SQLITE_OK ){
    rc = btreeGetPage(pBt, ovfl, &pPage, (ppPage==0) ? PAGER_GET_READONLY : 0);
    assert( rc==SQLITE_OK || pPage==0 );
    if( rc==SQLITE_OK ){
      next = get4byte(pPage->aData);
    }
  }

  *pPgnoNext = next;
  if( ppPage ){
    *ppPage = pPage;
  }else{
    releasePage(pPage);
  }
  return (rc==SQLITE_DONE ? SQLITE_OK : rc);
}

/*
** Copy data from a buffer to a page, or from a page to a buffer.
**
** pPayload is a pointer to data stored on database page pDbPage.
** If argument eOp is false, then nByte bytes of data are copied ** from pPayload to the buffer pointed at by pBuf. If eOp is true, ** then sqlite3PagerWrite() is called on pDbPage and nByte bytes ** of data are copied from the buffer pBuf to pPayload. ** ** SQLITE_OK is returned on success, otherwise an error code. */ static int copyPayload( void *pPayload, /* Pointer to page data */ void *pBuf, /* Pointer to buffer */ int nByte, /* Number of bytes to copy */ int eOp, /* 0 -> copy from page, 1 -> copy to page */ DbPage *pDbPage /* Page containing pPayload */ ){ if( eOp ){ /* Copy data from buffer to page (a write operation) */ int rc = sqlite3PagerWrite(pDbPage); if( rc!=SQLITE_OK ){ return rc; } memcpy(pPayload, pBuf, nByte); }else{ /* Copy data from page to buffer (a read operation) */ memcpy(pBuf, pPayload, nByte); } return SQLITE_OK; } /* ** This function is used to read or overwrite payload information ** for the entry that the pCur cursor is pointing to. The eOp ** argument is interpreted as follows: ** ** 0: The operation is a read. Populate the overflow cache. ** 1: The operation is a write. Populate the overflow cache. ** ** A total of "amt" bytes are read or written beginning at "offset". ** Data is read to or from the buffer pBuf. ** ** The content being read or written might appear on the main page ** or be scattered out on multiple overflow pages. ** ** If the current cursor entry uses one or more overflow pages ** this function may allocate space for and lazily populate ** the overflow page-list cache array (BtCursor.aOverflow). ** Subsequent calls use this cache to make seeking to the supplied offset ** more efficient. ** ** Once an overflow page-list cache has been allocated, it must be ** invalidated if some other cursor writes to the same table, or if ** the cursor is moved to a different row. Additionally, in auto-vacuum ** mode, the following events may invalidate an overflow page-list cache. ** ** * An incremental vacuum, ** * A commit in auto_vacuum="full" mode, ** * Creating a table (may require moving an overflow page). */ static int accessPayload( BtCursor *pCur, /* Cursor pointing to entry to read from */ u32 offset, /* Begin reading this far into payload */ u32 amt, /* Read this many bytes */ unsigned char *pBuf, /* Write the bytes into this buffer */ int eOp /* zero to read. non-zero to write. */ ){ unsigned char *aPayload; int rc = SQLITE_OK; int iIdx = 0; MemPage *pPage = pCur->pPage; /* Btree page of current entry */ BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */ #ifdef SQLITE_DIRECT_OVERFLOW_READ unsigned char * const pBufStart = pBuf; /* Start of original out buffer */ #endif assert( pPage ); assert( eOp==0 || eOp==1 ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->ix<pPage->nCell ); assert( cursorHoldsMutex(pCur) ); getCellInfo(pCur); aPayload = pCur->info.pPayload; assert( offset+amt <= pCur->info.nPayload ); assert( aPayload > pPage->aData ); if( (uptr)(aPayload - pPage->aData) > (pBt->usableSize - pCur->info.nLocal) ){ /* Trying to read or write past the end of the data is an error. The ** conditional above is really: ** &aPayload[pCur->info.nLocal] > &pPage->aData[pBt->usableSize] ** but is recast into its current form to avoid integer overflow problems */ return SQLITE_CORRUPT_PAGE(pPage); } /* Check if data must be read/written to/from the btree page itself. 
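**
** Worked example (illustrative numbers): if info.nLocal==100 and the caller
** asks for amt==50 bytes starting at offset==90, the first 100-90 = 10
** bytes are copied from the local payload area here and the remaining 40
** come from the overflow chain below; if instead offset==120, nothing is
** copied here and the overflow loop starts 20 bytes into the first
** overflow page.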
*/ if( offset<pCur->info.nLocal ){ int a = amt; if( a+offset>pCur->info.nLocal ){ a = pCur->info.nLocal - offset; } rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage); offset = 0; pBuf += a; amt -= a; }else{ offset -= pCur->info.nLocal; } if( rc==SQLITE_OK && amt>0 ){ const u32 ovflSize = pBt->usableSize - 4; /* Bytes content per ovfl page */ Pgno nextPage; nextPage = get4byte(&aPayload[pCur->info.nLocal]); /* If the BtCursor.aOverflow[] has not been allocated, allocate it now. ** ** The aOverflow[] array is sized at one entry for each overflow page ** in the overflow chain. The page number of the first overflow page is ** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array ** means "not yet known" (the cache is lazily populated). */ if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){ int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize; if( pCur->aOverflow==0 || nOvfl*(int)sizeof(Pgno) > sqlite3MallocSize(pCur->aOverflow) ){ Pgno *aNew = (Pgno*)sqlite3Realloc( pCur->aOverflow, nOvfl*2*sizeof(Pgno) ); if( aNew==0 ){ return SQLITE_NOMEM_BKPT; }else{ pCur->aOverflow = aNew; } } memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno)); pCur->curFlags |= BTCF_ValidOvfl; }else{ /* If the overflow page-list cache has been allocated and the ** entry for the first required overflow page is valid, skip ** directly to it. */ if( pCur->aOverflow[offset/ovflSize] ){ iIdx = (offset/ovflSize); nextPage = pCur->aOverflow[iIdx]; offset = (offset%ovflSize); } } assert( rc==SQLITE_OK && amt>0 ); while( nextPage ){ /* If required, populate the overflow page-list cache. */ assert( pCur->aOverflow[iIdx]==0 || pCur->aOverflow[iIdx]==nextPage || CORRUPT_DB ); pCur->aOverflow[iIdx] = nextPage; if( offset>=ovflSize ){ /* The only reason to read this page is to obtain the page ** number for the next page in the overflow chain. The page ** data is not required. So first try to lookup the overflow ** page-list cache, if any, then fall back to the getOverflowPage() ** function. */ assert( pCur->curFlags & BTCF_ValidOvfl ); assert( pCur->pBtree->db==pBt->db ); if( pCur->aOverflow[iIdx+1] ){ nextPage = pCur->aOverflow[iIdx+1]; }else{ rc = getOverflowPage(pBt, nextPage, 0, &nextPage); } offset -= ovflSize; }else{ /* Need to read this page properly. It contains some of the ** range of data that is being read (eOp==0) or written (eOp!=0). */ int a = amt; if( a + offset > ovflSize ){ a = ovflSize - offset; } #ifdef SQLITE_DIRECT_OVERFLOW_READ /* If all the following are true: ** ** 1) this is a read operation, and ** 2) data is required from the start of this overflow page, and ** 3) there are no dirty pages in the page-cache ** 4) the database is file-backed, and ** 5) the page is not in the WAL file ** 6) at least 4 bytes have already been read into the output buffer ** ** then data can be read directly from the database file into the ** output buffer, bypassing the page-cache altogether. This speeds ** up loading large records that span many overflow pages. */ if( eOp==0 /* (1) */ && offset==0 /* (2) */ && sqlite3PagerDirectReadOk(pBt->pPager, nextPage) /* (3,4,5) */ && &pBuf[-4]>=pBufStart /* (6) */ ){ sqlite3_file *fd = sqlite3PagerFile(pBt->pPager); u8 aSave[4]; u8 *aWrite = &pBuf[-4]; assert( aWrite>=pBufStart ); /* due to (6) */ memcpy(aSave, aWrite, 4); rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); nextPage = get4byte(aWrite); memcpy(aWrite, aSave, 4); }else #endif { DbPage *pDbPage; rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage, (eOp==0 ? 
PAGER_GET_READONLY : 0) ); if( rc==SQLITE_OK ){ aPayload = sqlite3PagerGetData(pDbPage); nextPage = get4byte(aPayload); rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage); sqlite3PagerUnref(pDbPage); offset = 0; } } amt -= a; if( amt==0 ) return rc; pBuf += a; } if( rc ) break; iIdx++; } } if( rc==SQLITE_OK && amt>0 ){ /* Overflow chain ends prematurely */ return SQLITE_CORRUPT_PAGE(pPage); } return rc; } /* ** Read part of the payload for the row at which that cursor pCur is currently ** pointing. "amt" bytes will be transferred into pBuf[]. The transfer ** begins at "offset". ** ** pCur can be pointing to either a table or an index b-tree. ** If pointing to a table btree, then the content section is read. If ** pCur is pointing to an index b-tree then the key section is read. ** ** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing ** to a valid row in the table. For sqlite3BtreePayloadChecked(), the ** cursor might be invalid or might need to be restored before being read. ** ** Return SQLITE_OK on success or an error code if anything goes ** wrong. An error is returned if "offset+amt" is larger than ** the available payload. */ int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ assert( cursorHoldsMutex(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>=0 && pCur->pPage ); assert( pCur->ix<pCur->pPage->nCell ); return accessPayload(pCur, offset, amt, (unsigned char*)pBuf, 0); } /* ** This variant of sqlite3BtreePayload() works even if the cursor has not ** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read() ** interface. */ #ifndef SQLITE_OMIT_INCRBLOB static SQLITE_NOINLINE int accessPayloadChecked( BtCursor *pCur, u32 offset, u32 amt, void *pBuf ){ int rc; if ( pCur->eState==CURSOR_INVALID ){ return SQLITE_ABORT; } assert( cursorOwnsBtShared(pCur) ); rc = btreeRestoreCursorPosition(pCur); return rc ? rc : accessPayload(pCur, offset, amt, pBuf, 0); } int sqlite3BtreePayloadChecked(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){ if( pCur->eState==CURSOR_VALID ){ assert( cursorOwnsBtShared(pCur) ); return accessPayload(pCur, offset, amt, pBuf, 0); }else{ return accessPayloadChecked(pCur, offset, amt, pBuf); } } #endif /* SQLITE_OMIT_INCRBLOB */ /* ** Return a pointer to payload information from the entry that the ** pCur cursor is pointing to. The pointer is to the beginning of ** the key if index btrees (pPage->intKey==0) and is the data for ** table btrees (pPage->intKey==1). The number of bytes of available ** key/data is written into *pAmt. If *pAmt==0, then the value ** returned will not be a valid pointer. ** ** This routine is an optimization. It is common for the entire key ** and data to fit on the local page and for there to be no overflow ** pages. When that is so, this routine can be used to access the ** key and data without making a copy. If the key and/or data spills ** onto overflow pages, then accessPayload() must be used to reassemble ** the key/data and copy it into a preallocated buffer. ** ** The pointer returned by this routine looks directly into the cached ** page of the database. The data might change or move the next time ** any btree routine is called. 
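**
** A minimal sketch of the calling pattern (hypothetical caller code, not
** part of this file; useRecord() and aBuf are made-up names): try the
** zero-copy path first and fall back to copying the payload out through
** sqlite3BtreePayload() when part of it lives on overflow pages.
**
**     u32 nAvail, nTotal;
**     const void *pLocal = sqlite3BtreePayloadFetch(pCur, &nAvail);
**     nTotal = sqlite3BtreePayloadSize(pCur);
**     if( nAvail>=nTotal ){
**       useRecord(pLocal, nTotal);
**     }else{
**       rc = sqlite3BtreePayload(pCur, 0, nTotal, aBuf);
**     }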
*/
static const void *fetchPayload(
  BtCursor *pCur,      /* Cursor pointing to entry to read from */
  u32 *pAmt            /* Write the number of available bytes here */
){
  int amt;
  assert( pCur!=0 && pCur->iPage>=0 && pCur->pPage);
  assert( pCur->eState==CURSOR_VALID );
  assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
  assert( cursorOwnsBtShared(pCur) );
  assert( pCur->ix<pCur->pPage->nCell );
  assert( pCur->info.nSize>0 );
  assert( pCur->info.pPayload>pCur->pPage->aData || CORRUPT_DB );
  assert( pCur->info.pPayload<pCur->pPage->aDataEnd ||CORRUPT_DB);
  amt = pCur->info.nLocal;
  if( amt>(int)(pCur->pPage->aDataEnd - pCur->info.pPayload) ){
    /* There is too little space on the page for the expected amount
    ** of local content. Database must be corrupt. */
    assert( CORRUPT_DB );
    amt = MAX(0, (int)(pCur->pPage->aDataEnd - pCur->info.pPayload));
  }
  *pAmt = (u32)amt;
  return (void*)pCur->info.pPayload;
}

/*
** For the entry that cursor pCur is pointing to, return as
** many bytes of the key or data as are available on the local
** b-tree page.  Write the number of available bytes into *pAmt.
**
** The pointer returned is ephemeral.  The key/data may move
** or be destroyed on the next call to any Btree routine,
** including calls from other threads against the same cache.
** Hence, a mutex on the BtShared should be held prior to calling
** this routine.
**
** This routine is used to get quick access to key and data
** in the common case where no overflow pages are used.
*/
const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){
  return fetchPayload(pCur, pAmt);
}

/*
** Move the cursor down to a new child page.  The newPgno argument is the
** page number of the child page to move to.
**
** This function returns SQLITE_CORRUPT if the page-header flags field of
** the new child page does not match the flags field of the parent (i.e.
** if an intkey page appears to be the parent of a non-intkey page, or
** vice-versa).
*/
static int moveToChild(BtCursor *pCur, u32 newPgno){
  BtShared *pBt = pCur->pBt;

  assert( cursorOwnsBtShared(pCur) );
  assert( pCur->eState==CURSOR_VALID );
  assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
  assert( pCur->iPage>=0 );
  if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){
    return SQLITE_CORRUPT_BKPT;
  }
  pCur->info.nSize = 0;
  pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
  pCur->aiIdx[pCur->iPage] = pCur->ix;
  pCur->apPage[pCur->iPage] = pCur->pPage;
  pCur->ix = 0;
  pCur->iPage++;
  return getAndInitPage(pBt, newPgno, &pCur->pPage, pCur, pCur->curPagerFlags);
}

#ifdef SQLITE_DEBUG
/*
** Page pParent is an internal (non-leaf) tree page. This function
** asserts that page number iChild is the left-child of the iIdx'th
** cell in page pParent. Or, if iIdx is equal to the total number of
** cells in pParent, that page number iChild is the right-child of
** the page.
*/
static void assertParentIndex(MemPage *pParent, int iIdx, Pgno iChild){
  if( CORRUPT_DB ) return;  /* The conditions tested below might not be true
                            ** in a corrupt database */
  assert( iIdx<=pParent->nCell );
  if( iIdx==pParent->nCell ){
    assert( get4byte(&pParent->aData[pParent->hdrOffset+8])==iChild );
  }else{
    assert( get4byte(findCell(pParent, iIdx))==iChild );
  }
}
#else
#  define assertParentIndex(x,y,z)
#endif

/*
** Move the cursor up to the parent page.
**
** pCur->ix is set to the cell index that contains the pointer
** to the page we are coming from.  If we are coming from the
** right-most child page then pCur->ix is set to one more than
** the largest cell index.
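**
** Worked example (tree shape invented for illustration): suppose the cursor
** descended root -> interior -> leaf, following cell 3 of the root and cell
** 7 of the interior page.  At that point iPage==2, apPage[] holds the root
** and the interior page, aiIdx[] holds {3, 7} and pPage is the leaf.  This
** routine releases the leaf, restores pPage to the interior page, drops
** iPage back to 1 and sets pCur->ix to 7, the cell whose child pointer was
** followed on the way down.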
*/ static void moveToParent(BtCursor *pCur){ MemPage *pLeaf; assert( cursorOwnsBtShared(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPage>0 ); assert( pCur->pPage ); assertParentIndex( pCur->apPage[pCur->iPage-1], pCur->aiIdx[pCur->iPage-1], pCur->pPage->pgno ); testcase( pCur->aiIdx[pCur->iPage-1] > pCur->apPage[pCur->iPage-1]->nCell ); pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); pCur->ix = pCur->aiIdx[pCur->iPage-1]; pLeaf = pCur->pPage; pCur->pPage = pCur->apPage[--pCur->iPage]; releasePageNotNull(pLeaf); } /* ** Move the cursor to point to the root page of its b-tree structure. ** ** If the table has a virtual root page, then the cursor is moved to point ** to the virtual root page instead of the actual root page. A table has a ** virtual root page when the actual root page contains no cells and a ** single child page. This can only happen with the table rooted at page 1. ** ** If the b-tree structure is empty, the cursor state is set to ** CURSOR_INVALID and this routine returns SQLITE_EMPTY. Otherwise, ** the cursor is set to point to the first cell located on the root ** (or virtual root) page and the cursor state is set to CURSOR_VALID. ** ** If this function returns successfully, it may be assumed that the ** page-header flags indicate that the [virtual] root-page is the expected ** kind of b-tree page (i.e. if when opening the cursor the caller did not ** specify a KeyInfo structure the flags byte is set to 0x05 or 0x0D, ** indicating a table b-tree, or if the caller did specify a KeyInfo ** structure the flags byte is set to 0x02 or 0x0A, indicating an index ** b-tree). */ static int moveToRoot(BtCursor *pCur){ MemPage *pRoot; int rc = SQLITE_OK; assert( cursorOwnsBtShared(pCur) ); assert( CURSOR_INVALID < CURSOR_REQUIRESEEK ); assert( CURSOR_VALID < CURSOR_REQUIRESEEK ); assert( CURSOR_FAULT > CURSOR_REQUIRESEEK ); assert( pCur->eState < CURSOR_REQUIRESEEK || pCur->iPage<0 ); assert( pCur->pgnoRoot>0 || pCur->iPage<0 ); if( pCur->iPage>=0 ){ if( pCur->iPage ){ releasePageNotNull(pCur->pPage); while( --pCur->iPage ){ releasePageNotNull(pCur->apPage[pCur->iPage]); } pCur->pPage = pCur->apPage[0]; goto skip_init; } }else if( pCur->pgnoRoot==0 ){ pCur->eState = CURSOR_INVALID; return SQLITE_EMPTY; }else{ assert( pCur->iPage==(-1) ); if( pCur->eState>=CURSOR_REQUIRESEEK ){ if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); return pCur->skipNext; } sqlite3BtreeClearCursor(pCur); } rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->pPage, 0, pCur->curPagerFlags); if( rc!=SQLITE_OK ){ pCur->eState = CURSOR_INVALID; return rc; } pCur->iPage = 0; pCur->curIntKey = pCur->pPage->intKey; } pRoot = pCur->pPage; assert( pRoot->pgno==pCur->pgnoRoot ); /* If pCur->pKeyInfo is not NULL, then the caller that opened this cursor ** expected to open it on an index b-tree. Otherwise, if pKeyInfo is ** NULL, the caller expects a table b-tree. If this is not the case, ** return an SQLITE_CORRUPT error. ** ** Earlier versions of SQLite assumed that this test could not fail ** if the root page was already loaded when this function was called (i.e. ** if pCur->iPage>=0). But this is not so if the database is corrupted ** in such a way that page pRoot is linked into a second b-tree table ** (or the freelist). 
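**
** For reference when reading the check below: the intKey flag tested here
** comes from the page-header flag byte.  That byte is a combination of the
** PTF_INTKEY (0x01), PTF_ZERODATA (0x02), PTF_LEAFDATA (0x04) and PTF_LEAF
** (0x08) bits defined in btreeInt.h, which is why a table b-tree page reads
** 0x05 (interior) or 0x0D (leaf) and an index b-tree page reads 0x02
** (interior) or 0x0A (leaf), matching the values quoted above.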
*/ assert( pRoot->intKey==1 || pRoot->intKey==0 ); if( pRoot->isInit==0 || (pCur->pKeyInfo==0)!=pRoot->intKey ){ return SQLITE_CORRUPT_PAGE(pCur->pPage); } skip_init: pCur->ix = 0; pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl); pRoot = pCur->pPage; if( pRoot->nCell>0 ){ pCur->eState = CURSOR_VALID; }else if( !pRoot->leaf ){ Pgno subpage; if( pRoot->pgno!=1 ) return SQLITE_CORRUPT_BKPT; subpage = get4byte(&pRoot->aData[pRoot->hdrOffset+8]); pCur->eState = CURSOR_VALID; rc = moveToChild(pCur, subpage); }else{ pCur->eState = CURSOR_INVALID; rc = SQLITE_EMPTY; } return rc; } /* ** Move the cursor down to the left-most leaf entry beneath the ** entry to which it is currently pointing. ** ** The left-most leaf is the one with the smallest key - the first ** in ascending order. */ static int moveToLeftmost(BtCursor *pCur){ Pgno pgno; int rc = SQLITE_OK; MemPage *pPage; assert( cursorOwnsBtShared(pCur) ); assert( pCur->eState==CURSOR_VALID ); while( rc==SQLITE_OK && !(pPage = pCur->pPage)->leaf ){ assert( pCur->ix<pPage->nCell ); pgno = get4byte(findCell(pPage, pCur->ix)); rc = moveToChild(pCur, pgno); } return rc; } /* ** Move the cursor down to the right-most leaf entry beneath the ** page to which it is currently pointing. Notice the difference ** between moveToLeftmost() and moveToRightmost(). moveToLeftmost() ** finds the left-most entry beneath the *entry* whereas moveToRightmost() ** finds the right-most entry beneath the *page*. ** ** The right-most entry is the one with the largest key - the last ** key in ascending order. */ static int moveToRightmost(BtCursor *pCur){ Pgno pgno; int rc = SQLITE_OK; MemPage *pPage = 0; assert( cursorOwnsBtShared(pCur) ); assert( pCur->eState==CURSOR_VALID ); while( !(pPage = pCur->pPage)->leaf ){ pgno = get4byte(&pPage->aData[pPage->hdrOffset+8]); pCur->ix = pPage->nCell; rc = moveToChild(pCur, pgno); if( rc ) return rc; } pCur->ix = pPage->nCell-1; assert( pCur->info.nSize==0 ); assert( (pCur->curFlags & BTCF_ValidNKey)==0 ); return SQLITE_OK; } /* Move the cursor to the first entry in the table. Return SQLITE_OK ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. */ int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ int rc; assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); rc = moveToRoot(pCur); if( rc==SQLITE_OK ){ assert( pCur->pPage->nCell>0 ); *pRes = 0; rc = moveToLeftmost(pCur); }else if( rc==SQLITE_EMPTY ){ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); *pRes = 1; rc = SQLITE_OK; } return rc; } /* Move the cursor to the last entry in the table. Return SQLITE_OK ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. */ int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ int rc; assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); /* If the cursor already points to the last entry, this is a no-op. */ if( CURSOR_VALID==pCur->eState && (pCur->curFlags & BTCF_AtLast)!=0 ){ #ifdef SQLITE_DEBUG /* This block serves to assert() that the cursor really does point ** to the last entry in the b-tree. 
*/ int ii; for(ii=0; ii<pCur->iPage; ii++){ assert( pCur->aiIdx[ii]==pCur->apPage[ii]->nCell ); } assert( pCur->ix==pCur->pPage->nCell-1 ); assert( pCur->pPage->leaf ); #endif *pRes = 0; return SQLITE_OK; } rc = moveToRoot(pCur); if( rc==SQLITE_OK ){ assert( pCur->eState==CURSOR_VALID ); *pRes = 0; rc = moveToRightmost(pCur); if( rc==SQLITE_OK ){ pCur->curFlags |= BTCF_AtLast; }else{ pCur->curFlags &= ~BTCF_AtLast; } }else if( rc==SQLITE_EMPTY ){ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); *pRes = 1; rc = SQLITE_OK; } return rc; } /* Move the cursor so that it points to an entry near the key ** specified by pIdxKey or intKey. Return a success code. ** ** For INTKEY tables, the intKey parameter is used. pIdxKey ** must be NULL. For index tables, pIdxKey is used and intKey ** is ignored. ** ** If an exact match is not found, then the cursor is always ** left pointing at a leaf page which would hold the entry if it ** were present. The cursor might point to an entry that comes ** before or after the key. ** ** An integer is written into *pRes which is the result of ** comparing the key with the entry to which the cursor is ** pointing. The meaning of the integer written into ** *pRes is as follows: ** ** *pRes<0 The cursor is left pointing at an entry that ** is smaller than intKey/pIdxKey or if the table is empty ** and the cursor is therefore left point to nothing. ** ** *pRes==0 The cursor is left pointing at an entry that ** exactly matches intKey/pIdxKey. ** ** *pRes>0 The cursor is left pointing at an entry that ** is larger than intKey/pIdxKey. ** ** For index tables, the pIdxKey->eqSeen field is set to 1 if there ** exists an entry in the table that exactly matches pIdxKey. */ int sqlite3BtreeMovetoUnpacked( BtCursor *pCur, /* The cursor to be moved */ UnpackedRecord *pIdxKey, /* Unpacked index key */ i64 intKey, /* The table key */ int biasRight, /* If true, bias the search to the high end */ int *pRes /* Write search results here */ ){ int rc; RecordCompare xRecordCompare; assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); assert( pRes ); assert( (pIdxKey==0)==(pCur->pKeyInfo==0) ); assert( pCur->eState!=CURSOR_VALID || (pIdxKey==0)==(pCur->curIntKey!=0) ); /* If the cursor is already positioned at the point we are trying ** to move to, then just return without doing any work */ if( pIdxKey==0 && pCur->eState==CURSOR_VALID && (pCur->curFlags & BTCF_ValidNKey)!=0 ){ if( pCur->info.nKey==intKey ){ *pRes = 0; return SQLITE_OK; } if( pCur->info.nKey<intKey ){ if( (pCur->curFlags & BTCF_AtLast)!=0 ){ *pRes = -1; return SQLITE_OK; } /* If the requested key is one more than the previous key, then ** try to get there using sqlite3BtreeNext() rather than a full ** binary search. This is an optimization only. 
The correct answer ** is still obtained without this case, only a little more slowely */ if( pCur->info.nKey+1==intKey ){ *pRes = 0; rc = sqlite3BtreeNext(pCur, 0); if( rc==SQLITE_OK ){ getCellInfo(pCur); if( pCur->info.nKey==intKey ){ return SQLITE_OK; } }else if( rc==SQLITE_DONE ){ rc = SQLITE_OK; }else{ return rc; } } } } if( pIdxKey ){ xRecordCompare = sqlite3VdbeFindCompare(pIdxKey); pIdxKey->errCode = 0; assert( pIdxKey->default_rc==1 || pIdxKey->default_rc==0 || pIdxKey->default_rc==-1 ); }else{ xRecordCompare = 0; /* All keys are integers */ } rc = moveToRoot(pCur); if( rc ){ if( rc==SQLITE_EMPTY ){ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); *pRes = -1; return SQLITE_OK; } return rc; } assert( pCur->pPage ); assert( pCur->pPage->isInit ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->pPage->nCell > 0 ); assert( pCur->iPage==0 || pCur->apPage[0]->intKey==pCur->curIntKey ); assert( pCur->curIntKey || pIdxKey ); for(;;){ int lwr, upr, idx, c; Pgno chldPg; MemPage *pPage = pCur->pPage; u8 *pCell; /* Pointer to current cell in pPage */ /* pPage->nCell must be greater than zero. If this is the root-page ** the cursor would have been INVALID above and this for(;;) loop ** not run. If this is not the root-page, then the moveToChild() routine ** would have already detected db corruption. Similarly, pPage must ** be the right kind (index or table) of b-tree page. Otherwise ** a moveToChild() or moveToRoot() call would have detected corruption. */ assert( pPage->nCell>0 ); assert( pPage->intKey==(pIdxKey==0) ); lwr = 0; upr = pPage->nCell-1; assert( biasRight==0 || biasRight==1 ); idx = upr>>(1-biasRight); /* idx = biasRight ? upr : (lwr+upr)/2; */ pCur->ix = (u16)idx; if( xRecordCompare==0 ){ for(;;){ i64 nCellKey; pCell = findCellPastPtr(pPage, idx); if( pPage->intKeyLeaf ){ while( 0x80 <= *(pCell++) ){ if( pCell>=pPage->aDataEnd ){ return SQLITE_CORRUPT_PAGE(pPage); } } } getVarint(pCell, (u64*)&nCellKey); if( nCellKey<intKey ){ lwr = idx+1; if( lwr>upr ){ c = -1; break; } }else if( nCellKey>intKey ){ upr = idx-1; if( lwr>upr ){ c = +1; break; } }else{ assert( nCellKey==intKey ); pCur->ix = (u16)idx; if( !pPage->leaf ){ lwr = idx; goto moveto_next_layer; }else{ pCur->curFlags |= BTCF_ValidNKey; pCur->info.nKey = nCellKey; pCur->info.nSize = 0; *pRes = 0; return SQLITE_OK; } } assert( lwr+upr>=0 ); idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2; */ } }else{ for(;;){ int nCell; /* Size of the pCell cell in bytes */ pCell = findCellPastPtr(pPage, idx); /* The maximum supported page-size is 65536 bytes. This means that ** the maximum number of record bytes stored on an index B-Tree ** page is less than 16384 bytes and may be stored as a 2-byte ** varint. This information is used to attempt to avoid parsing ** the entire cell by checking for the cases where the record is ** stored entirely within the b-tree page by inspecting the first ** 2 bytes of the cell. */ nCell = pCell[0]; if( nCell<=pPage->max1bytePayload ){ /* This branch runs if the record-size field of the cell is a ** single byte varint and the record fits entirely on the main ** b-tree page. */ testcase( pCell+nCell+1==pPage->aDataEnd ); c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey); }else if( !(pCell[1] & 0x80) && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal ){ /* The record-size field is a 2 byte varint and the record ** fits entirely on the main b-tree page. 
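**
** Worked example (arbitrary bytes): a cell beginning 0x81 0x23 decodes as
** ((0x81 & 0x7f)<<7) + 0x23 = 128 + 35 = 163 bytes of record, with the
** record content starting at pCell[2], which is exactly the case handled
** by this branch.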
*/ testcase( pCell+nCell+2==pPage->aDataEnd ); c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey); }else{ /* The record flows over onto one or more overflow pages. In ** this case the whole cell needs to be parsed, a buffer allocated ** and accessPayload() used to retrieve the record into the ** buffer before VdbeRecordCompare() can be called. ** ** If the record is corrupt, the xRecordCompare routine may read ** up to two varints past the end of the buffer. An extra 18 ** bytes of padding is allocated at the end of the buffer in ** case this happens. */ void *pCellKey; u8 * const pCellBody = pCell - pPage->childPtrSize; const int nOverrun = 18; /* Size of the overrun padding */ pPage->xParseCell(pPage, pCellBody, &pCur->info); nCell = (int)pCur->info.nKey; testcase( nCell<0 ); /* True if key size is 2^32 or more */ testcase( nCell==0 ); /* Invalid key size: 0x80 0x80 0x00 */ testcase( nCell==1 ); /* Invalid key size: 0x80 0x80 0x01 */ testcase( nCell==2 ); /* Minimum legal index key size */ if( nCell<2 || nCell/pCur->pBt->usableSize>pCur->pBt->nPage ){ rc = SQLITE_CORRUPT_PAGE(pPage); goto moveto_finish; } pCellKey = sqlite3Malloc( nCell+nOverrun ); if( pCellKey==0 ){ rc = SQLITE_NOMEM_BKPT; goto moveto_finish; } pCur->ix = (u16)idx; rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0); memset(((u8*)pCellKey)+nCell,0,nOverrun); /* Fix uninit warnings */ pCur->curFlags &= ~BTCF_ValidOvfl; if( rc ){ sqlite3_free(pCellKey); goto moveto_finish; } c = sqlite3VdbeRecordCompare(nCell, pCellKey, pIdxKey); sqlite3_free(pCellKey); } assert( (pIdxKey->errCode!=SQLITE_CORRUPT || c==0) && (pIdxKey->errCode!=SQLITE_NOMEM || pCur->pBtree->db->mallocFailed) ); if( c<0 ){ lwr = idx+1; }else if( c>0 ){ upr = idx-1; }else{ assert( c==0 ); *pRes = 0; rc = SQLITE_OK; pCur->ix = (u16)idx; if( pIdxKey->errCode ) rc = SQLITE_CORRUPT_BKPT; goto moveto_finish; } if( lwr>upr ) break; assert( lwr+upr>=0 ); idx = (lwr+upr)>>1; /* idx = (lwr+upr)/2 */ } } assert( lwr==upr+1 || (pPage->intKey && !pPage->leaf) ); assert( pPage->isInit ); if( pPage->leaf ){ assert( pCur->ix<pCur->pPage->nCell ); pCur->ix = (u16)idx; *pRes = c; rc = SQLITE_OK; goto moveto_finish; } moveto_next_layer: if( lwr>=pPage->nCell ){ chldPg = get4byte(&pPage->aData[pPage->hdrOffset+8]); }else{ chldPg = get4byte(findCell(pPage, lwr)); } pCur->ix = (u16)lwr; rc = moveToChild(pCur, chldPg); if( rc ) break; } moveto_finish: pCur->info.nSize = 0; assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); return rc; } /* ** Return TRUE if the cursor is not pointing at an entry of the table. ** ** TRUE will be returned after a call to sqlite3BtreeNext() moves ** past the last entry in the table or sqlite3BtreePrev() moves past ** the first entry. TRUE is also returned if the table is empty. */ int sqlite3BtreeEof(BtCursor *pCur){ /* TODO: What if the cursor is in CURSOR_REQUIRESEEK but all table entries ** have been deleted? This API will need to change to return an error code ** as well as the boolean result value. */ return (CURSOR_VALID!=pCur->eState); } /* ** Return an estimate for the number of rows in the table that pCur is ** pointing to. Return a negative number if no estimate is currently ** available. */ i64 sqlite3BtreeRowCountEst(BtCursor *pCur){ i64 n; u8 i; assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); /* Currently this interface is only called by the OP_IfSmaller ** opcode, and it that case the cursor will always be valid and ** will always point to a leaf node. 
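**
** Worked example (fan-out numbers invented): with the cursor on a leaf
** holding 40 cells, beneath interior pages holding 25 and 30 cells on the
** path from the root, the loop below estimates 40*25*30 = 30000 rows.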
*/ if( NEVER(pCur->eState!=CURSOR_VALID) ) return -1; if( NEVER(pCur->pPage->leaf==0) ) return -1; n = pCur->pPage->nCell; for(i=0; i<pCur->iPage; i++){ n *= pCur->apPage[i]->nCell; } return n; } /* ** Advance the cursor to the next entry in the database. ** Return value: ** ** SQLITE_OK success ** SQLITE_DONE cursor is already pointing at the last element ** otherwise some kind of error occurred ** ** The main entry point is sqlite3BtreeNext(). That routine is optimized ** for the common case of merely incrementing the cell counter BtCursor.aiIdx ** to the next cell on the current page. The (slower) btreeNext() helper ** routine is called when it is necessary to move to a different page or ** to restore the cursor. ** ** If bit 0x01 of the F argument in sqlite3BtreeNext(C,F) is 1, then the ** cursor corresponds to an SQL index and this routine could have been ** skipped if the SQL index had been a unique index. The F argument ** is a hint to the implement. SQLite btree implementation does not use ** this hint, but COMDB2 does. */ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){ int rc; int idx; MemPage *pPage; assert( cursorOwnsBtShared(pCur) ); if( pCur->eState!=CURSOR_VALID ){ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } if( CURSOR_INVALID==pCur->eState ){ return SQLITE_DONE; } if( pCur->eState==CURSOR_SKIPNEXT ){ pCur->eState = CURSOR_VALID; if( pCur->skipNext>0 ) return SQLITE_OK; } } pPage = pCur->pPage; idx = ++pCur->ix; if( !pPage->isInit ){ /* The only known way for this to happen is for there to be a ** recursive SQL function that does a DELETE operation as part of a ** SELECT which deletes content out from under an active cursor ** in a corrupt database file where the table being DELETE-ed from ** has pages in common with the table being queried. See TH3 ** module cov1/btree78.test testcase 220 (2018-06-08) for an ** example. */ return SQLITE_CORRUPT_BKPT; } /* If the database file is corrupt, it is possible for the value of idx ** to be invalid here. This can only occur if a second cursor modifies ** the page while cursor pCur is holding a reference to it. Which can ** only happen if the database is corrupt in such a way as to link the ** page into more than one b-tree structure. */ testcase( idx>pPage->nCell ); if( idx>=pPage->nCell ){ if( !pPage->leaf ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); if( rc ) return rc; return moveToLeftmost(pCur); } do{ if( pCur->iPage==0 ){ pCur->eState = CURSOR_INVALID; return SQLITE_DONE; } moveToParent(pCur); pPage = pCur->pPage; }while( pCur->ix>=pPage->nCell ); if( pPage->intKey ){ return sqlite3BtreeNext(pCur, 0); }else{ return SQLITE_OK; } } if( pPage->leaf ){ return SQLITE_OK; }else{ return moveToLeftmost(pCur); } } int sqlite3BtreeNext(BtCursor *pCur, int flags){ MemPage *pPage; UNUSED_PARAMETER( flags ); /* Used in COMDB2 but not native SQLite */ assert( cursorOwnsBtShared(pCur) ); assert( flags==0 || flags==1 ); pCur->info.nSize = 0; pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); if( pCur->eState!=CURSOR_VALID ) return btreeNext(pCur); pPage = pCur->pPage; if( (++pCur->ix)>=pPage->nCell ){ pCur->ix--; return btreeNext(pCur); } if( pPage->leaf ){ return SQLITE_OK; }else{ return moveToLeftmost(pCur); } } /* ** Step the cursor to the back to the previous entry in the database. 
** Return values: ** ** SQLITE_OK success ** SQLITE_DONE the cursor is already on the first element of the table ** otherwise some kind of error occurred ** ** The main entry point is sqlite3BtreePrevious(). That routine is optimized ** for the common case of merely decrementing the cell counter BtCursor.aiIdx ** to the previous cell on the current page. The (slower) btreePrevious() ** helper routine is called when it is necessary to move to a different page ** or to restore the cursor. ** ** If bit 0x01 of the F argument to sqlite3BtreePrevious(C,F) is 1, then ** the cursor corresponds to an SQL index and this routine could have been ** skipped if the SQL index had been a unique index. The F argument is a ** hint to the implement. The native SQLite btree implementation does not ** use this hint, but COMDB2 does. */ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){ int rc; MemPage *pPage; assert( cursorOwnsBtShared(pCur) ); assert( (pCur->curFlags & (BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey))==0 ); assert( pCur->info.nSize==0 ); if( pCur->eState!=CURSOR_VALID ){ rc = restoreCursorPosition(pCur); if( rc!=SQLITE_OK ){ return rc; } if( CURSOR_INVALID==pCur->eState ){ return SQLITE_DONE; } if( CURSOR_SKIPNEXT==pCur->eState ){ pCur->eState = CURSOR_VALID; if( pCur->skipNext<0 ) return SQLITE_OK; } } pPage = pCur->pPage; assert( pPage->isInit ); if( !pPage->leaf ){ int idx = pCur->ix; rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); if( rc ) return rc; rc = moveToRightmost(pCur); }else{ while( pCur->ix==0 ){ if( pCur->iPage==0 ){ pCur->eState = CURSOR_INVALID; return SQLITE_DONE; } moveToParent(pCur); } assert( pCur->info.nSize==0 ); assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 ); pCur->ix--; pPage = pCur->pPage; if( pPage->intKey && !pPage->leaf ){ rc = sqlite3BtreePrevious(pCur, 0); }else{ rc = SQLITE_OK; } } return rc; } int sqlite3BtreePrevious(BtCursor *pCur, int flags){ assert( cursorOwnsBtShared(pCur) ); assert( flags==0 || flags==1 ); UNUSED_PARAMETER( flags ); /* Used in COMDB2 but not native SQLite */ pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidOvfl|BTCF_ValidNKey); pCur->info.nSize = 0; if( pCur->eState!=CURSOR_VALID || pCur->ix==0 || pCur->pPage->leaf==0 ){ return btreePrevious(pCur); } pCur->ix--; return SQLITE_OK; } /* ** Allocate a new page from the database file. ** ** The new page is marked as dirty. (In other words, sqlite3PagerWrite() ** has already been called on the new page.) The new page has also ** been referenced and the calling routine is responsible for calling ** sqlite3PagerUnref() on the new page when it is done. ** ** SQLITE_OK is returned on success. Any other return value indicates ** an error. *ppPage is set to NULL in the event of an error. ** ** If the "nearby" parameter is not 0, then an effort is made to ** locate a page close to the page number "nearby". This can be used in an ** attempt to keep related pages close to each other in the database file, ** which in turn can make database access faster. ** ** If the eMode parameter is BTALLOC_EXACT and the nearby page exists ** anywhere on the free-list, then it is guaranteed to be returned. If ** eMode is BTALLOC_LT then the page returned will be less than or equal ** to nearby if any such page exists. If eMode is BTALLOC_ANY then there ** are no restrictions on which page is returned. 
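**
** For orientation while reading the code below, a freelist trunk page is
** laid out as follows (restating the EVIDENCE-OF comments in the body of
** this function):
**
**     bytes 0..3      page number of the next trunk page, or zero
**     bytes 4..7      number K of leaf page numbers that follow
**     bytes 8 onward  K big-endian 4-byte leaf page numbers
**
** which is why a trunk page with usable size U can never legitimately
** carry more than U/4 - 2 leaf pointers.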
*/ static int allocateBtreePage( BtShared *pBt, /* The btree */ MemPage **ppPage, /* Store pointer to the allocated page here */ Pgno *pPgno, /* Store the page number here */ Pgno nearby, /* Search for a page near this one */ u8 eMode /* BTALLOC_EXACT, BTALLOC_LT, or BTALLOC_ANY */ ){ MemPage *pPage1; int rc; u32 n; /* Number of pages on the freelist */ u32 k; /* Number of leaves on the trunk of the freelist */ MemPage *pTrunk = 0; MemPage *pPrevTrunk = 0; Pgno mxPage; /* Total size of the database file */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) ); pPage1 = pBt->pPage1; mxPage = btreePagecount(pBt); /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36 ** stores stores the total number of pages on the freelist. */ n = get4byte(&pPage1->aData[36]); testcase( n==mxPage-1 ); if( n>=mxPage ){ return SQLITE_CORRUPT_BKPT; } if( n>0 ){ /* There are pages on the freelist. Reuse one of those pages. */ Pgno iTrunk; u8 searchList = 0; /* If the free-list must be searched for 'nearby' */ u32 nSearch = 0; /* Count of the number of search attempts */ /* If eMode==BTALLOC_EXACT and a query of the pointer-map ** shows that the page 'nearby' is somewhere on the free-list, then ** the entire-list will be searched for that page. */ #ifndef SQLITE_OMIT_AUTOVACUUM if( eMode==BTALLOC_EXACT ){ if( nearby<=mxPage ){ u8 eType; assert( nearby>0 ); assert( pBt->autoVacuum ); rc = ptrmapGet(pBt, nearby, &eType, 0); if( rc ) return rc; if( eType==PTRMAP_FREEPAGE ){ searchList = 1; } } }else if( eMode==BTALLOC_LE ){ searchList = 1; } #endif /* Decrement the free-list count by 1. Set iTrunk to the index of the ** first free-list trunk page. iPrevTrunk is initially 1. */ rc = sqlite3PagerWrite(pPage1->pDbPage); if( rc ) return rc; put4byte(&pPage1->aData[36], n-1); /* The code within this loop is run only once if the 'searchList' variable ** is not true. Otherwise, it runs once for each trunk-page on the ** free-list until the page 'nearby' is located (eMode==BTALLOC_EXACT) ** or until a page less than 'nearby' is located (eMode==BTALLOC_LT) */ do { pPrevTrunk = pTrunk; if( pPrevTrunk ){ /* EVIDENCE-OF: R-01506-11053 The first integer on a freelist trunk page ** is the page number of the next freelist trunk page in the list or ** zero if this is the last freelist trunk page. */ iTrunk = get4byte(&pPrevTrunk->aData[0]); }else{ /* EVIDENCE-OF: R-59841-13798 The 4-byte big-endian integer at offset 32 ** stores the page number of the first page of the freelist, or zero if ** the freelist is empty. */ iTrunk = get4byte(&pPage1->aData[32]); } testcase( iTrunk==mxPage ); if( iTrunk>mxPage || nSearch++ > n ){ rc = SQLITE_CORRUPT_PGNO(pPrevTrunk ? pPrevTrunk->pgno : 1); }else{ rc = btreeGetUnusedPage(pBt, iTrunk, &pTrunk, 0); } if( rc ){ pTrunk = 0; goto end_allocate_page; } assert( pTrunk!=0 ); assert( pTrunk->aData!=0 ); /* EVIDENCE-OF: R-13523-04394 The second integer on a freelist trunk page ** is the number of leaf page pointers to follow. */ k = get4byte(&pTrunk->aData[4]); if( k==0 && !searchList ){ /* The trunk has no leaves and the list is not being searched. 
** So extract the trunk page itself and use it as the newly ** allocated page */ assert( pPrevTrunk==0 ); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ){ goto end_allocate_page; } *pPgno = iTrunk; memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); *ppPage = pTrunk; pTrunk = 0; TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); }else if( k>(u32)(pBt->usableSize/4 - 2) ){ /* Value of k is out of range. Database corruption */ rc = SQLITE_CORRUPT_PGNO(iTrunk); goto end_allocate_page; #ifndef SQLITE_OMIT_AUTOVACUUM }else if( searchList && (nearby==iTrunk || (iTrunk<nearby && eMode==BTALLOC_LE)) ){ /* The list is being searched and this trunk page is the page ** to allocate, regardless of whether it has leaves. */ *pPgno = iTrunk; *ppPage = pTrunk; searchList = 0; rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ){ goto end_allocate_page; } if( k==0 ){ if( !pPrevTrunk ){ memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); }else{ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); if( rc!=SQLITE_OK ){ goto end_allocate_page; } memcpy(&pPrevTrunk->aData[0], &pTrunk->aData[0], 4); } }else{ /* The trunk page is required by the caller but it contains ** pointers to free-list leaves. The first leaf becomes a trunk ** page in this case. */ MemPage *pNewTrunk; Pgno iNewTrunk = get4byte(&pTrunk->aData[8]); if( iNewTrunk>mxPage ){ rc = SQLITE_CORRUPT_PGNO(iTrunk); goto end_allocate_page; } testcase( iNewTrunk==mxPage ); rc = btreeGetUnusedPage(pBt, iNewTrunk, &pNewTrunk, 0); if( rc!=SQLITE_OK ){ goto end_allocate_page; } rc = sqlite3PagerWrite(pNewTrunk->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pNewTrunk); goto end_allocate_page; } memcpy(&pNewTrunk->aData[0], &pTrunk->aData[0], 4); put4byte(&pNewTrunk->aData[4], k-1); memcpy(&pNewTrunk->aData[8], &pTrunk->aData[12], (k-1)*4); releasePage(pNewTrunk); if( !pPrevTrunk ){ assert( sqlite3PagerIswriteable(pPage1->pDbPage) ); put4byte(&pPage1->aData[32], iNewTrunk); }else{ rc = sqlite3PagerWrite(pPrevTrunk->pDbPage); if( rc ){ goto end_allocate_page; } put4byte(&pPrevTrunk->aData[0], iNewTrunk); } } pTrunk = 0; TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); #endif }else if( k>0 ){ /* Extract a leaf from the trunk */ u32 closest; Pgno iPage; unsigned char *aData = pTrunk->aData; if( nearby>0 ){ u32 i; closest = 0; if( eMode==BTALLOC_LE ){ for(i=0; i<k; i++){ iPage = get4byte(&aData[8+i*4]); if( iPage<=nearby ){ closest = i; break; } } }else{ int dist; dist = sqlite3AbsInt32(get4byte(&aData[8]) - nearby); for(i=1; i<k; i++){ int d2 = sqlite3AbsInt32(get4byte(&aData[8+i*4]) - nearby); if( d2<dist ){ closest = i; dist = d2; } } } }else{ closest = 0; } iPage = get4byte(&aData[8+closest*4]); testcase( iPage==mxPage ); if( iPage>mxPage ){ rc = SQLITE_CORRUPT_PGNO(iTrunk); goto end_allocate_page; } testcase( iPage==mxPage ); if( !searchList || (iPage==nearby || (iPage<nearby && eMode==BTALLOC_LE)) ){ int noContent; *pPgno = iPage; TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d" ": %d more free pages\n", *pPgno, closest+1, k, pTrunk->pgno, n-1)); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ) goto end_allocate_page; if( closest<k-1 ){ memcpy(&aData[8+closest*4], &aData[4+k*4], 4); } put4byte(&aData[4], k-1); noContent = !btreeGetHasContent(pBt, *pPgno)? 
PAGER_GET_NOCONTENT : 0; rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, noContent); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite((*ppPage)->pDbPage); if( rc!=SQLITE_OK ){ releasePage(*ppPage); *ppPage = 0; } } searchList = 0; } } releasePage(pPrevTrunk); pPrevTrunk = 0; }while( searchList ); }else{ /* There are no pages on the freelist, so append a new page to the ** database image. ** ** Normally, new pages allocated by this block can be requested from the ** pager layer with the 'no-content' flag set. This prevents the pager ** from trying to read the pages content from disk. However, if the ** current transaction has already run one or more incremental-vacuum ** steps, then the page we are about to allocate may contain content ** that is required in the event of a rollback. In this case, do ** not set the no-content flag. This causes the pager to load and journal ** the current page content before overwriting it. ** ** Note that the pager will not actually attempt to load or journal ** content for any page that really does lie past the end of the database ** file on disk. So the effects of disabling the no-content optimization ** here are confined to those pages that lie between the end of the ** database image and the end of the database file. */ int bNoContent = (0==IfNotOmitAV(pBt->bDoTruncate))? PAGER_GET_NOCONTENT:0; rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); if( rc ) return rc; pBt->nPage++; if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ) pBt->nPage++; #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum && PTRMAP_ISPAGE(pBt, pBt->nPage) ){ /* If *pPgno refers to a pointer-map page, allocate two new pages ** at the end of the file instead of one. The first allocated page ** becomes a new pointer-map page, the second is used by the caller. */ MemPage *pPg = 0; TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pPg->pDbPage); releasePage(pPg); } if( rc ) return rc; pBt->nPage++; if( pBt->nPage==PENDING_BYTE_PAGE(pBt) ){ pBt->nPage++; } } #endif put4byte(28 + (u8*)pBt->pPage1->aData, pBt->nPage); *pPgno = pBt->nPage; assert( *pPgno!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, *pPgno, ppPage, bNoContent); if( rc ) return rc; rc = sqlite3PagerWrite((*ppPage)->pDbPage); if( rc!=SQLITE_OK ){ releasePage(*ppPage); *ppPage = 0; } TRACE(("ALLOCATE: %d from end of file\n", *pPgno)); } assert( CORRUPT_DB || *pPgno!=PENDING_BYTE_PAGE(pBt) ); end_allocate_page: releasePage(pTrunk); releasePage(pPrevTrunk); assert( rc!=SQLITE_OK || sqlite3PagerPageRefcount((*ppPage)->pDbPage)<=1 ); assert( rc!=SQLITE_OK || (*ppPage)->isInit==0 ); return rc; } /* ** This function is used to add page iPage to the database file free-list. ** It is assumed that the page is not already a part of the free-list. ** ** The value passed as the second argument to this function is optional. ** If the caller happens to have a pointer to the MemPage object ** corresponding to page iPage handy, it may pass it as the second value. ** Otherwise, it may pass NULL. ** ** If a pointer to a MemPage object is passed as the second argument, ** its reference count is not altered by this function. 
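**
** For reference while reading this routine: the freelist bookkeeping lives
** in the page-1 header.  The 4-byte big-endian integer at offset 32 is the
** page number of the first freelist trunk page (zero if the freelist is
** empty) and the integer at offset 36 is the total number of freelist
** pages; those are the two fields read and updated below, as in:
**
**     iTrunk = get4byte(&pPage1->aData[32]);
**     nFree  = get4byte(&pPage1->aData[36]);
**
** A freed page then either becomes a leaf of the first trunk page, if that
** trunk has room, or becomes the new first trunk page itself.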
*/ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ MemPage *pTrunk = 0; /* Free-list trunk page */ Pgno iTrunk = 0; /* Page number of free-list trunk page */ MemPage *pPage1 = pBt->pPage1; /* Local reference to page 1 */ MemPage *pPage; /* Page being freed. May be NULL. */ int rc; /* Return Code */ u32 nFree; /* Initial number of pages on free-list */ assert( sqlite3_mutex_held(pBt->mutex) ); assert( CORRUPT_DB || iPage>1 ); assert( !pMemPage || pMemPage->pgno==iPage ); if( iPage<2 || iPage>pBt->nPage ){ return SQLITE_CORRUPT_BKPT; } if( pMemPage ){ pPage = pMemPage; sqlite3PagerRef(pPage->pDbPage); }else{ pPage = btreePageLookup(pBt, iPage); } /* Increment the free page count on pPage1 */ rc = sqlite3PagerWrite(pPage1->pDbPage); if( rc ) goto freepage_out; nFree = get4byte(&pPage1->aData[36]); put4byte(&pPage1->aData[36], nFree+1); if( pBt->btsFlags & BTS_SECURE_DELETE ){ /* If the secure_delete option is enabled, then ** always fully overwrite deleted information with zeros. */ if( (!pPage && ((rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0) ) || ((rc = sqlite3PagerWrite(pPage->pDbPage))!=0) ){ goto freepage_out; } memset(pPage->aData, 0, pPage->pBt->pageSize); } /* If the database supports auto-vacuum, write an entry in the pointer-map ** to indicate that the page is free. */ if( ISAUTOVACUUM ){ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc); if( rc ) goto freepage_out; } /* Now manipulate the actual database free-list structure. There are two ** possibilities. If the free-list is currently empty, or if the first ** trunk page in the free-list is full, then this page will become a ** new free-list trunk page. Otherwise, it will become a leaf of the ** first trunk page in the current free-list. This block tests if it ** is possible to add the page as a new free-list leaf. */ if( nFree!=0 ){ u32 nLeaf; /* Initial number of leaf cells on trunk page */ iTrunk = get4byte(&pPage1->aData[32]); rc = btreeGetPage(pBt, iTrunk, &pTrunk, 0); if( rc!=SQLITE_OK ){ goto freepage_out; } nLeaf = get4byte(&pTrunk->aData[4]); assert( pBt->usableSize>32 ); if( nLeaf > (u32)pBt->usableSize/4 - 2 ){ rc = SQLITE_CORRUPT_BKPT; goto freepage_out; } if( nLeaf < (u32)pBt->usableSize/4 - 8 ){ /* In this case there is room on the trunk page to insert the page ** being freed as a new leaf. ** ** Note that the trunk page is not really full until it contains ** usableSize/4 - 2 entries, not usableSize/4 - 8 entries as we have ** coded. But due to a coding error in versions of SQLite prior to ** 3.6.0, databases with freelist trunk pages holding more than ** usableSize/4 - 8 entries will be reported as corrupt. In order ** to maintain backwards compatibility with older versions of SQLite, ** we will continue to restrict the number of entries to usableSize/4 - 8 ** for now. At some point in the future (once everyone has upgraded ** to 3.6.0 or later) we should consider fixing the conditional above ** to read "usableSize/4-2" instead of "usableSize/4-8". ** ** EVIDENCE-OF: R-19920-11576 However, newer versions of SQLite still ** avoid using the last six entries in the freelist trunk page array in ** order that database files created by newer versions of SQLite can be ** read by older versions of SQLite. 
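**
** (Illustrative arithmetic, not part of the upstream comment: with a
** usable page size of 4096 bytes a trunk page could physically hold
** 4096/4 - 2 = 1022 leaf pointers, but the test above starts a new trunk
** page once 4096/4 - 8 = 1016 leaves have been recorded.)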
*/ rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc==SQLITE_OK ){ put4byte(&pTrunk->aData[4], nLeaf+1); put4byte(&pTrunk->aData[8+nLeaf*4], iPage); if( pPage && (pBt->btsFlags & BTS_SECURE_DELETE)==0 ){ sqlite3PagerDontWrite(pPage->pDbPage); } rc = btreeSetHasContent(pBt, iPage); } TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); goto freepage_out; } } /* If control flows to this point, then it was not possible to add the ** the page being freed as a leaf page of the first trunk in the free-list. ** Possibly because the free-list is empty, or possibly because the ** first trunk in the free-list is full. Either way, the page being freed ** will become the new first trunk page in the free-list. */ if( pPage==0 && SQLITE_OK!=(rc = btreeGetPage(pBt, iPage, &pPage, 0)) ){ goto freepage_out; } rc = sqlite3PagerWrite(pPage->pDbPage); if( rc!=SQLITE_OK ){ goto freepage_out; } put4byte(pPage->aData, iTrunk); put4byte(&pPage->aData[4], 0); put4byte(&pPage1->aData[32], iPage); TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); freepage_out: if( pPage ){ pPage->isInit = 0; } releasePage(pPage); releasePage(pTrunk); return rc; } static void freePage(MemPage *pPage, int *pRC){ if( (*pRC)==SQLITE_OK ){ *pRC = freePage2(pPage->pBt, pPage, pPage->pgno); } } /* ** Free any overflow pages associated with the given Cell. Store ** size information about the cell in pInfo. */ static int clearCell( MemPage *pPage, /* The page that contains the Cell */ unsigned char *pCell, /* First byte of the Cell */ CellInfo *pInfo /* Size information about the cell */ ){ BtShared *pBt; Pgno ovflPgno; int rc; int nOvfl; u32 ovflPageSize; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); pPage->xParseCell(pPage, pCell, pInfo); if( pInfo->nLocal==pInfo->nPayload ){ return SQLITE_OK; /* No overflow pages. Return without doing anything */ } testcase( pCell + pInfo->nSize == pPage->aDataEnd ); testcase( pCell + (pInfo->nSize-1) == pPage->aDataEnd ); if( pCell + pInfo->nSize > pPage->aDataEnd ){ /* Cell extends past end of page */ return SQLITE_CORRUPT_PAGE(pPage); } ovflPgno = get4byte(pCell + pInfo->nSize - 4); pBt = pPage->pBt; assert( pBt->usableSize > 4 ); ovflPageSize = pBt->usableSize - 4; nOvfl = (pInfo->nPayload - pInfo->nLocal + ovflPageSize - 1)/ovflPageSize; assert( nOvfl>0 || (CORRUPT_DB && (pInfo->nPayload + ovflPageSize)<ovflPageSize) ); while( nOvfl-- ){ Pgno iNext = 0; MemPage *pOvfl = 0; if( ovflPgno<2 || ovflPgno>btreePagecount(pBt) ){ /* 0 is not a legal page number and page 1 cannot be an ** overflow page. Therefore if ovflPgno<2 or past the end of the ** file the database must be corrupt. */ return SQLITE_CORRUPT_BKPT; } if( nOvfl ){ rc = getOverflowPage(pBt, ovflPgno, &pOvfl, &iNext); if( rc ) return rc; } if( ( pOvfl || ((pOvfl = btreePageLookup(pBt, ovflPgno))!=0) ) && sqlite3PagerPageRefcount(pOvfl->pDbPage)!=1 ){ /* There is no reason any cursor should have an outstanding reference ** to an overflow page belonging to a cell that is being deleted/updated. ** So if there exists more than one reference to this page, then it ** must not really be an overflow page and the database must be corrupt. ** It is helpful to detect this before calling freePage2(), as ** freePage2() may zero the page contents if secure-delete mode is ** enabled. If this 'overflow' page happens to be a page that the ** caller is iterating through or using in some other way, this ** can be problematic. 
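**
** (Illustrative note, not part of the upstream comment: the loop walks
** the chain using the 4-byte "next page" pointer stored at the start of
** each overflow page, and the number of pages it expects to free is
**
**   nOvfl = (nPayload - nLocal + (usableSize-4) - 1) / (usableSize-4);
**
** since each overflow page carries usableSize-4 bytes of payload.)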
*/ rc = SQLITE_CORRUPT_BKPT; }else{ rc = freePage2(pBt, pOvfl, ovflPgno); } if( pOvfl ){ sqlite3PagerUnref(pOvfl->pDbPage); } if( rc ) return rc; ovflPgno = iNext; } return SQLITE_OK; } /* ** Create the byte sequence used to represent a cell on page pPage ** and write that byte sequence into pCell[]. Overflow pages are ** allocated and filled in as necessary. The calling procedure ** is responsible for making sure sufficient space has been allocated ** for pCell[]. ** ** Note that pCell does not necessary need to point to the pPage->aData ** area. pCell might point to some temporary storage. The cell will ** be constructed in this temporary area then copied into pPage->aData ** later. */ static int fillInCell( MemPage *pPage, /* The page that contains the cell */ unsigned char *pCell, /* Complete text of the cell */ const BtreePayload *pX, /* Payload with which to construct the cell */ int *pnSize /* Write cell size here */ ){ int nPayload; const u8 *pSrc; int nSrc, n, rc, mn; int spaceLeft; MemPage *pToRelease; unsigned char *pPrior; unsigned char *pPayload; BtShared *pBt; Pgno pgnoOvfl; int nHeader; assert( sqlite3_mutex_held(pPage->pBt->mutex) ); /* pPage is not necessarily writeable since pCell might be auxiliary ** buffer space that is separate from the pPage buffer area */ assert( pCell<pPage->aData || pCell>=&pPage->aData[pPage->pBt->pageSize] || sqlite3PagerIswriteable(pPage->pDbPage) ); /* Fill in the header. */ nHeader = pPage->childPtrSize; if( pPage->intKey ){ nPayload = pX->nData + pX->nZero; pSrc = pX->pData; nSrc = pX->nData; assert( pPage->intKeyLeaf ); /* fillInCell() only called for leaves */ nHeader += putVarint32(&pCell[nHeader], nPayload); nHeader += putVarint(&pCell[nHeader], *(u64*)&pX->nKey); }else{ assert( pX->nKey<=0x7fffffff && pX->pKey!=0 ); nSrc = nPayload = (int)pX->nKey; pSrc = pX->pKey; nHeader += putVarint32(&pCell[nHeader], nPayload); } /* Fill in the payload */ pPayload = &pCell[nHeader]; if( nPayload<=pPage->maxLocal ){ /* This is the common case where everything fits on the btree page ** and no overflow pages are required. */ n = nHeader + nPayload; testcase( n==3 ); testcase( n==4 ); if( n<4 ) n = 4; *pnSize = n; assert( nSrc<=nPayload ); testcase( nSrc<nPayload ); memcpy(pPayload, pSrc, nSrc); memset(pPayload+nSrc, 0, nPayload-nSrc); return SQLITE_OK; } /* If we reach this point, it means that some of the content will need ** to spill onto overflow pages. */ mn = pPage->minLocal; n = mn + (nPayload - mn) % (pPage->pBt->usableSize - 4); testcase( n==pPage->maxLocal ); testcase( n==pPage->maxLocal+1 ); if( n > pPage->maxLocal ) n = mn; spaceLeft = n; *pnSize = n + nHeader + 4; pPrior = &pCell[nHeader+n]; pToRelease = 0; pgnoOvfl = 0; pBt = pPage->pBt; /* At this point variables should be set as follows: ** ** nPayload Total payload size in bytes ** pPayload Begin writing payload here ** spaceLeft Space available at pPayload. If nPayload>spaceLeft, ** that means content must spill into overflow pages. ** *pnSize Size of the local cell (not counting overflow pages) ** pPrior Where to write the pgno of the first overflow page ** ** Use a call to btreeParseCellPtr() to verify that the values above ** were computed correctly. 
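**
** (Illustrative example with assumed numbers, not part of the upstream
** comment: if usableSize==1024, minLocal==300 and maxLocal==1000, then a
** payload of nPayload==2000 bytes gives
**
**   n = 300 + (2000-300) % (1024-4) = 300 + 680 = 980
**
** so 980 payload bytes stay in the local cell and the remaining 1020
** bytes exactly fill one overflow page of capacity usableSize-4 == 1020.)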
*/ #ifdef SQLITE_DEBUG { CellInfo info; pPage->xParseCell(pPage, pCell, &info); assert( nHeader==(int)(info.pPayload - pCell) ); assert( info.nKey==pX->nKey ); assert( *pnSize == info.nSize ); assert( spaceLeft == info.nLocal ); } #endif /* Write the payload into the local Cell and any extra into overflow pages */ while( 1 ){ n = nPayload; if( n>spaceLeft ) n = spaceLeft; /* If pToRelease is not zero than pPayload points into the data area ** of pToRelease. Make sure pToRelease is still writeable. */ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); /* If pPayload is part of the data area of pPage, then make sure pPage ** is still writeable */ assert( pPayload<pPage->aData || pPayload>=&pPage->aData[pBt->pageSize] || sqlite3PagerIswriteable(pPage->pDbPage) ); if( nSrc>=n ){ memcpy(pPayload, pSrc, n); }else if( nSrc>0 ){ n = nSrc; memcpy(pPayload, pSrc, n); }else{ memset(pPayload, 0, n); } nPayload -= n; if( nPayload<=0 ) break; pPayload += n; pSrc += n; nSrc -= n; spaceLeft -= n; if( spaceLeft==0 ){ MemPage *pOvfl = 0; #ifndef SQLITE_OMIT_AUTOVACUUM Pgno pgnoPtrmap = pgnoOvfl; /* Overflow page pointer-map entry page */ if( pBt->autoVacuum ){ do{ pgnoOvfl++; } while( PTRMAP_ISPAGE(pBt, pgnoOvfl) || pgnoOvfl==PENDING_BYTE_PAGE(pBt) ); } #endif rc = allocateBtreePage(pBt, &pOvfl, &pgnoOvfl, pgnoOvfl, 0); #ifndef SQLITE_OMIT_AUTOVACUUM /* If the database supports auto-vacuum, and the second or subsequent ** overflow page is being allocated, add an entry to the pointer-map ** for that page now. ** ** If this is the first overflow page, then write a partial entry ** to the pointer-map. If we write nothing to this pointer-map slot, ** then the optimistic overflow chain processing in clearCell() ** may misinterpret the uninitialized values and delete the ** wrong pages from the database. */ if( pBt->autoVacuum && rc==SQLITE_OK ){ u8 eType = (pgnoPtrmap?PTRMAP_OVERFLOW2:PTRMAP_OVERFLOW1); ptrmapPut(pBt, pgnoOvfl, eType, pgnoPtrmap, &rc); if( rc ){ releasePage(pOvfl); } } #endif if( rc ){ releasePage(pToRelease); return rc; } /* If pToRelease is not zero than pPrior points into the data area ** of pToRelease. Make sure pToRelease is still writeable. */ assert( pToRelease==0 || sqlite3PagerIswriteable(pToRelease->pDbPage) ); /* If pPrior is part of the data area of pPage, then make sure pPage ** is still writeable */ assert( pPrior<pPage->aData || pPrior>=&pPage->aData[pBt->pageSize] || sqlite3PagerIswriteable(pPage->pDbPage) ); put4byte(pPrior, pgnoOvfl); releasePage(pToRelease); pToRelease = pOvfl; pPrior = pOvfl->aData; put4byte(pPrior, 0); pPayload = &pOvfl->aData[4]; spaceLeft = pBt->usableSize - 4; } } releasePage(pToRelease); return SQLITE_OK; } /* ** Remove the i-th cell from pPage. This routine effects pPage only. ** The cell content is not freed or deallocated. It is assumed that ** the cell content has been copied someplace else. This routine just ** removes the reference to the cell from pPage. ** ** "sz" must be the number of bytes in the cell. */ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ u32 pc; /* Offset to cell content of cell being deleted */ u8 *data; /* pPage->aData */ u8 *ptr; /* Used to move bytes around within data[] */ int rc; /* The return code */ int hdr; /* Beginning of the header. 0 most pages. 
100 page 1 */ if( *pRC ) return; assert( idx>=0 && idx<pPage->nCell ); assert( CORRUPT_DB || sz==cellSize(pPage, idx) ); assert( sqlite3PagerIswriteable(pPage->pDbPage) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( pPage->nFree>=0 ); data = pPage->aData; ptr = &pPage->aCellIdx[2*idx]; pc = get2byte(ptr); hdr = pPage->hdrOffset; testcase( pc==get2byte(&data[hdr+5]) ); testcase( pc+sz==pPage->pBt->usableSize ); if( pc+sz > pPage->pBt->usableSize ){ *pRC = SQLITE_CORRUPT_BKPT; return; } rc = freeSpace(pPage, pc, sz); if( rc ){ *pRC = rc; return; } pPage->nCell--; if( pPage->nCell==0 ){ memset(&data[hdr+1], 0, 4); data[hdr+7] = 0; put2byte(&data[hdr+5], pPage->pBt->usableSize); pPage->nFree = pPage->pBt->usableSize - pPage->hdrOffset - pPage->childPtrSize - 8; }else{ memmove(ptr, ptr+2, 2*(pPage->nCell - idx)); put2byte(&data[hdr+3], pPage->nCell); pPage->nFree += 2; } } /* ** Insert a new cell on pPage at cell index "i". pCell points to the ** content of the cell. ** ** If the cell content will fit on the page, then put it there. If it ** will not fit, then make a copy of the cell content into pTemp if ** pTemp is not null. Regardless of pTemp, allocate a new entry ** in pPage->apOvfl[] and make it point to the cell content (either ** in pTemp or the original pCell) and also record its index. ** Allocating a new entry in pPage->aCell[] implies that ** pPage->nOverflow is incremented. ** ** *pRC must be SQLITE_OK when this routine is called. */ static void insertCell( MemPage *pPage, /* Page into which we are copying */ int i, /* New cell becomes the i-th cell of the page */ u8 *pCell, /* Content of the new cell */ int sz, /* Bytes of content in pCell */ u8 *pTemp, /* Temp storage space for pCell, if needed */ Pgno iChild, /* If non-zero, replace first 4 bytes with this value */ int *pRC /* Read and write return code from here */ ){ int idx = 0; /* Where to write new cell content in data[] */ int j; /* Loop counter */ u8 *data; /* The content of the whole page */ u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ assert( *pRC==SQLITE_OK ); assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); assert( MX_CELL(pPage->pBt)<=10921 ); assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) ); assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); /* The cell should normally be sized correctly. However, when moving a ** malformed cell from a leaf page to an interior page, if the cell size ** wanted to be less than 4 but got rounded up to 4 on the leaf, then size ** might be less than 8 (leaf-size + pointer) on the interior node. Hence ** the term after the || in the following assert(). */ assert( sz==pPage->xCellSize(pPage, pCell) || (sz==8 && iChild>0) ); assert( pPage->nFree>=0 ); if( pPage->nOverflow || sz+2>pPage->nFree ){ if( pTemp ){ memcpy(pTemp, pCell, sz); pCell = pTemp; } if( iChild ){ put4byte(pCell, iChild); } j = pPage->nOverflow++; /* Comparison against ArraySize-1 since we hold back one extra slot ** as a contingency. In other words, never need more than 3 overflow ** slots but 4 are allocated, just to be safe. */ assert( j < ArraySize(pPage->apOvfl)-1 ); pPage->apOvfl[j] = pCell; pPage->aiOvfl[j] = (u16)i; /* When multiple overflows occur, they are always sequential and in ** sorted order. 
** This invariant arises because multiple overflows can
    ** only occur when inserting divider cells into the parent page during
    ** balancing, and the dividers are adjacent and sorted.
    */
    assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */
    assert( j==0 || i==pPage->aiOvfl[j-1]+1 );   /* Overflows are sequential */
  }else{
    int rc = sqlite3PagerWrite(pPage->pDbPage);
    if( rc!=SQLITE_OK ){
      *pRC = rc;
      return;
    }
    assert( sqlite3PagerIswriteable(pPage->pDbPage) );
    data = pPage->aData;
    assert( &data[pPage->cellOffset]==pPage->aCellIdx );
    rc = allocateSpace(pPage, sz, &idx);
    if( rc ){ *pRC = rc; return; }
    /* The allocateSpace() routine guarantees the following properties
    ** if it returns successfully */
    assert( idx >= 0 );
    assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB );
    assert( idx+sz <= (int)pPage->pBt->usableSize );
    pPage->nFree -= (u16)(2 + sz);
    if( iChild ){
      /* In a corrupt database where an entry in the cell index section of
      ** a btree page has a value of 3 or less, the pCell value might point
      ** as many as 4 bytes in front of the start of the aData buffer for
      ** the source page.  Make sure this does not cause problems by not
      ** reading the first 4 bytes */
      memcpy(&data[idx+4], pCell+4, sz-4);
      put4byte(&data[idx], iChild);
    }else{
      memcpy(&data[idx], pCell, sz);
    }
    pIns = pPage->aCellIdx + i*2;
    memmove(pIns+2, pIns, 2*(pPage->nCell - i));
    put2byte(pIns, idx);
    pPage->nCell++;  /* increment the cell count */
    if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++;
    assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB );
#ifndef SQLITE_OMIT_AUTOVACUUM
    if( pPage->pBt->autoVacuum ){
      /* The cell may contain a pointer to an overflow page.  If so, write
      ** the entry for the overflow page into the pointer map.
      */
      ptrmapPutOvflPtr(pPage, pPage, pCell, pRC);
    }
#endif
  }
}

/*
** The following parameters determine how many adjacent pages get involved
** in a balancing operation.  NN is the number of neighbors on either side
** of the page that participate in the balancing operation.  NB is the
** total number of pages that participate, including the target page and
** NN neighbors on either side.
**
** The minimum value of NN is 1 (of course).  Increasing NN above 1
** (to 2 or 3) gives a modest improvement in SELECT and DELETE performance
** in exchange for a larger degradation in INSERT and UPDATE performance.
** The value of NN appears to give the best results overall.
**
** (Later:) The description above makes it seem as if these values are
** tunable - as if you could change them and recompile and it would all work.
** But that is unlikely.  NB has been 3 since the inception of SQLite and
** we have never tested any other value.
*/
#define NN 1             /* Number of neighbors on either side of pPage */
#define NB 3             /* (NN*2+1): Total pages involved in the balance */

/*
** A CellArray object contains a cache of pointers and sizes for a
** consecutive sequence of cells that might be held on multiple pages.
**
** The cells in this array are the divider cell or cells from the pParent
** page plus up to three child pages.  There are a total of nCell cells.
**
** pRef is a pointer to one of the pages that contributes cells.  This is
** used to access information such as MemPage.intKey and MemPage.pBt->pageSize
** which should be common to all pages that contribute cells to this array.
**
** apCell[] and szCell[] hold, respectively, pointers to the start of each
** cell and the size of each cell.  Some of the apCell[] pointers might refer
** to overflow cells.
** In other words, some apCell[] pointers might not point to the content
** area of the pages.
**
** A szCell[] of zero means the size of that cell has not yet been computed.
**
** The cells come from as many as four different pages:
**
**             -----------
**             | Parent  |
**             -----------
**            /     |     \
**           /      |      \
**  ---------   ---------   ---------
**  |Child-1|   |Child-2|   |Child-3|
**  ---------   ---------   ---------
**
** The order of the cells in the array, for an index btree, is:
**
**    1.  All cells from Child-1 in order
**    2.  The first divider cell from Parent
**    3.  All cells from Child-2 in order
**    4.  The second divider cell from Parent
**    5.  All cells from Child-3 in order
**
** For a table-btree (with rowids) the items 2 and 4 are empty because
** content exists only in leaves and there are no divider cells.
**
** For an index btree, the apEnd[] array holds pointers to the end of page
** for Child-1, the Parent, Child-2, the Parent (again), and Child-3,
** respectively.  The ixNx[] array holds the number of cells contained in
** each of these 5 stages, and all stages to the left.  Hence:
**
**    ixNx[0] = Number of cells in Child-1.
**    ixNx[1] = Number of cells in Child-1 plus 1 for first divider.
**    ixNx[2] = Number of cells in Child-1 and Child-2 + 1 for 1st divider.
**    ixNx[3] = Number of cells in Child-1 and Child-2 + both divider cells.
**    ixNx[4] = Total number of cells.
**
** For a table-btree, the concept is similar, except only apEnd[0]..apEnd[2]
** are used and they point to the leaf pages only, and the ixNx values are:
**
**    ixNx[0] = Number of cells in Child-1.
**    ixNx[1] = Number of cells in Child-1 and Child-2.
**    ixNx[2] = Total number of cells.
**
** Sometimes when deleting, a child page can have zero cells.  In those
** cases, ixNx[] entries with higher indexes, and the corresponding apEnd[]
** entries, shift down.  The end result is that each ixNx[] entry should
** be larger than the previous.
*/
typedef struct CellArray CellArray;
struct CellArray {
  int nCell;              /* Number of cells in apCell[] */
  MemPage *pRef;          /* Reference page */
  u8 **apCell;            /* All cells being balanced */
  u16 *szCell;            /* Local size of all cells in apCell[] */
  u8 *apEnd[NB*2];        /* MemPage.aDataEnd values */
  int ixNx[NB*2];         /* Index at which we move to the next apEnd[] */
};

/*
** Make sure the cell sizes at idx, idx+1, ..., idx+N-1 have been
** computed.
*/
static void populateCellCache(CellArray *p, int idx, int N){
  assert( idx>=0 && idx+N<=p->nCell );
  while( N>0 ){
    assert( p->apCell[idx]!=0 );
    if( p->szCell[idx]==0 ){
      p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]);
    }else{
      assert( CORRUPT_DB ||
              p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) );
    }
    idx++;
    N--;
  }
}

/*
** Return the size of the Nth element of the cell array
*/
static SQLITE_NOINLINE u16 computeCellSize(CellArray *p, int N){
  assert( N>=0 && N<p->nCell );
  assert( p->szCell[N]==0 );
  p->szCell[N] = p->pRef->xCellSize(p->pRef, p->apCell[N]);
  return p->szCell[N];
}
static u16 cachedCellSize(CellArray *p, int N){
  assert( N>=0 && N<p->nCell );
  if( p->szCell[N] ) return p->szCell[N];
  return computeCellSize(p, N);
}

/*
** Array apCell[] contains pointers to nCell b-tree page cells. The
** szCell[] array contains the size in bytes of each cell. This function
** replaces the current contents of page pPg with the contents of the cell
** array.
**
** Some of the cells in apCell[] may currently be stored in pPg. This
** function works around problems caused by this by making a copy of any
** such cells before overwriting the page data.
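**
** (Illustrative note, not part of the upstream comment: the copy is made
** into the pager's temporary page buffer, so a cell whose apCell[] pointer
** refers into pPg itself is read back from that copy once the content
** area has been overwritten:
**
**   u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
**   memcpy(&pTmp[j], &aData[j], usableSize - j);
**   ...
**   if( SQLITE_WITHIN(pCell, aData, pEnd) ) pCell = &pTmp[pCell - aData];
**
** where j is the start of the cell content area.)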
**
** The MemPage.nFree field is invalidated by this function. It is the
** responsibility of the caller to set it correctly.
*/
static int rebuildPage(
  CellArray *pCArray,             /* Content to be added to page pPg */
  int iFirst,                     /* First cell in pCArray to use */
  int nCell,                      /* Final number of cells on page */
  MemPage *pPg                    /* The page to be reconstructed */
){
  const int hdr = pPg->hdrOffset;          /* Offset of header on pPg */
  u8 * const aData = pPg->aData;           /* Pointer to data for pPg */
  const int usableSize = pPg->pBt->usableSize;
  u8 * const pEnd = &aData[usableSize];
  int i = iFirst;                 /* Which cell to copy from pCArray*/
  u32 j;                          /* Start of cell content area */
  int iEnd = i+nCell;             /* Loop terminator */
  u8 *pCellptr = pPg->aCellIdx;
  u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager);
  u8 *pData;
  int k;                          /* Current slot in pCArray->apEnd[] */
  u8 *pSrcEnd;                    /* Current pCArray->apEnd[k] value */

  assert( i<iEnd );
  j = get2byte(&aData[hdr+5]);
  if( NEVER(j>(u32)usableSize) ){ j = 0; }
  memcpy(&pTmp[j], &aData[j], usableSize - j);

  for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){}
  pSrcEnd = pCArray->apEnd[k];

  pData = pEnd;
  while( 1/*exit by break*/ ){
    u8 *pCell = pCArray->apCell[i];
    u16 sz = pCArray->szCell[i];
    assert( sz>0 );
    if( SQLITE_WITHIN(pCell,aData,pEnd) ){
      if( ((uptr)(pCell+sz))>(uptr)pEnd ) return SQLITE_CORRUPT_BKPT;
      pCell = &pTmp[pCell - aData];
    }else if( (uptr)(pCell+sz)>(uptr)pSrcEnd
           && (uptr)(pCell)<(uptr)pSrcEnd
    ){
      return SQLITE_CORRUPT_BKPT;
    }

    pData -= sz;
    put2byte(pCellptr, (pData - aData));
    pCellptr += 2;
    if( pData < pCellptr ) return SQLITE_CORRUPT_BKPT;
    memcpy(pData, pCell, sz);
    assert( sz==pPg->xCellSize(pPg, pCell) || CORRUPT_DB );
    testcase( sz!=pPg->xCellSize(pPg,pCell) );
    i++;
    if( i>=iEnd ) break;
    if( pCArray->ixNx[k]<=i ){
      k++;
      pSrcEnd = pCArray->apEnd[k];
    }
  }

  /* The pPg->nFree field is now set incorrectly. The caller will fix it. */
  pPg->nCell = nCell;
  pPg->nOverflow = 0;

  put2byte(&aData[hdr+1], 0);
  put2byte(&aData[hdr+3], pPg->nCell);
  put2byte(&aData[hdr+5], pData - aData);
  aData[hdr+7] = 0x00;
  return SQLITE_OK;
}

/*
** The pCArray object contains pointers to b-tree cells and the cell sizes.
** This function attempts to add the cells stored in the array to page pPg.
** If it cannot (because the page needs to be defragmented before the cells
** will fit), non-zero is returned. Otherwise, if the cells are added
** successfully, zero is returned.
**
** Argument pCellptr points to the first entry in the cell-pointer array
** (part of page pPg) to populate. After cell apCell[0] is written to the
** page body, a 16-bit offset is written to pCellptr. And so on, for each
** cell in the array. It is the responsibility of the caller to ensure
** that it is safe to overwrite this part of the cell-pointer array.
**
** When this function is called, *ppData points to the start of the
** content area on page pPg. If the size of the content area is extended,
** *ppData is updated to point to the new start of the content area
** before returning.
**
** Finally, argument pBegin points to the byte immediately following the
** end of the space required by this page for the cell-pointer area (for
** all cells - not just those inserted by the current call). If the content
** area must be extended to before this point in order to accommodate all
** cells in apCell[], then the cells do not fit and non-zero is returned.
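**
** (Illustrative note, not part of the upstream comment: callers treat a
** non-zero return as "rebuild the page instead" rather than as an error,
** e.g. editPage() does
**
**   if( pageInsertArray(pPg, pBegin, &pData, pCellptr, iNew, nAdd, pCArray) )
**     goto editpage_fail;   /* falls back to rebuildPage() */
**
** as can be seen further down in this file.)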
*/ static int pageInsertArray( MemPage *pPg, /* Page to add cells to */ u8 *pBegin, /* End of cell-pointer array */ u8 **ppData, /* IN/OUT: Page content-area pointer */ u8 *pCellptr, /* Pointer to cell-pointer area */ int iFirst, /* Index of first cell to add */ int nCell, /* Number of cells to add to pPg */ CellArray *pCArray /* Array of cells */ ){ int i = iFirst; /* Loop counter - cell index to insert */ u8 *aData = pPg->aData; /* Complete page */ u8 *pData = *ppData; /* Content area. A subset of aData[] */ int iEnd = iFirst + nCell; /* End of loop. One past last cell to ins */ int k; /* Current slot in pCArray->apEnd[] */ u8 *pEnd; /* Maximum extent of cell data */ assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */ if( iEnd<=iFirst ) return 0; for(k=0; pCArray->ixNx[k]<=i && ALWAYS(k<NB*2); k++){} pEnd = pCArray->apEnd[k]; while( 1 /*Exit by break*/ ){ int sz, rc; u8 *pSlot; sz = cachedCellSize(pCArray, i); if( (aData[1]==0 && aData[2]==0) || (pSlot = pageFindSlot(pPg,sz,&rc))==0 ){ if( (pData - pBegin)<sz ) return 1; pData -= sz; pSlot = pData; } /* pSlot and pCArray->apCell[i] will never overlap on a well-formed ** database. But they might for a corrupt database. Hence use memmove() ** since memcpy() sends SIGABORT with overlapping buffers on OpenBSD */ assert( (pSlot+sz)<=pCArray->apCell[i] || pSlot>=(pCArray->apCell[i]+sz) || CORRUPT_DB ); if( (uptr)(pCArray->apCell[i]+sz)>(uptr)pEnd && (uptr)(pCArray->apCell[i])<(uptr)pEnd ){ assert( CORRUPT_DB ); (void)SQLITE_CORRUPT_BKPT; return 1; } memmove(pSlot, pCArray->apCell[i], sz); put2byte(pCellptr, (pSlot - aData)); pCellptr += 2; i++; if( i>=iEnd ) break; if( pCArray->ixNx[k]<=i ){ k++; pEnd = pCArray->apEnd[k]; } } *ppData = pData; return 0; } /* ** The pCArray object contains pointers to b-tree cells and their sizes. ** ** This function adds the space associated with each cell in the array ** that is currently stored within the body of pPg to the pPg free-list. ** The cell-pointers and other fields of the page are not updated. ** ** This function returns the total number of cells added to the free-list. */ static int pageFreeArray( MemPage *pPg, /* Page to edit */ int iFirst, /* First cell to delete */ int nCell, /* Cells to delete */ CellArray *pCArray /* Array of cells */ ){ u8 * const aData = pPg->aData; u8 * const pEnd = &aData[pPg->pBt->usableSize]; u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize]; int nRet = 0; int i; int iEnd = iFirst + nCell; u8 *pFree = 0; int szFree = 0; for(i=iFirst; i<iEnd; i++){ u8 *pCell = pCArray->apCell[i]; if( SQLITE_WITHIN(pCell, pStart, pEnd) ){ int sz; /* No need to use cachedCellSize() here. The sizes of all cells that ** are to be freed have already been computing while deciding which ** cells need freeing */ sz = pCArray->szCell[i]; assert( sz>0 ); if( pFree!=(pCell + sz) ){ if( pFree ){ assert( pFree>aData && (pFree - aData)<65536 ); freeSpace(pPg, (u16)(pFree - aData), szFree); } pFree = pCell; szFree = sz; if( pFree+sz>pEnd ) return 0; }else{ pFree = pCell; szFree += sz; } nRet++; } } if( pFree ){ assert( pFree>aData && (pFree - aData)<65536 ); freeSpace(pPg, (u16)(pFree - aData), szFree); } return nRet; } /* ** pCArray contains pointers to and sizes of all cells in the page being ** balanced. The current page, pPg, has pPg->nCell cells starting with ** pCArray->apCell[iOld]. After balancing, this page should hold nNew cells ** starting at apCell[iNew]. 
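**
** (Illustrative example, not part of the upstream comment: if the page
** currently holds cells apCell[10..17] (iOld==10, pPg->nCell==8) and after
** the balance it should hold apCell[7..20] (iNew==7, nNew==14), then cells
** 7..9 are prepended, cells 10..17 stay where they are, and cells 18..20
** are appended.)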
** ** This routine makes the necessary adjustments to pPg so that it contains ** the correct cells after being balanced. ** ** The pPg->nFree field is invalid when this function returns. It is the ** responsibility of the caller to set it correctly. */ static int editPage( MemPage *pPg, /* Edit this page */ int iOld, /* Index of first cell currently on page */ int iNew, /* Index of new first cell on page */ int nNew, /* Final number of cells on page */ CellArray *pCArray /* Array of cells and sizes */ ){ u8 * const aData = pPg->aData; const int hdr = pPg->hdrOffset; u8 *pBegin = &pPg->aCellIdx[nNew * 2]; int nCell = pPg->nCell; /* Cells stored on pPg */ u8 *pData; u8 *pCellptr; int i; int iOldEnd = iOld + pPg->nCell + pPg->nOverflow; int iNewEnd = iNew + nNew; #ifdef SQLITE_DEBUG u8 *pTmp = sqlite3PagerTempSpace(pPg->pBt->pPager); memcpy(pTmp, aData, pPg->pBt->usableSize); #endif /* Remove cells from the start and end of the page */ assert( nCell>=0 ); if( iOld<iNew ){ int nShift = pageFreeArray(pPg, iOld, iNew-iOld, pCArray); if( nShift>nCell ) return SQLITE_CORRUPT_BKPT; memmove(pPg->aCellIdx, &pPg->aCellIdx[nShift*2], nCell*2); nCell -= nShift; } if( iNewEnd < iOldEnd ){ int nTail = pageFreeArray(pPg, iNewEnd, iOldEnd - iNewEnd, pCArray); assert( nCell>=nTail ); nCell -= nTail; } pData = &aData[get2byteNotZero(&aData[hdr+5])]; if( pData<pBegin ) goto editpage_fail; /* Add cells to the start of the page */ if( iNew<iOld ){ int nAdd = MIN(nNew,iOld-iNew); assert( (iOld-iNew)<nNew || nCell==0 || CORRUPT_DB ); assert( nAdd>=0 ); pCellptr = pPg->aCellIdx; memmove(&pCellptr[nAdd*2], pCellptr, nCell*2); if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iNew, nAdd, pCArray ) ) goto editpage_fail; nCell += nAdd; } /* Add any overflow cells */ for(i=0; i<pPg->nOverflow; i++){ int iCell = (iOld + pPg->aiOvfl[i]) - iNew; if( iCell>=0 && iCell<nNew ){ pCellptr = &pPg->aCellIdx[iCell * 2]; if( nCell>iCell ){ memmove(&pCellptr[2], pCellptr, (nCell - iCell) * 2); } nCell++; if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iCell+iNew, 1, pCArray ) ) goto editpage_fail; } } /* Append cells to the end of the page */ assert( nCell>=0 ); pCellptr = &pPg->aCellIdx[nCell*2]; if( pageInsertArray( pPg, pBegin, &pData, pCellptr, iNew+nCell, nNew-nCell, pCArray ) ) goto editpage_fail; pPg->nCell = nNew; pPg->nOverflow = 0; put2byte(&aData[hdr+3], pPg->nCell); put2byte(&aData[hdr+5], pData - aData); #ifdef SQLITE_DEBUG for(i=0; i<nNew && !CORRUPT_DB; i++){ u8 *pCell = pCArray->apCell[i+iNew]; int iOff = get2byteAligned(&pPg->aCellIdx[i*2]); if( SQLITE_WITHIN(pCell, aData, &aData[pPg->pBt->usableSize]) ){ pCell = &pTmp[pCell - aData]; } assert( 0==memcmp(pCell, &aData[iOff], pCArray->pRef->xCellSize(pCArray->pRef, pCArray->apCell[i+iNew])) ); } #endif return SQLITE_OK; editpage_fail: /* Unable to edit this page. Rebuild it from scratch instead. */ populateCellCache(pCArray, iNew, nNew); return rebuildPage(pCArray, iNew, nNew, pPg); } #ifndef SQLITE_OMIT_QUICKBALANCE /* ** This version of balance() handles the common special case where ** a new entry is being inserted on the extreme right-end of the ** tree, in other words, when the new entry will become the largest ** entry in the tree. ** ** Instead of trying to balance the 3 right-most leaf pages, just add ** a new page to the right-hand side and put the one new entry in ** that page. This leaves the right side of the tree somewhat ** unbalanced. 
But odds are that we will be inserting new entries ** at the end soon afterwards so the nearly empty page will quickly ** fill up. On average. ** ** pPage is the leaf page which is the right-most page in the tree. ** pParent is its parent. pPage must have a single overflow entry ** which is also the right-most entry on the page. ** ** The pSpace buffer is used to store a temporary copy of the divider ** cell that will be inserted into pParent. Such a cell consists of a 4 ** byte page number followed by a variable length integer. In other ** words, at most 13 bytes. Hence the pSpace buffer must be at ** least 13 bytes in size. */ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ BtShared *const pBt = pPage->pBt; /* B-Tree Database */ MemPage *pNew; /* Newly allocated page */ int rc; /* Return Code */ Pgno pgnoNew; /* Page number of pNew */ assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); assert( pPage->nOverflow==1 ); if( pPage->nCell==0 ) return SQLITE_CORRUPT_BKPT; /* dbfuzz001.test */ assert( pPage->nFree>=0 ); assert( pParent->nFree>=0 ); /* Allocate a new page. This page will become the right-sibling of ** pPage. Make the parent page writable, so that the new divider cell ** may be inserted. If both these operations are successful, proceed. */ rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); if( rc==SQLITE_OK ){ u8 *pOut = &pSpace[4]; u8 *pCell = pPage->apOvfl[0]; u16 szCell = pPage->xCellSize(pPage, pCell); u8 *pStop; CellArray b; assert( sqlite3PagerIswriteable(pNew->pDbPage) ); assert( CORRUPT_DB || pPage->aData[0]==(PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF) ); zeroPage(pNew, PTF_INTKEY|PTF_LEAFDATA|PTF_LEAF); b.nCell = 1; b.pRef = pPage; b.apCell = &pCell; b.szCell = &szCell; b.apEnd[0] = pPage->aDataEnd; b.ixNx[0] = 2; rc = rebuildPage(&b, 0, 1, pNew); if( NEVER(rc) ){ releasePage(pNew); return rc; } pNew->nFree = pBt->usableSize - pNew->cellOffset - 2 - szCell; /* If this is an auto-vacuum database, update the pointer map ** with entries for the new page, and any pointer from the ** cell on the page to an overflow page. If either of these ** operations fails, the return code is set, but the contents ** of the parent page are still manipulated by thh code below. ** That is Ok, at this point the parent page is guaranteed to ** be marked as dirty. Returning an error code will cause a ** rollback, undoing any changes made to the parent page. */ if( ISAUTOVACUUM ){ ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc); if( szCell>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pNew, pCell, &rc); } } /* Create a divider cell to insert into pParent. The divider cell ** consists of a 4-byte page number (the page number of pPage) and ** a variable length key value (which must be the same value as the ** largest key on pPage). ** ** To find the largest key value on pPage, first find the right-most ** cell on pPage. The first two fields of this cell are the ** record-length (a variable length integer at most 32-bits in size) ** and the key value (a variable length integer, may have any value). ** The first of the while(...) loops below skips over the record-length ** field. The second while(...) loop copies the key value from the ** cell on pPage into the pSpace buffer. */ pCell = findCell(pPage, pPage->nCell-1); pStop = &pCell[9]; while( (*(pCell++)&0x80) && pCell<pStop ); pStop = &pCell[9]; while( ((*(pOut++) = *(pCell++))&0x80) && pCell<pStop ); /* Insert the new divider cell into pParent. 
*/ if( rc==SQLITE_OK ){ insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace), 0, pPage->pgno, &rc); } /* Set the right-child pointer of pParent to point to the new page. */ put4byte(&pParent->aData[pParent->hdrOffset+8], pgnoNew); /* Release the reference to the new page. */ releasePage(pNew); } return rc; } #endif /* SQLITE_OMIT_QUICKBALANCE */ #if 0 /* ** This function does not contribute anything to the operation of SQLite. ** it is sometimes activated temporarily while debugging code responsible ** for setting pointer-map entries. */ static int ptrmapCheckPages(MemPage **apPage, int nPage){ int i, j; for(i=0; i<nPage; i++){ Pgno n; u8 e; MemPage *pPage = apPage[i]; BtShared *pBt = pPage->pBt; assert( pPage->isInit ); for(j=0; j<pPage->nCell; j++){ CellInfo info; u8 *z; z = findCell(pPage, j); pPage->xParseCell(pPage, z, &info); if( info.nLocal<info.nPayload ){ Pgno ovfl = get4byte(&z[info.nSize-4]); ptrmapGet(pBt, ovfl, &e, &n); assert( n==pPage->pgno && e==PTRMAP_OVERFLOW1 ); } if( !pPage->leaf ){ Pgno child = get4byte(z); ptrmapGet(pBt, child, &e, &n); assert( n==pPage->pgno && e==PTRMAP_BTREE ); } } if( !pPage->leaf ){ Pgno child = get4byte(&pPage->aData[pPage->hdrOffset+8]); ptrmapGet(pBt, child, &e, &n); assert( n==pPage->pgno && e==PTRMAP_BTREE ); } } return 1; } #endif /* ** This function is used to copy the contents of the b-tree node stored ** on page pFrom to page pTo. If page pFrom was not a leaf page, then ** the pointer-map entries for each child page are updated so that the ** parent page stored in the pointer map is page pTo. If pFrom contained ** any cells with overflow page pointers, then the corresponding pointer ** map entries are also updated so that the parent page is page pTo. ** ** If pFrom is currently carrying any overflow cells (entries in the ** MemPage.apOvfl[] array), they are not copied to pTo. ** ** Before returning, page pTo is reinitialized using btreeInitPage(). ** ** The performance of this function is not critical. It is only used by ** the balance_shallower() and balance_deeper() procedures, neither of ** which are called often under normal circumstances. */ static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){ if( (*pRC)==SQLITE_OK ){ BtShared * const pBt = pFrom->pBt; u8 * const aFrom = pFrom->aData; u8 * const aTo = pTo->aData; int const iFromHdr = pFrom->hdrOffset; int const iToHdr = ((pTo->pgno==1) ? 100 : 0); int rc; int iData; assert( pFrom->isInit ); assert( pFrom->nFree>=iToHdr ); assert( get2byte(&aFrom[iFromHdr+5]) <= (int)pBt->usableSize ); /* Copy the b-tree node content from page pFrom to page pTo. */ iData = get2byte(&aFrom[iFromHdr+5]); memcpy(&aTo[iData], &aFrom[iData], pBt->usableSize-iData); memcpy(&aTo[iToHdr], &aFrom[iFromHdr], pFrom->cellOffset + 2*pFrom->nCell); /* Reinitialize page pTo so that the contents of the MemPage structure ** match the new data. The initialization of pTo can actually fail under ** fairly obscure circumstances, even though it is a copy of initialized ** page pFrom. */ pTo->isInit = 0; rc = btreeInitPage(pTo); if( rc==SQLITE_OK ) rc = btreeComputeFreeSpace(pTo); if( rc!=SQLITE_OK ){ *pRC = rc; return; } /* If this is an auto-vacuum database, update the pointer-map entries ** for any b-tree or overflow pages that pTo now contains the pointers to. 
*/ if( ISAUTOVACUUM ){ *pRC = setChildPtrmaps(pTo); } } } /* ** This routine redistributes cells on the iParentIdx'th child of pParent ** (hereafter "the page") and up to 2 siblings so that all pages have about the ** same amount of free space. Usually a single sibling on either side of the ** page are used in the balancing, though both siblings might come from one ** side if the page is the first or last child of its parent. If the page ** has fewer than 2 siblings (something which can only happen if the page ** is a root page or a child of a root page) then all available siblings ** participate in the balancing. ** ** The number of siblings of the page might be increased or decreased by ** one or two in an effort to keep pages nearly full but not over full. ** ** Note that when this routine is called, some of the cells on the page ** might not actually be stored in MemPage.aData[]. This can happen ** if the page is overfull. This routine ensures that all cells allocated ** to the page and its siblings fit into MemPage.aData[] before returning. ** ** In the course of balancing the page and its siblings, cells may be ** inserted into or removed from the parent page (pParent). Doing so ** may cause the parent page to become overfull or underfull. If this ** happens, it is the responsibility of the caller to invoke the correct ** balancing routine to fix this problem (see the balance() routine). ** ** If this routine fails for any reason, it might leave the database ** in a corrupted state. So if this routine fails, the database should ** be rolled back. ** ** The third argument to this function, aOvflSpace, is a pointer to a ** buffer big enough to hold one page. If while inserting cells into the parent ** page (pParent) the parent page becomes overfull, this buffer is ** used to store the parent's overflow cells. Because this function inserts ** a maximum of four divider cells into the parent page, and the maximum ** size of a cell stored within an internal node is always less than 1/4 ** of the page-size, the aOvflSpace[] buffer is guaranteed to be large ** enough for all overflow cells. ** ** If aOvflSpace is set to a null pointer, this function returns ** SQLITE_NOMEM. */ static int balance_nonroot( MemPage *pParent, /* Parent page of siblings being balanced */ int iParentIdx, /* Index of "the page" in pParent */ u8 *aOvflSpace, /* page-size bytes of space for parent ovfl */ int isRoot, /* True if pParent is a root-page */ int bBulk /* True if this call is part of a bulk load */ ){ BtShared *pBt; /* The whole database */ int nMaxCells = 0; /* Allocated size of apCell, szCell, aFrom. */ int nNew = 0; /* Number of pages in apNew[] */ int nOld; /* Number of pages in apOld[] */ int i, j, k; /* Loop counters */ int nxDiv; /* Next divider slot in pParent->aCell[] */ int rc = SQLITE_OK; /* The return code */ u16 leafCorrection; /* 4 if pPage is a leaf. 
0 if not */ int leafData; /* True if pPage is a leaf of a LEAFDATA tree */ int usableSpace; /* Bytes in pPage beyond the header */ int pageFlags; /* Value of pPage->aData[0] */ int iSpace1 = 0; /* First unused byte of aSpace1[] */ int iOvflSpace = 0; /* First unused byte of aOvflSpace[] */ int szScratch; /* Size of scratch memory requested */ MemPage *apOld[NB]; /* pPage and up to two siblings */ MemPage *apNew[NB+2]; /* pPage and up to NB siblings after balancing */ u8 *pRight; /* Location in parent of right-sibling pointer */ u8 *apDiv[NB-1]; /* Divider cells in pParent */ int cntNew[NB+2]; /* Index in b.paCell[] of cell after i-th page */ int cntOld[NB+2]; /* Old index in b.apCell[] */ int szNew[NB+2]; /* Combined size of cells placed on i-th page */ u8 *aSpace1; /* Space for copies of dividers cells */ Pgno pgno; /* Temp var to store a page number in */ u8 abDone[NB+2]; /* True after i'th new page is populated */ Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */ Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */ u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */ CellArray b; /* Parsed information on cells being balanced */ memset(abDone, 0, sizeof(abDone)); b.nCell = 0; b.apCell = 0; pBt = pParent->pBt; assert( sqlite3_mutex_held(pBt->mutex) ); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); /* At this point pParent may have at most one overflow cell. And if ** this overflow cell is present, it must be the cell with ** index iParentIdx. This scenario comes about when this function ** is called (indirectly) from sqlite3BtreeDelete(). */ assert( pParent->nOverflow==0 || pParent->nOverflow==1 ); assert( pParent->nOverflow==0 || pParent->aiOvfl[0]==iParentIdx ); if( !aOvflSpace ){ return SQLITE_NOMEM_BKPT; } assert( pParent->nFree>=0 ); /* Find the sibling pages to balance. Also locate the cells in pParent ** that divide the siblings. An attempt is made to find NN siblings on ** either side of pPage. More siblings are taken from one side, however, ** if there are fewer than NN siblings on the other side. If pParent ** has NB or fewer children then all children of pParent are taken. ** ** This loop also drops the divider cells from the parent page. This ** way, the remainder of the function does not have to deal with any ** overflow cells in the parent page, since if any existed they will ** have already been removed. */ i = pParent->nOverflow + pParent->nCell; if( i<2 ){ nxDiv = 0; }else{ assert( bBulk==0 || bBulk==1 ); if( iParentIdx==0 ){ nxDiv = 0; }else if( iParentIdx==i ){ nxDiv = i-2+bBulk; }else{ nxDiv = iParentIdx-1; } i = 2-bBulk; } nOld = i+1; if( (i+nxDiv-pParent->nOverflow)==pParent->nCell ){ pRight = &pParent->aData[pParent->hdrOffset+8]; }else{ pRight = findCell(pParent, i+nxDiv-pParent->nOverflow); } pgno = get4byte(pRight); while( 1 ){ rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); if( rc ){ memset(apOld, 0, (i+1)*sizeof(MemPage*)); goto balance_cleanup; } if( apOld[i]->nFree<0 ){ rc = btreeComputeFreeSpace(apOld[i]); if( rc ){ memset(apOld, 0, (i)*sizeof(MemPage*)); goto balance_cleanup; } } if( (i--)==0 ) break; if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){ apDiv[i] = pParent->apOvfl[0]; pgno = get4byte(apDiv[i]); szNew[i] = pParent->xCellSize(pParent, apDiv[i]); pParent->nOverflow = 0; }else{ apDiv[i] = findCell(pParent, i+nxDiv-pParent->nOverflow); pgno = get4byte(apDiv[i]); szNew[i] = pParent->xCellSize(pParent, apDiv[i]); /* Drop the cell from the parent page. 
apDiv[i] still points to ** the cell within the parent, even though it has been dropped. ** This is safe because dropping a cell only overwrites the first ** four bytes of it, and this function does not need the first ** four bytes of the divider cell. So the pointer is safe to use ** later on. ** ** But not if we are in secure-delete mode. In secure-delete mode, ** the dropCell() routine will overwrite the entire cell with zeroes. ** In this case, temporarily copy the cell into the aOvflSpace[] ** buffer. It will be copied out again as soon as the aSpace[] buffer ** is allocated. */ if( pBt->btsFlags & BTS_FAST_SECURE ){ int iOff; iOff = SQLITE_PTR_TO_INT(apDiv[i]) - SQLITE_PTR_TO_INT(pParent->aData); if( (iOff+szNew[i])>(int)pBt->usableSize ){ rc = SQLITE_CORRUPT_BKPT; memset(apOld, 0, (i+1)*sizeof(MemPage*)); goto balance_cleanup; }else{ memcpy(&aOvflSpace[iOff], apDiv[i], szNew[i]); apDiv[i] = &aOvflSpace[apDiv[i]-pParent->aData]; } } dropCell(pParent, i+nxDiv-pParent->nOverflow, szNew[i], &rc); } } /* Make nMaxCells a multiple of 4 in order to preserve 8-byte ** alignment */ nMaxCells = nOld*(MX_CELL(pBt) + ArraySize(pParent->apOvfl)); nMaxCells = (nMaxCells + 3)&~3; /* ** Allocate space for memory structures */ szScratch = nMaxCells*sizeof(u8*) /* b.apCell */ + nMaxCells*sizeof(u16) /* b.szCell */ + pBt->pageSize; /* aSpace1 */ assert( szScratch<=7*(int)pBt->pageSize ); b.apCell = sqlite3StackAllocRaw(0, szScratch ); if( b.apCell==0 ){ rc = SQLITE_NOMEM_BKPT; goto balance_cleanup; } b.szCell = (u16*)&b.apCell[nMaxCells]; aSpace1 = (u8*)&b.szCell[nMaxCells]; assert( EIGHT_BYTE_ALIGNMENT(aSpace1) ); /* ** Load pointers to all cells on sibling pages and the divider cells ** into the local b.apCell[] array. Make copies of the divider cells ** into space obtained from aSpace1[]. The divider cells have already ** been removed from pParent. ** ** If the siblings are on leaf pages, then the child pointers of the ** divider cells are stripped from the cells before they are copied ** into aSpace1[]. In this way, all cells in b.apCell[] are without ** child pointers. If siblings are not leaves, then all cell in ** b.apCell[] include child pointers. Either way, all cells in b.apCell[] ** are alike. ** ** leafCorrection: 4 if pPage is a leaf. 0 if pPage is not a leaf. ** leafData: 1 if pPage holds key+data and pParent holds only keys. */ b.pRef = apOld[0]; leafCorrection = b.pRef->leaf*4; leafData = b.pRef->intKeyLeaf; for(i=0; i<nOld; i++){ MemPage *pOld = apOld[i]; int limit = pOld->nCell; u8 *aData = pOld->aData; u16 maskPage = pOld->maskPage; u8 *piCell = aData + pOld->cellOffset; u8 *piEnd; VVA_ONLY( int nCellAtStart = b.nCell; ) /* Verify that all sibling pages are of the same "type" (table-leaf, ** table-interior, index-leaf, or index-interior). */ if( pOld->aData[0]!=apOld[0]->aData[0] ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } /* Load b.apCell[] with pointers to all cells in pOld. If pOld ** contains overflow cells, include them in the b.apCell[] array ** in the correct spot. ** ** Note that when there are multiple overflow cells, it is always the ** case that they are sequential and adjacent. This invariant arises ** because multiple overflows can only occurs when inserting divider ** cells into a parent on a prior balance, and divider cells are always ** adjacent and are inserted in order. There is an assert() tagged ** with "NOTE 1" in the overflow cell insertion loop to prove this ** invariant. ** ** This must be done in advance. 
Once the balance starts, the cell ** offset section of the btree page will be overwritten and we will no ** long be able to find the cells if a pointer to each cell is not saved ** first. */ memset(&b.szCell[b.nCell], 0, sizeof(b.szCell[0])*(limit+pOld->nOverflow)); if( pOld->nOverflow>0 ){ if( limit<pOld->aiOvfl[0] ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } limit = pOld->aiOvfl[0]; for(j=0; j<limit; j++){ b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell)); piCell += 2; b.nCell++; } for(k=0; k<pOld->nOverflow; k++){ assert( k==0 || pOld->aiOvfl[k-1]+1==pOld->aiOvfl[k] );/* NOTE 1 */ b.apCell[b.nCell] = pOld->apOvfl[k]; b.nCell++; } } piEnd = aData + pOld->cellOffset + 2*pOld->nCell; while( piCell<piEnd ){ assert( b.nCell<nMaxCells ); b.apCell[b.nCell] = aData + (maskPage & get2byteAligned(piCell)); piCell += 2; b.nCell++; } assert( (b.nCell-nCellAtStart)==(pOld->nCell+pOld->nOverflow) ); cntOld[i] = b.nCell; if( i<nOld-1 && !leafData){ u16 sz = (u16)szNew[i]; u8 *pTemp; assert( b.nCell<nMaxCells ); b.szCell[b.nCell] = sz; pTemp = &aSpace1[iSpace1]; iSpace1 += sz; assert( sz<=pBt->maxLocal+23 ); assert( iSpace1 <= (int)pBt->pageSize ); memcpy(pTemp, apDiv[i], sz); b.apCell[b.nCell] = pTemp+leafCorrection; assert( leafCorrection==0 || leafCorrection==4 ); b.szCell[b.nCell] = b.szCell[b.nCell] - leafCorrection; if( !pOld->leaf ){ assert( leafCorrection==0 ); assert( pOld->hdrOffset==0 ); /* The right pointer of the child page pOld becomes the left ** pointer of the divider cell */ memcpy(b.apCell[b.nCell], &pOld->aData[8], 4); }else{ assert( leafCorrection==4 ); while( b.szCell[b.nCell]<4 ){ /* Do not allow any cells smaller than 4 bytes. If a smaller cell ** does exist, pad it with 0x00 bytes. */ assert( b.szCell[b.nCell]==3 || CORRUPT_DB ); assert( b.apCell[b.nCell]==&aSpace1[iSpace1-3] || CORRUPT_DB ); aSpace1[iSpace1++] = 0x00; b.szCell[b.nCell]++; } } b.nCell++; } } /* ** Figure out the number of pages needed to hold all b.nCell cells. ** Store this number in "k". Also compute szNew[] which is the total ** size of all cells on the i-th page and cntNew[] which is the index ** in b.apCell[] of the cell that divides page i from page i+1. ** cntNew[k] should equal b.nCell. ** ** Values computed by this block: ** ** k: The total number of sibling pages ** szNew[i]: Spaced used on the i-th sibling page. ** cntNew[i]: Index in b.apCell[] and b.szCell[] for the first cell to ** the right of the i-th sibling page. ** usableSpace: Number of bytes of space available on each sibling. 
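**
** (Illustrative arithmetic, not part of the upstream comment: with a
** usable page size of 4096 bytes this gives 4096-12+4 = 4088 bytes per
** leaf sibling and 4096-12 = 4084 bytes per interior sibling, since
** leafCorrection is 4 for leaves and 0 otherwise.)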
** */ usableSpace = pBt->usableSize - 12 + leafCorrection; for(i=k=0; i<nOld; i++, k++){ MemPage *p = apOld[i]; b.apEnd[k] = p->aDataEnd; b.ixNx[k] = cntOld[i]; if( k && b.ixNx[k]==b.ixNx[k-1] ){ k--; /* Omit b.ixNx[] entry for child pages with no cells */ } if( !leafData ){ k++; b.apEnd[k] = pParent->aDataEnd; b.ixNx[k] = cntOld[i]+1; } assert( p->nFree>=0 ); szNew[i] = usableSpace - p->nFree; for(j=0; j<p->nOverflow; j++){ szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]); } cntNew[i] = cntOld[i]; } k = nOld; for(i=0; i<k; i++){ int sz; while( szNew[i]>usableSpace ){ if( i+1>=k ){ k = i+2; if( k>NB+2 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } szNew[k-1] = 0; cntNew[k-1] = b.nCell; } sz = 2 + cachedCellSize(&b, cntNew[i]-1); szNew[i] -= sz; if( !leafData ){ if( cntNew[i]<b.nCell ){ sz = 2 + cachedCellSize(&b, cntNew[i]); }else{ sz = 0; } } szNew[i+1] += sz; cntNew[i]--; } while( cntNew[i]<b.nCell ){ sz = 2 + cachedCellSize(&b, cntNew[i]); if( szNew[i]+sz>usableSpace ) break; szNew[i] += sz; cntNew[i]++; if( !leafData ){ if( cntNew[i]<b.nCell ){ sz = 2 + cachedCellSize(&b, cntNew[i]); }else{ sz = 0; } } szNew[i+1] -= sz; } if( cntNew[i]>=b.nCell ){ k = i+1; }else if( cntNew[i] <= (i>0 ? cntNew[i-1] : 0) ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } } /* ** The packing computed by the previous block is biased toward the siblings ** on the left side (siblings with smaller keys). The left siblings are ** always nearly full, while the right-most sibling might be nearly empty. ** The next block of code attempts to adjust the packing of siblings to ** get a better balance. ** ** This adjustment is more than an optimization. The packing above might ** be so out of balance as to be illegal. For example, the right-most ** sibling might be completely empty. This adjustment is not optional. */ for(i=k-1; i>0; i--){ int szRight = szNew[i]; /* Size of sibling on the right */ int szLeft = szNew[i-1]; /* Size of sibling on the left */ int r; /* Index of right-most cell in left sibling */ int d; /* Index of first cell to the left of right sibling */ r = cntNew[i-1] - 1; d = r + 1 - leafData; (void)cachedCellSize(&b, d); do{ assert( d<nMaxCells ); assert( r<nMaxCells ); (void)cachedCellSize(&b, r); if( szRight!=0 && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+(i==k-1?0:2)))){ break; } szRight += b.szCell[d] + 2; szLeft -= b.szCell[r] + 2; cntNew[i-1] = r; r--; d--; }while( r>=0 ); szNew[i] = szRight; szNew[i-1] = szLeft; if( cntNew[i-1] <= (i>1 ? cntNew[i-2] : 0) ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } } /* Sanity check: For a non-corrupt database file one of the follwing ** must be true: ** (1) We found one or more cells (cntNew[0])>0), or ** (2) pPage is a virtual root page. A virtual root page is when ** the real root page is page 1 and we are the only child of ** that page. */ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB); TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n", apOld[0]->pgno, apOld[0]->nCell, nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0, nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0 )); /* ** Allocate k new pages. Reuse old pages where possible. */ pageFlags = apOld[0]->aData[0]; for(i=0; i<k; i++){ MemPage *pNew; if( i<nOld ){ pNew = apNew[i] = apOld[i]; apOld[i] = 0; rc = sqlite3PagerWrite(pNew->pDbPage); nNew++; if( rc ) goto balance_cleanup; }else{ assert( i>0 ); rc = allocateBtreePage(pBt, &pNew, &pgno, (bBulk ? 
1 : pgno), 0); if( rc ) goto balance_cleanup; zeroPage(pNew, pageFlags); apNew[i] = pNew; nNew++; cntOld[i] = b.nCell; /* Set the pointer-map entry for the new sibling page. */ if( ISAUTOVACUUM ){ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); if( rc!=SQLITE_OK ){ goto balance_cleanup; } } } } /* ** Reassign page numbers so that the new pages are in ascending order. ** This helps to keep entries in the disk file in order so that a scan ** of the table is closer to a linear scan through the file. That in turn ** helps the operating system to deliver pages from the disk more rapidly. ** ** An O(n^2) insertion sort algorithm is used, but since n is never more ** than (NB+2) (a small constant), that should not be a problem. ** ** When NB==3, this one optimization makes the database about 25% faster ** for large insertions and deletions. */ for(i=0; i<nNew; i++){ aPgOrder[i] = aPgno[i] = apNew[i]->pgno; aPgFlags[i] = apNew[i]->pDbPage->flags; for(j=0; j<i; j++){ if( aPgno[j]==aPgno[i] ){ /* This branch is taken if the set of sibling pages somehow contains ** duplicate entries. This can happen if the database is corrupt. ** It would be simpler to detect this as part of the loop below, but ** we do the detection here in order to avoid populating the pager ** cache with two separate objects associated with the same ** page number. */ assert( CORRUPT_DB ); rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; } } } for(i=0; i<nNew; i++){ int iBest = 0; /* aPgno[] index of page number to use */ for(j=1; j<nNew; j++){ if( aPgOrder[j]<aPgOrder[iBest] ) iBest = j; } pgno = aPgOrder[iBest]; aPgOrder[iBest] = 0xffffffff; if( iBest!=i ){ if( iBest>i ){ sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0); } sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]); apNew[i]->pgno = pgno; } } TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) " "%d(%d nc=%d) %d(%d nc=%d)\n", apNew[0]->pgno, szNew[0], cntNew[0], nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0, nNew>=3 ? apNew[2]->pgno : 0, nNew>=3 ? szNew[2] : 0, nNew>=3 ? cntNew[2] - cntNew[1] - !leafData : 0, nNew>=4 ? apNew[3]->pgno : 0, nNew>=4 ? szNew[3] : 0, nNew>=4 ? cntNew[3] - cntNew[2] - !leafData : 0, nNew>=5 ? apNew[4]->pgno : 0, nNew>=5 ? szNew[4] : 0, nNew>=5 ? cntNew[4] - cntNew[3] - !leafData : 0 )); assert( sqlite3PagerIswriteable(pParent->pDbPage) ); put4byte(pRight, apNew[nNew-1]->pgno); /* If the sibling pages are not leaves, ensure that the right-child pointer ** of the right-most new sibling page is set to the value that was ** originally in the same field of the right-most old sibling page. */ if( (pageFlags & PTF_LEAF)==0 && nOld!=nNew ){ MemPage *pOld = (nNew>nOld ? apNew : apOld)[nOld-1]; memcpy(&apNew[nNew-1]->aData[8], &pOld->aData[8], 4); } /* Make any required updates to pointer map entries associated with ** cells stored on sibling pages following the balance operation. Pointer ** map entries associated with divider cells are set by the insertCell() ** routine. The associated pointer map entries are: ** ** a) if the cell contains a reference to an overflow chain, the ** entry associated with the first page in the overflow chain, and ** ** b) if the sibling pages are not leaves, the child page associated ** with the cell. ** ** If the sibling pages are not leaves, then the pointer map entry ** associated with the right-child of each sibling may also need to be ** updated. 
This happens below, after the sibling pages have been ** populated, not here. */ if( ISAUTOVACUUM ){ MemPage *pOld; MemPage *pNew = pOld = apNew[0]; int cntOldNext = pNew->nCell + pNew->nOverflow; int iNew = 0; int iOld = 0; for(i=0; i<b.nCell; i++){ u8 *pCell = b.apCell[i]; while( i==cntOldNext ){ iOld++; assert( iOld<nNew || iOld<nOld ); assert( iOld>=0 && iOld<NB ); pOld = iOld<nNew ? apNew[iOld] : apOld[iOld]; cntOldNext += pOld->nCell + pOld->nOverflow + !leafData; } if( i==cntNew[iNew] ){ pNew = apNew[++iNew]; if( !leafData ) continue; } /* Cell pCell is destined for new sibling page pNew. Originally, it ** was either part of sibling page iOld (possibly an overflow cell), ** or else the divider cell to the left of sibling page iOld. So, ** if sibling page iOld had the same page number as pNew, and if ** pCell really was a part of sibling page iOld (not a divider or ** overflow cell), we can skip updating the pointer map entries. */ if( iOld>=nNew || pNew->pgno!=aPgno[iOld] || !SQLITE_WITHIN(pCell,pOld->aData,pOld->aDataEnd) ){ if( !leafCorrection ){ ptrmapPut(pBt, get4byte(pCell), PTRMAP_BTREE, pNew->pgno, &rc); } if( cachedCellSize(&b,i)>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pOld, pCell, &rc); } if( rc ) goto balance_cleanup; } } } /* Insert new divider cells into pParent. */ for(i=0; i<nNew-1; i++){ u8 *pCell; u8 *pTemp; int sz; MemPage *pNew = apNew[i]; j = cntNew[i]; assert( j<nMaxCells ); assert( b.apCell[j]!=0 ); pCell = b.apCell[j]; sz = b.szCell[j] + leafCorrection; pTemp = &aOvflSpace[iOvflSpace]; if( !pNew->leaf ){ memcpy(&pNew->aData[8], pCell, 4); }else if( leafData ){ /* If the tree is a leaf-data tree, and the siblings are leaves, ** then there is no divider cell in b.apCell[]. Instead, the divider ** cell consists of the integer key for the right-most cell of ** the sibling-page assembled above only. */ CellInfo info; j--; pNew->xParseCell(pNew, b.apCell[j], &info); pCell = pTemp; sz = 4 + putVarint(&pCell[4], info.nKey); pTemp = 0; }else{ pCell -= 4; /* Obscure case for non-leaf-data trees: If the cell at pCell was ** previously stored on a leaf node, and its reported size was 4 ** bytes, then it may actually be smaller than this ** (see btreeParseCellPtr(), 4 bytes is the minimum size of ** any cell). But it is important to pass the correct size to ** insertCell(), so reparse the cell now. ** ** This can only happen for b-trees used to evaluate "IN (SELECT ...)" ** and WITHOUT ROWID tables with exactly one column which is the ** primary key. */ if( b.szCell[j]==4 ){ assert(leafCorrection==4); sz = pParent->xCellSize(pParent, pCell); } } iOvflSpace += sz; assert( sz<=pBt->maxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc); if( rc!=SQLITE_OK ) goto balance_cleanup; assert( sqlite3PagerIswriteable(pParent->pDbPage) ); } /* Now update the actual sibling pages. The order in which they are updated ** is important, as this code needs to avoid disrupting any page from which ** cells may still to be read. In practice, this means: ** ** (1) If cells are moving left (from apNew[iPg] to apNew[iPg-1]) ** then it is not safe to update page apNew[iPg] until after ** the left-hand sibling apNew[iPg-1] has been updated. ** ** (2) If cells are moving right (from apNew[iPg] to apNew[iPg+1]) ** then it is not safe to update page apNew[iPg] until after ** the right-hand sibling apNew[iPg+1] has been updated. ** ** If neither of the above apply, the page is safe to update. 
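**
** For example, when nNew==3 the loop below visits iPg in the order
** 2, 1, 0, 1, 2: the downward pass visits 2, 1 and 0, and the upward
** pass revisits 1 and 2 to pick up any page skipped on the way down.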
** ** The iPg value in the following loop starts at nNew-1 goes down ** to 0, then back up to nNew-1 again, thus making two passes over ** the pages. On the initial downward pass, only condition (1) above ** needs to be tested because (2) will always be true from the previous ** step. On the upward pass, both conditions are always true, so the ** upwards pass simply processes pages that were missed on the downward ** pass. */ for(i=1-nNew; i<nNew; i++){ int iPg = i<0 ? -i : i; assert( iPg>=0 && iPg<nNew ); if( abDone[iPg] ) continue; /* Skip pages already processed */ if( i>=0 /* On the upwards pass, or... */ || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */ ){ int iNew; int iOld; int nNewCell; /* Verify condition (1): If cells are moving left, update iPg ** only after iPg-1 has already been updated. */ assert( iPg==0 || cntOld[iPg-1]>=cntNew[iPg-1] || abDone[iPg-1] ); /* Verify condition (2): If cells are moving right, update iPg ** only after iPg+1 has already been updated. */ assert( cntNew[iPg]>=cntOld[iPg] || abDone[iPg+1] ); if( iPg==0 ){ iNew = iOld = 0; nNewCell = cntNew[0]; }else{ iOld = iPg<nOld ? (cntOld[iPg-1] + !leafData) : b.nCell; iNew = cntNew[iPg-1] + !leafData; nNewCell = cntNew[iPg] - iNew; } rc = editPage(apNew[iPg], iOld, iNew, nNewCell, &b); if( rc ) goto balance_cleanup; abDone[iPg]++; apNew[iPg]->nFree = usableSpace-szNew[iPg]; assert( apNew[iPg]->nOverflow==0 ); assert( apNew[iPg]->nCell==nNewCell ); } } /* All pages have been processed exactly once */ assert( memcmp(abDone, "\01\01\01\01\01", nNew)==0 ); assert( nOld>0 ); assert( nNew>0 ); if( isRoot && pParent->nCell==0 && pParent->hdrOffset<=apNew[0]->nFree ){ /* The root page of the b-tree now contains no cells. The only sibling ** page is the right-child of the parent. Copy the contents of the ** child page into the parent, decreasing the overall height of the ** b-tree structure by one. This is described as the "balance-shallower" ** sub-algorithm in some documentation. ** ** If this is an auto-vacuum database, the call to copyNodeContent() ** sets all pointer-map entries corresponding to database image pages ** for which the pointer is stored within the content being copied. ** ** It is critical that the child page be defragmented before being ** copied into the parent, because if the parent is page 1 then it will ** by smaller than the child due to the database header, and so all the ** free space needs to be up front. */ assert( nNew==1 || CORRUPT_DB ); rc = defragmentPage(apNew[0], -1); testcase( rc!=SQLITE_OK ); assert( apNew[0]->nFree == (get2byteNotZero(&apNew[0]->aData[5]) - apNew[0]->cellOffset - apNew[0]->nCell*2) || rc!=SQLITE_OK ); copyNodeContent(apNew[0], pParent, &rc); freePage(apNew[0], &rc); }else if( ISAUTOVACUUM && !leafCorrection ){ /* Fix the pointer map entries associated with the right-child of each ** sibling page. All other pointer map entries have already been taken ** care of. */ for(i=0; i<nNew; i++){ u32 key = get4byte(&apNew[i]->aData[8]); ptrmapPut(pBt, key, PTRMAP_BTREE, apNew[i]->pgno, &rc); } } assert( pParent->isInit ); TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", nOld, nNew, b.nCell)); /* Free any old pages that were not reused as new pages. */ for(i=nNew; i<nOld; i++){ freePage(apOld[i], &rc); } #if 0 if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){ /* The ptrmapCheckPages() contains assert() statements that verify that ** all pointer map pages are set correctly. This is helpful while ** debugging. 
This is usually disabled because a corrupt database may ** cause an assert() statement to fail. */ ptrmapCheckPages(apNew, nNew); ptrmapCheckPages(&pParent, 1); } #endif /* ** Cleanup before returning. */ balance_cleanup: sqlite3StackFree(0, b.apCell); for(i=0; i<nOld; i++){ releasePage(apOld[i]); } for(i=0; i<nNew; i++){ releasePage(apNew[i]); } return rc; } /* ** This function is called when the root page of a b-tree structure is ** overfull (has one or more overflow pages). ** ** A new child page is allocated and the contents of the current root ** page, including overflow cells, are copied into the child. The root ** page is then overwritten to make it an empty page with the right-child ** pointer pointing to the new page. ** ** Before returning, all pointer-map entries corresponding to pages ** that the new child-page now contains pointers to are updated. The ** entry corresponding to the new right-child pointer of the root ** page is also updated. ** ** If successful, *ppChild is set to contain a reference to the child ** page and SQLITE_OK is returned. In this case the caller is required ** to call releasePage() on *ppChild exactly once. If an error occurs, ** an error code is returned and *ppChild is set to 0. */ static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ int rc; /* Return value from subprocedures */ MemPage *pChild = 0; /* Pointer to a new child page */ Pgno pgnoChild = 0; /* Page number of the new child page */ BtShared *pBt = pRoot->pBt; /* The BTree */ assert( pRoot->nOverflow>0 ); assert( sqlite3_mutex_held(pBt->mutex) ); /* Make pRoot, the root page of the b-tree, writable. Allocate a new ** page that will become the new right-child of pPage. Copy the contents ** of the node stored on pRoot into the new child page. */ rc = sqlite3PagerWrite(pRoot->pDbPage); if( rc==SQLITE_OK ){ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); copyNodeContent(pRoot, pChild, &rc); if( ISAUTOVACUUM ){ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); } } if( rc ){ *ppChild = 0; releasePage(pChild); return rc; } assert( sqlite3PagerIswriteable(pChild->pDbPage) ); assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); assert( pChild->nCell==pRoot->nCell || CORRUPT_DB ); TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); /* Copy the overflow cells from pRoot to pChild */ memcpy(pChild->aiOvfl, pRoot->aiOvfl, pRoot->nOverflow*sizeof(pRoot->aiOvfl[0])); memcpy(pChild->apOvfl, pRoot->apOvfl, pRoot->nOverflow*sizeof(pRoot->apOvfl[0])); pChild->nOverflow = pRoot->nOverflow; /* Zero the contents of pRoot. Then install pChild as the right-child. */ zeroPage(pRoot, pChild->aData[0] & ~PTF_LEAF); put4byte(&pRoot->aData[pRoot->hdrOffset+8], pgnoChild); *ppChild = pChild; return SQLITE_OK; } /* ** The page that pCur currently points to has just been modified in ** some way. This function figures out if this modification means the ** tree needs to be balanced, and if so calls the appropriate balancing ** routine. 
Balancing routines are: ** ** balance_quick() ** balance_deeper() ** balance_nonroot() */ static int balance(BtCursor *pCur){ int rc = SQLITE_OK; const int nMin = pCur->pBt->usableSize * 2 / 3; u8 aBalanceQuickSpace[13]; u8 *pFree = 0; VVA_ONLY( int balance_quick_called = 0 ); VVA_ONLY( int balance_deeper_called = 0 ); do { int iPage; MemPage *pPage = pCur->pPage; if( NEVER(pPage->nFree<0) && btreeComputeFreeSpace(pPage) ) break; if( pPage->nOverflow==0 && pPage->nFree<=nMin ){ break; }else if( (iPage = pCur->iPage)==0 ){ if( pPage->nOverflow ){ /* The root page of the b-tree is overfull. In this case call the ** balance_deeper() function to create a new child for the root-page ** and copy the current contents of the root-page to it. The ** next iteration of the do-loop will balance the child page. */ assert( balance_deeper_called==0 ); VVA_ONLY( balance_deeper_called++ ); rc = balance_deeper(pPage, &pCur->apPage[1]); if( rc==SQLITE_OK ){ pCur->iPage = 1; pCur->ix = 0; pCur->aiIdx[0] = 0; pCur->apPage[0] = pPage; pCur->pPage = pCur->apPage[1]; assert( pCur->pPage->nOverflow ); } }else{ break; } }else{ MemPage * const pParent = pCur->apPage[iPage-1]; int const iIdx = pCur->aiIdx[iPage-1]; rc = sqlite3PagerWrite(pParent->pDbPage); if( rc==SQLITE_OK && pParent->nFree<0 ){ rc = btreeComputeFreeSpace(pParent); } if( rc==SQLITE_OK ){ #ifndef SQLITE_OMIT_QUICKBALANCE if( pPage->intKeyLeaf && pPage->nOverflow==1 && pPage->aiOvfl[0]==pPage->nCell && pParent->pgno!=1 && pParent->nCell==iIdx ){ /* Call balance_quick() to create a new sibling of pPage on which ** to store the overflow cell. balance_quick() inserts a new cell ** into pParent, which may cause pParent overflow. If this ** happens, the next iteration of the do-loop will balance pParent ** use either balance_nonroot() or balance_deeper(). Until this ** happens, the overflow cell is stored in the aBalanceQuickSpace[] ** buffer. ** ** The purpose of the following assert() is to check that only a ** single call to balance_quick() is made for each call to this ** function. If this were not verified, a subtle bug involving reuse ** of the aBalanceQuickSpace[] might sneak in. */ assert( balance_quick_called==0 ); VVA_ONLY( balance_quick_called++ ); rc = balance_quick(pParent, pPage, aBalanceQuickSpace); }else #endif { /* In this case, call balance_nonroot() to redistribute cells ** between pPage and up to 2 of its sibling pages. This involves ** modifying the contents of pParent, which may cause pParent to ** become overfull or underfull. The next iteration of the do-loop ** will balance the parent page to correct this. ** ** If the parent page becomes overfull, the overflow cell or cells ** are stored in the pSpace buffer allocated immediately below. ** A subsequent iteration of the do-loop will deal with this by ** calling balance_nonroot() (balance_deeper() may be called first, ** but it doesn't deal with overflow cells - just moves them to a ** different page). Once this subsequent call to balance_nonroot() ** has completed, it is safe to release the pSpace buffer used by ** the previous call, as the overflow cell data will have been ** copied either into the body of a database page or into the new ** pSpace buffer passed to the latter call to balance_nonroot(). */ u8 *pSpace = sqlite3PageMalloc(pCur->pBt->pageSize); rc = balance_nonroot(pParent, iIdx, pSpace, iPage==1, pCur->hints&BTREE_BULKLOAD); if( pFree ){ /* If pFree is not NULL, it points to the pSpace buffer used ** by a previous call to balance_nonroot(). 
Its contents are ** now stored either on real database pages or within the ** new pSpace buffer, so it may be safely freed here. */ sqlite3PageFree(pFree); } /* The pSpace buffer will be freed after the next call to ** balance_nonroot(), or just before this function returns, whichever ** comes first. */ pFree = pSpace; } } pPage->nOverflow = 0; /* The next iteration of the do-loop balances the parent page. */ releasePage(pPage); pCur->iPage--; assert( pCur->iPage>=0 ); pCur->pPage = pCur->apPage[pCur->iPage]; } }while( rc==SQLITE_OK ); if( pFree ){ sqlite3PageFree(pFree); } return rc; } /* Overwrite content from pX into pDest. Only do the write if the ** content is different from what is already there. */ static int btreeOverwriteContent( MemPage *pPage, /* MemPage on which writing will occur */ u8 *pDest, /* Pointer to the place to start writing */ const BtreePayload *pX, /* Source of data to write */ int iOffset, /* Offset of first byte to write */ int iAmt /* Number of bytes to be written */ ){ int nData = pX->nData - iOffset; if( nData<=0 ){ /* Overwritting with zeros */ int i; for(i=0; i<iAmt && pDest[i]==0; i++){} if( i<iAmt ){ int rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; memset(pDest + i, 0, iAmt - i); } }else{ if( nData<iAmt ){ /* Mixed read data and zeros at the end. Make a recursive call ** to write the zeros then fall through to write the real data */ int rc = btreeOverwriteContent(pPage, pDest+nData, pX, iOffset+nData, iAmt-nData); if( rc ) return rc; iAmt = nData; } if( memcmp(pDest, ((u8*)pX->pData) + iOffset, iAmt)!=0 ){ int rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; /* In a corrupt database, it is possible for the source and destination ** buffers to overlap. This is harmless since the database is already ** corrupt but it does cause valgrind and ASAN warnings. So use ** memmove(). */ memmove(pDest, ((u8*)pX->pData) + iOffset, iAmt); } } return SQLITE_OK; } /* ** Overwrite the cell that cursor pCur is pointing to with fresh content ** contained in pX. */ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ int iOffset; /* Next byte of pX->pData to write */ int nTotal = pX->nData + pX->nZero; /* Total bytes of to write */ int rc; /* Return code */ MemPage *pPage = pCur->pPage; /* Page being written */ BtShared *pBt; /* Btree */ Pgno ovflPgno; /* Next overflow page to write */ u32 ovflPageSize; /* Size to write on overflow page */ if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd ){ return SQLITE_CORRUPT_BKPT; } /* Overwrite the local portion first */ rc = btreeOverwriteContent(pPage, pCur->info.pPayload, pX, 0, pCur->info.nLocal); if( rc ) return rc; if( pCur->info.nLocal==nTotal ) return SQLITE_OK; /* Now overwrite the overflow pages */ iOffset = pCur->info.nLocal; assert( nTotal>=0 ); assert( iOffset>=0 ); ovflPgno = get4byte(pCur->info.pPayload + iOffset); pBt = pPage->pBt; ovflPageSize = pBt->usableSize - 4; do{ rc = btreeGetPage(pBt, ovflPgno, &pPage, 0); if( rc ) return rc; if( sqlite3PagerPageRefcount(pPage->pDbPage)!=1 ){ rc = SQLITE_CORRUPT_BKPT; }else{ if( iOffset+ovflPageSize<(u32)nTotal ){ ovflPgno = get4byte(pPage->aData); }else{ ovflPageSize = nTotal - iOffset; } rc = btreeOverwriteContent(pPage, pPage->aData+4, pX, iOffset, ovflPageSize); } sqlite3PagerUnref(pPage->pDbPage); if( rc ) return rc; iOffset += ovflPageSize; }while( iOffset<nTotal ); return SQLITE_OK; } /* ** Insert a new record into the BTree. The content of the new record ** is described by the pX object. 
The pCur cursor is used only to ** define what table the record should be inserted into, and is left ** pointing at a random location. ** ** For a table btree (used for rowid tables), only the pX.nKey value of ** the key is used. The pX.pKey value must be NULL. The pX.nKey is the ** rowid or INTEGER PRIMARY KEY of the row. The pX.nData,pData,nZero fields ** hold the content of the row. ** ** For an index btree (used for indexes and WITHOUT ROWID tables), the ** key is an arbitrary byte sequence stored in pX.pKey,nKey. The ** pX.pData,nData,nZero fields must be zero. ** ** If the seekResult parameter is non-zero, then a successful call to ** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already ** been performed. In other words, if seekResult!=0 then the cursor ** is currently pointing to a cell that will be adjacent to the cell ** to be inserted. If seekResult<0 then pCur points to a cell that is ** smaller then (pKey,nKey). If seekResult>0 then pCur points to a cell ** that is larger than (pKey,nKey). ** ** If seekResult==0, that means pCur is pointing at some unknown location. ** In that case, this routine must seek the cursor to the correct insertion ** point for (pKey,nKey) before doing the insertion. For index btrees, ** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked ** key values and pX->aMem can be used instead of pX->pKey to avoid having ** to decode the key. */ int sqlite3BtreeInsert( BtCursor *pCur, /* Insert data into the table of this cursor */ const BtreePayload *pX, /* Content of the row to be inserted */ int flags, /* True if this is likely an append */ int seekResult /* Result of prior MovetoUnpacked() call */ ){ int rc; int loc = seekResult; /* -1: before desired location +1: after */ int szNew = 0; int idx; MemPage *pPage; Btree *p = pCur->pBtree; BtShared *pBt = p->pBt; unsigned char *oldCell; unsigned char *newCell = 0; assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags ); if( pCur->eState==CURSOR_FAULT ){ assert( pCur->skipNext!=SQLITE_OK ); return pCur->skipNext; } assert( cursorOwnsBtShared(pCur) ); assert( (pCur->curFlags & BTCF_WriteFlag)!=0 && pBt->inTransaction==TRANS_WRITE && (pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); /* Assert that the caller has been consistent. If this cursor was opened ** expecting an index b-tree, then the caller should be inserting blob ** keys with no associated data. If the cursor was opened expecting an ** intkey table, the caller should be inserting integer keys with a ** blob of associated data. */ assert( (pX->pKey==0)==(pCur->pKeyInfo==0) ); /* Save the positions of any other cursors open on this table. ** ** In some cases, the call to btreeMoveto() below is a no-op. For ** example, when inserting data into a table with auto-generated integer ** keys, the VDBE layer invokes sqlite3BtreeLast() to figure out the ** integer key to use. It then calls this function to actually insert the ** data into the intkey B-Tree. In this case btreeMoveto() recognizes ** that the cursor is already where it needs to be and returns without ** doing any work. To avoid thwarting these optimizations, it is important ** not to clear the cursor here. 
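**
** (The saveAllCursors() call below names pCur as the cursor to exclude,
** so the position of pCur itself is left untouched here.)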
*/ if( pCur->curFlags & BTCF_Multiple ){ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; } if( pCur->pKeyInfo==0 ){ assert( pX->pKey==0 ); /* If this is an insert into a table b-tree, invalidate any incrblob ** cursors open on the row being replaced */ invalidateIncrblobCursors(p, pCur->pgnoRoot, pX->nKey, 0); /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing ** to a row with the same key as the new entry being inserted. */ #ifdef SQLITE_DEBUG if( flags & BTREE_SAVEPOSITION ){ assert( pCur->curFlags & BTCF_ValidNKey ); assert( pX->nKey==pCur->info.nKey ); assert( pCur->info.nSize!=0 ); assert( loc==0 ); } #endif /* On the other hand, BTREE_SAVEPOSITION==0 does not imply ** that the cursor is not pointing to a row to be overwritten. ** So do a complete check. */ if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey ){ /* The cursor is pointing to the entry that is to be ** overwritten */ assert( pX->nData>=0 && pX->nZero>=0 ); if( pCur->info.nSize!=0 && pCur->info.nPayload==(u32)pX->nData+pX->nZero ){ /* New entry is the same size as the old. Do an overwrite */ return btreeOverwriteCell(pCur, pX); } assert( loc==0 ); }else if( loc==0 ){ /* The cursor is *not* pointing to the cell to be overwritten, nor ** to an adjacent cell. Move the cursor so that it is pointing either ** to the cell to be overwritten or an adjacent cell. */ rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc); if( rc ) return rc; } }else{ /* This is an index or a WITHOUT ROWID table */ /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing ** to a row with the same key as the new entry being inserted. */ assert( (flags & BTREE_SAVEPOSITION)==0 || loc==0 ); /* If the cursor is not already pointing either to the cell to be ** overwritten, or if a new cell is being inserted, if the cursor is ** not pointing to an immediately adjacent cell, then move the cursor ** so that it does. */ if( loc==0 && (flags & BTREE_SAVEPOSITION)==0 ){ if( pX->nMem ){ UnpackedRecord r; r.pKeyInfo = pCur->pKeyInfo; r.aMem = pX->aMem; r.nField = pX->nMem; r.default_rc = 0; r.errCode = 0; r.r1 = 0; r.r2 = 0; r.eqSeen = 0; rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc); }else{ rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc); } if( rc ) return rc; } /* If the cursor is currently pointing to an entry to be overwritten ** and the new content is the same as as the old, then use the ** overwrite optimization. */ if( loc==0 ){ getCellInfo(pCur); if( pCur->info.nKey==pX->nKey ){ BtreePayload x2; x2.pData = pX->pKey; x2.nData = pX->nKey; x2.nZero = 0; return btreeOverwriteCell(pCur, &x2); } } } assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) ); pPage = pCur->pPage; assert( pPage->intKey || pX->nKey>=0 ); assert( pPage->leaf || !pPage->intKey ); if( pPage->nFree<0 ){ rc = btreeComputeFreeSpace(pPage); if( rc ) return rc; } TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno, loc==0 ? 
"overwrite" : "new entry")); assert( pPage->isInit ); newCell = pBt->pTmpSpace; assert( newCell!=0 ); rc = fillInCell(pPage, newCell, pX, &szNew); if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); assert( szNew <= MX_CELL_SIZE(pBt) ); idx = pCur->ix; if( loc==0 ){ CellInfo info; assert( idx<pPage->nCell ); rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ){ goto end_insert; } oldCell = findCell(pPage, idx); if( !pPage->leaf ){ memcpy(newCell, oldCell, 4); } rc = clearCell(pPage, oldCell, &info); if( info.nSize==szNew && info.nLocal==info.nPayload && (!ISAUTOVACUUM || szNew<pPage->minLocal) ){ /* Overwrite the old cell with the new if they are the same size. ** We could also try to do this if the old cell is smaller, then add ** the leftover space to the free list. But experiments show that ** doing that is no faster then skipping this optimization and just ** calling dropCell() and insertCell(). ** ** This optimization cannot be used on an autovacuum database if the ** new entry uses overflow pages, as the insertCell() call below is ** necessary to add the PTRMAP_OVERFLOW1 pointer-map entry. */ assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */ if( oldCell+szNew > pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT; memcpy(oldCell, newCell, szNew); return SQLITE_OK; } dropCell(pPage, idx, info.nSize, &rc); if( rc ) goto end_insert; }else if( loc<0 && pPage->nCell>0 ){ assert( pPage->leaf ); idx = ++pCur->ix; pCur->curFlags &= ~BTCF_ValidNKey; }else{ assert( pPage->leaf ); } insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); assert( pPage->nOverflow==0 || rc==SQLITE_OK ); assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); /* If no error has occurred and pPage has an overflow cell, call balance() ** to redistribute the cells within the tree. Since balance() may move ** the cursor, zero the BtCursor.info.nSize and BTCF_ValidNKey ** variables. ** ** Previous versions of SQLite called moveToRoot() to move the cursor ** back to the root page as balance() used to invalidate the contents ** of BtCursor.apPage[] and BtCursor.aiIdx[]. Instead of doing that, ** set the cursor state to "invalid". This makes common insert operations ** slightly faster. ** ** There is a subtle but important optimization here too. When inserting ** multiple records into an intkey b-tree using a single cursor (as can ** happen while processing an "INSERT INTO ... SELECT" statement), it ** is advantageous to leave the cursor pointing to the last entry in ** the b-tree if possible. If the cursor is left pointing to the last ** entry in the table, and the next row inserted has an integer key ** larger than the largest existing key, it is possible to insert the ** row without seeking the cursor. This can be a big performance boost. */ pCur->info.nSize = 0; if( pPage->nOverflow ){ assert( rc==SQLITE_OK ); pCur->curFlags &= ~(BTCF_ValidNKey); rc = balance(pCur); /* Must make sure nOverflow is reset to zero even if the balance() ** fails. Internal data structure corruption will result otherwise. ** Also, set the cursor state to invalid. This stops saveCursorPosition() ** from trying to save the current position of the cursor. 
*/ pCur->pPage->nOverflow = 0; pCur->eState = CURSOR_INVALID; if( (flags & BTREE_SAVEPOSITION) && rc==SQLITE_OK ){ btreeReleaseAllCursorPages(pCur); if( pCur->pKeyInfo ){ assert( pCur->pKey==0 ); pCur->pKey = sqlite3Malloc( pX->nKey ); if( pCur->pKey==0 ){ rc = SQLITE_NOMEM; }else{ memcpy(pCur->pKey, pX->pKey, pX->nKey); } } pCur->eState = CURSOR_REQUIRESEEK; pCur->nKey = pX->nKey; } } assert( pCur->iPage<0 || pCur->pPage->nOverflow==0 ); end_insert: return rc; } /* ** Delete the entry that the cursor is pointing to. ** ** If the BTREE_SAVEPOSITION bit of the flags parameter is zero, then ** the cursor is left pointing at an arbitrary location after the delete. ** But if that bit is set, then the cursor is left in a state such that ** the next call to BtreeNext() or BtreePrev() moves it to the same row ** as it would have been on if the call to BtreeDelete() had been omitted. ** ** The BTREE_AUXDELETE bit of flags indicates that is one of several deletes ** associated with a single table entry and its indexes. Only one of those ** deletes is considered the "primary" delete. The primary delete occurs ** on a cursor that is not a BTREE_FORDELETE cursor. All but one delete ** operation on non-FORDELETE cursors is tagged with the AUXDELETE flag. ** The BTREE_AUXDELETE bit is a hint that is not used by this implementation, ** but which might be used by alternative storage engines. */ int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ Btree *p = pCur->pBtree; BtShared *pBt = p->pBt; int rc; /* Return code */ MemPage *pPage; /* Page to delete cell from */ unsigned char *pCell; /* Pointer to cell to delete */ int iCellIdx; /* Index of cell to delete */ int iCellDepth; /* Depth of node containing pCell */ CellInfo info; /* Size of the cell being deleted */ int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */ u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */ assert( cursorOwnsBtShared(pCur) ); assert( pBt->inTransaction==TRANS_WRITE ); assert( (pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( pCur->curFlags & BTCF_WriteFlag ); assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); assert( !hasReadConflicts(p, pCur->pgnoRoot) ); assert( (flags & ~(BTREE_SAVEPOSITION | BTREE_AUXDELETE))==0 ); if( pCur->eState==CURSOR_REQUIRESEEK ){ rc = btreeRestoreCursorPosition(pCur); if( rc ) return rc; } assert( pCur->eState==CURSOR_VALID ); iCellDepth = pCur->iPage; iCellIdx = pCur->ix; pPage = pCur->pPage; pCell = findCell(pPage, iCellIdx); if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ) return SQLITE_CORRUPT; /* If the bPreserve flag is set to true, then the cursor position must ** be preserved following this delete operation. If the current delete ** will cause a b-tree rebalance, then this is done by saving the cursor ** key and leaving the cursor in CURSOR_REQUIRESEEK state before ** returning. ** ** Or, if the current delete will not cause a rebalance, then the cursor ** will be left in CURSOR_SKIPNEXT state pointing to the entry immediately ** before or after the deleted entry. In this case set bSkipnext to true. */ if( bPreserve ){ if( !pPage->leaf || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3) || pPage->nCell==1 /* See dbfuzz001.test for a test case */ ){ /* A b-tree rebalance will be required after deleting this entry. ** Save the cursor key. 
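** (The size test above mirrors the two-thirds threshold used by balance():
** the save is needed when removing this cell would leave the page more
** than two-thirds empty, or with no cells at all.)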
*/ rc = saveCursorKey(pCur); if( rc ) return rc; }else{ bSkipnext = 1; } } /* If the page containing the entry to delete is not a leaf page, move ** the cursor to the largest entry in the tree that is smaller than ** the entry being deleted. This cell will replace the cell being deleted ** from the internal node. The 'previous' entry is used for this instead ** of the 'next' entry, as the previous entry is always a part of the ** sub-tree headed by the child page of the cell being deleted. This makes ** balancing the tree following the delete operation easier. */ if( !pPage->leaf ){ rc = sqlite3BtreePrevious(pCur, 0); assert( rc!=SQLITE_DONE ); if( rc ) return rc; } /* Save the positions of any other cursors open on this table before ** making any modifications. */ if( pCur->curFlags & BTCF_Multiple ){ rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; } /* If this is a delete operation to remove a row from a table b-tree, ** invalidate any incrblob cursors open on the row being deleted. */ if( pCur->pKeyInfo==0 ){ invalidateIncrblobCursors(p, pCur->pgnoRoot, pCur->info.nKey, 0); } /* Make the page containing the entry to be deleted writable. Then free any ** overflow pages associated with the entry and finally remove the cell ** itself from within the page. */ rc = sqlite3PagerWrite(pPage->pDbPage); if( rc ) return rc; rc = clearCell(pPage, pCell, &info); dropCell(pPage, iCellIdx, info.nSize, &rc); if( rc ) return rc; /* If the cell deleted was not located on a leaf page, then the cursor ** is currently pointing to the largest entry in the sub-tree headed ** by the child-page of the cell that was just deleted from an internal ** node. The cell from the leaf node needs to be moved to the internal ** node to replace the deleted cell. */ if( !pPage->leaf ){ MemPage *pLeaf = pCur->pPage; int nCell; Pgno n; unsigned char *pTmp; if( pLeaf->nFree<0 ){ rc = btreeComputeFreeSpace(pLeaf); if( rc ) return rc; } if( iCellDepth<pCur->iPage-1 ){ n = pCur->apPage[iCellDepth+1]->pgno; }else{ n = pCur->pPage->pgno; } pCell = findCell(pLeaf, pLeaf->nCell-1); if( pCell<&pLeaf->aData[4] ) return SQLITE_CORRUPT_BKPT; nCell = pLeaf->xCellSize(pLeaf, pCell); assert( MX_CELL_SIZE(pBt) >= nCell ); pTmp = pBt->pTmpSpace; assert( pTmp!=0 ); rc = sqlite3PagerWrite(pLeaf->pDbPage); if( rc==SQLITE_OK ){ insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); } dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); if( rc ) return rc; } /* Balance the tree. If the entry deleted was located on a leaf page, ** then the cursor still points to that page. In this case the first ** call to balance() repairs the tree, and the if(...) condition is ** never true. ** ** Otherwise, if the entry deleted was on an internal node page, then ** pCur is pointing to the leaf page from which a cell was removed to ** replace the cell deleted from the internal node. This is slightly ** tricky as the leaf node may be underfull, and the internal node may ** be either under or overfull. In this case run the balancing algorithm ** on the leaf node first. If the balance proceeds far enough up the ** tree that we can be sure that any problem in the internal node has ** been corrected, so be it. Otherwise, after balancing the leaf node, ** walk the cursor up the tree to the internal node and balance it as ** well. 
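**
** For example, if the deleted cell was on an internal page at depth
** iCellDepth and its replacement came from a leaf two levels further
** down, the first balance() call below runs on that leaf; if pCur->iPage
** is still greater than iCellDepth afterwards, the cursor is walked back
** up to iCellDepth and balance() is run a second time.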
*/
  rc = balance(pCur);
  if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){
    releasePageNotNull(pCur->pPage);
    pCur->iPage--;
    while( pCur->iPage>iCellDepth ){
      releasePage(pCur->apPage[pCur->iPage--]);
    }
    pCur->pPage = pCur->apPage[pCur->iPage];
    rc = balance(pCur);
  }

  if( rc==SQLITE_OK ){
    if( bSkipnext ){
      assert( bPreserve && (pCur->iPage==iCellDepth || CORRUPT_DB) );
      assert( pPage==pCur->pPage || CORRUPT_DB );
      assert( (pPage->nCell>0 || CORRUPT_DB) && iCellIdx<=pPage->nCell );
      pCur->eState = CURSOR_SKIPNEXT;
      if( iCellIdx>=pPage->nCell ){
        pCur->skipNext = -1;
        pCur->ix = pPage->nCell-1;
      }else{
        pCur->skipNext = 1;
      }
    }else{
      rc = moveToRoot(pCur);
      if( bPreserve ){
        btreeReleaseAllCursorPages(pCur);
        pCur->eState = CURSOR_REQUIRESEEK;
      }
      if( rc==SQLITE_EMPTY ) rc = SQLITE_OK;
    }
  }
  return rc;
}

/*
** Create a new BTree table.  Write into *piTable the page
** number for the root page of the new table.
**
** The type of table is determined by the flags parameter.  Only the
** following values of flags are currently in use.  Other values for
** flags might not work:
**
**     BTREE_INTKEY|BTREE_LEAFDATA     Used for SQL tables with rowid keys
**     BTREE_ZERODATA                  Used for SQL indices
*/
static int btreeCreateTable(Btree *p, int *piTable, int createTabFlags){
  BtShared *pBt = p->pBt;
  MemPage *pRoot;
  Pgno pgnoRoot;
  int rc;
  int ptfFlags;   /* Page-type flags for the root page of the new table */

  assert( sqlite3BtreeHoldsMutex(p) );
  assert( pBt->inTransaction==TRANS_WRITE );
  assert( (pBt->btsFlags & BTS_READ_ONLY)==0 );

#ifdef SQLITE_OMIT_AUTOVACUUM
  rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0);
  if( rc ){
    return rc;
  }
#else
  if( pBt->autoVacuum ){
    Pgno pgnoMove;      /* Move a page here to make room for the root-page */
    MemPage *pPageMove; /* The page to move to. */

    /* Creating a new table may require moving an existing database page
    ** to make room for the new table's root page. In case this page turns
    ** out to be an overflow page, delete all overflow page-map caches
    ** held by open cursors. */
    invalidateAllOverflowCache(pBt);

    /* Read the value of meta[3] from the database to determine where the
    ** root page of the new table should go. meta[3] is the largest root-page
    ** created so far, so the new root-page is (meta[3]+1). */
    sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &pgnoRoot);
    pgnoRoot++;

    /* The new root-page may not be allocated on a pointer-map page, or the
    ** PENDING_BYTE page. */
    while( pgnoRoot==PTRMAP_PAGENO(pBt, pgnoRoot) ||
        pgnoRoot==PENDING_BYTE_PAGE(pBt) ){
      pgnoRoot++;
    }
    assert( pgnoRoot>=3 || CORRUPT_DB );
    testcase( pgnoRoot<3 );

    /* Allocate a page. The page that currently resides at pgnoRoot will
    ** be moved to the allocated page (unless the allocated page happens
    ** to reside at pgnoRoot). */
    rc = allocateBtreePage(pBt, &pPageMove, &pgnoMove, pgnoRoot, BTALLOC_EXACT);
    if( rc!=SQLITE_OK ){
      return rc;
    }

    if( pgnoMove!=pgnoRoot ){
      /* pgnoRoot is the page that will be used for the root-page of
      ** the new table (assuming an error did not occur). But we were
      ** allocated pgnoMove. If required (i.e. if it was not allocated
      ** by extending the file), the current page at position pgnoMove
      ** is already journaled. */
      u8 eType = 0;
      Pgno iPtrPage = 0;

      /* Save the positions of any open cursors. This is required in
      ** case they are holding an xFetch reference corresponding to
      ** page pgnoRoot. */
      rc = saveAllCursors(pBt, 0, 0);
      releasePage(pPageMove);
      if( rc!=SQLITE_OK ){
        return rc;
      }

      /* Move the page currently at pgnoRoot to pgnoMove.
*/ rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); if( rc!=SQLITE_OK ){ return rc; } rc = ptrmapGet(pBt, pgnoRoot, &eType, &iPtrPage); if( eType==PTRMAP_ROOTPAGE || eType==PTRMAP_FREEPAGE ){ rc = SQLITE_CORRUPT_BKPT; } if( rc!=SQLITE_OK ){ releasePage(pRoot); return rc; } assert( eType!=PTRMAP_ROOTPAGE ); assert( eType!=PTRMAP_FREEPAGE ); rc = relocatePage(pBt, pRoot, eType, iPtrPage, pgnoMove, 0); releasePage(pRoot); /* Obtain the page at pgnoRoot */ if( rc!=SQLITE_OK ){ return rc; } rc = btreeGetPage(pBt, pgnoRoot, &pRoot, 0); if( rc!=SQLITE_OK ){ return rc; } rc = sqlite3PagerWrite(pRoot->pDbPage); if( rc!=SQLITE_OK ){ releasePage(pRoot); return rc; } }else{ pRoot = pPageMove; } /* Update the pointer-map and meta-data with the new root-page number. */ ptrmapPut(pBt, pgnoRoot, PTRMAP_ROOTPAGE, 0, &rc); if( rc ){ releasePage(pRoot); return rc; } /* When the new root page was allocated, page 1 was made writable in ** order either to increase the database filesize, or to decrement the ** freelist count. Hence, the sqlite3BtreeUpdateMeta() call cannot fail. */ assert( sqlite3PagerIswriteable(pBt->pPage1->pDbPage) ); rc = sqlite3BtreeUpdateMeta(p, 4, pgnoRoot); if( NEVER(rc) ){ releasePage(pRoot); return rc; } }else{ rc = allocateBtreePage(pBt, &pRoot, &pgnoRoot, 1, 0); if( rc ) return rc; } #endif assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); if( createTabFlags & BTREE_INTKEY ){ ptfFlags = PTF_INTKEY | PTF_LEAFDATA | PTF_LEAF; }else{ ptfFlags = PTF_ZERODATA | PTF_LEAF; } zeroPage(pRoot, ptfFlags); sqlite3PagerUnref(pRoot->pDbPage); assert( (pBt->openFlags & BTREE_SINGLE)==0 || pgnoRoot==2 ); *piTable = (int)pgnoRoot; return SQLITE_OK; } int sqlite3BtreeCreateTable(Btree *p, int *piTable, int flags){ int rc; sqlite3BtreeEnter(p); rc = btreeCreateTable(p, piTable, flags); sqlite3BtreeLeave(p); return rc; } /* ** Erase the given database page and all its children. Return ** the page to the freelist. */ static int clearDatabasePage( BtShared *pBt, /* The BTree that contains the table */ Pgno pgno, /* Page number to clear */ int freePageFlag, /* Deallocate page if true */ int *pnChange /* Add number of Cells freed to this counter */ ){ MemPage *pPage; int rc; unsigned char *pCell; int i; int hdr; CellInfo info; assert( sqlite3_mutex_held(pBt->mutex) ); if( pgno>btreePagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); if( rc ) return rc; if( pPage->bBusy ){ rc = SQLITE_CORRUPT_BKPT; goto cleardatabasepage_out; } pPage->bBusy = 1; hdr = pPage->hdrOffset; for(i=0; i<pPage->nCell; i++){ pCell = findCell(pPage, i); if( !pPage->leaf ){ rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange); if( rc ) goto cleardatabasepage_out; } rc = clearCell(pPage, pCell, &info); if( rc ) goto cleardatabasepage_out; } if( !pPage->leaf ){ rc = clearDatabasePage(pBt, get4byte(&pPage->aData[hdr+8]), 1, pnChange); if( rc ) goto cleardatabasepage_out; }else if( pnChange ){ assert( pPage->intKey || CORRUPT_DB ); testcase( !pPage->intKey ); *pnChange += pPage->nCell; } if( freePageFlag ){ freePage(pPage, &rc); }else if( (rc = sqlite3PagerWrite(pPage->pDbPage))==0 ){ zeroPage(pPage, pPage->aData[hdr] | PTF_LEAF); } cleardatabasepage_out: pPage->bBusy = 0; releasePage(pPage); return rc; } /* ** Delete all information from a single table in the database. iTable is ** the page number of the root of the table. After this routine returns, ** the root page is empty, but still exists. ** ** This routine will fail with SQLITE_LOCKED if there are any open ** read cursors on the table. 
** Open write cursors are moved to the root of the table.
**
** If pnChange is not NULL, then table iTable must be an intkey table. The
** integer value pointed to by pnChange is incremented by the number of
** entries in the table.
*/
int sqlite3BtreeClearTable(Btree *p, int iTable, int *pnChange){
  int rc;
  BtShared *pBt = p->pBt;
  sqlite3BtreeEnter(p);
  assert( p->inTrans==TRANS_WRITE );

  rc = saveAllCursors(pBt, (Pgno)iTable, 0);

  if( SQLITE_OK==rc ){
    /* Invalidate all incrblob cursors open on table iTable (assuming iTable
    ** is the root of a table b-tree - if it is not, the following call is
    ** a no-op). */
    invalidateIncrblobCursors(p, (Pgno)iTable, 0, 1);
    rc = clearDatabasePage(pBt, (Pgno)iTable, 0, pnChange);
  }
  sqlite3BtreeLeave(p);
  return rc;
}

/*
** Delete all information from the single table that pCur is open on.
**
** This routine only works for pCur on an ephemeral table.
*/
int sqlite3BtreeClearTableOfCursor(BtCursor *pCur){
  return sqlite3BtreeClearTable(pCur->pBtree, pCur->pgnoRoot, 0);
}

/*
** Erase all information in a table and add the root of the table to
** the freelist.  Except, the root of the principal table (the one on
** page 1) is never added to the freelist.
**
** This routine will fail with SQLITE_LOCKED if there are any open
** cursors on the table.
**
** If AUTOVACUUM is enabled and the page at iTable is not the last
** root page in the database file, then the last root page
** in the database file is moved into the slot formerly occupied by
** iTable, and the slot formerly occupied by that last root page
** is added to the freelist instead of iTable.  In this way, all
** root pages are kept at the beginning of the database file, which
** is necessary for AUTOVACUUM to work right.  *piMoved is set to the
** page number that used to be the last root page in the file before
** the move.  If no page gets moved, *piMoved is set to 0.
** The last root page is recorded in meta[3] and the value of
** meta[3] is updated by this procedure.
*/
static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
  int rc;
  MemPage *pPage = 0;
  BtShared *pBt = p->pBt;

  assert( sqlite3BtreeHoldsMutex(p) );
  assert( p->inTrans==TRANS_WRITE );
  assert( iTable>=2 );
  if( iTable>btreePagecount(pBt) ){
    return SQLITE_CORRUPT_BKPT;
  }

  rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
  if( rc ) return rc;
  rc = sqlite3BtreeClearTable(p, iTable, 0);
  if( rc ){
    releasePage(pPage);
    return rc;
  }

  *piMoved = 0;

#ifdef SQLITE_OMIT_AUTOVACUUM
  freePage(pPage, &rc);
  releasePage(pPage);
#else
  if( pBt->autoVacuum ){
    Pgno maxRootPgno;
    sqlite3BtreeGetMeta(p, BTREE_LARGEST_ROOT_PAGE, &maxRootPgno);

    if( iTable==maxRootPgno ){
      /* If the table being dropped is the table with the largest root-page
      ** number in the database, put the root page on the free list. */
      freePage(pPage, &rc);
      releasePage(pPage);
      if( rc!=SQLITE_OK ){
        return rc;
      }
    }else{
      /* The table being dropped does not have the largest root-page
      ** number in the database. So move the page that does into the
      ** gap left by the deleted root-page. */
      MemPage *pMove;
      releasePage(pPage);
      rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
      if( rc!=SQLITE_OK ){
        return rc;
      }
      rc = relocatePage(pBt, pMove, PTRMAP_ROOTPAGE, 0, iTable, 0);
      releasePage(pMove);
      if( rc!=SQLITE_OK ){
        return rc;
      }
      pMove = 0;
      rc = btreeGetPage(pBt, maxRootPgno, &pMove, 0);
      freePage(pMove, &rc);
      releasePage(pMove);
      if( rc!=SQLITE_OK ){
        return rc;
      }
      *piMoved = maxRootPgno;
    }

    /* Set the new 'max-root-page' value in the database header.
This ** is the old value less one, less one more if that happens to ** be a root-page number, less one again if that is the ** PENDING_BYTE_PAGE. */ maxRootPgno--; while( maxRootPgno==PENDING_BYTE_PAGE(pBt) || PTRMAP_ISPAGE(pBt, maxRootPgno) ){ maxRootPgno--; } assert( maxRootPgno!=PENDING_BYTE_PAGE(pBt) ); rc = sqlite3BtreeUpdateMeta(p, 4, maxRootPgno); }else{ freePage(pPage, &rc); releasePage(pPage); } #endif return rc; } int sqlite3BtreeDropTable(Btree *p, int iTable, int *piMoved){ int rc; sqlite3BtreeEnter(p); rc = btreeDropTable(p, iTable, piMoved); sqlite3BtreeLeave(p); return rc; } /* ** This function may only be called if the b-tree connection already ** has a read or write transaction open on the database. ** ** Read the meta-information out of a database file. Meta[0] ** is the number of free pages currently in the database. Meta[1] ** through meta[15] are available for use by higher layers. Meta[0] ** is read-only, the others are read/write. ** ** The schema layer numbers meta values differently. At the schema ** layer (and the SetCookie and ReadCookie opcodes) the number of ** free pages is not visible. So Cookie[0] is the same as Meta[1]. ** ** This routine treats Meta[BTREE_DATA_VERSION] as a special case. Instead ** of reading the value out of the header, it instead loads the "DataVersion" ** from the pager. The BTREE_DATA_VERSION value is not actually stored in the ** database file. It is a number computed by the pager. But its access ** pattern is the same as header meta values, and so it is convenient to ** read it from this routine. */ void sqlite3BtreeGetMeta(Btree *p, int idx, u32 *pMeta){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); assert( p->inTrans>TRANS_NONE ); assert( SQLITE_OK==querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK) ); assert( pBt->pPage1 ); assert( idx>=0 && idx<=15 ); if( idx==BTREE_DATA_VERSION ){ *pMeta = sqlite3PagerDataVersion(pBt->pPager) + p->iDataVersion; }else{ *pMeta = get4byte(&pBt->pPage1->aData[36 + idx*4]); } /* If auto-vacuum is disabled in this build and this is an auto-vacuum ** database, mark the database as read-only. */ #ifdef SQLITE_OMIT_AUTOVACUUM if( idx==BTREE_LARGEST_ROOT_PAGE && *pMeta>0 ){ pBt->btsFlags |= BTS_READ_ONLY; } #endif sqlite3BtreeLeave(p); } /* ** Write meta-information back into the database. Meta[0] is ** read-only and may not be written. */ int sqlite3BtreeUpdateMeta(Btree *p, int idx, u32 iMeta){ BtShared *pBt = p->pBt; unsigned char *pP1; int rc; assert( idx>=1 && idx<=15 ); sqlite3BtreeEnter(p); assert( p->inTrans==TRANS_WRITE ); assert( pBt->pPage1!=0 ); pP1 = pBt->pPage1->aData; rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); if( rc==SQLITE_OK ){ put4byte(&pP1[36 + idx*4], iMeta); #ifndef SQLITE_OMIT_AUTOVACUUM if( idx==BTREE_INCR_VACUUM ){ assert( pBt->autoVacuum || iMeta==0 ); assert( iMeta==0 || iMeta==1 ); pBt->incrVacuum = (u8)iMeta; } #endif } sqlite3BtreeLeave(p); return rc; } #ifndef SQLITE_OMIT_BTREECOUNT /* ** The first argument, pCur, is a cursor opened on some b-tree. Count the ** number of entries in the b-tree and write the result to *pnEntry. ** ** SQLITE_OK is returned if the operation is successfully executed. ** Otherwise, if an error is encountered (i.e. an IO error or database ** corruption) an SQLite error code is returned. 
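**
** A minimal usage sketch (illustrative only; nRow is a hypothetical local):
**
**     i64 nRow;
**     int rc = sqlite3BtreeCount(pCur, &nRow);
**     if( rc==SQLITE_OK ){
**       .. nRow now holds the number of entries ..
**     }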
*/ int sqlite3BtreeCount(BtCursor *pCur, i64 *pnEntry){ i64 nEntry = 0; /* Value to return in *pnEntry */ int rc; /* Return code */ rc = moveToRoot(pCur); if( rc==SQLITE_EMPTY ){ *pnEntry = 0; return SQLITE_OK; } /* Unless an error occurs, the following loop runs one iteration for each ** page in the B-Tree structure (not including overflow pages). */ while( rc==SQLITE_OK ){ int iIdx; /* Index of child node in parent */ MemPage *pPage; /* Current page of the b-tree */ /* If this is a leaf page or the tree is not an int-key tree, then ** this page contains countable entries. Increment the entry counter ** accordingly. */ pPage = pCur->pPage; if( pPage->leaf || !pPage->intKey ){ nEntry += pPage->nCell; } /* pPage is a leaf node. This loop navigates the cursor so that it ** points to the first interior cell that it points to the parent of ** the next page in the tree that has not yet been visited. The ** pCur->aiIdx[pCur->iPage] value is set to the index of the parent cell ** of the page, or to the number of cells in the page if the next page ** to visit is the right-child of its parent. ** ** If all pages in the tree have been visited, return SQLITE_OK to the ** caller. */ if( pPage->leaf ){ do { if( pCur->iPage==0 ){ /* All pages of the b-tree have been visited. Return successfully. */ *pnEntry = nEntry; return moveToRoot(pCur); } moveToParent(pCur); }while ( pCur->ix>=pCur->pPage->nCell ); pCur->ix++; pPage = pCur->pPage; } /* Descend to the child node of the cell that the cursor currently ** points at. This is the right-child if (iIdx==pPage->nCell). */ iIdx = pCur->ix; if( iIdx==pPage->nCell ){ rc = moveToChild(pCur, get4byte(&pPage->aData[pPage->hdrOffset+8])); }else{ rc = moveToChild(pCur, get4byte(findCell(pPage, iIdx))); } } /* An error has occurred. Return an error code. */ return rc; } #endif /* ** Return the pager associated with a BTree. This routine is used for ** testing and debugging only. */ Pager *sqlite3BtreePager(Btree *p){ return p->pBt->pPager; } #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* ** Append a message to the error message string. */ static void checkAppendMsg( IntegrityCk *pCheck, const char *zFormat, ... ){ va_list ap; if( !pCheck->mxErr ) return; pCheck->mxErr--; pCheck->nErr++; va_start(ap, zFormat); if( pCheck->errMsg.nChar ){ sqlite3_str_append(&pCheck->errMsg, "\n", 1); } if( pCheck->zPfx ){ sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2); } sqlite3_str_vappendf(&pCheck->errMsg, zFormat, ap); va_end(ap); if( pCheck->errMsg.accError==SQLITE_NOMEM ){ pCheck->mallocFailed = 1; } } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* ** Return non-zero if the bit in the IntegrityCk.aPgRef[] array that ** corresponds to page iPg is already set. */ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07))); } /* ** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg. */ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07)); } /* ** Add 1 to the reference count for page iPage. If this is the second ** reference to the page, add an error message to pCheck->zErrMsg. ** Return 1 if there are 2 or more references to the page and 0 if ** if this is the first reference to the page. ** ** Also check that the page number is in bounds. 
*/ static int checkRef(IntegrityCk *pCheck, Pgno iPage){ if( iPage>pCheck->nPage || iPage==0 ){ checkAppendMsg(pCheck, "invalid page number %d", iPage); return 1; } if( getPageReferenced(pCheck, iPage) ){ checkAppendMsg(pCheck, "2nd reference to page %d", iPage); return 1; } setPageReferenced(pCheck, iPage); return 0; } #ifndef SQLITE_OMIT_AUTOVACUUM /* ** Check that the entry in the pointer-map for page iChild maps to ** page iParent, pointer type ptrType. If not, append an error message ** to pCheck. */ static void checkPtrmap( IntegrityCk *pCheck, /* Integrity check context */ Pgno iChild, /* Child page number */ u8 eType, /* Expected pointer map type */ Pgno iParent /* Expected pointer map parent page number */ ){ int rc; u8 ePtrmapType; Pgno iPtrmapParent; rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); if( rc!=SQLITE_OK ){ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->mallocFailed = 1; checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild); return; } if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ checkAppendMsg(pCheck, "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", iChild, eType, iParent, ePtrmapType, iPtrmapParent); } } #endif /* ** Check the integrity of the freelist or of an overflow page list. ** Verify that the number of pages on the list is N. */ static void checkList( IntegrityCk *pCheck, /* Integrity checking context */ int isFreeList, /* True for a freelist. False for overflow page list */ int iPage, /* Page number for first page in the list */ u32 N /* Expected number of pages in the list */ ){ int i; u32 expected = N; int nErrAtStart = pCheck->nErr; while( iPage!=0 && pCheck->mxErr ){ DbPage *pOvflPage; unsigned char *pOvflData; if( checkRef(pCheck, iPage) ) break; N--; if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){ checkAppendMsg(pCheck, "failed to get page %d", iPage); break; } pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage); if( isFreeList ){ u32 n = (u32)get4byte(&pOvflData[4]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pCheck->pBt->autoVacuum ){ checkPtrmap(pCheck, iPage, PTRMAP_FREEPAGE, 0); } #endif if( n>pCheck->pBt->usableSize/4-2 ){ checkAppendMsg(pCheck, "freelist leaf count too big on page %d", iPage); N--; }else{ for(i=0; i<(int)n; i++){ Pgno iFreePage = get4byte(&pOvflData[8+i*4]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pCheck->pBt->autoVacuum ){ checkPtrmap(pCheck, iFreePage, PTRMAP_FREEPAGE, 0); } #endif checkRef(pCheck, iFreePage); } N -= n; } } #ifndef SQLITE_OMIT_AUTOVACUUM else{ /* If this database supports auto-vacuum and iPage is not the last ** page in this overflow list, check that the pointer-map entry for ** the following page matches iPage. */ if( pCheck->pBt->autoVacuum && N>0 ){ i = get4byte(pOvflData); checkPtrmap(pCheck, i, PTRMAP_OVERFLOW2, iPage); } } #endif iPage = get4byte(pOvflData); sqlite3PagerUnref(pOvflPage); } if( N && nErrAtStart==pCheck->nErr ){ checkAppendMsg(pCheck, "%s is %d but should be %d", isFreeList ? "size" : "overflow list length", expected-N, expected); } } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ /* ** An implementation of a min-heap. ** ** aHeap[0] is the number of elements on the heap. aHeap[1] is the ** root element. The daughter nodes of aHeap[N] are aHeap[N*2] ** and aHeap[N*2+1]. ** ** The heap property is this: Every node is less than or equal to both ** of its daughter nodes. A consequence of the heap property is that the ** root node aHeap[1] is always the minimum value currently in the heap. 
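**
** For example, aHeap[] = { 3, 5, 9, 7 } is a valid heap holding three
** entries: the daughters of aHeap[1]==5 are aHeap[2]==9 and aHeap[3]==7,
** both of which are greater than or equal to 5, so 5 is the minimum.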
** ** The btreeHeapInsert() routine inserts an unsigned 32-bit number onto ** the heap, preserving the heap property. The btreeHeapPull() routine ** removes the root element from the heap (the minimum value in the heap) ** and then moves other nodes around as necessary to preserve the heap ** property. ** ** This heap is used for cell overlap and coverage testing. Each u32 ** entry represents the span of a cell or freeblock on a btree page. ** The upper 16 bits are the index of the first byte of a range and the ** lower 16 bits are the index of the last byte of that range. */ static void btreeHeapInsert(u32 *aHeap, u32 x){ u32 j, i = ++aHeap[0]; aHeap[i] = x; while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){ x = aHeap[j]; aHeap[j] = aHeap[i]; aHeap[i] = x; i = j; } } static int btreeHeapPull(u32 *aHeap, u32 *pOut){ u32 j, i, x; if( (x = aHeap[0])==0 ) return 0; *pOut = aHeap[1]; aHeap[1] = aHeap[x]; aHeap[x] = 0xffffffff; aHeap[0]--; i = 1; while( (j = i*2)<=aHeap[0] ){ if( aHeap[j]>aHeap[j+1] ) j++; if( aHeap[i]<aHeap[j] ) break; x = aHeap[i]; aHeap[i] = aHeap[j]; aHeap[j] = x; i = j; } return 1; } #ifndef SQLITE_OMIT_INTEGRITY_CHECK /* ** Do various sanity checks on a single page of a tree. Return ** the tree depth. Root pages return 0. Parents of root pages ** return 1, and so forth. ** ** These checks are done: ** ** 1. Make sure that cells and freeblocks do not overlap ** but combine to completely cover the page. ** 2. Make sure integer cell keys are in order. ** 3. Check the integrity of overflow pages. ** 4. Recursively call checkTreePage on all children. ** 5. Verify that the depth of all children is the same. */ static int checkTreePage( IntegrityCk *pCheck, /* Context for the sanity check */ int iPage, /* Page number of the page to check */ i64 *piMinKey, /* Write minimum integer primary key here */ i64 maxKey /* Error if integer primary key greater than this */ ){ MemPage *pPage = 0; /* The page being analyzed */ int i; /* Loop counter */ int rc; /* Result code from subroutine call */ int depth = -1, d2; /* Depth of a subtree */ int pgno; /* Page number */ int nFrag; /* Number of fragmented bytes on the page */ int hdr; /* Offset to the page header */ int cellStart; /* Offset to the start of the cell pointer array */ int nCell; /* Number of cells */ int doCoverageCheck = 1; /* True if cell coverage checking should be done */ int keyCanBeEqual = 1; /* True if IPK can be equal to maxKey ** False if IPK must be strictly less than maxKey */ u8 *data; /* Page content */ u8 *pCell; /* Cell content */ u8 *pCellIdx; /* Next element of the cell pointer array */ BtShared *pBt; /* The BtShared object that owns pPage */ u32 pc; /* Address of a cell */ u32 usableSize; /* Usable size of the page */ u32 contentOffset; /* Offset to the start of the cell content area */ u32 *heap = 0; /* Min-heap used for checking cell coverage */ u32 x, prev = 0; /* Next and previous entry on the min-heap */ const char *saved_zPfx = pCheck->zPfx; int saved_v1 = pCheck->v1; int saved_v2 = pCheck->v2; u8 savedIsInit = 0; /* Check that the page exists */ pBt = pCheck->pBt; usableSize = pBt->usableSize; if( iPage==0 ) return 0; if( checkRef(pCheck, iPage) ) return 0; pCheck->zPfx = "Page %d: "; pCheck->v1 = iPage; if( (rc = btreeGetPage(pBt, (Pgno)iPage, &pPage, 0))!=0 ){ checkAppendMsg(pCheck, "unable to get the page. error code=%d", rc); goto end_of_check; } /* Clear MemPage.isInit to make sure the corruption detection code in ** btreeInitPage() is executed. 
*/ savedIsInit = pPage->isInit; pPage->isInit = 0; if( (rc = btreeInitPage(pPage))!=0 ){ assert( rc==SQLITE_CORRUPT ); /* The only possible error from InitPage */ checkAppendMsg(pCheck, "btreeInitPage() returns error code %d", rc); goto end_of_check; } if( (rc = btreeComputeFreeSpace(pPage))!=0 ){ assert( rc==SQLITE_CORRUPT ); checkAppendMsg(pCheck, "free space corruption", rc); goto end_of_check; } data = pPage->aData; hdr = pPage->hdrOffset; /* Set up for cell analysis */ pCheck->zPfx = "On tree page %d cell %d: "; contentOffset = get2byteNotZero(&data[hdr+5]); assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */ /* EVIDENCE-OF: R-37002-32774 The two-byte integer at offset 3 gives the ** number of cells on the page. */ nCell = get2byte(&data[hdr+3]); assert( pPage->nCell==nCell ); /* EVIDENCE-OF: R-23882-45353 The cell pointer array of a b-tree page ** immediately follows the b-tree page header. */ cellStart = hdr + 12 - 4*pPage->leaf; assert( pPage->aCellIdx==&data[cellStart] ); pCellIdx = &data[cellStart + 2*(nCell-1)]; if( !pPage->leaf ){ /* Analyze the right-child page of internal pages */ pgno = get4byte(&data[hdr+8]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ pCheck->zPfx = "On page %d at right child: "; checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif depth = checkTreePage(pCheck, pgno, &maxKey, maxKey); keyCanBeEqual = 0; }else{ /* For leaf pages, the coverage check will occur in the same loop ** as the other cell checks, so initialize the heap. */ heap = pCheck->heap; heap[0] = 0; } /* EVIDENCE-OF: R-02776-14802 The cell pointer array consists of K 2-byte ** integer offsets to the cell contents. */ for(i=nCell-1; i>=0 && pCheck->mxErr; i--){ CellInfo info; /* Check cell size */ pCheck->v2 = i; assert( pCellIdx==&data[cellStart + i*2] ); pc = get2byteAligned(pCellIdx); pCellIdx -= 2; if( pc<contentOffset || pc>usableSize-4 ){ checkAppendMsg(pCheck, "Offset %d out of range %d..%d", pc, contentOffset, usableSize-4); doCoverageCheck = 0; continue; } pCell = &data[pc]; pPage->xParseCell(pPage, pCell, &info); if( pc+info.nSize>usableSize ){ checkAppendMsg(pCheck, "Extends off end of page"); doCoverageCheck = 0; continue; } /* Check for integer primary key out of range */ if( pPage->intKey ){ if( keyCanBeEqual ? 
(info.nKey > maxKey) : (info.nKey >= maxKey) ){ checkAppendMsg(pCheck, "Rowid %lld out of order", info.nKey); } maxKey = info.nKey; keyCanBeEqual = 0; /* Only the first key on the page may ==maxKey */ } /* Check the content overflow list */ if( info.nPayload>info.nLocal ){ u32 nPage; /* Number of pages on the overflow chain */ Pgno pgnoOvfl; /* First page of the overflow chain */ assert( pc + info.nSize - 4 <= usableSize ); nPage = (info.nPayload - info.nLocal + usableSize - 5)/(usableSize - 4); pgnoOvfl = get4byte(&pCell[info.nSize - 4]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ checkPtrmap(pCheck, pgnoOvfl, PTRMAP_OVERFLOW1, iPage); } #endif checkList(pCheck, 0, pgnoOvfl, nPage); } if( !pPage->leaf ){ /* Check sanity of left child page for internal pages */ pgno = get4byte(pCell); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif d2 = checkTreePage(pCheck, pgno, &maxKey, maxKey); keyCanBeEqual = 0; if( d2!=depth ){ checkAppendMsg(pCheck, "Child page depth differs"); depth = d2; } }else{ /* Populate the coverage-checking heap for leaf pages */ btreeHeapInsert(heap, (pc<<16)|(pc+info.nSize-1)); } } *piMinKey = maxKey; /* Check for complete coverage of the page */ pCheck->zPfx = 0; if( doCoverageCheck && pCheck->mxErr>0 ){ /* For leaf pages, the min-heap has already been initialized and the ** cells have already been inserted. But for internal pages, that has ** not yet been done, so do it now */ if( !pPage->leaf ){ heap = pCheck->heap; heap[0] = 0; for(i=nCell-1; i>=0; i--){ u32 size; pc = get2byteAligned(&data[cellStart+i*2]); size = pPage->xCellSize(pPage, &data[pc]); btreeHeapInsert(heap, (pc<<16)|(pc+size-1)); } } /* Add the freeblocks to the min-heap ** ** EVIDENCE-OF: R-20690-50594 The second field of the b-tree page header ** is the offset of the first freeblock, or zero if there are no ** freeblocks on the page. */ i = get2byte(&data[hdr+1]); while( i>0 ){ int size, j; assert( (u32)i<=usableSize-4 ); /* Enforced by btreeComputeFreeSpace() */ size = get2byte(&data[i+2]); assert( (u32)(i+size)<=usableSize ); /* due to btreeComputeFreeSpace() */ btreeHeapInsert(heap, (((u32)i)<<16)|(i+size-1)); /* EVIDENCE-OF: R-58208-19414 The first 2 bytes of a freeblock are a ** big-endian integer which is the offset in the b-tree page of the next ** freeblock in the chain, or zero if the freeblock is the last on the ** chain. */ j = get2byte(&data[i]); /* EVIDENCE-OF: R-06866-39125 Freeblocks are always connected in order of ** increasing offset. */ assert( j==0 || j>i+size ); /* Enforced by btreeComputeFreeSpace() */ assert( (u32)j<=usableSize-4 ); /* Enforced by btreeComputeFreeSpace() */ i = j; } /* Analyze the min-heap looking for overlap between cells and/or ** freeblocks, and counting the number of untracked bytes in nFrag. ** ** Each min-heap entry is of the form: (start_address<<16)|end_address. ** There is an implied first entry the covers the page header, the cell ** pointer index, and the gap between the cell pointer index and the start ** of cell content. ** ** The loop below pulls entries from the min-heap in order and compares ** the start_address against the previous end_address. If there is an ** overlap, that means bytes are used multiple times. If there is a gap, ** that gap is added to the fragmentation count. 
  */
    nFrag = 0;
    prev = contentOffset - 1;   /* Implied first min-heap entry */
    while( btreeHeapPull(heap,&x) ){
      if( (prev&0xffff)>=(x>>16) ){
        checkAppendMsg(pCheck,
          "Multiple uses for byte %u of page %d", x>>16, iPage);
        break;
      }else{
        nFrag += (x>>16) - (prev&0xffff) - 1;
        prev = x;
      }
    }
    nFrag += usableSize - (prev&0xffff) - 1;
    /* EVIDENCE-OF: R-43263-13491 The total number of bytes in all fragments
    ** is stored in the fifth field of the b-tree page header.
    ** EVIDENCE-OF: R-07161-27322 The one-byte integer at offset 7 gives the
    ** number of fragmented free bytes within the cell content area. */
    if( heap[0]==0 && nFrag!=data[hdr+7] ){
      checkAppendMsg(pCheck,
          "Fragmentation of %d bytes reported as %d on page %d",
          nFrag, data[hdr+7], iPage);
    }
  }

end_of_check:
  if( !doCoverageCheck ) pPage->isInit = savedIsInit;
  releasePage(pPage);
  pCheck->zPfx = saved_zPfx;
  pCheck->v1 = saved_v1;
  pCheck->v2 = saved_v2;
  return depth+1;
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */

#ifndef SQLITE_OMIT_INTEGRITY_CHECK
/*
** This routine does a complete check of the given BTree file. aRoot[] is
** an array of page numbers where each page number is the root page of
** a table. nRoot is the number of entries in aRoot.
**
** A read-only or read-write transaction must be opened before calling
** this function.
**
** Write the number of errors seen in *pnErr. Except for some memory
** allocation errors, an error message held in memory obtained from
** malloc is returned if *pnErr is non-zero. If *pnErr==0 then NULL is
** returned. If a memory allocation error occurs, NULL is returned.
*/
char *sqlite3BtreeIntegrityCheck(
  Btree *p,     /* The btree to be checked */
  int *aRoot,   /* An array of root page numbers for individual trees */
  int nRoot,    /* Number of entries in aRoot[] */
  int mxErr,    /* Stop reporting errors after this many */
  int *pnErr    /* Write number of errors seen to this variable */
){
  Pgno i;
  IntegrityCk sCheck;
  BtShared *pBt = p->pBt;
  u64 savedDbFlags = pBt->db->flags;
  char zErr[100];
  VVA_ONLY( int nRef );

  sqlite3BtreeEnter(p);
  assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE );
  VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) );
  assert( nRef>=0 );
  sCheck.pBt = pBt;
  sCheck.pPager = pBt->pPager;
  sCheck.nPage = btreePagecount(sCheck.pBt);
  sCheck.mxErr = mxErr;
  sCheck.nErr = 0;
  sCheck.mallocFailed = 0;
  sCheck.zPfx = 0;
  sCheck.v1 = 0;
  sCheck.v2 = 0;
  sCheck.aPgRef = 0;
  sCheck.heap = 0;
  sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
  sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
  if( sCheck.nPage==0 ){
    goto integrity_ck_cleanup;
  }

  sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
  if( !sCheck.aPgRef ){
    sCheck.mallocFailed = 1;
    goto integrity_ck_cleanup;
  }
  sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize );
  if( sCheck.heap==0 ){
    sCheck.mallocFailed = 1;
    goto integrity_ck_cleanup;
  }

  i = PENDING_BYTE_PAGE(pBt);
  if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i);

  /* Check the integrity of the freelist */
  sCheck.zPfx = "Main freelist: ";
  checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]),
            get4byte(&pBt->pPage1->aData[36]));
  sCheck.zPfx = 0;

  /* Check all the tables.
*/ #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ int mx = 0; int mxInHdr; for(i=0; (int)i<nRoot; i++) if( mx<aRoot[i] ) mx = aRoot[i]; mxInHdr = get4byte(&pBt->pPage1->aData[52]); if( mx!=mxInHdr ){ checkAppendMsg(&sCheck, "max rootpage (%d) disagrees with header (%d)", mx, mxInHdr ); } }else if( get4byte(&pBt->pPage1->aData[64])!=0 ){ checkAppendMsg(&sCheck, "incremental_vacuum enabled with a max rootpage of zero" ); } #endif testcase( pBt->db->flags & SQLITE_CellSizeCk ); pBt->db->flags &= ~(u64)SQLITE_CellSizeCk; for(i=0; (int)i<nRoot && sCheck.mxErr; i++){ i64 notUsed; if( aRoot[i]==0 ) continue; #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum && aRoot[i]>1 ){ checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); } #endif checkTreePage(&sCheck, aRoot[i], &notUsed, LARGEST_INT64); } pBt->db->flags = savedDbFlags; /* Make sure every page in the file is referenced */ for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ #ifdef SQLITE_OMIT_AUTOVACUUM if( getPageReferenced(&sCheck, i)==0 ){ checkAppendMsg(&sCheck, "Page %d is never used", i); } #else /* If the database supports auto-vacuum, make sure no tables contain ** references to pointer-map pages. */ if( getPageReferenced(&sCheck, i)==0 && (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ checkAppendMsg(&sCheck, "Page %d is never used", i); } if( getPageReferenced(&sCheck, i)!=0 && (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i); } #endif } /* Clean up and report errors. */ integrity_ck_cleanup: sqlite3PageFree(sCheck.heap); sqlite3_free(sCheck.aPgRef); if( sCheck.mallocFailed ){ sqlite3_str_reset(&sCheck.errMsg); sCheck.nErr++; } *pnErr = sCheck.nErr; if( sCheck.nErr==0 ) sqlite3_str_reset(&sCheck.errMsg); /* Make sure this analysis did not leave any unref() pages. */ assert( nRef==sqlite3PagerRefcount(pBt->pPager) ); sqlite3BtreeLeave(p); return sqlite3StrAccumFinish(&sCheck.errMsg); } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ /* ** Return the full pathname of the underlying database file. Return ** an empty string if the database is in-memory or a TEMP database. ** ** The pager filename is invariant as long as the pager is ** open so it is safe to access without the BtShared mutex. */ const char *sqlite3BtreeGetFilename(Btree *p){ assert( p->pBt->pPager!=0 ); return sqlite3PagerFilename(p->pBt->pPager, 1); } /* ** Return the pathname of the journal file for this database. The return ** value of this routine is the same regardless of whether the journal file ** has been created or not. ** ** The pager journal filename is invariant as long as the pager is ** open so it is safe to access without the BtShared mutex. */ const char *sqlite3BtreeGetJournalname(Btree *p){ assert( p->pBt->pPager!=0 ); return sqlite3PagerJournalname(p->pBt->pPager); } /* ** Return non-zero if a transaction is active. */ int sqlite3BtreeIsInTrans(Btree *p){ assert( p==0 || sqlite3_mutex_held(p->db->mutex) ); return (p && (p->inTrans==TRANS_WRITE)); } #ifndef SQLITE_OMIT_WAL /* ** Run a checkpoint on the Btree passed as the first argument. ** ** Return SQLITE_LOCKED if this or any other connection has an open ** transaction on the shared-cache the argument Btree is connected to. ** ** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART. 
*/ int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *pnCkpt){ int rc = SQLITE_OK; if( p ){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); if( pBt->inTransaction!=TRANS_NONE ){ rc = SQLITE_LOCKED; }else{ rc = sqlite3PagerCheckpoint(pBt->pPager, p->db, eMode, pnLog, pnCkpt); } sqlite3BtreeLeave(p); } return rc; } #endif /* ** Return non-zero if a read (or write) transaction is active. */ int sqlite3BtreeIsInReadTrans(Btree *p){ assert( p ); assert( sqlite3_mutex_held(p->db->mutex) ); return p->inTrans!=TRANS_NONE; } int sqlite3BtreeIsInBackup(Btree *p){ assert( p ); assert( sqlite3_mutex_held(p->db->mutex) ); return p->nBackup!=0; } /* ** This function returns a pointer to a blob of memory associated with ** a single shared-btree. The memory is used by client code for its own ** purposes (for example, to store a high-level schema associated with ** the shared-btree). The btree layer manages reference counting issues. ** ** The first time this is called on a shared-btree, nBytes bytes of memory ** are allocated, zeroed, and returned to the caller. For each subsequent ** call the nBytes parameter is ignored and a pointer to the same blob ** of memory returned. ** ** If the nBytes parameter is 0 and the blob of memory has not yet been ** allocated, a null pointer is returned. If the blob has already been ** allocated, it is returned as normal. ** ** Just before the shared-btree is closed, the function passed as the ** xFree argument when the memory allocation was made is invoked on the ** blob of allocated memory. The xFree function should not call sqlite3_free() ** on the memory, the btree layer does that. */ void *sqlite3BtreeSchema(Btree *p, int nBytes, void(*xFree)(void *)){ BtShared *pBt = p->pBt; sqlite3BtreeEnter(p); if( !pBt->pSchema && nBytes ){ pBt->pSchema = sqlite3DbMallocZero(0, nBytes); pBt->xFreeSchema = xFree; } sqlite3BtreeLeave(p); return pBt->pSchema; } /* ** Return SQLITE_LOCKED_SHAREDCACHE if another user of the same shared ** btree as the argument handle holds an exclusive lock on the ** sqlite_master table. Otherwise SQLITE_OK. */ int sqlite3BtreeSchemaLocked(Btree *p){ int rc; assert( sqlite3_mutex_held(p->db->mutex) ); sqlite3BtreeEnter(p); rc = querySharedCacheTableLock(p, MASTER_ROOT, READ_LOCK); assert( rc==SQLITE_OK || rc==SQLITE_LOCKED_SHAREDCACHE ); sqlite3BtreeLeave(p); return rc; } #ifndef SQLITE_OMIT_SHARED_CACHE /* ** Obtain a lock on the table whose root page is iTab. The ** lock is a write lock if isWritelock is true or a read lock ** if it is false. */ int sqlite3BtreeLockTable(Btree *p, int iTab, u8 isWriteLock){ int rc = SQLITE_OK; assert( p->inTrans!=TRANS_NONE ); if( p->sharable ){ u8 lockType = READ_LOCK + isWriteLock; assert( READ_LOCK+1==WRITE_LOCK ); assert( isWriteLock==0 || isWriteLock==1 ); sqlite3BtreeEnter(p); rc = querySharedCacheTableLock(p, iTab, lockType); if( rc==SQLITE_OK ){ rc = setSharedCacheTableLock(p, iTab, lockType); } sqlite3BtreeLeave(p); } return rc; } #endif #ifndef SQLITE_OMIT_INCRBLOB /* ** Argument pCsr must be a cursor opened for writing on an ** INTKEY table currently pointing at a valid table entry. ** This function modifies the data stored as part of that entry. ** ** Only the data content may only be modified, it is not possible to ** change the length of the data stored. If this function is called with ** parameters that attempt to write past the end of the existing data, ** no modifications are made and SQLITE_CORRUPT is returned. 
*/ int sqlite3BtreePutData(BtCursor *pCsr, u32 offset, u32 amt, void *z){ int rc; assert( cursorOwnsBtShared(pCsr) ); assert( sqlite3_mutex_held(pCsr->pBtree->db->mutex) ); assert( pCsr->curFlags & BTCF_Incrblob ); rc = restoreCursorPosition(pCsr); if( rc!=SQLITE_OK ){ return rc; } assert( pCsr->eState!=CURSOR_REQUIRESEEK ); if( pCsr->eState!=CURSOR_VALID ){ return SQLITE_ABORT; } /* Save the positions of all other cursors open on this table. This is ** required in case any of them are holding references to an xFetch ** version of the b-tree page modified by the accessPayload call below. ** ** Note that pCsr must be open on a INTKEY table and saveCursorPosition() ** and hence saveAllCursors() cannot fail on a BTREE_INTKEY table, hence ** saveAllCursors can only return SQLITE_OK. */ VVA_ONLY(rc =) saveAllCursors(pCsr->pBt, pCsr->pgnoRoot, pCsr); assert( rc==SQLITE_OK ); /* Check some assumptions: ** (a) the cursor is open for writing, ** (b) there is a read/write transaction open, ** (c) the connection holds a write-lock on the table (if required), ** (d) there are no conflicting read-locks, and ** (e) the cursor points at a valid row of an intKey table. */ if( (pCsr->curFlags & BTCF_WriteFlag)==0 ){ return SQLITE_READONLY; } assert( (pCsr->pBt->btsFlags & BTS_READ_ONLY)==0 && pCsr->pBt->inTransaction==TRANS_WRITE ); assert( hasSharedCacheTableLock(pCsr->pBtree, pCsr->pgnoRoot, 0, 2) ); assert( !hasReadConflicts(pCsr->pBtree, pCsr->pgnoRoot) ); assert( pCsr->pPage->intKey ); return accessPayload(pCsr, offset, amt, (unsigned char *)z, 1); } /* ** Mark this cursor as an incremental blob cursor. */ void sqlite3BtreeIncrblobCursor(BtCursor *pCur){ pCur->curFlags |= BTCF_Incrblob; pCur->pBtree->hasIncrblobCur = 1; } #endif /* ** Set both the "read version" (single byte at byte offset 18) and ** "write version" (single byte at byte offset 19) fields in the database ** header to iVersion. */ int sqlite3BtreeSetVersion(Btree *pBtree, int iVersion){ BtShared *pBt = pBtree->pBt; int rc; /* Return code */ assert( iVersion==1 || iVersion==2 ); /* If setting the version fields to 1, do not automatically open the ** WAL connection, even if the version fields are currently set to 2. */ pBt->btsFlags &= ~BTS_NO_WAL; if( iVersion==1 ) pBt->btsFlags |= BTS_NO_WAL; rc = sqlite3BtreeBeginTrans(pBtree, 0, 0); if( rc==SQLITE_OK ){ u8 *aData = pBt->pPage1->aData; if( aData[18]!=(u8)iVersion || aData[19]!=(u8)iVersion ){ rc = sqlite3BtreeBeginTrans(pBtree, 2, 0); if( rc==SQLITE_OK ){ rc = sqlite3PagerWrite(pBt->pPage1->pDbPage); if( rc==SQLITE_OK ){ aData[18] = (u8)iVersion; aData[19] = (u8)iVersion; } } } } pBt->btsFlags &= ~BTS_NO_WAL; return rc; } /* ** Return true if the cursor has a hint specified. This routine is ** only used from within assert() statements */ int sqlite3BtreeCursorHasHint(BtCursor *pCsr, unsigned int mask){ return (pCsr->hints & mask)!=0; } /* ** Return true if the given Btree is read-only. */ int sqlite3BtreeIsReadonly(Btree *p){ return (p->pBt->btsFlags & BTS_READ_ONLY)!=0; } /* ** Return the size of the header added to each page by this module. */ int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); } #if !defined(SQLITE_OMIT_SHARED_CACHE) /* ** Return true if the Btree passed as the only argument is sharable. */ int sqlite3BtreeSharable(Btree *p){ return p->sharable; } /* ** Return the number of connections to the BtShared object accessed by ** the Btree handle passed as the only argument. For private caches ** this is always 1. For shared caches it may be 1 or greater. 
*/ int sqlite3BtreeConnectionCount(Btree *p){ testcase( p->sharable ); return p->pBt->nRef; } #endif
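/*
** Illustrative sketch (not part of SQLite): a self-contained demo of the
** coverage-check technique that checkTreePage() above applies to a b-tree
** page.  Each byte range is packed as (start<<16)|end, so ordinary integer
** ordering on the packed values sorts ranges by their start address.  The
** ranges are pushed onto a min-heap and pulled back out in order; an entry
** whose start is not past the previous end is an overlap, and any bytes
** skipped between consecutive entries count as fragmentation.  heapInsert()
** and heapPull() mirror btreeHeapInsert()/btreeHeapPull(); the page size
** and the three ranges in main() are made up for the demo.
*/
#include <stdio.h>

static void heapInsert(unsigned int *aHeap, unsigned int x){
  unsigned int j, i = ++aHeap[0];
  aHeap[i] = x;
  while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){
    x = aHeap[j]; aHeap[j] = aHeap[i]; aHeap[i] = x;
    i = j;
  }
}

static int heapPull(unsigned int *aHeap, unsigned int *pOut){
  unsigned int j, i, x;
  if( (x = aHeap[0])==0 ) return 0;
  *pOut = aHeap[1];
  aHeap[1] = aHeap[x];
  aHeap[x] = 0xffffffff;   /* sentinel: the sift-down may compare one slot past the end */
  aHeap[0]--;
  i = 1;
  while( (j = i*2)<=aHeap[0] ){
    if( aHeap[j]>aHeap[j+1] ) j++;
    if( aHeap[i]<aHeap[j] ) break;
    x = aHeap[i]; aHeap[i] = aHeap[j]; aHeap[j] = x;
    i = j;
  }
  return 1;
}

int main(void){
  /* A hypothetical 100-byte "page" whose content area starts at offset 20.
  ** Cells/freeblocks cover [20,39], [40,59] and [70,99]; bytes 60..69 are
  ** untracked, so the expected fragmentation count is 10. */
  unsigned int heap[16] = {0};
  unsigned int span[3] = { (20u<<16)|39u, (70u<<16)|99u, (40u<<16)|59u };
  unsigned int x, prev = 20 - 1;   /* implied entry ending just before the content area */
  unsigned int nFrag = 0;
  int i;

  for(i=0; i<3; i++) heapInsert(heap, span[i]);
  while( heapPull(heap, &x) ){
    if( (prev&0xffff)>=(x>>16) ){
      printf("multiple uses for byte %u\n", x>>16);
      break;
    }
    nFrag += (x>>16) - (prev&0xffff) - 1;
    prev = x;
  }
  printf("fragmented bytes: %u\n", nFrag);   /* prints 10 */
  return 0;
}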
992960.c
/* Copyright 2019 Triad National Security, LLC. All rights reserved. This file is part of the MSTK project. Please see the license file at the root of this repository or at https://github.com/MeshToolkit/MSTK/blob/master/LICENSE */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <math.h> #include "MSTK.h" #include "mkstruc.h" /* Index directions for classification templates k j | / | / |/___ i Model vertex, edge and face enumeration for classification templates MODEL MODEL MODEL VERTICES EDGES FACES 7 ____________ 8 ______7_____ ____________ /| /| /| /| /| 2 /| / | / | 12/ |8 11/ | / | 4 / | 5/___________/6 | /_____3_____/ |6 /___________/ | | | | | | | | | | | | 5| | |________|__| | |_____5__|__| |6 |_1______|__| | /3 | /4 4| / | / | / | / | / | / | /9 2| /10 | / 3 | / |/__________|/ |/__________|/ |/__________|/ 1 2 1 Front - F1 Back - F2 Bottom - F3 Top - F4 Left - F6 Right - F5 */ int main(int argc, char *argv[]) { int mesh_type; int i, j, k, ii, jj, kk, gid, gdim, nx, ny, nz; double xyz[3], llx, lly, llz, urx, ury, urz, dx, dy, dz; MVertex_ptr mv, rverts[8], fverts[4], everts[2]; MEdge_ptr me; MFace_ptr mf; MRegion_ptr mr; Mesh_ptr mesh; char outfile[256]; int vgid_tmpl[3][3][3] = {{{1,4,5},{9,6,12},{3,8,7}},{{1,1,3},{3,1,4},{5,2,7}},{{2,2,6},{10,5,11},{4,6,8}}}; int vgdim_tmpl[3][3][3]= {{{0,1,0},{1,2,1}, {0,1,0}},{{1,2,1},{2,3,2},{1,2,1}},{{0,1,0},{1,2,1},{0,1,0}}}; int egdim_tmpl[3][3] = {{1,2,1},{2,3,2},{1,2,1}}; int egid_tmpl2[3][3] = {{4,6,8},{1,1,2},{2,5,6}}; /* Y direction edges (iterating over i,k) */ int egid_tmpl1[3][3] = {{9,6,12},{3,1,4},{10,5,11}}; /* Z direction edges (iterating over i,j)*/ int egid_tmpl0[3][3] = {{1,1,3},{3,1,4},{5,2,7}}; /* X direction edges (iterating over j,k) */ int fgdim_tmpl[3] = {2,3,2}; int fgid_tmpl0[3] = {6,1,5}; int fgid_tmpl1[3] = {1,1,2}; int fgid_tmpl2[3] = {3,1,4}; if (argc != 7 && argc != 8 && argc != 10 && argc != 11) { fprintf(stderr,"Usage: %s llx lly <llz> urx ury <urz> nx ny <nz> <outfile>\n",argv[0]); fprintf(stderr," llx, lly, <llz>: Lower Left Front Corner Coordinates (specify llz for volume meshes)\n"); fprintf(stderr," urx, ury, <urz>: Upper Right Back Corner Coordinates (specify urz for volume meshes)\n"); fprintf(stderr," nx, ny, <nz>: Number of elements in coordinate directions (specify nz for volume meshes)\n"); fprintf(stderr," outfile: Name of output mstk file - must have .mstk extension\n"); fprintf(stderr,"\n"); exit(-1); } MSTK_Init(); mesh = MESH_New(F1); strcpy(outfile,"struct.mstk"); if (argc >= 10) { mesh_type = 3; /* solid */ if (argc == 11) sscanf(argv[10],"%s",outfile); } else { mesh_type = 2; /* planar */ if (argc == 8) sscanf(argv[7],"%s",outfile); } nz = 0; llz = 0.0; urz = 0.0; if (mesh_type == 2) { sscanf(argv[1],"%lf",&llx); sscanf(argv[2],"%lf",&lly); sscanf(argv[3],"%lf",&urx); sscanf(argv[4],"%lf",&ury); sscanf(argv[5],"%d",&nx); sscanf(argv[6],"%d",&ny); } else { sscanf(argv[1],"%lf",&llx); sscanf(argv[2],"%lf",&lly); sscanf(argv[3],"%lf",&llz); sscanf(argv[4],"%lf",&urx); sscanf(argv[5],"%lf",&ury); sscanf(argv[6],"%lf",&urz); sscanf(argv[7],"%d",&nx); sscanf(argv[8],"%d",&ny); sscanf(argv[9],"%d",&nz); } mesh = MESH_Gen_Structured(llx, lly, llz, urx, ury, urz, nx, ny, nz); /* if (mesh_type == 2) { */ /* /\* 2D Mesh *\/ */ /* MVertex_ptr **verts; */ /* sscanf(argv[1],"%lf",&llx); */ /* sscanf(argv[2],"%lf",&lly); */ /* sscanf(argv[3],"%lf",&urx); */ /* sscanf(argv[4],"%lf",&ury); */ /* sscanf(argv[5],"%d",&nx); */ /* sscanf(argv[6],"%d",&ny); */ /* dx = 
(urx-llx)/nx; */ /* dy = (ury-lly)/ny; */ /* verts = (MVertex_ptr **) malloc((nx+1)*sizeof(MVertex_ptr *)); */ /* for (i = 0; i < nx+1; i++) */ /* verts[i] = (MVertex_ptr *) malloc((ny+1)*sizeof(MVertex_ptr)); */ /* xyz[2] = 0.0; */ /* for (j = 0; j < ny+1; j++) { */ /* xyz[1] = (j == ny) ? ury : lly + j*dy; */ /* for (i = 0; i < nx+1; i++) { */ /* xyz[0] = (i == nx) ? urx : llx + i*dx; */ /* mv = MV_New(mesh); */ /* MV_Set_Coords(mv,xyz); */ /* if (i == 0) { */ /* if (j == 0) { */ /* MV_Set_GEntDim(mv,0); */ /* MV_Set_GEntID(mv,1); */ /* } */ /* else if (j == ny) { */ /* MV_Set_GEntDim(mv,0); */ /* MV_Set_GEntID(mv,4); */ /* } */ /* else { */ /* MV_Set_GEntDim(mv,1); */ /* MV_Set_GEntID(mv,4); */ /* } */ /* } */ /* else if (i == nx) { */ /* if (j == 0) { */ /* MV_Set_GEntDim(mv,0); */ /* MV_Set_GEntID(mv,2); */ /* } */ /* else if (j == ny) { */ /* MV_Set_GEntDim(mv,0); */ /* MV_Set_GEntID(mv,3); */ /* } */ /* else { */ /* MV_Set_GEntDim(mv,1); */ /* MV_Set_GEntID(mv,2); */ /* } */ /* } */ /* else { */ /* if (j == 0) { */ /* MV_Set_GEntDim(mv,1); */ /* MV_Set_GEntID(mv,1); */ /* } */ /* else if (j == ny) { */ /* MV_Set_GEntDim(mv,1); */ /* MV_Set_GEntID(mv,3); */ /* } */ /* else { */ /* MV_Set_GEntDim(mv,2); */ /* MV_Set_GEntID(mv,1); */ /* } */ /* } */ /* verts[i][j] = mv; */ /* } */ /* } */ /* for (i = 0; i < nx; i++) { */ /* for (j = 0; j < ny; j++) { */ /* MVertex_ptr v0, v1; */ /* List_ptr fedges[4]; */ /* int dir[4]; */ /* mf = MF_New(mesh); */ /* /\* edge 0 *\/ */ /* v0 = verts[i][j]; */ /* v1 = verts[i+1][j]; */ /* fedges[0] = MVs_CommonEdge(v0,v1); */ /* if (fedges[0]) */ /* dir[0] = (ME_Vertex(fedges[0],0) == v0) ? 1 : 0; */ /* else { */ /* me = ME_New(mesh); */ /* ME_Set_Vertex(me,0,v0); */ /* ME_Set_Vertex(me,1,v1); */ /* if (j == 0) { */ /* ME_Set_GEntDim(me,1); */ /* ME_Set_GEntID(me,1); */ /* } */ /* else { */ /* ME_Set_GEntDim(me,2); */ /* ME_Set_GEntID(me,1); */ /* } */ /* fedges[0] = me; */ /* dir[0] = 1; */ /* } */ /* /\* edge 1 *\/ */ /* v0 = verts[i+1][j]; */ /* v1 = verts[i+1][j+1]; */ /* fedges[1] = MVs_CommonEdge(v0,v1); */ /* if (fedges[1]) */ /* dir[1] = (ME_Vertex(fedges[1],0) == v0) ? 1 : 0; */ /* else { */ /* me = ME_New(mesh); */ /* ME_Set_Vertex(me,0,v0); */ /* ME_Set_Vertex(me,1,v1); */ /* if (i+1 == nx) { */ /* ME_Set_GEntDim(me,1); */ /* ME_Set_GEntID(me,2); */ /* } */ /* else { */ /* ME_Set_GEntDim(me,2); */ /* ME_Set_GEntID(me,1); */ /* } */ /* fedges[1] = me; */ /* dir[1] = 1; */ /* } */ /* /\* edge 2 *\/ */ /* v0 = verts[i+1][j+1]; */ /* v1 = verts[i][j+1]; */ /* fedges[2] = MVs_CommonEdge(v0,v1); */ /* if (fedges[2]) */ /* dir[2] = (ME_Vertex(fedges[2],0) == v0) ? 1 : 0; */ /* else { */ /* me = ME_New(mesh); */ /* ME_Set_Vertex(me,0,v0); */ /* ME_Set_Vertex(me,1,v1); */ /* if (j+1 == ny) { */ /* ME_Set_GEntDim(me,1); */ /* ME_Set_GEntID(me,3); */ /* } */ /* else { */ /* ME_Set_GEntDim(me,2); */ /* ME_Set_GEntID(me,1); */ /* } */ /* fedges[2] = me; */ /* dir[2] = 1; */ /* } */ /* /\* edge 3 *\/ */ /* v0 = verts[i][j+1]; */ /* v1 = verts[i][j]; */ /* fedges[3] = MVs_CommonEdge(v0,v1); */ /* if (fedges[3]) */ /* dir[3] = (ME_Vertex(fedges[3],0) == v0) ? 
1 : 0; */ /* else { */ /* me = ME_New(mesh); */ /* ME_Set_Vertex(me,0,v0); */ /* ME_Set_Vertex(me,1,v1); */ /* if (i == 0) { */ /* ME_Set_GEntDim(me,1); */ /* ME_Set_GEntID(me,4); */ /* } */ /* else { */ /* ME_Set_GEntDim(me,2); */ /* ME_Set_GEntID(me,1); */ /* } */ /* fedges[3] = me; */ /* dir[3] = 1; */ /* } */ /* MF_Set_Edges(mf,4,fedges,dir); */ /* MF_Set_GEntDim(mf,2); */ /* MF_Set_GEntID(mf,1); */ /* } */ /* } */ /* for (i = 0; i < nx; i++) */ /* free(verts[i]); */ /* free(verts); */ /* } */ /* else { */ /* /\* 3D Mesh *\/ */ /* MVertex_ptr ***verts; */ /* sscanf(argv[1],"%lf",&llx); */ /* sscanf(argv[2],"%lf",&lly); */ /* sscanf(argv[3],"%lf",&llz); */ /* sscanf(argv[4],"%lf",&urx); */ /* sscanf(argv[5],"%lf",&ury); */ /* sscanf(argv[6],"%lf",&urz); */ /* sscanf(argv[7],"%d",&nx); */ /* sscanf(argv[8],"%d",&ny); */ /* sscanf(argv[9],"%d",&nz); */ /* dx = (urx-llx)/nx; */ /* dy = (ury-lly)/ny; */ /* dz = (urz-llz)/nz; */ /* verts = (MVertex_ptr ***) malloc((nx+1)*sizeof(MVertex_ptr **)); */ /* for (j = 0; j < nx+1; j++) { */ /* verts[j] = (MVertex_ptr **) malloc((ny+1)*sizeof(MVertex_ptr *)); */ /* for (k = 0; k < ny+1; k++) */ /* verts[j][k] = (MVertex_ptr *) malloc((nz+1)*sizeof(MVertex_ptr)); */ /* } */ /* for (k = 0; k < nz+1; k++) { */ /* xyz[2] = (k == nz) ? urz : llz + k*dz; */ /* kk = (k%nz) ? 1 : (k ? 2 : 0); */ /* for (j = 0; j < ny+1; j++) { */ /* xyz[1] = (j == ny) ? ury : lly + j*dy; */ /* jj = (j%ny) ? 1 : (j ? 2 : 0); */ /* for (i = 0; i < nx+1; i++) { */ /* xyz[0] = (i == nx) ? urx : llx + i*dx; */ /* ii = (i%nx) ? 1 : (i ? 2 : 0); */ /* mv = MV_New(mesh); */ /* MV_Set_Coords(mv,xyz); */ /* verts[i][j][k] = mv; */ /* gdim = vgdim_tmpl[ii][jj][kk]; */ /* MV_Set_GEntDim(mv,gdim); */ /* gid = vgid_tmpl[ii][jj][kk]; */ /* MV_Set_GEntID(mv,gid); */ /* } */ /* } */ /* } */ /* /\* Create the edges explicitly to get the classification right *\/ */ /* for (i = 0; i < nx+1; i++) { */ /* for (j = 0; j < ny+1; j++) { */ /* for (k = 0; k < nz; k++) { */ /* me = ME_New(mesh); */ /* everts[0] = verts[i][j][k]; */ /* everts[1] = verts[i][j][k+1]; */ /* ME_Set_Vertex(me,0,everts[0]); */ /* ME_Set_Vertex(me,1,everts[1]); */ /* ii = (i%nx) ? 1 : (i ? 2 : 0); */ /* jj = (j%ny) ? 1 : (j ? 2 : 0); */ /* gdim = egdim_tmpl[ii][jj]; */ /* gid = egid_tmpl2[ii][jj]; */ /* ME_Set_GEntDim(me,gdim); */ /* ME_Set_GEntID(me,gid); */ /* } */ /* } */ /* } */ /* for (i = 0; i < nx+1; i++) { */ /* for (k = 0; k < nz+1; k++) { */ /* for (j = 0; j < ny; j++) { */ /* me = ME_New(mesh); */ /* everts[0] = verts[i][j][k]; */ /* everts[1] = verts[i][j+1][k]; */ /* ME_Set_Vertex(me,0,everts[0]); */ /* ME_Set_Vertex(me,1,everts[1]); */ /* ii = (i%nx) ? 1 : (i ? 2 : 0); */ /* kk = (k%nz) ? 1 : (k ? 2 : 0); */ /* gdim = egdim_tmpl[ii][kk]; */ /* gid = egid_tmpl1[ii][kk]; */ /* ME_Set_GEntDim(me,gdim); */ /* ME_Set_GEntID(me,gid); */ /* } */ /* } */ /* } */ /* for (j = 0; j < ny+1; j++) { */ /* for (k = 0; k < nz+1; k++) { */ /* for (i = 0; i < nx; i++) { */ /* me = ME_New(mesh); */ /* everts[0] = verts[i][j][k]; */ /* everts[1] = verts[i+1][j][k]; */ /* ME_Set_Vertex(me,0,everts[0]); */ /* ME_Set_Vertex(me,1,everts[1]); */ /* jj = (j%ny) ? 1 : (j ? 2 : 0); */ /* kk = (k%nz) ? 1 : (k ? 
2 : 0); */ /* gdim = egdim_tmpl[jj][kk]; */ /* gid = egid_tmpl0[jj][kk]; */ /* ME_Set_GEntDim(me,gdim); */ /* ME_Set_GEntID(me,gid); */ /* } */ /* } */ /* } */ /* /\* Create the faces explicitly to get the classification right *\/ */ /* for (i = 0; i < nx+1; i++) { */ /* for (j = 0; j < ny; j++) { */ /* for (k = 0; k < nz; k++) { */ /* mf = MF_New(mesh); */ /* fverts[0] = verts[i][j][k]; */ /* fverts[1] = verts[i][j+1][k]; */ /* fverts[2] = verts[i][j+1][k+1]; */ /* fverts[3] = verts[i][j][k+1]; */ /* MF_Set_Vertices(mf,4,fverts); */ /* ii = (i%nx) ? 1 : (i ? 2 : 0); */ /* gdim = fgdim_tmpl[ii]; */ /* gid = fgid_tmpl0[ii]; */ /* MF_Set_GEntDim(mf,gdim); */ /* MF_Set_GEntID(mf,gid); */ /* } */ /* } */ /* } */ /* for (j = 0; j < ny+1; j++) { */ /* for (i = 0; i < nx; i++) { */ /* for (k = 0; k < nz; k++) { */ /* mf = MF_New(mesh); */ /* fverts[0] = verts[i][j][k]; */ /* fverts[1] = verts[i+1][j][k]; */ /* fverts[2] = verts[i+1][j][k+1]; */ /* fverts[3] = verts[i][j][k+1]; */ /* MF_Set_Vertices(mf,4,fverts); */ /* jj = (j%ny) ? 1 : (j ? 2 : 0); */ /* gdim = fgdim_tmpl[jj]; */ /* gid = fgid_tmpl1[jj]; */ /* MF_Set_GEntDim(mf,gdim); */ /* MF_Set_GEntID(mf,gid); */ /* } */ /* } */ /* } */ /* for (k = 0; k < nz+1; k++) { */ /* for (i = 0; i < nx; i++) { */ /* for (j = 0; j < ny; j++) { */ /* mf = MF_New(mesh); */ /* fverts[0] = verts[i][j][k]; */ /* fverts[1] = verts[i+1][j][k]; */ /* fverts[2] = verts[i+1][j+1][k]; */ /* fverts[3] = verts[i][j+1][k]; */ /* MF_Set_Vertices(mf,4,fverts); */ /* kk = (k%nz) ? 1 : (k ? 2 : 0); */ /* gdim = fgdim_tmpl[kk]; */ /* gid = fgid_tmpl2[kk]; */ /* MF_Set_GEntDim(mf,gdim); */ /* MF_Set_GEntID(mf,gid); */ /* } */ /* } */ /* } */ /* /\* Not the most efficient way but the easiest to code *\/ */ /* for (i = 0; i < nx; i++) { */ /* for (j = 0; j < ny; j++) { */ /* for (k = 0; k < nz; k++) { */ /* mr = MR_New(mesh); */ /* MR_Set_GEntID(mr,1); */ /* rverts[0] = verts[i][j][k]; rverts[1] = verts[i+1][j][k]; */ /* rverts[2] = verts[i+1][j+1][k]; rverts[3] = verts[i][j+1][k]; */ /* rverts[4] = verts[i][j][k+1]; rverts[5] = verts[i+1][j][k+1]; */ /* rverts[6] = verts[i+1][j+1][k+1]; rverts[7] = verts[i][j+1][k+1]; */ /* MR_Set_Vertices(mr, 8, rverts, 6, NULL); */ /* } */ /* } */ /* } */ /* for (i = 0; i < nx+1; i++) { */ /* for (j = 0; j < ny+1; j++) */ /* free(verts[i][j]); */ /* free(verts[i]); */ /* } */ /* free(verts); */ /* } */ // Deform the mesh int idx = 0; while ((mv = MESH_Next_Vertex(mesh,&idx))) { int boundary = 0; if ((mesh_type == 2 && MV_GEntDim(mv) == 2) || (mesh_type == 3 && MV_GEntDim(mv) == 3)) boundary = 0; else boundary = 1; MV_Coords(mv,xyz); transform_xyz(xyz, boundary); MV_Set_Coords(mv,xyz); } MESH_WriteToFile(mesh,outfile,F1,NULL); MESH_Delete(mesh); }
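/*
** Illustrative sketch (not part of MSTK): how the classification templates
** declared at the top of this file are indexed by the (now commented-out)
** mesh-construction code.  A grid index i in [0,nx] is folded to
** 0 (lower boundary), 1 (interior) or 2 (upper boundary) with
** (i%nx) ? 1 : (i ? 2 : 0), and the triple (ii,jj,kk) selects the geometric
** dimension of the model entity from vgdim_tmpl.  The template values are
** copied from the file above; nx=ny=nz=2 is an arbitrary choice for the demo.
*/
#include <stdio.h>

int main(void) {
  int vgdim_tmpl[3][3][3] = {{{0,1,0},{1,2,1},{0,1,0}},
                             {{1,2,1},{2,3,2},{1,2,1}},
                             {{0,1,0},{1,2,1},{0,1,0}}};
  int nx = 2, ny = 2, nz = 2;
  int count[4] = {0, 0, 0, 0};   /* vertices classified on a model vertex/edge/face/region */
  int i, j, k;

  for (k = 0; k < nz+1; k++) {
    int kk = (k%nz) ? 1 : (k ? 2 : 0);
    for (j = 0; j < ny+1; j++) {
      int jj = (j%ny) ? 1 : (j ? 2 : 0);
      for (i = 0; i < nx+1; i++) {
        int ii = (i%nx) ? 1 : (i ? 2 : 0);
        count[vgdim_tmpl[ii][jj][kk]]++;
      }
    }
  }

  /* For a box this should report 8 corner vertices, 12 edge vertices,
  ** 6 face vertices and 1 interior vertex (27 in total). */
  printf("gdim 0: %d, gdim 1: %d, gdim 2: %d, gdim 3: %d\n",
         count[0], count[1], count[2], count[3]);
  return 0;
}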
380517.c
#include "prime.h" #include <assert.h> #include <stdio.h> #include <stdlib.h> #include "alloc-testing.h" #include "test_helper.h" void test_prime_1() { BitMap *sieve = prime_number_sieve(100); assert(prime_number_sieve_check(sieve, 2)); assert(prime_number_sieve_check(sieve, 3)); assert(prime_number_sieve_check(sieve, 11)); assert(prime_number_sieve_check(sieve, 83)); assert(prime_number_sieve_check(sieve, 89)); assert(prime_number_sieve_check(sieve, 97)); assert(!prime_number_sieve_check(sieve, 88)); assert(!prime_number_sieve_check(sieve, 99)); assert(!prime_number_sieve_check(sieve, 81)); assert(!prime_number_sieve_check(sieve, 1)); assert(!prime_number_sieve_check(sieve, 0)); prime_number_sieve_free(sieve); } void test_prime_2() { /** 1 billion, take long time. */ // BitMap *sieve = prime_number_sieve(1000000000); // unsigned int count = prime_number_sieve_count(sieve); // printf("Prime numbers count: ==%u==", count); // assert(count == 50847534); // prime_number_sieve_free(sieve); } void test_prime_3() { /** 1 million. */ BitMap *sieve = prime_number_sieve(1000000); unsigned int count = prime_number_sieve_count(sieve); // printf("Prime numbers count: ==%u==", count); assert(count == 78498); prime_number_sieve_free(sieve); } void test_prime() { test_prime_1(); test_prime_2(); test_prime_3(); }
541069.c
#include <stdlib.h> #include <math.h> #include "game/objects/hazard.h" #include "game/protos/object_specializer.h" #include "utils/log.h" #include "utils/miscmath.h" #include "game/protos/scene.h" int orb_almost_there(vec2f a, vec2f b) { vec2f dir = vec2f_sub(a, b); return (dir.x >= -2.0f && dir.x <= 2.0f && dir.y >= -2.0f && dir.y <= 2.0f); } void hazard_tick(object *obj) { bk *bk_data = (bk*)object_get_userdata(obj); if(obj->animation_state.finished) { bk_info *anim = bk_get_info(bk_data, obj->cur_animation->id); if (anim->chain_no_hit) { object_set_animation(obj, &bk_get_info(bk_data, anim->chain_no_hit)->ani); object_set_repeat(obj, 0); obj->animation_state.finished = 0; } } if(obj->orbit) { obj->orbit_tick += MATH_PI/32; if(obj->orbit_tick >= MATH_PI*2) { obj->orbit_tick -= MATH_PI*2; } if(orb_almost_there(obj->orbit_dest, obj->orbit_pos)) { // XXX come up with a better equation to randomize the destination obj->orbit_pos = obj->pos; obj->orbit_pos_vary = vec2f_create(0, 0); float mag; int limit = 10; do { obj->orbit_dest = vec2f_create(rand_float()*320.0f, rand_float()*200.0f); obj->orbit_dest_dir = vec2f_sub(obj->orbit_dest, obj->orbit_pos); mag = sqrtf(obj->orbit_dest_dir.x*obj->orbit_dest_dir.x + obj->orbit_dest_dir.y*obj->orbit_dest_dir.y); limit--; } while(mag < 80.0f && limit > 0); obj->orbit_dest_dir.x /= mag; obj->orbit_dest_dir.y /= mag; } } } void hazard_spawn_cb(object *parent, int id, vec2i pos, int g, void *userdata) { scene *s = (scene*)userdata; // Get next animation bk_info *info = bk_get_info(&s->bk_data, id); if(info != NULL) { object *obj = malloc(sizeof(object)); object_create(obj, parent->gs, vec2i_add(pos, info->ani.start_pos), vec2f_create(0,0)); object_set_stl(obj, object_get_stl(parent)); object_set_animation(obj, &info->ani); if(info->probability == 1) { object_set_repeat(obj, 1); } object_set_layers(obj, LAYER_HAZARD|LAYER_HAR); object_set_group(obj, GROUP_PROJECTILE); object_set_userdata(obj, object_get_userdata(parent)); hazard_create(obj, s); if (s->bk_data.file_id == 128 && id == 14) { // XXX hack because we don't understand the ms and md tags // without this, the 'bullet damage' sprite in the desert spawns at 0,0 obj->pos = parent->pos; } game_state_add_object(parent->gs, obj, RENDER_LAYER_BOTTOM, 0, 0); } else { DEBUG("failed to spawn hazard child"); } } void hazard_move(object *obj) { if(obj->orbit) { // Make this object orbit around the center of the arena obj->pos.x = obj->orbit_pos.x+obj->orbit_pos_vary.x; obj->pos.y = obj->orbit_pos.y+obj->orbit_pos_vary.y; obj->orbit_pos.x += 2*obj->orbit_dest_dir.x; obj->orbit_pos.y += 2*obj->orbit_dest_dir.y; obj->orbit_pos_vary.x += sin(obj->orbit_tick)*0.2f; obj->orbit_pos_vary.y += cos(obj->orbit_tick)*0.6f; } } int hazard_create(object *obj, scene *scene) { object_set_spawn_cb(obj, hazard_spawn_cb, (void*)scene); object_set_destroy_cb(obj, cb_scene_destroy_object, (void*)scene); object_set_move_cb(obj, hazard_move); object_set_dynamic_tick_cb(obj, hazard_tick); hazard_bootstrap(obj); return 0; } int hazard_serialize(object *obj, serial *ser) { /*DEBUG("serializing hazard");*/ // Specialization serial_write_int8(ser, SPECID_HAZARD); return 0; } int hazard_unserialize(object *obj, serial *ser, int animation_id, game_state *gs) { bk *bk_data = &gs->sc->bk_data; hazard_create(obj, gs->sc); object_set_userdata(obj, bk_data); object_set_stl(obj, bk_data->sound_translation_table); object_set_animation(obj, &bk_get_info(bk_data, animation_id)->ani); return 0; } void hazard_bootstrap(object *obj) { 
    object_set_serialize_cb(obj, hazard_serialize);
    object_set_unserialize_cb(obj, hazard_unserialize);
}
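/*
** Illustrative sketch (not part of OpenOMF): the drift logic used by the
** orbiting hazards above, reduced to plain C.  A random destination at
** least MIN_DIST away is chosen (with the same retry cap of 10 as
** hazard_tick()), the direction is normalised, and the position advances
** 2 units per tick until it comes within ~2 units of the target, at which
** point a new target is picked -- the same shape as hazard_tick() and
** hazard_move(), minus the sinusoidal wobble and the engine types.  The
** names drifter, pick_target and drifter_step are made up for the demo.
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_DIST 80.0f

typedef struct { float x, y; } v2;

typedef struct {
    v2 pos;      /* current position */
    v2 dest;     /* current target */
    v2 dir;      /* unit vector towards dest */
} drifter;

static float frand(void) { return (float)rand() / (float)RAND_MAX; }

static void pick_target(drifter *d) {
    float mag;
    int limit = 10;
    do {
        d->dest.x = frand() * 320.0f;   /* arena is 320x200 in the code above */
        d->dest.y = frand() * 200.0f;
        d->dir.x = d->dest.x - d->pos.x;
        d->dir.y = d->dest.y - d->pos.y;
        mag = sqrtf(d->dir.x * d->dir.x + d->dir.y * d->dir.y);
        limit--;
    } while (mag < MIN_DIST && limit > 0);
    if (mag > 0.0f) {                   /* guard added for the sketch; not in the original */
        d->dir.x /= mag;
        d->dir.y /= mag;
    }
}

static void drifter_step(drifter *d) {
    /* "almost there" test mirrors orb_almost_there(): within 2 units on both axes */
    if (fabsf(d->dest.x - d->pos.x) <= 2.0f && fabsf(d->dest.y - d->pos.y) <= 2.0f) {
        pick_target(d);
    }
    d->pos.x += 2.0f * d->dir.x;
    d->pos.y += 2.0f * d->dir.y;
}

int main(void) {
    drifter d = { {160.0f, 100.0f}, {0, 0}, {0, 0} };
    int t;
    pick_target(&d);
    for (t = 0; t < 200; t++) {
        drifter_step(&d);
        if (t % 50 == 0) {
            printf("tick %3d: pos=(%.1f, %.1f)\n", t, d.pos.x, d.pos.y);
        }
    }
    return 0;
}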
36019.c
/* * fs/f2fs/super.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/statfs.h> #include <linux/buffer_head.h> #include <linux/backing-dev.h> #include <linux/kthread.h> #include <linux/parser.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/random.h> #include <linux/exportfs.h> #include <linux/blkdev.h> #include <linux/f2fs_fs.h> #include <linux/sysfs.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "xattr.h" #include "gc.h" #include "trace.h" #define CREATE_TRACE_POINTS #include <trace/events/f2fs.h> static struct proc_dir_entry *f2fs_proc_root; static struct kmem_cache *f2fs_inode_cachep; static struct kset *f2fs_kset; enum { Opt_gc_background, Opt_disable_roll_forward, Opt_norecovery, Opt_discard, Opt_noheap, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_active_logs, Opt_disable_ext_identify, Opt_inline_xattr, Opt_inline_data, Opt_inline_dentry, Opt_flush_merge, Opt_nobarrier, Opt_fastboot, Opt_extent_cache, Opt_noinline_data, Opt_err, }; static match_table_t f2fs_tokens = { {Opt_gc_background, "background_gc=%s"}, {Opt_disable_roll_forward, "disable_roll_forward"}, {Opt_norecovery, "norecovery"}, {Opt_discard, "discard"}, {Opt_noheap, "no_heap"}, {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_active_logs, "active_logs=%u"}, {Opt_disable_ext_identify, "disable_ext_identify"}, {Opt_inline_xattr, "inline_xattr"}, {Opt_inline_data, "inline_data"}, {Opt_inline_dentry, "inline_dentry"}, {Opt_flush_merge, "flush_merge"}, {Opt_nobarrier, "nobarrier"}, {Opt_fastboot, "fastboot"}, {Opt_extent_cache, "extent_cache"}, {Opt_noinline_data, "noinline_data"}, {Opt_err, NULL}, }; /* Sysfs support for f2fs */ enum { GC_THREAD, /* struct f2fs_gc_thread */ SM_INFO, /* struct f2fs_sm_info */ NM_INFO, /* struct f2fs_nm_info */ F2FS_SBI, /* struct f2fs_sb_info */ }; struct f2fs_attr { struct attribute attr; ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *); ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *, const char *, size_t); int struct_type; int offset; }; static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) { if (struct_type == GC_THREAD) return (unsigned char *)sbi->gc_thread; else if (struct_type == SM_INFO) return (unsigned char *)SM_I(sbi); else if (struct_type == NM_INFO) return (unsigned char *)NM_I(sbi); else if (struct_type == F2FS_SBI) return (unsigned char *)sbi; return NULL; } static ssize_t f2fs_sbi_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { unsigned char *ptr = NULL; unsigned int *ui; ptr = __struct_ptr(sbi, a->struct_type); if (!ptr) return -EINVAL; ui = (unsigned int *)(ptr + a->offset); return snprintf(buf, PAGE_SIZE, "%u\n", *ui); } static ssize_t f2fs_sbi_store(struct f2fs_attr *a, struct f2fs_sb_info *sbi, const char *buf, size_t count) { unsigned char *ptr; unsigned long t; unsigned int *ui; ssize_t ret; ptr = __struct_ptr(sbi, a->struct_type); if (!ptr) return -EINVAL; ui = (unsigned int *)(ptr + a->offset); ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret < 0) return ret; *ui = t; return count; } static ssize_t f2fs_attr_show(struct kobject *kobj, struct 
attribute *attr, char *buf) { struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, s_kobj); struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr); return a->show ? a->show(a, sbi, buf) : 0; } static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, s_kobj); struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr); return a->store ? a->store(a, sbi, buf, len) : 0; } static void f2fs_sb_release(struct kobject *kobj) { struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info, s_kobj); complete(&sbi->s_kobj_unregister); } #define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \ static struct f2fs_attr f2fs_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ .show = _show, \ .store = _store, \ .struct_type = _struct_type, \ .offset = _offset \ } #define F2FS_RW_ATTR(struct_type, struct_name, name, elname) \ F2FS_ATTR_OFFSET(struct_type, name, 0644, \ f2fs_sbi_show, f2fs_sbi_store, \ offsetof(struct struct_name, elname)) F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time); F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time); F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time); F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks); F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level); #define ATTR_LIST(name) (&f2fs_attr_##name.attr) static struct attribute *f2fs_attrs[] = { ATTR_LIST(gc_min_sleep_time), ATTR_LIST(gc_max_sleep_time), ATTR_LIST(gc_no_gc_sleep_time), ATTR_LIST(gc_idle), ATTR_LIST(reclaim_segments), ATTR_LIST(max_small_discards), ATTR_LIST(batched_trim_sections), ATTR_LIST(ipu_policy), ATTR_LIST(min_ipu_util), ATTR_LIST(min_fsync_blocks), ATTR_LIST(max_victim_search), ATTR_LIST(dir_level), ATTR_LIST(ram_thresh), NULL, }; static const struct sysfs_ops f2fs_attr_ops = { .show = f2fs_attr_show, .store = f2fs_attr_store, }; static struct kobj_type f2fs_ktype = { .default_attrs = f2fs_attrs, .sysfs_ops = &f2fs_attr_ops, .release = f2fs_sb_release, }; void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); va_end(args); } static void init_once(void *foo) { struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; inode_init_once(&fi->vfs_inode); } static int parse_options(struct super_block *sb, char *options) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct request_queue *q; substring_t args[MAX_OPT_ARGS]; char *p, *name; int arg = 0; if (!options) return 0; while ((p = strsep(&options, ",")) != NULL) { int token; if (!*p) continue; /* * Initialize args struct so we know whether arg was * found; some options take optional arguments. 
*/ args[0].to = args[0].from = NULL; token = match_token(p, f2fs_tokens, args); switch (token) { case Opt_gc_background: name = match_strdup(&args[0]); if (!name) return -ENOMEM; if (strlen(name) == 2 && !strncmp(name, "on", 2)) set_opt(sbi, BG_GC); else if (strlen(name) == 3 && !strncmp(name, "off", 3)) clear_opt(sbi, BG_GC); else { kfree(name); return -EINVAL; } kfree(name); break; case Opt_disable_roll_forward: set_opt(sbi, DISABLE_ROLL_FORWARD); break; case Opt_norecovery: /* this option mounts f2fs with ro */ set_opt(sbi, DISABLE_ROLL_FORWARD); if (!f2fs_readonly(sb)) return -EINVAL; break; case Opt_discard: q = bdev_get_queue(sb->s_bdev); if (blk_queue_discard(q)) { set_opt(sbi, DISCARD); } else { f2fs_msg(sb, KERN_WARNING, "mounting with \"discard\" option, but " "the device does not support discard"); } break; case Opt_noheap: set_opt(sbi, NOHEAP); break; #ifdef CONFIG_F2FS_FS_XATTR case Opt_user_xattr: set_opt(sbi, XATTR_USER); break; case Opt_nouser_xattr: clear_opt(sbi, XATTR_USER); break; case Opt_inline_xattr: set_opt(sbi, INLINE_XATTR); break; #else case Opt_user_xattr: f2fs_msg(sb, KERN_INFO, "user_xattr options not supported"); break; case Opt_nouser_xattr: f2fs_msg(sb, KERN_INFO, "nouser_xattr options not supported"); break; case Opt_inline_xattr: f2fs_msg(sb, KERN_INFO, "inline_xattr options not supported"); break; #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL case Opt_acl: set_opt(sbi, POSIX_ACL); break; case Opt_noacl: clear_opt(sbi, POSIX_ACL); break; #else case Opt_acl: f2fs_msg(sb, KERN_INFO, "acl options not supported"); break; case Opt_noacl: f2fs_msg(sb, KERN_INFO, "noacl options not supported"); break; #endif case Opt_active_logs: if (args->from && match_int(args, &arg)) return -EINVAL; if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE) return -EINVAL; sbi->active_logs = arg; break; case Opt_disable_ext_identify: set_opt(sbi, DISABLE_EXT_IDENTIFY); break; case Opt_inline_data: set_opt(sbi, INLINE_DATA); break; case Opt_inline_dentry: set_opt(sbi, INLINE_DENTRY); break; case Opt_flush_merge: set_opt(sbi, FLUSH_MERGE); break; case Opt_nobarrier: set_opt(sbi, NOBARRIER); break; case Opt_fastboot: set_opt(sbi, FASTBOOT); break; case Opt_extent_cache: set_opt(sbi, EXTENT_CACHE); break; case Opt_noinline_data: clear_opt(sbi, INLINE_DATA); break; default: f2fs_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" or missing value", p); return -EINVAL; } } return 0; } static struct inode *f2fs_alloc_inode(struct super_block *sb) { struct f2fs_inode_info *fi; fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO); if (!fi) return NULL; init_once((void *) fi); /* Initialize f2fs-specific inode info */ fi->vfs_inode.i_version = 1; atomic_set(&fi->dirty_pages, 0); fi->i_current_depth = 1; fi->i_advise = 0; rwlock_init(&fi->ext_lock); init_rwsem(&fi->i_sem); INIT_RADIX_TREE(&fi->inmem_root, GFP_NOFS); INIT_LIST_HEAD(&fi->inmem_pages); mutex_init(&fi->inmem_lock); set_inode_flag(fi, FI_NEW_INODE); if (test_opt(F2FS_SB(sb), INLINE_XATTR)) set_inode_flag(fi, FI_INLINE_XATTR); /* Will be used by directory only */ fi->i_dir_level = F2FS_SB(sb)->dir_level; #ifdef CONFIG_F2FS_FS_ENCRYPTION fi->i_crypt_info = NULL; #endif return &fi->vfs_inode; } static int f2fs_drop_inode(struct inode *inode) { /* * This is to avoid a deadlock condition like below. 
* writeback_single_inode(inode) * - f2fs_write_data_page * - f2fs_gc -> iput -> evict * - inode_wait_for_writeback(inode) */ if (!inode_unhashed(inode) && inode->i_state & I_SYNC) { if (!inode->i_nlink && !is_bad_inode(inode)) { spin_unlock(&inode->i_lock); /* some remained atomic pages should discarded */ if (f2fs_is_atomic_file(inode)) commit_inmem_pages(inode, true); sb_start_intwrite(inode->i_sb); i_size_write(inode, 0); if (F2FS_HAS_BLOCKS(inode)) f2fs_truncate(inode); sb_end_intwrite(inode->i_sb); #ifdef CONFIG_F2FS_FS_ENCRYPTION if (F2FS_I(inode)->i_crypt_info) f2fs_free_encryption_info(inode, F2FS_I(inode)->i_crypt_info); #endif spin_lock(&inode->i_lock); } return 0; } return generic_drop_inode(inode); } /* * f2fs_dirty_inode() is called from __mark_inode_dirty() * * We should call set_dirty_inode to write the dirty inode through write_inode. */ static void f2fs_dirty_inode(struct inode *inode, int flags) { set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE); } static void f2fs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode)); } static void f2fs_destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, f2fs_i_callback); } static void f2fs_put_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); if (sbi->s_proc) { remove_proc_entry("segment_info", sbi->s_proc); remove_proc_entry(sb->s_id, f2fs_proc_root); } kobject_del(&sbi->s_kobj); f2fs_destroy_stats(sbi); stop_gc_thread(sbi); /* * We don't need to do checkpoint when superblock is clean. * But, the previous checkpoint was not done by umount, it needs to do * clean checkpoint again. */ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) { struct cp_control cpc = { .reason = CP_UMOUNT, }; write_checkpoint(sbi, &cpc); } /* * normally superblock is clean, so we need to release this. * In addition, EIO will skip do checkpoint, we need this as well. 
*/ release_dirty_inode(sbi); release_discard_addrs(sbi); iput(sbi->node_inode); iput(sbi->meta_inode); /* destroy f2fs internal modules */ destroy_node_manager(sbi); destroy_segment_manager(sbi); kfree(sbi->ckpt); kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); sb->s_fs_info = NULL; brelse(sbi->raw_super_buf); kfree(sbi); } int f2fs_sync_fs(struct super_block *sb, int sync) { struct f2fs_sb_info *sbi = F2FS_SB(sb); trace_f2fs_sync_fs(sb, sync); if (sync) { struct cp_control cpc; cpc.reason = __get_cp_reason(sbi); mutex_lock(&sbi->gc_mutex); write_checkpoint(sbi, &cpc); mutex_unlock(&sbi->gc_mutex); } else { f2fs_balance_fs(sbi); } f2fs_trace_ios(NULL, 1); return 0; } static int f2fs_freeze(struct super_block *sb) { int err; if (f2fs_readonly(sb)) return 0; err = f2fs_sync_fs(sb, 1); return err; } static int f2fs_unfreeze(struct super_block *sb) { return 0; } static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct f2fs_sb_info *sbi = F2FS_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); block_t total_count, user_block_count, start_count, ovp_count; total_count = le64_to_cpu(sbi->raw_super->block_count); user_block_count = sbi->user_block_count; start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; buf->f_type = F2FS_SUPER_MAGIC; buf->f_bsize = sbi->blocksize; buf->f_blocks = total_count - start_count; buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count; buf->f_bavail = user_block_count - valid_user_blocks(sbi); buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; buf->f_ffree = buf->f_files - valid_inode_count(sbi); buf->f_namelen = F2FS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } static int f2fs_show_options(struct seq_file *seq, struct dentry *root) { struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) seq_printf(seq, ",background_gc=%s", "on"); else seq_printf(seq, ",background_gc=%s", "off"); if (test_opt(sbi, DISABLE_ROLL_FORWARD)) seq_puts(seq, ",disable_roll_forward"); if (test_opt(sbi, DISCARD)) seq_puts(seq, ",discard"); if (test_opt(sbi, NOHEAP)) seq_puts(seq, ",no_heap_alloc"); #ifdef CONFIG_F2FS_FS_XATTR if (test_opt(sbi, XATTR_USER)) seq_puts(seq, ",user_xattr"); else seq_puts(seq, ",nouser_xattr"); if (test_opt(sbi, INLINE_XATTR)) seq_puts(seq, ",inline_xattr"); #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL if (test_opt(sbi, POSIX_ACL)) seq_puts(seq, ",acl"); else seq_puts(seq, ",noacl"); #endif if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) seq_puts(seq, ",disable_ext_identify"); if (test_opt(sbi, INLINE_DATA)) seq_puts(seq, ",inline_data"); else seq_puts(seq, ",noinline_data"); if (test_opt(sbi, INLINE_DENTRY)) seq_puts(seq, ",inline_dentry"); if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE)) seq_puts(seq, ",flush_merge"); if (test_opt(sbi, NOBARRIER)) seq_puts(seq, ",nobarrier"); if (test_opt(sbi, FASTBOOT)) seq_puts(seq, ",fastboot"); if (test_opt(sbi, EXTENT_CACHE)) seq_puts(seq, ",extent_cache"); seq_printf(seq, ",active_logs=%u", sbi->active_logs); return 0; } static int segment_info_seq_show(struct seq_file *seq, void *offset) { struct super_block *sb = seq->private; struct f2fs_sb_info *sbi = F2FS_SB(sb); unsigned int total_segs = le32_to_cpu(sbi->raw_super->segment_count_main); int i; seq_puts(seq, "format: segment_type|valid_blocks\n" "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n"); for (i 
= 0; i < total_segs; i++) { struct seg_entry *se = get_seg_entry(sbi, i); if ((i % 10) == 0) seq_printf(seq, "%-5d", i); seq_printf(seq, "%d|%-3u", se->type, get_valid_blocks(sbi, i, 1)); if ((i % 10) == 9 || i == (total_segs - 1)) seq_putc(seq, '\n'); else seq_putc(seq, ' '); } return 0; } static int segment_info_open_fs(struct inode *inode, struct file *file) { return single_open(file, segment_info_seq_show, PDE_DATA(inode)); } static const struct file_operations f2fs_seq_segment_info_fops = { .owner = THIS_MODULE, .open = segment_info_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void default_options(struct f2fs_sb_info *sbi) { /* init some FS parameters */ sbi->active_logs = NR_CURSEG_TYPE; set_opt(sbi, BG_GC); set_opt(sbi, INLINE_DATA); #ifdef CONFIG_F2FS_FS_XATTR set_opt(sbi, XATTR_USER); #endif #ifdef CONFIG_F2FS_FS_POSIX_ACL set_opt(sbi, POSIX_ACL); #endif } static int f2fs_remount(struct super_block *sb, int *flags, char *data) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct f2fs_mount_info org_mount_opt; int err, active_logs; bool need_restart_gc = false; bool need_stop_gc = false; sync_filesystem(sb); /* * Save the old mount options in case we * need to restore them. */ org_mount_opt = sbi->mount_opt; active_logs = sbi->active_logs; sbi->mount_opt.opt = 0; default_options(sbi); /* parse mount options */ err = parse_options(sb, data); if (err) goto restore_opts; /* * Previous and new state of filesystem is RO, * so skip checking GC and FLUSH_MERGE conditions. */ if (f2fs_readonly(sb) && (*flags & MS_RDONLY)) goto skip; /* * We stop the GC thread if FS is mounted as RO * or if background_gc = off is passed in mount * option. Also sync the filesystem. */ if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) { if (sbi->gc_thread) { stop_gc_thread(sbi); f2fs_sync_fs(sb, 1); need_restart_gc = true; } } else if (!sbi->gc_thread) { err = start_gc_thread(sbi); if (err) goto restore_opts; need_stop_gc = true; } /* * We stop issue flush thread if FS is mounted as RO * or if flush_merge is not passed in mount option. */ if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { destroy_flush_cmd_control(sbi); } else if (!SM_I(sbi)->cmd_control_info) { err = create_flush_cmd_control(sbi); if (err) goto restore_gc; } skip: /* Update the POSIXACL Flag */ sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); return 0; restore_gc: if (need_restart_gc) { if (start_gc_thread(sbi)) f2fs_msg(sbi->sb, KERN_WARNING, "background gc thread has stopped"); } else if (need_stop_gc) { stop_gc_thread(sbi); } restore_opts: sbi->mount_opt = org_mount_opt; sbi->active_logs = active_logs; return err; } static struct super_operations f2fs_sops = { .alloc_inode = f2fs_alloc_inode, .drop_inode = f2fs_drop_inode, .destroy_inode = f2fs_destroy_inode, .write_inode = f2fs_write_inode, .dirty_inode = f2fs_dirty_inode, .show_options = f2fs_show_options, .evict_inode = f2fs_evict_inode, .put_super = f2fs_put_super, .sync_fs = f2fs_sync_fs, .freeze_fs = f2fs_freeze, .unfreeze_fs = f2fs_unfreeze, .statfs = f2fs_statfs, .remount_fs = f2fs_remount, }; static struct inode *f2fs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct f2fs_sb_info *sbi = F2FS_SB(sb); struct inode *inode; if (check_nid_range(sbi, ino)) return ERR_PTR(-ESTALE); /* * f2fs_iget isn't quite right if the inode is currently unallocated! * However f2fs_iget currently does appropriate checks to handle stale * inodes so everything is OK. 
*/ inode = f2fs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); if (unlikely(generation && inode->i_generation != generation)) { /* we didn't find the right inode.. */ iput(inode); return ERR_PTR(-ESTALE); } return inode; } static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, f2fs_nfs_get_inode); } static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, f2fs_nfs_get_inode); } static const struct export_operations f2fs_export_ops = { .fh_to_dentry = f2fs_fh_to_dentry, .fh_to_parent = f2fs_fh_to_parent, .get_parent = f2fs_get_parent, }; static loff_t max_file_size(unsigned bits) { loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS); loff_t leaf_count = ADDRS_PER_BLOCK; /* two direct node blocks */ result += (leaf_count * 2); /* two indirect node blocks */ leaf_count *= NIDS_PER_BLOCK; result += (leaf_count * 2); /* one double indirect node block */ leaf_count *= NIDS_PER_BLOCK; result += leaf_count; result <<= bits; return result; } static int sanity_check_raw_super(struct super_block *sb, struct f2fs_super_block *raw_super) { unsigned int blocksize; if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { f2fs_msg(sb, KERN_INFO, "Magic Mismatch, valid(0x%x) - read(0x%x)", F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic)); return 1; } /* Currently, support only 4KB page cache size */ if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { f2fs_msg(sb, KERN_INFO, "Invalid page_cache_size (%lu), supports only 4KB\n", PAGE_CACHE_SIZE); return 1; } /* Currently, support only 4KB block size */ blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); if (blocksize != F2FS_BLKSIZE) { f2fs_msg(sb, KERN_INFO, "Invalid blocksize (%u), supports only 4KB\n", blocksize); return 1; } /* Currently, support 512/1024/2048/4096 bytes sector size */ if (le32_to_cpu(raw_super->log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE || le32_to_cpu(raw_super->log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE) { f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)", le32_to_cpu(raw_super->log_sectorsize)); return 1; } if (le32_to_cpu(raw_super->log_sectors_per_block) + le32_to_cpu(raw_super->log_sectorsize) != F2FS_MAX_LOG_SECTOR_SIZE) { f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block(%u) log sectorsize(%u)", le32_to_cpu(raw_super->log_sectors_per_block), le32_to_cpu(raw_super->log_sectorsize)); return 1; } return 0; } static int sanity_check_ckpt(struct f2fs_sb_info *sbi) { unsigned int total, fsmeta; struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); total = le32_to_cpu(raw_super->segment_count); fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); fsmeta += le32_to_cpu(raw_super->segment_count_sit); fsmeta += le32_to_cpu(raw_super->segment_count_nat); fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); fsmeta += le32_to_cpu(raw_super->segment_count_ssa); if (unlikely(fsmeta >= total)) return 1; if (unlikely(f2fs_cp_error(sbi))) { f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); return 1; } return 0; } static void init_sb_info(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = sbi->raw_super; int i; sbi->log_sectors_per_block = le32_to_cpu(raw_super->log_sectors_per_block); sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); sbi->blocksize = 1 << sbi->log_blocksize; sbi->log_blocks_per_seg = 
le32_to_cpu(raw_super->log_blocks_per_seg); sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg; sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); sbi->total_sections = le32_to_cpu(raw_super->section_count); sbi->total_node_count = (le32_to_cpu(raw_super->segment_count_nat) / 2) * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; sbi->root_ino_num = le32_to_cpu(raw_super->root_ino); sbi->node_ino_num = le32_to_cpu(raw_super->node_ino); sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino); sbi->cur_victim_sec = NULL_SECNO; sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; for (i = 0; i < NR_COUNT_TYPE; i++) atomic_set(&sbi->nr_pages[i], 0); sbi->dir_level = DEF_DIR_LEVEL; clear_sbi_flag(sbi, SBI_NEED_FSCK); } /* * Read f2fs raw super block. * Because we have two copies of super block, so read the first one at first, * if the first one is invalid, move to read the second one. */ static int read_raw_super_block(struct super_block *sb, struct f2fs_super_block **raw_super, struct buffer_head **raw_super_buf, int *recovery) { int block = 0; struct buffer_head *buffer; struct f2fs_super_block *super; int err = 0; retry: buffer = sb_bread(sb, block); if (!buffer) { *recovery = 1; f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock", block + 1); if (block == 0) { block++; goto retry; } else { err = -EIO; goto out; } } super = (struct f2fs_super_block *) ((char *)(buffer)->b_data + F2FS_SUPER_OFFSET); /* sanity checking of raw super */ if (sanity_check_raw_super(sb, super)) { brelse(buffer); *recovery = 1; f2fs_msg(sb, KERN_ERR, "Can't find valid F2FS filesystem in %dth superblock", block + 1); if (block == 0) { block++; goto retry; } else { err = -EINVAL; goto out; } } if (!*raw_super) { *raw_super_buf = buffer; *raw_super = super; } else { /* already have a valid superblock */ brelse(buffer); } /* check the validity of the second superblock */ if (block == 0) { block++; goto retry; } out: /* No valid superblock */ if (!*raw_super) return err; return 0; } int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) { struct buffer_head *sbh = sbi->raw_super_buf; sector_t block = sbh->b_blocknr; int err; /* write back-up superblock first */ sbh->b_blocknr = block ? 
0 : 1; mark_buffer_dirty(sbh); err = sync_dirty_buffer(sbh); sbh->b_blocknr = block; /* if we are in recovery path, skip writing valid superblock */ if (recover || err) goto out; /* write current valid superblock */ mark_buffer_dirty(sbh); err = sync_dirty_buffer(sbh); out: clear_buffer_write_io_error(sbh); set_buffer_uptodate(sbh); return err; } static int f2fs_fill_super(struct super_block *sb, void *data, int silent) { struct f2fs_sb_info *sbi; struct f2fs_super_block *raw_super; struct buffer_head *raw_super_buf; struct inode *root; long err; bool retry = true, need_fsck = false; char *options = NULL; int recovery, i; try_onemore: err = -EINVAL; raw_super = NULL; raw_super_buf = NULL; recovery = 0; /* allocate memory for f2fs-specific super block info */ sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; /* set a block size */ if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); goto free_sbi; } err = read_raw_super_block(sb, &raw_super, &raw_super_buf, &recovery); if (err) goto free_sbi; sb->s_fs_info = sbi; default_options(sbi); /* parse mount options */ options = kstrdup((const char *)data, GFP_KERNEL); if (data && !options) { err = -ENOMEM; goto free_sb_buf; } err = parse_options(sb, options); if (err) goto free_options; sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); sb->s_max_links = F2FS_LINK_MAX; get_random_bytes(&sbi->s_next_generation, sizeof(u32)); sb->s_op = &f2fs_sops; sb->s_xattr = f2fs_xattr_handlers; sb->s_export_op = &f2fs_export_ops; sb->s_magic = F2FS_SUPER_MAGIC; sb->s_time_gran = 1; sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); /* init f2fs-specific super block info */ sbi->sb = sb; sbi->raw_super = raw_super; sbi->raw_super_buf = raw_super_buf; mutex_init(&sbi->gc_mutex); mutex_init(&sbi->writepages); mutex_init(&sbi->cp_mutex); init_rwsem(&sbi->node_write); clear_sbi_flag(sbi, SBI_POR_DOING); spin_lock_init(&sbi->stat_lock); init_rwsem(&sbi->read_io.io_rwsem); sbi->read_io.sbi = sbi; sbi->read_io.bio = NULL; for (i = 0; i < NR_PAGE_TYPE; i++) { init_rwsem(&sbi->write_io[i].io_rwsem); sbi->write_io[i].sbi = sbi; sbi->write_io[i].bio = NULL; } init_rwsem(&sbi->cp_rwsem); init_waitqueue_head(&sbi->cp_wait); init_sb_info(sbi); /* get an inode for meta space */ sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); if (IS_ERR(sbi->meta_inode)) { f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); err = PTR_ERR(sbi->meta_inode); goto free_options; } err = get_valid_checkpoint(sbi); if (err) { f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); goto free_meta_inode; } /* sanity checking of checkpoint */ err = -EINVAL; if (sanity_check_ckpt(sbi)) { f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint"); goto free_cp; } sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count); sbi->total_valid_inode_count = le32_to_cpu(sbi->ckpt->valid_inode_count); sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); sbi->total_valid_block_count = le64_to_cpu(sbi->ckpt->valid_block_count); sbi->last_valid_block_count = sbi->total_valid_block_count; sbi->alloc_valid_block_count = 0; INIT_LIST_HEAD(&sbi->dir_inode_list); spin_lock_init(&sbi->dir_inode_lock); init_extent_cache_info(sbi); init_ino_entry_info(sbi); /* setup f2fs internal modules */ err = build_segment_manager(sbi); if (err) { f2fs_msg(sb, KERN_ERR, "Failed to initialize F2FS 
segment manager"); goto free_sm; } err = build_node_manager(sbi); if (err) { f2fs_msg(sb, KERN_ERR, "Failed to initialize F2FS node manager"); goto free_nm; } build_gc_manager(sbi); /* get an inode for node space */ sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); if (IS_ERR(sbi->node_inode)) { f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); err = PTR_ERR(sbi->node_inode); goto free_nm; } /* if there are nt orphan nodes free them */ recover_orphan_inodes(sbi); /* read root inode and dentry */ root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); if (IS_ERR(root)) { f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); err = PTR_ERR(root); goto free_node_inode; } if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { iput(root); err = -EINVAL; goto free_node_inode; } sb->s_root = d_make_root(root); /* allocate root dentry */ if (!sb->s_root) { err = -ENOMEM; goto free_root_inode; } err = f2fs_build_stats(sbi); if (err) goto free_root_inode; if (f2fs_proc_root) sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root); if (sbi->s_proc) proc_create_data("segment_info", S_IRUGO, sbi->s_proc, &f2fs_seq_segment_info_fops, sb); sbi->s_kobj.kset = f2fs_kset; init_completion(&sbi->s_kobj_unregister); err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL, "%s", sb->s_id); if (err) goto free_proc; /* recover fsynced data */ if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { /* * mount should be failed, when device has readonly mode, and * previous checkpoint was not done by clean system shutdown. */ if (bdev_read_only(sb->s_bdev) && !is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) { err = -EROFS; goto free_kobj; } if (need_fsck) set_sbi_flag(sbi, SBI_NEED_FSCK); err = recover_fsync_data(sbi); if (err) { need_fsck = true; f2fs_msg(sb, KERN_ERR, "Cannot recover all fsync data errno=%ld", err); goto free_kobj; } } /* * If filesystem is not mounted as read-only then * do start the gc_thread. 
*/ if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) { /* After POR, we can run background GC thread.*/ err = start_gc_thread(sbi); if (err) goto free_kobj; } kfree(options); /* recover broken superblock */ if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) { f2fs_msg(sb, KERN_INFO, "Recover invalid superblock"); f2fs_commit_super(sbi, true); } return 0; free_kobj: kobject_del(&sbi->s_kobj); free_proc: if (sbi->s_proc) { remove_proc_entry("segment_info", sbi->s_proc); remove_proc_entry(sb->s_id, f2fs_proc_root); } f2fs_destroy_stats(sbi); free_root_inode: dput(sb->s_root); sb->s_root = NULL; free_node_inode: iput(sbi->node_inode); free_nm: destroy_node_manager(sbi); free_sm: destroy_segment_manager(sbi); free_cp: kfree(sbi->ckpt); free_meta_inode: make_bad_inode(sbi->meta_inode); iput(sbi->meta_inode); free_options: kfree(options); free_sb_buf: brelse(raw_super_buf); free_sbi: kfree(sbi); /* give only one another chance */ if (retry) { retry = false; shrink_dcache_sb(sb); goto try_onemore; } return err; } static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super); } static void kill_f2fs_super(struct super_block *sb) { if (sb->s_root) set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE); kill_block_super(sb); } static struct file_system_type f2fs_fs_type = { .owner = THIS_MODULE, .name = "f2fs", .mount = f2fs_mount, .kill_sb = kill_f2fs_super, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("f2fs"); static int __init init_inodecache(void) { f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", sizeof(struct f2fs_inode_info)); if (!f2fs_inode_cachep) return -ENOMEM; return 0; } static void destroy_inodecache(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(f2fs_inode_cachep); } static int __init init_f2fs_fs(void) { int err; f2fs_build_trace_ios(); err = init_inodecache(); if (err) goto fail; err = create_node_manager_caches(); if (err) goto free_inodecache; err = create_segment_manager_caches(); if (err) goto free_node_manager_caches; err = create_checkpoint_caches(); if (err) goto free_segment_manager_caches; err = create_extent_cache(); if (err) goto free_checkpoint_caches; f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj); if (!f2fs_kset) { err = -ENOMEM; goto free_extent_cache; } err = f2fs_init_crypto(); if (err) goto free_kset; err = register_filesystem(&f2fs_fs_type); if (err) goto free_crypto; f2fs_create_root_stats(); f2fs_proc_root = proc_mkdir("fs/f2fs", NULL); return 0; free_crypto: f2fs_exit_crypto(); free_kset: kset_unregister(f2fs_kset); free_extent_cache: destroy_extent_cache(); free_checkpoint_caches: destroy_checkpoint_caches(); free_segment_manager_caches: destroy_segment_manager_caches(); free_node_manager_caches: destroy_node_manager_caches(); free_inodecache: destroy_inodecache(); fail: return err; } static void __exit exit_f2fs_fs(void) { remove_proc_entry("fs/f2fs", NULL); f2fs_destroy_root_stats(); unregister_filesystem(&f2fs_fs_type); f2fs_exit_crypto(); destroy_extent_cache(); destroy_checkpoint_caches(); destroy_segment_manager_caches(); destroy_node_manager_caches(); destroy_inodecache(); kset_unregister(f2fs_kset); f2fs_destroy_trace_ios(); } module_init(init_f2fs_fs) module_exit(exit_f2fs_fs) MODULE_AUTHOR("Samsung Electronics's Praesto Team"); MODULE_DESCRIPTION("Flash Friendly File System"); MODULE_LICENSE("GPL");
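/*
 * A minimal, self-contained userspace sketch of the dual-superblock pattern
 * used above by read_raw_super_block() and f2fs_commit_super(): read copy 0
 * first, fall back to copy 1 if it fails validation, and, when committing,
 * write the backup copy before the currently valid one.  The struct layout,
 * the is_valid() check and the in-memory "disk" are illustrative assumptions,
 * not f2fs code, and the sketch skips the extra step where the real code also
 * validates the second copy even when the first one is good.
 */
#include <stdio.h>

#define BLKSIZE 4096
#define MAGIC   0xF2F52010u   /* F2FS_SUPER_MAGIC */

struct raw_sb { unsigned magic; unsigned gen; char pad[BLKSIZE - 8]; };

static struct raw_sb disk[2];                 /* the two on-"disk" copies */

static int is_valid(const struct raw_sb *sb)  /* stand-in sanity check */
{
    return sb->magic == MAGIC;
}

/* Read copy 0; if it is invalid, retry with copy 1 and note the damage. */
static int read_raw_sb(struct raw_sb *out, int *recovery)
{
    for (int block = 0; block < 2; block++) {
        if (is_valid(&disk[block])) {
            *out = disk[block];
            return 0;
        }
        *recovery = 1;        /* at least one copy needs to be rewritten */
    }
    return -1;                /* no valid superblock at all */
}

/* Write the backup copy first, then the primary one. */
static void commit_sb(const struct raw_sb *sb)
{
    disk[1] = *sb;
    disk[0] = *sb;
}

int main(void)
{
    struct raw_sb sb = { .magic = MAGIC, .gen = 1 };
    int recovery = 0;

    commit_sb(&sb);
    disk[0].magic = 0;                        /* corrupt the primary copy */

    if (read_raw_sb(&sb, &recovery) == 0 && recovery)
        commit_sb(&sb);                       /* recover the broken copy */

    printf("primary copy valid again: %s\n", is_valid(&disk[0]) ? "yes" : "no");
    return 0;
}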
269024.c
#include <stdio.h>
#include <stdlib.h>

const int MAX_LINE = 11;

int main()
{
    int prev_depth;
    int count = 0;
    char input[MAX_LINE];

    /* Bail out if there is no first reading at all, so we never
     * call atoi() on an uninitialized buffer. */
    if (!fgets(input, MAX_LINE, stdin))
        return 0;
    prev_depth = atoi(input);

    while (fgets(input, MAX_LINE, stdin)) {
        int curr_depth = atoi(input);
        if (curr_depth > prev_depth)
            count++;
        prev_depth = curr_depth;
    }

    printf("%d\n", count);
    return 0;
}
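/*
 * Worked example for the increase counter above (the concrete depths are an
 * illustrative sample, not part of the original program).  Seven of the ten
 * readings below are strictly larger than their predecessor, so the program
 * prints 7:
 *
 *   199 200 208 210 200 207 240 269 260 263   ->   7
 *
 * e.g.  printf '199\n200\n208\n210\n200\n207\n240\n269\n260\n263\n' | ./a.out
 */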
812599.c
/*
 * Classifies a poker hand
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <ctype.h>

#define NUM_RANKS 13
#define NUM_SUITS 4
#define NUM_CARDS 5

static int num_in_rank[NUM_RANKS];
static int num_in_suit[NUM_SUITS];
static bool flush, straight, four, three;
static int pairs;

/* read_cards
 *
 * Reads the cards into external variables;
 * checks for bad cards and duplicate cards;
 */
static void read_cards(void);

/* analyze_hand
 *
 * Determines whether the hand contains a straight,
 * a flush, four-of-a-kind, three-of-a-kind;
 * Determines the number of pairs
 */
static void analyze_hand(void);

/* print_result
 *
 * Notifies the user of the result
 * using the external variables set by
 * analyze_hand
 */
static void print_result(void);

int main(void)
{
    for ( ; ; ) {
        read_cards();
        analyze_hand();
        print_result();
    }
    return 0;
}

void read_cards(void)
{
    int rank, suit;
    int cards_read = 0;
    char ch, rank_ch, suit_ch;
    bool bad_card;
    bool card_exists[NUM_RANKS][NUM_SUITS];

    printf("\nPoker begins. Specify your hand:\n");
    printf("5s for five spades, tc for ten clubs.\n");
    printf("0 - to exit.\n");

    // Clear/redefine arrays
    for (rank = 0; rank < NUM_RANKS; rank++) {
        num_in_rank[rank] = 0;
        for (suit = 0; suit < NUM_SUITS; suit++)
            card_exists[rank][suit] = false;
    }
    for (suit = 0; suit < NUM_SUITS; suit++)
        num_in_suit[suit] = 0;

    while (cards_read < NUM_CARDS) {
        bad_card = false;

        printf("Enter a card: ");

        scanf(" %c", &rank_ch);
        switch(tolower(rank_ch)) {
            case '0': exit(EXIT_SUCCESS);
            case '2': rank = 0; break;
            case '3': rank = 1; break;
            case '4': rank = 2; break;
            case '5': rank = 3; break;
            case '6': rank = 4; break;
            case '7': rank = 5; break;
            case '8': rank = 6; break;
            case '9': rank = 7; break;
            case 't': rank = 8; break;
            case 'j': rank = 9; break;
            case 'q': rank = 10; break;
            case 'k': rank = 11; break;
            case 'a': rank = 12; break;
            default: bad_card = true; break;
        }

        scanf(" %c", &suit_ch);
        switch(tolower(suit_ch)) {
            case 'c': suit = 0; break;
            case 'd': suit = 1; break;
            case 'h': suit = 2; break;
            case 's': suit = 3; break;
            default: bad_card = true; break;   /* an unknown suit is a bad card too */
        }

        while ((ch = getchar()) != '\n')
            if (ch != ' ')
                bad_card = true;

        if (bad_card)
            printf("Bad card; Ignored.\n");
        else if (card_exists[rank][suit])
            printf("Duplicate card; Ignored.\n");
        else {
            card_exists[rank][suit] = true;
            num_in_rank[rank]++;
            num_in_suit[suit]++;
            cards_read++;
        }
    }
}

void analyze_hand(void)
{
    int rank, suit;
    int num_conseq = 0;

    pairs = 0;
    flush = straight = four = three = false;

    // Find out if it's a flush
    for (suit = 0; suit < NUM_SUITS; suit++)
        if (num_in_suit[suit] == NUM_CARDS)
            flush = true;

    // Find out if it's a straight
    // Skip until number of cards in the rank is not 0
    for (rank = 0; num_in_rank[rank] == 0; rank++)
        ;
    // Find how many consecutive ranks are non-empty
    for ( ; rank < NUM_RANKS && num_in_rank[rank] > 0; num_conseq++, rank++)
        ;
    if (num_conseq == NUM_CARDS) {
        straight = true;
        return;
    }

    // Count how many cards share the same rank
    for (rank = 0; rank < NUM_RANKS; rank++)
        switch(num_in_rank[rank]) {
            case 4: four = true; break;
            case 3: three = true; break;
            case 2: pairs++; break;
        }
}

void print_result(void)
{
    if (straight && flush)
        printf("Straight flush");
    else if (four)
        printf("four-of-a-kind");
    else if (three && pairs == 1)
        printf("full house");
    else if (flush)
        printf("flush");
    else if (straight)
        printf("straight");
    else if (three)
        printf("three-of-a-kind");
    else if (pairs == 2)
        printf("two pairs");
    else if (pairs == 1)
        printf("pair");
    else
        printf("high card");

    printf("\n\n");
}
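/*
 * Worked example for the classifier above, shown as one interactive round
 * with the banner lines omitted (the concrete cards are illustrative, not
 * part of the original program).  The hand 2h 3h 4h 5h 6h gives five hearts
 * (a flush) and five consecutive ranks (a straight), so print_result()
 * reports a straight flush:
 *
 *   Enter a card: 2h
 *   Enter a card: 3h
 *   Enter a card: 4h
 *   Enter a card: 5h
 *   Enter a card: 6h
 *   Straight flush
 *
 * An entry with an unknown suit letter, e.g. "5x", is treated as a bad card
 * and answered with "Bad card; Ignored.", just like an unknown rank.
 */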
120203.c
/******************************************************************************* * * Module Name: dbcmds - Miscellaneous debug commands and output routines * ******************************************************************************/ /****************************************************************************** * * 1. Copyright Notice * * Some or all of this work - Copyright (c) 1999 - 2021, Intel Corp. * All rights reserved. * * 2. License * * 2.1. This is your license from Intel Corp. under its intellectual property * rights. You may have additional license terms from the party that provided * you this software, covering your right to use that party's intellectual * property rights. * * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a * copy of the source code appearing in this file ("Covered Code") an * irrevocable, perpetual, worldwide license under Intel's copyrights in the * base code distributed originally by Intel ("Original Intel Code") to copy, * make derivatives, distribute, use and display any portion of the Covered * Code in any form, with the right to sublicense such rights; and * * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent * license (with the right to sublicense), under only those claims of Intel * patents that are infringed by the Original Intel Code, to make, use, sell, * offer to sell, and import the Covered Code and derivative works thereof * solely to the minimum extent necessary to exercise the above copyright * license, and in no event shall the patent license extend to any additions * to or modifications of the Original Intel Code. No other license or right * is granted directly or by implication, estoppel or otherwise; * * The above copyright and patent license is granted only if the following * conditions are met: * * 3. Conditions * * 3.1. Redistribution of Source with Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification with rights to further distribute source must include * the above Copyright Notice, the above License, this list of Conditions, * and the following Disclaimer and Export Compliance provision. In addition, * Licensee must cause all Covered Code to which Licensee contributes to * contain a file documenting the changes Licensee made to create that Covered * Code and the date of any change. Licensee must include in that file the * documentation of any changes made by any predecessor Licensee. Licensee * must include a prominent statement that the modification is derived, * directly or indirectly, from Original Intel Code. * * 3.2. Redistribution of Source with no Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification without rights to further distribute source must * include the following Disclaimer and Export Compliance provision in the * documentation and/or other materials provided with distribution. In * addition, Licensee may not authorize further sublicense of source of any * portion of the Covered Code, and must include terms to the effect that the * license from Licensee to its licensee is limited to the intellectual * property embodied in the software Licensee provides to its licensee, and * not to intellectual property embodied in modifications its licensee may * make. * * 3.3. Redistribution of Executable. 
Redistribution in executable form of any * substantial portion of the Covered Code or modification must reproduce the * above Copyright Notice, and the following Disclaimer and Export Compliance * provision in the documentation and/or other materials provided with the * distribution. * * 3.4. Intel retains all right, title, and interest in and to the Original * Intel Code. * * 3.5. Neither the name Intel nor any other trademark owned or controlled by * Intel shall be used in advertising or otherwise to promote the sale, use or * other dealings in products derived from or relating to the Covered Code * without prior written authorization from Intel. * * 4. Disclaimer and Export Compliance * * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A * PARTICULAR PURPOSE. * * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY * LIMITED REMEDY. * * 4.3. Licensee shall not export, either directly or indirectly, any of this * software or system incorporating such software without first obtaining any * required license or other approval from the U. S. Department of Commerce or * any other agency or department of the United States Government. In the * event Licensee exports any such software from the United States or * re-exports any such software from a foreign destination, Licensee shall * ensure that the distribution and export/re-export of the software is in * compliance with all laws, regulations, orders, or other restrictions of the * U.S. Export Administration Regulations. Licensee agrees that neither it nor * any of its subsidiaries will export/re-export any technical data, process, * software, or service, directly or indirectly, to any country for which the * United States government or any agency thereof requires an export license, * other governmental approval, or letter of assurance, without first obtaining * such license, approval or letter. * ***************************************************************************** * * Alternatively, you may choose to be licensed under the terms of the * following license: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. 
Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Alternatively, you may choose to be licensed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * *****************************************************************************/ #include "acpi.h" #include "accommon.h" #include "acevents.h" #include "acdebug.h" #include "acnamesp.h" #include "acresrc.h" #include "actables.h" #ifdef ACPI_DEBUGGER #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME ("dbcmds") /* Local prototypes */ static void AcpiDmCompareAmlResources ( UINT8 *Aml1Buffer, ACPI_RSDESC_SIZE Aml1BufferLength, UINT8 *Aml2Buffer, ACPI_RSDESC_SIZE Aml2BufferLength); static ACPI_STATUS AcpiDmTestResourceConversion ( ACPI_NAMESPACE_NODE *Node, char *Name); static ACPI_STATUS AcpiDbResourceCallback ( ACPI_RESOURCE *Resource, void *Context); static ACPI_STATUS AcpiDbDeviceResources ( ACPI_HANDLE ObjHandle, UINT32 NestingLevel, void *Context, void **ReturnValue); static void AcpiDbDoOneSleepState ( UINT8 SleepState); static char *AcpiDbTraceMethodName = NULL; /******************************************************************************* * * FUNCTION: AcpiDbConvertToNode * * PARAMETERS: InString - String to convert * * RETURN: Pointer to a NS node * * DESCRIPTION: Convert a string to a valid NS pointer. Handles numeric or * alphanumeric strings. * ******************************************************************************/ ACPI_NAMESPACE_NODE * AcpiDbConvertToNode ( char *InString) { ACPI_NAMESPACE_NODE *Node; ACPI_SIZE Address; if ((*InString >= 0x30) && (*InString <= 0x39)) { /* Numeric argument, convert */ Address = strtoul (InString, NULL, 16); Node = ACPI_TO_POINTER (Address); if (!AcpiOsReadable (Node, sizeof (ACPI_NAMESPACE_NODE))) { AcpiOsPrintf ("Address %p is invalid", Node); return (NULL); } /* Make sure pointer is valid NS node */ if (ACPI_GET_DESCRIPTOR_TYPE (Node) != ACPI_DESC_TYPE_NAMED) { AcpiOsPrintf ("Address %p is not a valid namespace node [%s]\n", Node, AcpiUtGetDescriptorName (Node)); return (NULL); } } else { /* * Alpha argument: The parameter is a name string that must be * resolved to a Namespace object. */ Node = AcpiDbLocalNsLookup (InString); if (!Node) { AcpiOsPrintf ( "Could not find [%s] in namespace, defaulting to root node\n", InString); Node = AcpiGbl_RootNode; } } return (Node); } /******************************************************************************* * * FUNCTION: AcpiDbSleep * * PARAMETERS: ObjectArg - Desired sleep state (0-5). NULL means * invoke all possible sleep states. 
* * RETURN: Status * * DESCRIPTION: Simulate sleep/wake sequences * ******************************************************************************/ ACPI_STATUS AcpiDbSleep ( char *ObjectArg) { UINT8 SleepState; UINT32 i; ACPI_FUNCTION_TRACE (AcpiDbSleep); /* Null input (no arguments) means to invoke all sleep states */ if (!ObjectArg) { AcpiOsPrintf ("Invoking all possible sleep states, 0-%d\n", ACPI_S_STATES_MAX); for (i = 0; i <= ACPI_S_STATES_MAX; i++) { AcpiDbDoOneSleepState ((UINT8) i); } return_ACPI_STATUS (AE_OK); } /* Convert argument to binary and invoke the sleep state */ SleepState = (UINT8) strtoul (ObjectArg, NULL, 0); AcpiDbDoOneSleepState (SleepState); return_ACPI_STATUS (AE_OK); } /******************************************************************************* * * FUNCTION: AcpiDbDoOneSleepState * * PARAMETERS: SleepState - Desired sleep state (0-5) * * RETURN: None * * DESCRIPTION: Simulate a sleep/wake sequence * ******************************************************************************/ static void AcpiDbDoOneSleepState ( UINT8 SleepState) { ACPI_STATUS Status; UINT8 SleepTypeA; UINT8 SleepTypeB; /* Validate parameter */ if (SleepState > ACPI_S_STATES_MAX) { AcpiOsPrintf ("Sleep state %d out of range (%d max)\n", SleepState, ACPI_S_STATES_MAX); return; } AcpiOsPrintf ("\n---- Invoking sleep state S%d (%s):\n", SleepState, AcpiGbl_SleepStateNames[SleepState]); /* Get the values for the sleep type registers (for display only) */ Status = AcpiGetSleepTypeData (SleepState, &SleepTypeA, &SleepTypeB); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not evaluate [%s] method, %s\n", AcpiGbl_SleepStateNames[SleepState], AcpiFormatException (Status)); return; } AcpiOsPrintf ( "Register values for sleep state S%d: Sleep-A: %.2X, Sleep-B: %.2X\n", SleepState, SleepTypeA, SleepTypeB); /* Invoke the various sleep/wake interfaces */ AcpiOsPrintf ("**** Sleep: Prepare to sleep (S%d) ****\n", SleepState); Status = AcpiEnterSleepStatePrep (SleepState); if (ACPI_FAILURE (Status)) { goto ErrorExit; } AcpiOsPrintf ("**** Sleep: Going to sleep (S%d) ****\n", SleepState); Status = AcpiEnterSleepState (SleepState); if (ACPI_FAILURE (Status)) { goto ErrorExit; } AcpiOsPrintf ("**** Wake: Prepare to return from sleep (S%d) ****\n", SleepState); Status = AcpiLeaveSleepStatePrep (SleepState); if (ACPI_FAILURE (Status)) { goto ErrorExit; } AcpiOsPrintf ("**** Wake: Return from sleep (S%d) ****\n", SleepState); Status = AcpiLeaveSleepState (SleepState); if (ACPI_FAILURE (Status)) { goto ErrorExit; } return; ErrorExit: ACPI_EXCEPTION ((AE_INFO, Status, "During invocation of sleep state S%d", SleepState)); } /******************************************************************************* * * FUNCTION: AcpiDbDisplayLocks * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Display information about internal mutexes. * ******************************************************************************/ void AcpiDbDisplayLocks ( void) { UINT32 i; for (i = 0; i < ACPI_MAX_MUTEX; i++) { AcpiOsPrintf ("%26s : %s\n", AcpiUtGetMutexName (i), AcpiGbl_MutexInfo[i].ThreadId == ACPI_MUTEX_NOT_ACQUIRED ? "Locked" : "Unlocked"); } } /******************************************************************************* * * FUNCTION: AcpiDbDisplayTableInfo * * PARAMETERS: TableArg - Name of table to be displayed * * RETURN: None * * DESCRIPTION: Display information about loaded tables. Current * implementation displays all loaded tables. 
* ******************************************************************************/ void AcpiDbDisplayTableInfo ( char *TableArg) { UINT32 i; ACPI_TABLE_DESC *TableDesc; ACPI_STATUS Status; /* Header */ AcpiOsPrintf ("Idx ID Status Type " "TableHeader (Sig, Address, Length, Misc)\n"); /* Walk the entire root table list */ for (i = 0; i < AcpiGbl_RootTableList.CurrentTableCount; i++) { TableDesc = &AcpiGbl_RootTableList.Tables[i]; /* Index and Table ID */ AcpiOsPrintf ("%3u %.2u ", i, TableDesc->OwnerId); /* Decode the table flags */ if (!(TableDesc->Flags & ACPI_TABLE_IS_LOADED)) { AcpiOsPrintf ("NotLoaded "); } else { AcpiOsPrintf (" Loaded "); } switch (TableDesc->Flags & ACPI_TABLE_ORIGIN_MASK) { case ACPI_TABLE_ORIGIN_EXTERNAL_VIRTUAL: AcpiOsPrintf ("External/virtual "); break; case ACPI_TABLE_ORIGIN_INTERNAL_PHYSICAL: AcpiOsPrintf ("Internal/physical "); break; case ACPI_TABLE_ORIGIN_INTERNAL_VIRTUAL: AcpiOsPrintf ("Internal/virtual "); break; default: AcpiOsPrintf ("INVALID TYPE "); break; } /* Make sure that the table is mapped */ Status = AcpiTbValidateTable (TableDesc); if (ACPI_FAILURE (Status)) { return; } /* Dump the table header */ if (TableDesc->Pointer) { AcpiTbPrintTableHeader (TableDesc->Address, TableDesc->Pointer); } else { /* If the pointer is null, the table has been unloaded */ ACPI_INFO (("%4.4s - Table has been unloaded", TableDesc->Signature.Ascii)); } } } /******************************************************************************* * * FUNCTION: AcpiDbUnloadAcpiTable * * PARAMETERS: ObjectName - Namespace pathname for an object that * is owned by the table to be unloaded * * RETURN: None * * DESCRIPTION: Unload an ACPI table, via any namespace node that is owned * by the table. * ******************************************************************************/ void AcpiDbUnloadAcpiTable ( char *ObjectName) { ACPI_NAMESPACE_NODE *Node; ACPI_STATUS Status; /* Translate name to an Named object */ Node = AcpiDbConvertToNode (ObjectName); if (!Node) { return; } Status = AcpiUnloadParentTable (ACPI_CAST_PTR (ACPI_HANDLE, Node)); if (ACPI_SUCCESS (Status)) { AcpiOsPrintf ("Parent of [%s] (%p) unloaded and uninstalled\n", ObjectName, Node); } else { AcpiOsPrintf ("%s, while unloading parent table of [%s]\n", AcpiFormatException (Status), ObjectName); } } /******************************************************************************* * * FUNCTION: AcpiDbSendNotify * * PARAMETERS: Name - Name of ACPI object where to send notify * Value - Value of the notify to send. * * RETURN: None * * DESCRIPTION: Send an ACPI notification. The value specified is sent to the * named object as an ACPI notify. 
* ******************************************************************************/ void AcpiDbSendNotify ( char *Name, UINT32 Value) { ACPI_NAMESPACE_NODE *Node; ACPI_STATUS Status; /* Translate name to an Named object */ Node = AcpiDbConvertToNode (Name); if (!Node) { return; } /* Dispatch the notify if legal */ if (AcpiEvIsNotifyObject (Node)) { Status = AcpiEvQueueNotifyRequest (Node, Value); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not queue notify\n"); } } else { AcpiOsPrintf ( "Named object [%4.4s] Type %s, " "must be Device/Thermal/Processor type\n", AcpiUtGetNodeName (Node), AcpiUtGetTypeName (Node->Type)); } } /******************************************************************************* * * FUNCTION: AcpiDbDisplayInterfaces * * PARAMETERS: ActionArg - Null, "install", or "remove" * InterfaceNameArg - Name for install/remove options * * RETURN: None * * DESCRIPTION: Display or modify the global _OSI interface list * ******************************************************************************/ void AcpiDbDisplayInterfaces ( char *ActionArg, char *InterfaceNameArg) { ACPI_INTERFACE_INFO *NextInterface; char *SubString; ACPI_STATUS Status; /* If no arguments, just display current interface list */ if (!ActionArg) { (void) AcpiOsAcquireMutex (AcpiGbl_OsiMutex, ACPI_WAIT_FOREVER); NextInterface = AcpiGbl_SupportedInterfaces; while (NextInterface) { if (!(NextInterface->Flags & ACPI_OSI_INVALID)) { AcpiOsPrintf ("%s\n", NextInterface->Name); } NextInterface = NextInterface->Next; } AcpiOsReleaseMutex (AcpiGbl_OsiMutex); return; } /* If ActionArg exists, so must InterfaceNameArg */ if (!InterfaceNameArg) { AcpiOsPrintf ("Missing Interface Name argument\n"); return; } /* Uppercase the action for match below */ AcpiUtStrupr (ActionArg); /* Install - install an interface */ SubString = strstr ("INSTALL", ActionArg); if (SubString) { Status = AcpiInstallInterface (InterfaceNameArg); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("%s, while installing \"%s\"\n", AcpiFormatException (Status), InterfaceNameArg); } return; } /* Remove - remove an interface */ SubString = strstr ("REMOVE", ActionArg); if (SubString) { Status = AcpiRemoveInterface (InterfaceNameArg); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("%s, while removing \"%s\"\n", AcpiFormatException (Status), InterfaceNameArg); } return; } /* Invalid ActionArg */ AcpiOsPrintf ("Invalid action argument: %s\n", ActionArg); return; } /******************************************************************************* * * FUNCTION: AcpiDbDisplayTemplate * * PARAMETERS: BufferArg - Buffer name or address * * RETURN: None * * DESCRIPTION: Dump a buffer that contains a resource template * ******************************************************************************/ void AcpiDbDisplayTemplate ( char *BufferArg) { ACPI_NAMESPACE_NODE *Node; ACPI_STATUS Status; ACPI_BUFFER ReturnBuffer; /* Translate BufferArg to an Named object */ Node = AcpiDbConvertToNode (BufferArg); if (!Node || (Node == AcpiGbl_RootNode)) { AcpiOsPrintf ("Invalid argument: %s\n", BufferArg); return; } /* We must have a buffer object */ if (Node->Type != ACPI_TYPE_BUFFER) { AcpiOsPrintf ("Not a Buffer object, cannot be a template: %s\n", BufferArg); return; } ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; ReturnBuffer.Pointer = AcpiGbl_DbBuffer; /* Attempt to convert the raw buffer to a resource list */ Status = AcpiRsCreateResourceList (Node->Object, &ReturnBuffer); AcpiDbSetOutputDestination (ACPI_DB_REDIRECTABLE_OUTPUT); AcpiDbgLevel |= ACPI_LV_RESOURCES; if 
(ACPI_FAILURE (Status)) { AcpiOsPrintf ( "Could not convert Buffer to a resource list: %s, %s\n", BufferArg, AcpiFormatException (Status)); goto DumpBuffer; } /* Now we can dump the resource list */ AcpiRsDumpResourceList (ACPI_CAST_PTR (ACPI_RESOURCE, ReturnBuffer.Pointer)); DumpBuffer: AcpiOsPrintf ("\nRaw data buffer:\n"); AcpiUtDebugDumpBuffer ((UINT8 *) Node->Object->Buffer.Pointer, Node->Object->Buffer.Length, DB_BYTE_DISPLAY, ACPI_UINT32_MAX); AcpiDbSetOutputDestination (ACPI_DB_CONSOLE_OUTPUT); return; } /******************************************************************************* * * FUNCTION: AcpiDmCompareAmlResources * * PARAMETERS: Aml1Buffer - Contains first resource list * Aml1BufferLength - Length of first resource list * Aml2Buffer - Contains second resource list * Aml2BufferLength - Length of second resource list * * RETURN: None * * DESCRIPTION: Compare two AML resource lists, descriptor by descriptor (in * order to isolate a miscompare to an individual resource) * ******************************************************************************/ static void AcpiDmCompareAmlResources ( UINT8 *Aml1Buffer, ACPI_RSDESC_SIZE Aml1BufferLength, UINT8 *Aml2Buffer, ACPI_RSDESC_SIZE Aml2BufferLength) { UINT8 *Aml1; UINT8 *Aml2; UINT8 *Aml1End; UINT8 *Aml2End; ACPI_RSDESC_SIZE Aml1Length; ACPI_RSDESC_SIZE Aml2Length; ACPI_RSDESC_SIZE Offset = 0; UINT8 ResourceType; UINT32 Count = 0; UINT32 i; /* Compare overall buffer sizes (may be different due to size rounding) */ if (Aml1BufferLength != Aml2BufferLength) { AcpiOsPrintf ( "**** Buffer length mismatch in converted " "AML: Original %X, New %X ****\n", Aml1BufferLength, Aml2BufferLength); } Aml1 = Aml1Buffer; Aml2 = Aml2Buffer; Aml1End = Aml1Buffer + Aml1BufferLength; Aml2End = Aml2Buffer + Aml2BufferLength; /* Walk the descriptor lists, comparing each descriptor */ while ((Aml1 < Aml1End) && (Aml2 < Aml2End)) { /* Get the lengths of each descriptor */ Aml1Length = AcpiUtGetDescriptorLength (Aml1); Aml2Length = AcpiUtGetDescriptorLength (Aml2); ResourceType = AcpiUtGetResourceType (Aml1); /* Check for descriptor length match */ if (Aml1Length != Aml2Length) { AcpiOsPrintf ( "**** Length mismatch in descriptor [%.2X] type %2.2X, " "Offset %8.8X Len1 %X, Len2 %X ****\n", Count, ResourceType, Offset, Aml1Length, Aml2Length); } /* Check for descriptor byte match */ else if (memcmp (Aml1, Aml2, Aml1Length)) { AcpiOsPrintf ( "**** Data mismatch in descriptor [%.2X] type %2.2X, " "Offset %8.8X ****\n", Count, ResourceType, Offset); for (i = 0; i < Aml1Length; i++) { if (Aml1[i] != Aml2[i]) { AcpiOsPrintf ( "Mismatch at byte offset %.2X: is %2.2X, " "should be %2.2X\n", i, Aml2[i], Aml1[i]); } } } /* Exit on EndTag descriptor */ if (ResourceType == ACPI_RESOURCE_NAME_END_TAG) { return; } /* Point to next descriptor in each buffer */ Count++; Offset += Aml1Length; Aml1 += Aml1Length; Aml2 += Aml2Length; } } /******************************************************************************* * * FUNCTION: AcpiDmTestResourceConversion * * PARAMETERS: Node - Parent device node * Name - resource method name (_CRS) * * RETURN: Status * * DESCRIPTION: Compare the original AML with a conversion of the AML to * internal resource list, then back to AML. 
* ******************************************************************************/ static ACPI_STATUS AcpiDmTestResourceConversion ( ACPI_NAMESPACE_NODE *Node, char *Name) { ACPI_STATUS Status; ACPI_BUFFER ReturnBuffer; ACPI_BUFFER ResourceBuffer; ACPI_BUFFER NewAml; ACPI_OBJECT *OriginalAml; AcpiOsPrintf ("Resource Conversion Comparison:\n"); NewAml.Length = ACPI_ALLOCATE_LOCAL_BUFFER; ReturnBuffer.Length = ACPI_ALLOCATE_LOCAL_BUFFER; ResourceBuffer.Length = ACPI_ALLOCATE_LOCAL_BUFFER; /* Get the original _CRS AML resource template */ Status = AcpiEvaluateObject (Node, Name, NULL, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not obtain %s: %s\n", Name, AcpiFormatException (Status)); return (Status); } /* Get the AML resource template, converted to internal resource structs */ Status = AcpiGetCurrentResources (Node, &ResourceBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiGetCurrentResources failed: %s\n", AcpiFormatException (Status)); goto Exit1; } /* Convert internal resource list to external AML resource template */ Status = AcpiRsCreateAmlResources (&ResourceBuffer, &NewAml); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiRsCreateAmlResources failed: %s\n", AcpiFormatException (Status)); goto Exit2; } /* Compare original AML to the newly created AML resource list */ OriginalAml = ReturnBuffer.Pointer; AcpiDmCompareAmlResources (OriginalAml->Buffer.Pointer, (ACPI_RSDESC_SIZE) OriginalAml->Buffer.Length, NewAml.Pointer, (ACPI_RSDESC_SIZE) NewAml.Length); /* Cleanup and exit */ ACPI_FREE (NewAml.Pointer); Exit2: ACPI_FREE (ResourceBuffer.Pointer); Exit1: ACPI_FREE (ReturnBuffer.Pointer); return (Status); } /******************************************************************************* * * FUNCTION: AcpiDbResourceCallback * * PARAMETERS: ACPI_WALK_RESOURCE_CALLBACK * * RETURN: Status * * DESCRIPTION: Simple callback to exercise AcpiWalkResources and * AcpiWalkResourceBuffer. * ******************************************************************************/ static ACPI_STATUS AcpiDbResourceCallback ( ACPI_RESOURCE *Resource, void *Context) { return (AE_OK); } /******************************************************************************* * * FUNCTION: AcpiDbDeviceResources * * PARAMETERS: ACPI_WALK_CALLBACK * * RETURN: Status * * DESCRIPTION: Display the _PRT/_CRS/_PRS resources for a device object. 
* ******************************************************************************/ static ACPI_STATUS AcpiDbDeviceResources ( ACPI_HANDLE ObjHandle, UINT32 NestingLevel, void *Context, void **ReturnValue) { ACPI_NAMESPACE_NODE *Node; ACPI_NAMESPACE_NODE *PrtNode = NULL; ACPI_NAMESPACE_NODE *CrsNode = NULL; ACPI_NAMESPACE_NODE *PrsNode = NULL; ACPI_NAMESPACE_NODE *AeiNode = NULL; char *ParentPath; ACPI_BUFFER ReturnBuffer; ACPI_STATUS Status; Node = ACPI_CAST_PTR (ACPI_NAMESPACE_NODE, ObjHandle); ParentPath = AcpiNsGetNormalizedPathname (Node, TRUE); if (!ParentPath) { return (AE_NO_MEMORY); } /* Get handles to the resource methods for this device */ (void) AcpiGetHandle (Node, METHOD_NAME__PRT, ACPI_CAST_PTR (ACPI_HANDLE, &PrtNode)); (void) AcpiGetHandle (Node, METHOD_NAME__CRS, ACPI_CAST_PTR (ACPI_HANDLE, &CrsNode)); (void) AcpiGetHandle (Node, METHOD_NAME__PRS, ACPI_CAST_PTR (ACPI_HANDLE, &PrsNode)); (void) AcpiGetHandle (Node, METHOD_NAME__AEI, ACPI_CAST_PTR (ACPI_HANDLE, &AeiNode)); if (!PrtNode && !CrsNode && !PrsNode && !AeiNode) { goto Cleanup; /* Nothing to do */ } AcpiOsPrintf ("\nDevice: %s\n", ParentPath); /* Prepare for a return object of arbitrary size */ ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; /* _PRT */ if (PrtNode) { AcpiOsPrintf ("Evaluating _PRT\n"); Status = AcpiEvaluateObject (PrtNode, NULL, NULL, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not evaluate _PRT: %s\n", AcpiFormatException (Status)); goto GetCrs; } ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; Status = AcpiGetIrqRoutingTable (Node, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("GetIrqRoutingTable failed: %s\n", AcpiFormatException (Status)); goto GetCrs; } AcpiRsDumpIrqList (ACPI_CAST_PTR (UINT8, AcpiGbl_DbBuffer)); } /* _CRS */ GetCrs: if (CrsNode) { AcpiOsPrintf ("Evaluating _CRS\n"); ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; Status = AcpiEvaluateObject (CrsNode, NULL, NULL, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not evaluate _CRS: %s\n", AcpiFormatException (Status)); goto GetPrs; } /* This code exercises the AcpiWalkResources interface */ Status = AcpiWalkResources (Node, METHOD_NAME__CRS, AcpiDbResourceCallback, NULL); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiWalkResources failed: %s\n", AcpiFormatException (Status)); goto GetPrs; } /* Get the _CRS resource list (test ALLOCATE buffer) */ ReturnBuffer.Pointer = NULL; ReturnBuffer.Length = ACPI_ALLOCATE_LOCAL_BUFFER; Status = AcpiGetCurrentResources (Node, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiGetCurrentResources failed: %s\n", AcpiFormatException (Status)); goto GetPrs; } /* This code exercises the AcpiWalkResourceBuffer interface */ Status = AcpiWalkResourceBuffer (&ReturnBuffer, AcpiDbResourceCallback, NULL); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiWalkResourceBuffer failed: %s\n", AcpiFormatException (Status)); goto EndCrs; } /* Dump the _CRS resource list */ AcpiRsDumpResourceList (ACPI_CAST_PTR (ACPI_RESOURCE, ReturnBuffer.Pointer)); /* * Perform comparison of original AML to newly created AML. This * tests both the AML->Resource conversion and the Resource->AML * conversion. 
*/ (void) AcpiDmTestResourceConversion (Node, METHOD_NAME__CRS); /* Execute _SRS with the resource list */ AcpiOsPrintf ("Evaluating _SRS\n"); Status = AcpiSetCurrentResources (Node, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiSetCurrentResources failed: %s\n", AcpiFormatException (Status)); goto EndCrs; } EndCrs: ACPI_FREE (ReturnBuffer.Pointer); } /* _PRS */ GetPrs: if (PrsNode) { AcpiOsPrintf ("Evaluating _PRS\n"); ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; Status = AcpiEvaluateObject (PrsNode, NULL, NULL, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not evaluate _PRS: %s\n", AcpiFormatException (Status)); goto GetAei; } ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; Status = AcpiGetPossibleResources (Node, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiGetPossibleResources failed: %s\n", AcpiFormatException (Status)); goto GetAei; } AcpiRsDumpResourceList (ACPI_CAST_PTR ( ACPI_RESOURCE, AcpiGbl_DbBuffer)); } /* _AEI */ GetAei: if (AeiNode) { AcpiOsPrintf ("Evaluating _AEI\n"); ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; Status = AcpiEvaluateObject (AeiNode, NULL, NULL, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("Could not evaluate _AEI: %s\n", AcpiFormatException (Status)); goto Cleanup; } ReturnBuffer.Pointer = AcpiGbl_DbBuffer; ReturnBuffer.Length = ACPI_DEBUG_BUFFER_SIZE; Status = AcpiGetEventResources (Node, &ReturnBuffer); if (ACPI_FAILURE (Status)) { AcpiOsPrintf ("AcpiGetEventResources failed: %s\n", AcpiFormatException (Status)); goto Cleanup; } AcpiRsDumpResourceList (ACPI_CAST_PTR ( ACPI_RESOURCE, AcpiGbl_DbBuffer)); } Cleanup: ACPI_FREE (ParentPath); return (AE_OK); } /******************************************************************************* * * FUNCTION: AcpiDbDisplayResources * * PARAMETERS: ObjectArg - String object name or object pointer. * NULL or "*" means "display resources for * all devices" * * RETURN: None * * DESCRIPTION: Display the resource objects associated with a device. 
* ******************************************************************************/ void AcpiDbDisplayResources ( char *ObjectArg) { ACPI_NAMESPACE_NODE *Node; AcpiDbSetOutputDestination (ACPI_DB_REDIRECTABLE_OUTPUT); AcpiDbgLevel |= ACPI_LV_RESOURCES; /* Asterisk means "display resources for all devices" */ if (!ObjectArg || (!strcmp (ObjectArg, "*"))) { (void) AcpiWalkNamespace (ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, AcpiDbDeviceResources, NULL, NULL, NULL); } else { /* Convert string to object pointer */ Node = AcpiDbConvertToNode (ObjectArg); if (Node) { if (Node->Type != ACPI_TYPE_DEVICE) { AcpiOsPrintf ( "%4.4s: Name is not a device object (%s)\n", Node->Name.Ascii, AcpiUtGetTypeName (Node->Type)); } else { (void) AcpiDbDeviceResources (Node, 0, NULL, NULL); } } } AcpiDbSetOutputDestination (ACPI_DB_CONSOLE_OUTPUT); } #if (!ACPI_REDUCED_HARDWARE) /******************************************************************************* * * FUNCTION: AcpiDbGenerateGpe * * PARAMETERS: GpeArg - Raw GPE number, ascii string * BlockArg - GPE block number, ascii string * 0 or 1 for FADT GPE blocks * * RETURN: None * * DESCRIPTION: Simulate firing of a GPE * ******************************************************************************/ void AcpiDbGenerateGpe ( char *GpeArg, char *BlockArg) { UINT32 BlockNumber = 0; UINT32 GpeNumber; ACPI_GPE_EVENT_INFO *GpeEventInfo; GpeNumber = strtoul (GpeArg, NULL, 0); /* * If no block arg, or block arg == 0 or 1, use the FADT-defined * GPE blocks. */ if (BlockArg) { BlockNumber = strtoul (BlockArg, NULL, 0); if (BlockNumber == 1) { BlockNumber = 0; } } GpeEventInfo = AcpiEvGetGpeEventInfo ( ACPI_TO_POINTER (BlockNumber), GpeNumber); if (!GpeEventInfo) { AcpiOsPrintf ("Invalid GPE\n"); return; } (void) AcpiEvGpeDispatch (NULL, GpeEventInfo, GpeNumber); } /******************************************************************************* * * FUNCTION: AcpiDbGenerateSci * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Simulate an SCI -- just call the SCI dispatch. 
* ******************************************************************************/ void AcpiDbGenerateSci ( void) { AcpiEvSciDispatch (); } #endif /* !ACPI_REDUCED_HARDWARE */ /******************************************************************************* * * FUNCTION: AcpiDbTrace * * PARAMETERS: EnableArg - ENABLE/AML to enable tracer * DISABLE to disable tracer * MethodArg - Method to trace * OnceArg - Whether trace once * * RETURN: None * * DESCRIPTION: Control method tracing facility * ******************************************************************************/ void AcpiDbTrace ( char *EnableArg, char *MethodArg, char *OnceArg) { UINT32 DebugLevel = 0; UINT32 DebugLayer = 0; UINT32 Flags = 0; AcpiUtStrupr (EnableArg); AcpiUtStrupr (OnceArg); if (MethodArg) { if (AcpiDbTraceMethodName) { ACPI_FREE (AcpiDbTraceMethodName); AcpiDbTraceMethodName = NULL; } AcpiDbTraceMethodName = ACPI_ALLOCATE (strlen (MethodArg) + 1); if (!AcpiDbTraceMethodName) { AcpiOsPrintf ("Failed to allocate method name (%s)\n", MethodArg); return; } strcpy (AcpiDbTraceMethodName, MethodArg); } if (!strcmp (EnableArg, "ENABLE") || !strcmp (EnableArg, "METHOD") || !strcmp (EnableArg, "OPCODE")) { if (!strcmp (EnableArg, "ENABLE")) { /* Inherit current console settings */ DebugLevel = AcpiGbl_DbConsoleDebugLevel; DebugLayer = AcpiDbgLayer; } else { /* Restrict console output to trace points only */ DebugLevel = ACPI_LV_TRACE_POINT; DebugLayer = ACPI_EXECUTER; } Flags = ACPI_TRACE_ENABLED; if (!strcmp (EnableArg, "OPCODE")) { Flags |= ACPI_TRACE_OPCODE; } if (OnceArg && !strcmp (OnceArg, "ONCE")) { Flags |= ACPI_TRACE_ONESHOT; } } (void) AcpiDebugTrace (AcpiDbTraceMethodName, DebugLevel, DebugLayer, Flags); } #endif /* ACPI_DEBUGGER */
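/*
 * A small standalone illustration of the keyword-abbreviation idiom used
 * above by AcpiDbDisplayInterfaces(): the full keyword is the strstr()
 * haystack and the user's uppercased input is the needle, so
 * strstr("INSTALL", arg) succeeds for "I", "INST", and so on (and, like the
 * original, for any other substring such as "STALL").  This is only the bare
 * idiom with an invented keyword list, not ACPICA code.
 */
#include <stdio.h>
#include <string.h>
#include <ctype.h>

static int matches(const char *keyword, const char *arg)
{
    char upper[32];
    size_t i;

    if (!arg || !arg[0] || strlen(arg) >= sizeof(upper))
        return 0;

    for (i = 0; arg[i]; i++)                  /* mirrors AcpiUtStrupr() */
        upper[i] = (char)toupper((unsigned char)arg[i]);
    upper[i] = '\0';

    return strstr(keyword, upper) != NULL;    /* note the argument order */
}

int main(void)
{
    const char *inputs[] = { "inst", "REM", "bogus" };

    for (size_t i = 0; i < 3; i++)
        printf("%-6s -> %s\n", inputs[i],
               matches("INSTALL", inputs[i]) ? "INSTALL" :
               matches("REMOVE",  inputs[i]) ? "REMOVE"  : "invalid");
    return 0;
}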
107545.c
/* * Copyright IBM Corp. 2007 * Author(s): Peter Oberparleiter <[email protected]> */ #include <linux/vmalloc.h> #include <linux/bitops.h> #include "idset.h" #include "css.h" struct idset { int num_ssid; int num_id; unsigned long bitmap[0]; }; static inline unsigned long bitmap_size(int num_ssid, int num_id) { return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); } static struct idset *idset_new(int num_ssid, int num_id) { struct idset *set; set = vmalloc(sizeof(struct idset) + bitmap_size(num_ssid, num_id)); if (set) { set->num_ssid = num_ssid; set->num_id = num_id; memset(set->bitmap, 0, bitmap_size(num_ssid, num_id)); } return set; } void idset_free(struct idset *set) { vfree(set); } void idset_clear(struct idset *set) { memset(set->bitmap, 0, bitmap_size(set->num_ssid, set->num_id)); } void idset_fill(struct idset *set) { memset(set->bitmap, 0xff, bitmap_size(set->num_ssid, set->num_id)); } static inline void idset_add(struct idset *set, int ssid, int id) { set_bit(ssid * set->num_id + id, set->bitmap); } static inline void idset_del(struct idset *set, int ssid, int id) { clear_bit(ssid * set->num_id + id, set->bitmap); } static inline int idset_contains(struct idset *set, int ssid, int id) { return test_bit(ssid * set->num_id + id, set->bitmap); } static inline int idset_get_first(struct idset *set, int *ssid, int *id) { int bitnum; bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); if (bitnum >= set->num_ssid * set->num_id) return 0; *ssid = bitnum / set->num_id; *id = bitnum % set->num_id; return 1; } struct idset *idset_sch_new(void) { return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1); } void idset_sch_add(struct idset *set, struct subchannel_id schid) { idset_add(set, schid.ssid, schid.sch_no); } void idset_sch_del(struct idset *set, struct subchannel_id schid) { idset_del(set, schid.ssid, schid.sch_no); } int idset_sch_contains(struct idset *set, struct subchannel_id schid) { return idset_contains(set, schid.ssid, schid.sch_no); } int idset_sch_get_first(struct idset *set, struct subchannel_id *schid) { int ssid = 0; int id = 0; int rc; rc = idset_get_first(set, &ssid, &id); if (rc) { init_subchannel_id(schid); schid->ssid = ssid; schid->sch_no = id; } return rc; } int idset_is_empty(struct idset *set) { int bitnum; bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id); if (bitnum >= set->num_ssid * set->num_id) return 1; return 0; } void idset_add_set(struct idset *to, struct idset *from) { unsigned long i, len; len = min(__BITOPS_WORDS(to->num_ssid * to->num_id), __BITOPS_WORDS(from->num_ssid * from->num_id)); for (i = 0; i < len ; i++) to->bitmap[i] |= from->bitmap[i]; }
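/*
 * A userspace sketch of the data layout behind the idset code above: a
 * two-dimensional id (ssid, id) is flattened to the single bit position
 * ssid * num_id + id in one contiguous bitmap, so add/del/contains are plain
 * bit operations and "first set bit" recovers the pair by division and
 * modulo.  The sizes chosen in main() and the unsigned-long packing are for
 * illustration only and do not mirror the kernel's s390 bitops helpers.
 */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_WORD (CHAR_BIT * sizeof(unsigned long))

struct idset {
    int num_ssid;
    int num_id;
    unsigned long *bitmap;
};

static struct idset *idset_new(int num_ssid, int num_id)
{
    size_t bits = (size_t)num_ssid * num_id;
    size_t words = (bits + BITS_PER_WORD - 1) / BITS_PER_WORD;
    struct idset *set = malloc(sizeof(*set));

    if (!set)
        return NULL;
    set->num_ssid = num_ssid;
    set->num_id = num_id;
    set->bitmap = calloc(words, sizeof(unsigned long));
    if (!set->bitmap) {
        free(set);
        return NULL;
    }
    return set;
}

static void idset_add(struct idset *set, int ssid, int id)
{
    size_t bit = (size_t)ssid * set->num_id + id;   /* flatten (ssid, id) */
    set->bitmap[bit / BITS_PER_WORD] |= 1UL << (bit % BITS_PER_WORD);
}

static int idset_contains(struct idset *set, int ssid, int id)
{
    size_t bit = (size_t)ssid * set->num_id + id;
    return (set->bitmap[bit / BITS_PER_WORD] >> (bit % BITS_PER_WORD)) & 1UL;
}

int main(void)
{
    struct idset *set = idset_new(4, 65536);   /* e.g. 4 ssids, 64K ids */

    if (!set)
        return 1;
    idset_add(set, 1, 0x0042);
    printf("(1,0x0042) in set: %d\n", idset_contains(set, 1, 0x0042));
    printf("(0,0x0042) in set: %d\n", idset_contains(set, 0, 0x0042));
    free(set->bitmap);
    free(set);
    return 0;
}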
347717.c
/******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <[email protected]> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 * ******************************************************************************/ #ifdef CONFIG_DEBUG_FS #include <linux/fs.h> #include <linux/debugfs.h> #include "i40e.h" static struct dentry *i40e_dbg_root; /** * i40e_dbg_find_vsi - searches for the vsi with the given seid * @pf - the pf structure to search for the vsi * @seid - seid of the vsi it is searching for **/ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) { int i; if (seid < 0) dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); else for (i = 0; i < pf->hw.func_caps.num_vsis; i++) if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) return pf->vsi[i]; return NULL; } /** * i40e_dbg_find_veb - searches for the veb with the given seid * @pf - the pf structure to search for the veb * @seid - seid of the veb it is searching for **/ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { int i; if ((seid < I40E_BASE_VEB_SEID) || (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB))) dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); else for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && pf->veb[i]->seid == seid) return pf->veb[i]; return NULL; } /************************************************************** * dump * The dump entry in debugfs is for getting a data snapshow of * the driver's current configuration and runtime details. * When the filesystem entry is written, a snapshot is taken. * When the entry is read, the most recent snapshot data is dumped. **************************************************************/ static char *i40e_dbg_dump_buf; static ssize_t i40e_dbg_dump_data_len; static ssize_t i40e_dbg_dump_buffer_len; /** * i40e_dbg_dump_read - read the dump data * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int bytes_not_copied; int len; /* is *ppos bigger than the available data? 
*/ if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf) return 0; /* be sure to not read beyond the end of available data */ len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); if (bytes_not_copied < 0) return bytes_not_copied; *ppos += len; return len; } /** * i40e_dbg_prep_dump_buf * @pf: the pf we're working with * @buflen: the desired buffer length * * Return positive if success, 0 if failed **/ static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) { /* if not already big enough, prep for re alloc */ if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) { kfree(i40e_dbg_dump_buf); i40e_dbg_dump_buffer_len = 0; i40e_dbg_dump_buf = NULL; } /* get a new buffer if needed */ if (!i40e_dbg_dump_buf) { i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL); if (i40e_dbg_dump_buf != NULL) i40e_dbg_dump_buffer_len = buflen; } return i40e_dbg_dump_buffer_len; } /** * i40e_dbg_dump_write - trigger a datadump snapshot * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset * * Any write clears the stats **/ static ssize_t i40e_dbg_dump_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; bool seid_found = false; long seid = -1; int buflen = 0; int i, ret; int len; u8 *p; /* don't allow partial writes */ if (*ppos != 0) return 0; /* decode the SEID given to be dumped */ ret = kstrtol_from_user(buffer, count, 0, &seid); if (ret) { dev_info(&pf->pdev->dev, "bad seid value\n"); } else if (seid == 0) { seid_found = true; kfree(i40e_dbg_dump_buf); i40e_dbg_dump_buffer_len = 0; i40e_dbg_dump_data_len = 0; i40e_dbg_dump_buf = NULL; dev_info(&pf->pdev->dev, "debug buffer freed\n"); } else if (seid == pf->pf_seid || seid == 1) { seid_found = true; buflen = sizeof(struct i40e_pf); buflen += (sizeof(struct i40e_aq_desc) * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); if (i40e_dbg_prep_dump_buf(pf, buflen)) { p = i40e_dbg_dump_buf; len = sizeof(struct i40e_pf); memcpy(p, pf, len); p += len; len = (sizeof(struct i40e_aq_desc) * pf->hw.aq.num_asq_entries); memcpy(p, pf->hw.aq.asq.desc_buf.va, len); p += len; len = (sizeof(struct i40e_aq_desc) * pf->hw.aq.num_arq_entries); memcpy(p, pf->hw.aq.arq.desc_buf.va, len); p += len; i40e_dbg_dump_data_len = buflen; dev_info(&pf->pdev->dev, "PF seid %ld dumped %d bytes\n", seid, (int)i40e_dbg_dump_data_len); } } else if (seid >= I40E_BASE_VSI_SEID) { struct i40e_vsi *vsi = NULL; struct i40e_mac_filter *f; int filter_count = 0; mutex_lock(&pf->switch_mutex); vsi = i40e_dbg_find_vsi(pf, seid); if (!vsi) { mutex_unlock(&pf->switch_mutex); goto write_exit; } buflen = sizeof(struct i40e_vsi); buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors; buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs; buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs; buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs; list_for_each_entry(f, &vsi->mac_filter_list, list) filter_count++; buflen += sizeof(struct i40e_mac_filter) * filter_count; if (i40e_dbg_prep_dump_buf(pf, buflen)) { p = i40e_dbg_dump_buf; seid_found = true; len = sizeof(struct i40e_vsi); memcpy(p, vsi, len); p += len; if (vsi->num_q_vectors) { len = (sizeof(struct i40e_q_vector) * vsi->num_q_vectors); memcpy(p, vsi->q_vectors, len); p += len; } if (vsi->num_queue_pairs) { len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs); 
memcpy(p, vsi->tx_rings, len); p += len; memcpy(p, vsi->rx_rings, len); p += len; } if (vsi->tx_rings[0]) { len = sizeof(struct i40e_tx_buffer); for (i = 0; i < vsi->num_queue_pairs; i++) { memcpy(p, vsi->tx_rings[i]->tx_bi, len); p += len; } len = sizeof(struct i40e_rx_buffer); for (i = 0; i < vsi->num_queue_pairs; i++) { memcpy(p, vsi->rx_rings[i]->rx_bi, len); p += len; } } /* macvlan filter list */ len = sizeof(struct i40e_mac_filter); list_for_each_entry(f, &vsi->mac_filter_list, list) { memcpy(p, f, len); p += len; } i40e_dbg_dump_data_len = buflen; dev_info(&pf->pdev->dev, "VSI seid %ld dumped %d bytes\n", seid, (int)i40e_dbg_dump_data_len); } mutex_unlock(&pf->switch_mutex); } else if (seid >= I40E_BASE_VEB_SEID) { struct i40e_veb *veb = NULL; mutex_lock(&pf->switch_mutex); veb = i40e_dbg_find_veb(pf, seid); if (!veb) { mutex_unlock(&pf->switch_mutex); goto write_exit; } buflen = sizeof(struct i40e_veb); if (i40e_dbg_prep_dump_buf(pf, buflen)) { seid_found = true; memcpy(i40e_dbg_dump_buf, veb, buflen); i40e_dbg_dump_data_len = buflen; dev_info(&pf->pdev->dev, "VEB seid %ld dumped %d bytes\n", seid, (int)i40e_dbg_dump_data_len); } mutex_unlock(&pf->switch_mutex); } write_exit: if (!seid_found) dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); return count; } static const struct file_operations i40e_dbg_dump_fops = { .owner = THIS_MODULE, .open = simple_open, .read = i40e_dbg_dump_read, .write = i40e_dbg_dump_write, }; /************************************************************** * command * The command entry in debugfs is for giving the driver commands * to be executed - these may be for changing the internal switch * setup, adding or removing filters, or other things. Many of * these will be useful for some forms of unit testing. **************************************************************/ static char i40e_dbg_command_buf[256] = "hello world"; /** * i40e_dbg_command_read - read for command datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; int buf_size = 256; char *buf; int len; /* don't allow partial reads */ if (*ppos != 0) return 0; if (count < buf_size) return -ENOSPC; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; len = snprintf(buf, buf_size, "%s: %s\n", pf->vsi[pf->lan_vsi]->netdev->name, i40e_dbg_command_buf); bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); if (bytes_not_copied < 0) return bytes_not_copied; *ppos = len; return len; } /** * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum * @pf: the i40e_pf created in command write * @seid: the seid the user put in **/ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) { struct rtnl_link_stats64 *nstat; struct i40e_mac_filter *f; struct i40e_vsi *vsi; int i; vsi = i40e_dbg_find_vsi(pf, seid); if (!vsi) { dev_info(&pf->pdev->dev, "dump %d: seid not found\n", seid); return; } dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); if (vsi->netdev) dev_info(&pf->pdev->dev, " netdev: name = %s\n", vsi->netdev->name); if (vsi->active_vlans) dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", vsi->netdev_registered, vsi->current_netdev_flags, vsi->state, 
vsi->flags); list_for_each_entry(f, &vsi->mac_filter_list, list) { dev_info(&pf->pdev->dev, " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", f->macaddr, f->vlan, f->is_netdev, f->is_vf, f->counter); } nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", (long unsigned int)nstat->rx_packets, (long unsigned int)nstat->rx_bytes, (long unsigned int)nstat->rx_errors, (long unsigned int)nstat->rx_dropped); dev_info(&pf->pdev->dev, " net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", (long unsigned int)nstat->tx_packets, (long unsigned int)nstat->tx_bytes, (long unsigned int)nstat->tx_errors, (long unsigned int)nstat->tx_dropped); dev_info(&pf->pdev->dev, " net_stats: multicast = %lu, collisions = %lu\n", (long unsigned int)nstat->multicast, (long unsigned int)nstat->collisions); dev_info(&pf->pdev->dev, " net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", (long unsigned int)nstat->rx_length_errors, (long unsigned int)nstat->rx_over_errors, (long unsigned int)nstat->rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", (long unsigned int)nstat->rx_frame_errors, (long unsigned int)nstat->rx_fifo_errors, (long unsigned int)nstat->rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n", (long unsigned int)nstat->tx_aborted_errors, (long unsigned int)nstat->tx_carrier_errors, (long unsigned int)nstat->tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", (long unsigned int)nstat->tx_heartbeat_errors, (long unsigned int)nstat->tx_window_errors); dev_info(&pf->pdev->dev, " net_stats: rx_compressed = %lu, tx_compressed = %lu\n", (long unsigned int)nstat->rx_compressed, (long unsigned int)nstat->tx_compressed); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", (long unsigned int)vsi->net_stats_offsets.rx_packets, (long unsigned int)vsi->net_stats_offsets.rx_bytes, (long unsigned int)vsi->net_stats_offsets.rx_errors, (long unsigned int)vsi->net_stats_offsets.rx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n", (long unsigned int)vsi->net_stats_offsets.tx_packets, (long unsigned int)vsi->net_stats_offsets.tx_bytes, (long unsigned int)vsi->net_stats_offsets.tx_errors, (long unsigned int)vsi->net_stats_offsets.tx_dropped); dev_info(&pf->pdev->dev, " net_stats_offsets: multicast = %lu, collisions = %lu\n", (long unsigned int)vsi->net_stats_offsets.multicast, (long unsigned int)vsi->net_stats_offsets.collisions); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n", (long unsigned int)vsi->net_stats_offsets.rx_length_errors, (long unsigned int)vsi->net_stats_offsets.rx_over_errors, (long unsigned int)vsi->net_stats_offsets.rx_crc_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n", (long unsigned int)vsi->net_stats_offsets.rx_frame_errors, (long unsigned int)vsi->net_stats_offsets.rx_fifo_errors, (long unsigned int)vsi->net_stats_offsets.rx_missed_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, 
tx_fifo_errors = %lu\n", (long unsigned int)vsi->net_stats_offsets.tx_aborted_errors, (long unsigned int)vsi->net_stats_offsets.tx_carrier_errors, (long unsigned int)vsi->net_stats_offsets.tx_fifo_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n", (long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors, (long unsigned int)vsi->net_stats_offsets.tx_window_errors); dev_info(&pf->pdev->dev, " net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n", (long unsigned int)vsi->net_stats_offsets.rx_compressed, (long unsigned int)vsi->net_stats_offsets.tx_compressed); dev_info(&pf->pdev->dev, " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n", vsi->tx_restart, vsi->tx_busy, vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); if (!rx_ring) continue; dev_info(&pf->pdev->dev, " rx_rings[%i]: desc = %p\n", i, rx_ring->desc); dev_info(&pf->pdev->dev, " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n", i, rx_ring->dev, rx_ring->netdev, rx_ring->rx_bi); dev_info(&pf->pdev->dev, " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", i, rx_ring->state, rx_ring->queue_index, rx_ring->reg_idx); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", i, rx_ring->rx_hdr_len, rx_ring->rx_buf_len, rx_ring->dtype); dev_info(&pf->pdev->dev, " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, rx_ring->hsplit, rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n", i, rx_ring->stats.packets, rx_ring->stats.bytes, rx_ring->rx_stats.non_eop_descs); dev_info(&pf->pdev->dev, " rx_rings[%i]: rx_stats: alloc_page_failed = %lld, alloc_buff_failed = %lld\n", i, rx_ring->rx_stats.alloc_page_failed, rx_ring->rx_stats.alloc_buff_failed); dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, (long unsigned int)rx_ring->dma); dev_info(&pf->pdev->dev, " rx_rings[%i]: vsi = %p, q_vector = %p\n", i, rx_ring->vsi, rx_ring->q_vector); } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; dev_info(&pf->pdev->dev, " tx_rings[%i]: desc = %p\n", i, tx_ring->desc); dev_info(&pf->pdev->dev, " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n", i, tx_ring->dev, tx_ring->netdev, tx_ring->tx_bi); dev_info(&pf->pdev->dev, " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", i, tx_ring->state, tx_ring->queue_index, tx_ring->reg_idx); dev_info(&pf->pdev->dev, " tx_rings[%i]: dtype = %d\n", i, tx_ring->dtype); dev_info(&pf->pdev->dev, " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, tx_ring->hsplit, tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->ring_active); dev_info(&pf->pdev->dev, " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", i, tx_ring->stats.packets, tx_ring->stats.bytes, tx_ring->tx_stats.restart_queue); dev_info(&pf->pdev->dev, " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", i, tx_ring->tx_stats.tx_busy, tx_ring->tx_stats.tx_done_old); dev_info(&pf->pdev->dev, " tx_rings[%i]: size = %i, dma = 0x%08lx\n", i, tx_ring->size, (long unsigned int)tx_ring->dma); dev_info(&pf->pdev->dev, " tx_rings[%i]: vsi = %p, q_vector = 
%p\n", i, tx_ring->vsi, tx_ring->q_vector); dev_info(&pf->pdev->dev, " tx_rings[%i]: DCB tc = %d\n", i, tx_ring->dcb_tc); } rcu_read_unlock(); dev_info(&pf->pdev->dev, " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", vsi->work_limit, vsi->rx_itr_setting, ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed", vsi->tx_itr_setting, ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? "dynamic" : "fixed"); dev_info(&pf->pdev->dev, " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); dev_info(&pf->pdev->dev, " num_q_vectors = %i, base_vector = %i\n", vsi->num_q_vectors, vsi->base_vector); dev_info(&pf->pdev->dev, " seid = %d, id = %d, uplink_seid = %d\n", vsi->seid, vsi->id, vsi->uplink_seid); dev_info(&pf->pdev->dev, " base_queue = %d, num_queue_pairs = %d, num_desc = %d\n", vsi->base_queue, vsi->num_queue_pairs, vsi->num_desc); dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); dev_info(&pf->pdev->dev, " info: valid_sections = 0x%04x, switch_id = 0x%04x\n", vsi->info.valid_sections, vsi->info.switch_id); dev_info(&pf->pdev->dev, " info: sw_reserved[] = 0x%02x 0x%02x\n", vsi->info.sw_reserved[0], vsi->info.sw_reserved[1]); dev_info(&pf->pdev->dev, " info: sec_flags = 0x%02x, sec_reserved = 0x%02x\n", vsi->info.sec_flags, vsi->info.sec_reserved); dev_info(&pf->pdev->dev, " info: pvid = 0x%04x, fcoe_pvid = 0x%04x, port_vlan_flags = 0x%02x\n", vsi->info.pvid, vsi->info.fcoe_pvid, vsi->info.port_vlan_flags); dev_info(&pf->pdev->dev, " info: pvlan_reserved[] = 0x%02x 0x%02x 0x%02x\n", vsi->info.pvlan_reserved[0], vsi->info.pvlan_reserved[1], vsi->info.pvlan_reserved[2]); dev_info(&pf->pdev->dev, " info: ingress_table = 0x%08x, egress_table = 0x%08x\n", vsi->info.ingress_table, vsi->info.egress_table); dev_info(&pf->pdev->dev, " info: cas_pv_stag = 0x%04x, cas_pv_flags= 0x%02x, cas_pv_reserved = 0x%02x\n", vsi->info.cas_pv_tag, vsi->info.cas_pv_flags, vsi->info.cas_pv_reserved); dev_info(&pf->pdev->dev, " info: queue_mapping[0..7 ] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.queue_mapping[0], vsi->info.queue_mapping[1], vsi->info.queue_mapping[2], vsi->info.queue_mapping[3], vsi->info.queue_mapping[4], vsi->info.queue_mapping[5], vsi->info.queue_mapping[6], vsi->info.queue_mapping[7]); dev_info(&pf->pdev->dev, " info: queue_mapping[8..15] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.queue_mapping[8], vsi->info.queue_mapping[9], vsi->info.queue_mapping[10], vsi->info.queue_mapping[11], vsi->info.queue_mapping[12], vsi->info.queue_mapping[13], vsi->info.queue_mapping[14], vsi->info.queue_mapping[15]); dev_info(&pf->pdev->dev, " info: tc_mapping[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.tc_mapping[0], vsi->info.tc_mapping[1], vsi->info.tc_mapping[2], vsi->info.tc_mapping[3], vsi->info.tc_mapping[4], vsi->info.tc_mapping[5], vsi->info.tc_mapping[6], vsi->info.tc_mapping[7]); dev_info(&pf->pdev->dev, " info: queueing_opt_flags = 0x%02x queueing_opt_reserved[0..2] = 0x%02x 0x%02x 0x%02x\n", vsi->info.queueing_opt_flags, vsi->info.queueing_opt_reserved[0], vsi->info.queueing_opt_reserved[1], vsi->info.queueing_opt_reserved[2]); dev_info(&pf->pdev->dev, " info: up_enable_bits = 0x%02x\n", vsi->info.up_enable_bits); dev_info(&pf->pdev->dev, " info: sched_reserved = 0x%02x, outer_up_table = 0x%04x\n", vsi->info.sched_reserved, vsi->info.outer_up_table); dev_info(&pf->pdev->dev, " info: cmd_reserved[] = 0x%02x 0x%02x 0x%02x 0x0%02x 
0x%02x 0x%02x 0x%02x 0x0%02x\n", vsi->info.cmd_reserved[0], vsi->info.cmd_reserved[1], vsi->info.cmd_reserved[2], vsi->info.cmd_reserved[3], vsi->info.cmd_reserved[4], vsi->info.cmd_reserved[5], vsi->info.cmd_reserved[6], vsi->info.cmd_reserved[7]); dev_info(&pf->pdev->dev, " info: qs_handle[] = 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n", vsi->info.qs_handle[0], vsi->info.qs_handle[1], vsi->info.qs_handle[2], vsi->info.qs_handle[3], vsi->info.qs_handle[4], vsi->info.qs_handle[5], vsi->info.qs_handle[6], vsi->info.qs_handle[7]); dev_info(&pf->pdev->dev, " info: stat_counter_idx = 0x%04x, sched_id = 0x%04x\n", vsi->info.stat_counter_idx, vsi->info.sched_id); dev_info(&pf->pdev->dev, " info: resp_reserved[] = 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", vsi->info.resp_reserved[0], vsi->info.resp_reserved[1], vsi->info.resp_reserved[2], vsi->info.resp_reserved[3], vsi->info.resp_reserved[4], vsi->info.resp_reserved[5], vsi->info.resp_reserved[6], vsi->info.resp_reserved[7], vsi->info.resp_reserved[8], vsi->info.resp_reserved[9], vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]); if (vsi->back) dev_info(&pf->pdev->dev, " pf = %p\n", vsi->back); dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); dev_info(&pf->pdev->dev, " tc_config: numtc = %d, enabled_tc = 0x%x\n", vsi->tc_config.numtc, vsi->tc_config.enabled_tc); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, " tc_config: tc = %d, qoffset = %d, qcount = %d, netdev_tc = %d\n", i, vsi->tc_config.tc_info[i].qoffset, vsi->tc_config.tc_info[i].qcount, vsi->tc_config.tc_info[i].netdev_tc); } dev_info(&pf->pdev->dev, " bw: bw_limit = %d, bw_max_quanta = %d\n", vsi->bw_limit, vsi->bw_max_quanta); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, " bw[%d]: ets_share_credits = %d, ets_limit_credits = %d, max_quanta = %d\n", i, vsi->bw_ets_share_credits[i], vsi->bw_ets_limit_credits[i], vsi->bw_ets_max_quanta[i]); } } /** * i40e_dbg_dump_aq_desc - handles dump aq_desc write into command datum * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) { struct i40e_adminq_ring *ring; struct i40e_hw *hw = &pf->hw; char hdr[32]; int i; snprintf(hdr, sizeof(hdr), "%s %s: ", dev_driver_string(&pf->pdev->dev), dev_name(&pf->pdev->dev)); /* first the send (command) ring, then the receive (event) ring */ dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); ring = &(hw->aq.asq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); dev_info(&pf->pdev->dev, " at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, d->cookie_high, d->cookie_low); print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, 16, 1, d->params.raw, 16, 0); } dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); ring = &(hw->aq.arq); for (i = 0; i < ring->count; i++) { struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i); dev_info(&pf->pdev->dev, " ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n", i, d->flags, d->opcode, d->datalen, d->retval, d->cookie_high, d->cookie_low); print_hex_dump(KERN_INFO, hdr, DUMP_PREFIX_NONE, 16, 1, d->params.raw, 16, 0); } } /** * i40e_dbg_dump_desc - handles dump desc write into command datum * @cnt: number of arguments that the user supplied * @vsi_seid: vsi id entered by user * @ring_id: ring id entered by user * @desc_n: descriptor number entered by user * @pf: the 
i40e_pf created in command write * @is_rx_ring: true if rx, false if tx **/ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, struct i40e_pf *pf, bool is_rx_ring) { struct i40e_tx_desc *txd; union i40e_rx_desc *rxd; struct i40e_ring ring; struct i40e_vsi *vsi; int i; vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); return; } if (!vsi->tx_rings || !vsi->tx_rings[0]->desc) { dev_info(&pf->pdev->dev, "descriptor rings have not been allocated for vsi %d\n", vsi_seid); return; } if (is_rx_ring) ring = *vsi->rx_rings[ring_id]; else ring = *vsi->tx_rings[ring_id]; if (cnt == 2) { dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); for (i = 0; i < ring.count; i++) { if (!is_rx_ring) { txd = I40E_TX_DESC(&ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(&ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(&ring, i); dev_info(&pf->pdev->dev, " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); } } } else if (cnt == 3) { if (desc_n >= ring.count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); return; } if (!is_rx_ring) { txd = I40E_TX_DESC(&ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(&ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(&ring, desc_n); dev_info(&pf->pdev->dev, "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); } } else { dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); } } /** * i40e_dbg_dump_vsi_no_seid - handles dump vsi write into command datum * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) { int i; for (i = 0; i < pf->hw.func_caps.num_vsis; i++) if (pf->vsi[i]) dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i, pf->vsi[i]->seid); } /** * i40e_dbg_dump_stats - handles dump stats write into command datum * @pf: the i40e_pf created in command write * @estats: the eth stats structure to be dumped **/ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, struct i40e_eth_stats *estats) { dev_info(&pf->pdev->dev, " ethstats:\n"); dev_info(&pf->pdev->dev, " rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n", estats->rx_bytes, estats->rx_unicast, estats->rx_multicast); dev_info(&pf->pdev->dev, " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n", estats->rx_broadcast, estats->rx_discards, estats->rx_errors); dev_info(&pf->pdev->dev, " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n", estats->rx_missed, 
estats->rx_unknown_protocol, estats->tx_bytes); dev_info(&pf->pdev->dev, " tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n", estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast); dev_info(&pf->pdev->dev, " tx_discards = \t%lld \ttx_errors = \t\t%lld\n", estats->tx_discards, estats->tx_errors); } /** * i40e_dbg_dump_stats - handles dump stats write into command datum * @pf: the i40e_pf created in command write * @stats: the stats structure to be dumped **/ static void i40e_dbg_dump_stats(struct i40e_pf *pf, struct i40e_hw_port_stats *stats) { int i; dev_info(&pf->pdev->dev, " stats:\n"); dev_info(&pf->pdev->dev, " crc_errors = \t\t%lld \tillegal_bytes = \t%lld \terror_bytes = \t\t%lld\n", stats->crc_errors, stats->illegal_bytes, stats->error_bytes); dev_info(&pf->pdev->dev, " mac_local_faults = \t%lld \tmac_remote_faults = \t%lld \trx_length_errors = \t%lld\n", stats->mac_local_faults, stats->mac_remote_faults, stats->rx_length_errors); dev_info(&pf->pdev->dev, " link_xon_rx = \t\t%lld \tlink_xoff_rx = \t\t%lld \tlink_xon_tx = \t\t%lld\n", stats->link_xon_rx, stats->link_xoff_rx, stats->link_xon_tx); dev_info(&pf->pdev->dev, " link_xoff_tx = \t\t%lld \trx_size_64 = \t\t%lld \trx_size_127 = \t\t%lld\n", stats->link_xoff_tx, stats->rx_size_64, stats->rx_size_127); dev_info(&pf->pdev->dev, " rx_size_255 = \t\t%lld \trx_size_511 = \t\t%lld \trx_size_1023 = \t\t%lld\n", stats->rx_size_255, stats->rx_size_511, stats->rx_size_1023); dev_info(&pf->pdev->dev, " rx_size_big = \t\t%lld \trx_undersize = \t\t%lld \trx_jabber = \t\t%lld\n", stats->rx_size_big, stats->rx_undersize, stats->rx_jabber); dev_info(&pf->pdev->dev, " rx_fragments = \t\t%lld \trx_oversize = \t\t%lld \ttx_size_64 = \t\t%lld\n", stats->rx_fragments, stats->rx_oversize, stats->tx_size_64); dev_info(&pf->pdev->dev, " tx_size_127 = \t\t%lld \ttx_size_255 = \t\t%lld \ttx_size_511 = \t\t%lld\n", stats->tx_size_127, stats->tx_size_255, stats->tx_size_511); dev_info(&pf->pdev->dev, " tx_size_1023 = \t\t%lld \ttx_size_big = \t\t%lld \tmac_short_packet_dropped = \t%lld\n", stats->tx_size_1023, stats->tx_size_big, stats->mac_short_packet_dropped); for (i = 0; i < 8; i += 4) { dev_info(&pf->pdev->dev, " priority_xon_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", i, stats->priority_xon_rx[i], i+1, stats->priority_xon_rx[i+1], i+2, stats->priority_xon_rx[i+2], i+3, stats->priority_xon_rx[i+3]); } for (i = 0; i < 8; i += 4) { dev_info(&pf->pdev->dev, " priority_xoff_rx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", i, stats->priority_xoff_rx[i], i+1, stats->priority_xoff_rx[i+1], i+2, stats->priority_xoff_rx[i+2], i+3, stats->priority_xoff_rx[i+3]); } for (i = 0; i < 8; i += 4) { dev_info(&pf->pdev->dev, " priority_xon_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", i, stats->priority_xon_tx[i], i+1, stats->priority_xon_tx[i+1], i+2, stats->priority_xon_tx[i+2], i+3, stats->priority_xon_rx[i+3]); } for (i = 0; i < 8; i += 4) { dev_info(&pf->pdev->dev, " priority_xoff_tx[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", i, stats->priority_xoff_tx[i], i+1, stats->priority_xoff_tx[i+1], i+2, stats->priority_xoff_tx[i+2], i+3, stats->priority_xoff_tx[i+3]); } for (i = 0; i < 8; i += 4) { dev_info(&pf->pdev->dev, " priority_xon_2_xoff[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld \t[%d] = \t%lld\n", i, stats->priority_xon_2_xoff[i], i+1, stats->priority_xon_2_xoff[i+1], i+2, stats->priority_xon_2_xoff[i+2], i+3, 
stats->priority_xon_2_xoff[i+3]); } i40e_dbg_dump_eth_stats(pf, &stats->eth); } /** * i40e_dbg_dump_veb_seid - handles dump stats of a single given veb * @pf: the i40e_pf created in command write * @seid: the seid the user put in **/ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; if ((seid < I40E_BASE_VEB_SEID) || (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) { dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); return; } veb = i40e_dbg_find_veb(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); return; } dev_info(&pf->pdev->dev, "veb idx=%d,%d stats_ic=%d seid=%d uplink=%d\n", veb->idx, veb->veb_idx, veb->stats_idx, veb->seid, veb->uplink_seid); i40e_dbg_dump_eth_stats(pf, &veb->stats); } /** * i40e_dbg_dump_veb_all - dumps all known veb's stats * @pf: the i40e_pf created in command write **/ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) { struct i40e_veb *veb; int i; for (i = 0; i < I40E_MAX_VEB; i++) { veb = pf->veb[i]; if (veb) i40e_dbg_dump_veb_seid(pf, veb->seid); } } /** * i40e_dbg_cmd_fd_ctrl - Enable/disable FD sideband/ATR * @pf: the pf that would be altered * @flag: flag that needs enabling or disabling * @enable: Enable/disable FD SD/ATR **/ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) { if (enable) pf->flags |= flag; else pf->flags &= ~flag; dev_info(&pf->pdev->dev, "requesting a pf reset\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ static ssize_t i40e_dbg_command_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; char *cmd_buf, *cmd_buf_tmp; int bytes_not_copied; struct i40e_vsi *vsi; int vsi_seid; int veb_seid; int cnt; /* don't allow partial writes */ if (*ppos != 0) return 0; cmd_buf = kzalloc(count + 1, GFP_KERNEL); if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); if (bytes_not_copied < 0) return bytes_not_copied; if (bytes_not_copied > 0) count -= bytes_not_copied; cmd_buf[count] = '\0'; cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; count = cmd_buf_tmp - cmd_buf + 1; } if (strncmp(cmd_buf, "add vsi", 7) == 0) { vsi_seid = -1; cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); if (cnt == 0) { /* default to PF VSI */ vsi_seid = pf->vsi[pf->lan_vsi]->seid; } else if (vsi_seid < 0) { dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", vsi_seid); goto command_write_done; } vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); if (vsi) dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", vsi->seid, vsi->uplink_seid); else dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); } else if (strncmp(cmd_buf, "del vsi", 7) == 0) { sscanf(&cmd_buf[7], "%i", &vsi_seid); vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", vsi_seid); goto command_write_done; } dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid); i40e_vsi_release(vsi); } else if (strncmp(cmd_buf, "add relay", 9) == 0) { struct i40e_veb *veb; int uplink_seid, i; cnt = sscanf(&cmd_buf[9], "%i %i", &uplink_seid, &vsi_seid); if (cnt != 2) { dev_info(&pf->pdev->dev, "add relay: bad command string, cnt=%d\n", cnt); goto command_write_done; } else if (uplink_seid < 0) { 
dev_info(&pf->pdev->dev, "add relay %d: bad uplink seid\n", uplink_seid); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "add relay: VSI %d not found\n", vsi_seid); goto command_write_done; } for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) break; if (i >= I40E_MAX_VEB && uplink_seid != 0 && uplink_seid != pf->mac_seid) { dev_info(&pf->pdev->dev, "add relay: relay uplink %d not found\n", uplink_seid); goto command_write_done; } veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, vsi->tc_config.enabled_tc); if (veb) dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); else dev_info(&pf->pdev->dev, "add relay failed\n"); } else if (strncmp(cmd_buf, "del relay", 9) == 0) { int i; cnt = sscanf(&cmd_buf[9], "%i", &veb_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "del relay: bad command string, cnt=%d\n", cnt); goto command_write_done; } else if (veb_seid < 0) { dev_info(&pf->pdev->dev, "del relay %d: bad relay seid\n", veb_seid); goto command_write_done; } /* find the veb */ for (i = 0; i < I40E_MAX_VEB; i++) if (pf->veb[i] && pf->veb[i]->seid == veb_seid) break; if (i >= I40E_MAX_VEB) { dev_info(&pf->pdev->dev, "del relay: relay %d not found\n", veb_seid); goto command_write_done; } dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); i40e_veb_release(pf->veb[i]); } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { struct i40e_mac_filter *f; int vlan = 0; u8 ma[6]; int ret; cnt = sscanf(&cmd_buf[11], "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i", &vsi_seid, &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5], &vlan); if (cnt == 7) { vlan = 0; } else if (cnt != 8) { dev_info(&pf->pdev->dev, "add macaddr: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "add macaddr: VSI %d not found\n", vsi_seid); goto command_write_done; } f = i40e_add_filter(vsi, ma, vlan, false, false); ret = i40e_sync_vsi_filters(vsi); if (f && !ret) dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d added to VSI %d\n", ma, vlan, vsi_seid); else dev_info(&pf->pdev->dev, "add macaddr: %pM vlan=%d to VSI %d failed, f=%p ret=%d\n", ma, vlan, vsi_seid, f, ret); } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { int vlan = 0; u8 ma[6]; int ret; cnt = sscanf(&cmd_buf[11], "%i %hhx:%hhx:%hhx:%hhx:%hhx:%hhx %i", &vsi_seid, &ma[0], &ma[1], &ma[2], &ma[3], &ma[4], &ma[5], &vlan); if (cnt == 7) { vlan = 0; } else if (cnt != 8) { dev_info(&pf->pdev->dev, "del macaddr: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del macaddr: VSI %d not found\n", vsi_seid); goto command_write_done; } i40e_del_filter(vsi, ma, vlan, false, false); ret = i40e_sync_vsi_filters(vsi); if (!ret) dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d removed from VSI %d\n", ma, vlan, vsi_seid); else dev_info(&pf->pdev->dev, "del macaddr: %pM vlan=%d from VSI %d failed, ret=%d\n", ma, vlan, vsi_seid, ret); } else if (strncmp(cmd_buf, "add pvid", 8) == 0) { i40e_status ret; u16 vid; int v; cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); if (cnt != 2) { dev_info(&pf->pdev->dev, "add pvid: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n", vsi_seid); goto command_write_done; } vid = (unsigned)v; ret = i40e_vsi_add_pvid(vsi, vid); if (!ret) dev_info(&pf->pdev->dev, 
"add pvid: %d added to VSI %d\n", vid, vsi_seid); else dev_info(&pf->pdev->dev, "add pvid: %d to VSI %d failed, ret=%d\n", vid, vsi_seid, ret); } else if (strncmp(cmd_buf, "del pvid", 8) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "del pvid: bad command string, cnt=%d\n", cnt); goto command_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "del pvid: VSI %d not found\n", vsi_seid); goto command_write_done; } i40e_vsi_remove_pvid(vsi); dev_info(&pf->pdev->dev, "del pvid: removed from VSI %d\n", vsi_seid); } else if (strncmp(cmd_buf, "dump", 4) == 0) { if (strncmp(&cmd_buf[5], "switch", 6) == 0) { i40e_fetch_switch_configuration(pf, true); } else if (strncmp(&cmd_buf[5], "vsi", 3) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt > 0) i40e_dbg_dump_vsi_seid(pf, vsi_seid); else i40e_dbg_dump_vsi_no_seid(pf); } else if (strncmp(&cmd_buf[5], "veb", 3) == 0) { cnt = sscanf(&cmd_buf[8], "%i", &vsi_seid); if (cnt > 0) i40e_dbg_dump_veb_seid(pf, vsi_seid); else i40e_dbg_dump_veb_all(pf); } else if (strncmp(&cmd_buf[5], "desc", 4) == 0) { int ring_id, desc_n; if (strncmp(&cmd_buf[10], "rx", 2) == 0) { cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, desc_n, pf, true); } else if (strncmp(&cmd_buf[10], "tx", 2) == 0) { cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, desc_n, pf, false); } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) { i40e_dbg_dump_aq_desc(pf); } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, "dump desc aq\n"); } } else if (strncmp(&cmd_buf[5], "stats", 5) == 0) { dev_info(&pf->pdev->dev, "pf stats:\n"); i40e_dbg_dump_stats(pf, &pf->stats); dev_info(&pf->pdev->dev, "pf stats_offsets:\n"); i40e_dbg_dump_stats(pf, &pf->stats_offsets); } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { dev_info(&pf->pdev->dev, "core reset count: %d\n", pf->corer_count); dev_info(&pf->pdev->dev, "global reset count: %d\n", pf->globr_count); dev_info(&pf->pdev->dev, "emp reset count: %d\n", pf->empr_count); dev_info(&pf->pdev->dev, "pf reset count: %d\n", pf->pfr_count); } else if (strncmp(&cmd_buf[5], "port", 4) == 0) { struct i40e_aqc_query_port_ets_config_resp *bw_data; struct i40e_dcbx_config *cfg = &pf->hw.local_dcbx_config; struct i40e_dcbx_config *r_cfg = &pf->hw.remote_dcbx_config; int i, ret; bw_data = kzalloc(sizeof( struct i40e_aqc_query_port_ets_config_resp), GFP_KERNEL); if (!bw_data) { ret = -ENOMEM; goto command_write_done; } ret = i40e_aq_query_port_ets_config(&pf->hw, pf->mac_seid, bw_data, NULL); if (ret) { dev_info(&pf->pdev->dev, "Query Port ETS Config AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); kfree(bw_data); bw_data = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "port bw: tc_valid=0x%x tc_strict_prio=0x%x, tc_bw_max=0x%04x,0x%04x\n", bw_data->tc_valid_bits, bw_data->tc_strict_priority_bits, le16_to_cpu(bw_data->tc_bw_max[0]), le16_to_cpu(bw_data->tc_bw_max[1])); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n", bw_data->tc_bw_share_credits[i], le16_to_cpu(bw_data->tc_bw_limits[i])); } kfree(bw_data); bw_data = NULL; dev_info(&pf->pdev->dev, "port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", cfg->etscfg.willing, cfg->etscfg.cbs, 
cfg->etscfg.maxtcs); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, cfg->etscfg.prioritytable[i], cfg->etscfg.tcbwtable[i], cfg->etscfg.tsatable[i]); } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, cfg->etsrec.prioritytable[i], cfg->etsrec.tcbwtable[i], cfg->etsrec.tsatable[i]); } dev_info(&pf->pdev->dev, "port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", cfg->pfc.willing, cfg->pfc.mbc, cfg->pfc.pfccap, cfg->pfc.pfcenable); dev_info(&pf->pdev->dev, "port app_table: num_apps=%d\n", cfg->numapps); for (i = 0; i < cfg->numapps; i++) { dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n", i, cfg->app[i].priority, cfg->app[i].selector, cfg->app[i].protocolid); } /* Peer TLV DCBX data */ dev_info(&pf->pdev->dev, "remote port ets_cfg: willing=%d cbs=%d, maxtcs=%d\n", r_cfg->etscfg.willing, r_cfg->etscfg.cbs, r_cfg->etscfg.maxtcs); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, r_cfg->etscfg.prioritytable[i], r_cfg->etscfg.tcbwtable[i], r_cfg->etscfg.tsatable[i]); } for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", i, r_cfg->etsrec.prioritytable[i], r_cfg->etsrec.tcbwtable[i], r_cfg->etsrec.tsatable[i]); } dev_info(&pf->pdev->dev, "remote port pfc_cfg: willing=%d mbc=%d, pfccap=%d pfcenable=0x%x\n", r_cfg->pfc.willing, r_cfg->pfc.mbc, r_cfg->pfc.pfccap, r_cfg->pfc.pfcenable); dev_info(&pf->pdev->dev, "remote port app_table: num_apps=%d\n", r_cfg->numapps); for (i = 0; i < r_cfg->numapps; i++) { dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n", i, r_cfg->app[i].priority, r_cfg->app[i].selector, r_cfg->app[i].protocolid); } } else { dev_info(&pf->pdev->dev, "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n"); dev_info(&pf->pdev->dev, "dump switch, dump vsi [seid] or\n"); dev_info(&pf->pdev->dev, "dump stats\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); dev_info(&pf->pdev->dev, "dump port\n"); dev_info(&pf->pdev->dev, "dump debug fwdata <cluster_id> <table_id> <index>\n"); } } else if (strncmp(cmd_buf, "msg_enable", 10) == 0) { u32 level; cnt = sscanf(&cmd_buf[10], "%i", &level); if (cnt) { if (I40E_DEBUG_USER & level) { pf->hw.debug_mask = level; dev_info(&pf->pdev->dev, "set hw.debug_mask = 0x%08x\n", pf->hw.debug_mask); } pf->msg_enable = level; dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n", pf->msg_enable); } else { dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n", pf->msg_enable); } } else if (strncmp(cmd_buf, "pfr", 3) == 0) { dev_info(&pf->pdev->dev, "forcing PFR\n"); i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "corer", 5) == 0) { dev_info(&pf->pdev->dev, "forcing CoreR\n"); i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "globr", 5) == 0) { dev_info(&pf->pdev->dev, "forcing GlobR\n"); i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "empr", 4) == 0) { dev_info(&pf->pdev->dev, "forcing EMPR\n"); i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); } else if (strncmp(cmd_buf, "read", 4) == 0) { u32 address; u32 value; cnt = sscanf(&cmd_buf[4], "%i", &address); if (cnt != 1) { dev_info(&pf->pdev->dev, "read 
<reg>\n"); goto command_write_done; } /* check the range on address */ if (address >= I40E_MAX_REGISTER) { dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n", address); goto command_write_done; } value = rd32(&pf->hw, address); dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", address, value); } else if (strncmp(cmd_buf, "write", 5) == 0) { u32 address, value; cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value); if (cnt != 2) { dev_info(&pf->pdev->dev, "write <reg> <value>\n"); goto command_write_done; } /* check the range on address */ if (address >= I40E_MAX_REGISTER) { dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n", address); goto command_write_done; } wr32(&pf->hw, address, value); value = rd32(&pf->hw, address); dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", address, value); } else if (strncmp(cmd_buf, "clear_stats", 11) == 0) { if (strncmp(&cmd_buf[12], "vsi", 3) == 0) { cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid); if (cnt == 0) { int i; for (i = 0; i < pf->hw.func_caps.num_vsis; i++) i40e_vsi_reset_stats(pf->vsi[i]); dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); } else if (cnt == 1) { vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "clear_stats vsi: bad vsi %d\n", vsi_seid); goto command_write_done; } i40e_vsi_reset_stats(vsi); dev_info(&pf->pdev->dev, "vsi clear stats called for vsi %d\n", vsi_seid); } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); } } else if (strncmp(&cmd_buf[12], "pf", 2) == 0) { i40e_pf_reset_stats(pf); dev_info(&pf->pdev->dev, "pf clear stats called\n"); } else { dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats pf\n"); } } else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) { struct i40e_aq_desc *desc; i40e_status ret; desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[11], "%hx %hx %hx %hx %x %x %x %x %x %x", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, &desc->params.internal.param0, &desc->params.internal.param1, &desc->params.internal.param2, &desc->params.internal.param3); if (cnt != 10) { dev_info(&pf->pdev->dev, "send aq_cmd: bad command string, cnt=%d\n", cnt); kfree(desc); desc = NULL; goto command_write_done; } ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); if (!ret) { dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x AQ Error: %d\n", desc->opcode, pf->hw.aq.asq_last_status); } else { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x Status: %d\n", desc->opcode, ret); } dev_info(&pf->pdev->dev, "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", desc->flags, desc->opcode, desc->datalen, desc->retval, desc->cookie_high, desc->cookie_low, desc->params.internal.param0, desc->params.internal.param1, desc->params.internal.param2, desc->params.internal.param3); kfree(desc); desc = NULL; } else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) { struct i40e_aq_desc *desc; i40e_status ret; u16 buffer_len; u8 *buff; desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL); if (!desc) goto command_write_done; cnt = sscanf(&cmd_buf[20], "%hx %hx %hx %hx %x %x %x %x %x %x %hd", &desc->flags, &desc->opcode, &desc->datalen, &desc->retval, &desc->cookie_high, &desc->cookie_low, &desc->params.internal.param0, &desc->params.internal.param1, 
&desc->params.internal.param2, &desc->params.internal.param3, &buffer_len); if (cnt != 11) { dev_info(&pf->pdev->dev, "send indirect aq_cmd: bad command string, cnt=%d\n", cnt); kfree(desc); desc = NULL; goto command_write_done; } /* Just stub a buffer big enough in case user messed up */ if (buffer_len == 0) buffer_len = 1280; buff = kzalloc(buffer_len, GFP_KERNEL); if (!buff) { kfree(desc); desc = NULL; goto command_write_done; } desc->flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); ret = i40e_asq_send_command(&pf->hw, desc, buff, buffer_len, NULL); if (!ret) { dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); } else if (ret == I40E_ERR_ADMIN_QUEUE_ERROR) { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x AQ Error: %d\n", desc->opcode, pf->hw.aq.asq_last_status); } else { dev_info(&pf->pdev->dev, "AQ command send failed Opcode %x Status: %d\n", desc->opcode, ret); } dev_info(&pf->pdev->dev, "AQ desc WB 0x%04x 0x%04x 0x%04x 0x%04x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", desc->flags, desc->opcode, desc->datalen, desc->retval, desc->cookie_high, desc->cookie_low, desc->params.internal.param0, desc->params.internal.param1, desc->params.internal.param2, desc->params.internal.param3); print_hex_dump(KERN_INFO, "AQ buffer WB: ", DUMP_PREFIX_OFFSET, 16, 1, buff, buffer_len, true); kfree(buff); buff = NULL; kfree(desc); desc = NULL; } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { struct i40e_fdir_data fd_data; u16 packet_len, i, j = 0; char *asc_packet; bool add = false; int ret; asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, GFP_KERNEL); if (!asc_packet) goto command_write_done; fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, GFP_KERNEL); if (!fd_data.raw_packet) { kfree(asc_packet); asc_packet = NULL; goto command_write_done; } if (strncmp(cmd_buf, "add", 3) == 0) add = true; cnt = sscanf(&cmd_buf[13], "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", &fd_data.q_index, &fd_data.flex_off, &fd_data.pctype, &fd_data.dest_vsi, &fd_data.dest_ctl, &fd_data.fd_status, &fd_data.cnt_index, &fd_data.fd_id, &packet_len, asc_packet); if (cnt != 10) { dev_info(&pf->pdev->dev, "program fd_filter: bad command string, cnt=%d\n", cnt); kfree(asc_packet); asc_packet = NULL; kfree(fd_data.raw_packet); goto command_write_done; } /* fix packet length if user entered 0 */ if (packet_len == 0) packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP; /* make sure to check the max as well */ packet_len = min_t(u16, packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); for (i = 0; i < packet_len; i++) { sscanf(&asc_packet[j], "%2hhx ", &fd_data.raw_packet[i]); j += 3; } dev_info(&pf->pdev->dev, "FD raw packet dump\n"); print_hex_dump(KERN_INFO, "FD raw packet: ", DUMP_PREFIX_OFFSET, 16, 1, fd_data.raw_packet, packet_len, true); ret = i40e_program_fdir_filter(&fd_data, pf, add); if (!ret) { dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); } else { dev_info(&pf->pdev->dev, "Filter command send failed %d\n", ret); } kfree(fd_data.raw_packet); fd_data.raw_packet = NULL; kfree(asc_packet); asc_packet = NULL; } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) { i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) { i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false); } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) { i40e_dbg_cmd_fd_ctrl(pf, 
I40E_FLAG_FD_SB_ENABLED, true); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; ret = i40e_aq_stop_lldp(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, "Stop LLDP AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, I40E_ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, true, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Add Control Packet Filter AQ command failed =0x%x\n", __func__, pf->hw.aq.asq_last_status); goto command_write_done; } #ifdef CONFIG_I40E_DCB pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; #endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "start", 5) == 0) { int ret; ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, pf->hw.mac.addr, I40E_ETH_P_LLDP, 0, pf->vsi[pf->lan_vsi]->seid, 0, false, NULL, NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Remove Control Packet Filter AQ command failed =0x%x\n", __func__, pf->hw.aq.asq_last_status); /* Continue and start FW LLDP anyways */ } ret = i40e_aq_start_lldp(&pf->hw, NULL); if (ret) { dev_info(&pf->pdev->dev, "Start LLDP AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } #ifdef CONFIG_I40E_DCB pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; #endif /* CONFIG_I40E_DCB */ } else if (strncmp(&cmd_buf[5], "get local", 9) == 0) { u16 llen, rlen; int ret; u8 *buff; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_aq_get_lldp_mib(&pf->hw, 0, I40E_AQ_LLDP_MIB_LOCAL, buff, I40E_LLDPDU_SIZE, &llen, &rlen, NULL); if (ret) { dev_info(&pf->pdev->dev, "Get LLDP MIB (local) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); kfree(buff); buff = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); print_hex_dump(KERN_INFO, "LLDP MIB (local): ", DUMP_PREFIX_OFFSET, 16, 1, buff, I40E_LLDPDU_SIZE, true); kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { u16 llen, rlen; int ret; u8 *buff; buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_aq_get_lldp_mib(&pf->hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE, I40E_AQ_LLDP_MIB_LOCAL, buff, I40E_LLDPDU_SIZE, &llen, &rlen, NULL); if (ret) { dev_info(&pf->pdev->dev, "Get LLDP MIB (remote) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); kfree(buff); buff = NULL; goto command_write_done; } dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); print_hex_dump(KERN_INFO, "LLDP MIB (remote): ", DUMP_PREFIX_OFFSET, 16, 1, buff, I40E_LLDPDU_SIZE, true); kfree(buff); buff = NULL; } else if (strncmp(&cmd_buf[5], "event on", 8) == 0) { int ret; ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, true, NULL); if (ret) { dev_info(&pf->pdev->dev, "Config LLDP MIB Change Event (on) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } } else if (strncmp(&cmd_buf[5], "event off", 9) == 0) { int ret; ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); if (ret) { dev_info(&pf->pdev->dev, "Config LLDP MIB Change Event (off) AQ command failed =0x%x\n", pf->hw.aq.asq_last_status); goto command_write_done; } } } else if (strncmp(cmd_buf, "nvm read", 8) == 0) { u16 buffer_len, bytes; u16 module; u32 offset; u16 *buff; int ret; cnt = sscanf(&cmd_buf[8], "%hx %x %hx", &module, &offset, &buffer_len); if (cnt == 0) { module = 0; offset = 0; buffer_len = 0; } else if (cnt == 1) { offset = 0; buffer_len = 0; } 
else if (cnt == 2) { buffer_len = 0; } else if (cnt > 3) { dev_info(&pf->pdev->dev, "nvm read: bad command string, cnt=%d\n", cnt); goto command_write_done; } /* set the max length */ buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2); bytes = 2 * buffer_len; /* read at least 1k bytes, no more than 4kB */ bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE); buff = kzalloc(bytes, GFP_KERNEL); if (!buff) goto command_write_done; ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); if (ret) { dev_info(&pf->pdev->dev, "Failed Acquiring NVM resource for read err=%d status=0x%x\n", ret, pf->hw.aq.asq_last_status); kfree(buff); goto command_write_done; } ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset), bytes, (u8 *)buff, true, NULL); i40e_release_nvm(&pf->hw); if (ret) { dev_info(&pf->pdev->dev, "Read NVM AQ failed err=%d status=0x%x\n", ret, pf->hw.aq.asq_last_status); } else { dev_info(&pf->pdev->dev, "Read NVM module=0x%x offset=0x%x words=%d\n", module, offset, buffer_len); if (bytes) print_hex_dump(KERN_INFO, "NVM Dump: ", DUMP_PREFIX_OFFSET, 16, 2, buff, bytes, true); } kfree(buff); buff = NULL; } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); dev_info(&pf->pdev->dev, "available commands\n"); dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n"); dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n"); dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n"); dev_info(&pf->pdev->dev, " del relay <relay_seid>\n"); dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n"); dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n"); dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n"); dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n"); dev_info(&pf->pdev->dev, " dump switch\n"); dev_info(&pf->pdev->dev, " dump vsi [seid]\n"); dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); dev_info(&pf->pdev->dev, " dump desc aq\n"); dev_info(&pf->pdev->dev, " dump stats\n"); dev_info(&pf->pdev->dev, " dump reset stats\n"); dev_info(&pf->pdev->dev, " msg_enable [level]\n"); dev_info(&pf->pdev->dev, " read <reg>\n"); dev_info(&pf->pdev->dev, " write <reg> <value>\n"); dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); dev_info(&pf->pdev->dev, " clear_stats pf\n"); dev_info(&pf->pdev->dev, " pfr\n"); dev_info(&pf->pdev->dev, " corer\n"); dev_info(&pf->pdev->dev, " globr\n"); dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n"); dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n"); dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); dev_info(&pf->pdev->dev, " fd-atr off\n"); dev_info(&pf->pdev->dev, " fd-atr on\n"); dev_info(&pf->pdev->dev, " fd-sb off\n"); dev_info(&pf->pdev->dev, " fd-sb on\n"); dev_info(&pf->pdev->dev, " lldp start\n"); dev_info(&pf->pdev->dev, " lldp stop\n"); dev_info(&pf->pdev->dev, " lldp get local\n"); dev_info(&pf->pdev->dev, " lldp get remote\n"); dev_info(&pf->pdev->dev, " lldp event on\n"); dev_info(&pf->pdev->dev, " lldp event off\n"); 
dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n"); } command_write_done: kfree(cmd_buf); cmd_buf = NULL; return count; } static const struct file_operations i40e_dbg_command_fops = { .owner = THIS_MODULE, .open = simple_open, .read = i40e_dbg_command_read, .write = i40e_dbg_command_write, }; /************************************************************** * netdev_ops * The netdev_ops entry in debugfs is for giving the driver commands * to be executed from the netdev operations. **************************************************************/ static char i40e_dbg_netdev_ops_buf[256] = "hello world"; /** * i40e_dbg_netdev_ops - read for netdev_ops datum * @filp: the opened file * @buffer: where to write the data for the user to read * @count: the size of the user's buffer * @ppos: file position offset **/ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; int buf_size = 256; char *buf; int len; /* don't allow partal reads */ if (*ppos != 0) return 0; if (count < buf_size) return -ENOSPC; buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOSPC; len = snprintf(buf, buf_size, "%s: %s\n", pf->vsi[pf->lan_vsi]->netdev->name, i40e_dbg_netdev_ops_buf); bytes_not_copied = copy_to_user(buffer, buf, len); kfree(buf); if (bytes_not_copied < 0) return bytes_not_copied; *ppos = len; return len; } /** * i40e_dbg_netdev_ops_write - write into netdev_ops datum * @filp: the opened file * @buffer: where to find the user's data * @count: the length of the user's data * @ppos: file position offset **/ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp, const char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; int bytes_not_copied; struct i40e_vsi *vsi; char *buf_tmp; int vsi_seid; int i, cnt; /* don't allow partial writes */ if (*ppos != 0) return 0; if (count >= sizeof(i40e_dbg_netdev_ops_buf)) return -ENOSPC; memset(i40e_dbg_netdev_ops_buf, 0, sizeof(i40e_dbg_netdev_ops_buf)); bytes_not_copied = copy_from_user(i40e_dbg_netdev_ops_buf, buffer, count); if (bytes_not_copied < 0) return bytes_not_copied; else if (bytes_not_copied > 0) count -= bytes_not_copied; i40e_dbg_netdev_ops_buf[count] = '\0'; buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n'); if (buf_tmp) { *buf_tmp = '\0'; count = buf_tmp - i40e_dbg_netdev_ops_buf + 1; } if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not found\n", vsi_seid); goto netdev_ops_write_done; } if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "tx_timeout called\n"); } else { dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); } } else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) { int mtu; cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i", &vsi_seid, &mtu); if (cnt != 2) { dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "change_mtu: VSI %d not found\n", vsi_seid); goto netdev_ops_write_done; } if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_change_mtu(vsi->netdev, mtu); rtnl_unlock(); 
dev_info(&pf->pdev->dev, "change_mtu called\n"); } else { dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); } } else if (strncmp(i40e_dbg_netdev_ops_buf, "set_rx_mode", 11) == 0) { cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "set_rx_mode: VSI %d not found\n", vsi_seid); goto netdev_ops_write_done; } if (rtnl_trylock()) { vsi->netdev->netdev_ops->ndo_set_rx_mode(vsi->netdev); rtnl_unlock(); dev_info(&pf->pdev->dev, "set_rx_mode called\n"); } else { dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); } } else if (strncmp(i40e_dbg_netdev_ops_buf, "napi", 4) == 0) { cnt = sscanf(&i40e_dbg_netdev_ops_buf[4], "%i", &vsi_seid); if (cnt != 1) { dev_info(&pf->pdev->dev, "napi <vsi_seid>\n"); goto netdev_ops_write_done; } vsi = i40e_dbg_find_vsi(pf, vsi_seid); if (!vsi) { dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", vsi_seid); goto netdev_ops_write_done; } for (i = 0; i < vsi->num_q_vectors; i++) napi_schedule(&vsi->q_vectors[i]->napi); dev_info(&pf->pdev->dev, "napi called\n"); } else { dev_info(&pf->pdev->dev, "unknown command '%s'\n", i40e_dbg_netdev_ops_buf); dev_info(&pf->pdev->dev, "available commands\n"); dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n"); dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n"); dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n"); dev_info(&pf->pdev->dev, " napi <vsi_seid>\n"); } netdev_ops_write_done: return count; } static const struct file_operations i40e_dbg_netdev_ops_fops = { .owner = THIS_MODULE, .open = simple_open, .read = i40e_dbg_netdev_ops_read, .write = i40e_dbg_netdev_ops_write, }; /** * i40e_dbg_pf_init - setup the debugfs directory for the pf * @pf: the pf that is starting up **/ void i40e_dbg_pf_init(struct i40e_pf *pf) { struct dentry *pfile; const char *name = pci_name(pf->pdev); const struct device *dev = &pf->pdev->dev; pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); if (!pf->i40e_dbg_pf) return; pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_command_fops); if (!pfile) goto create_failed; pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_dump_fops); if (!pfile) goto create_failed; pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, &i40e_dbg_netdev_ops_fops); if (!pfile) goto create_failed; return; create_failed: dev_info(dev, "debugfs dir/file for %s failed\n", name); debugfs_remove_recursive(pf->i40e_dbg_pf); return; } /** * i40e_dbg_pf_exit - clear out the pf's debugfs entries * @pf: the pf that is stopping **/ void i40e_dbg_pf_exit(struct i40e_pf *pf) { debugfs_remove_recursive(pf->i40e_dbg_pf); pf->i40e_dbg_pf = NULL; kfree(i40e_dbg_dump_buf); i40e_dbg_dump_buf = NULL; } /** * i40e_dbg_init - start up debugfs for the driver **/ void i40e_dbg_init(void) { i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); if (!i40e_dbg_root) pr_info("init of debugfs failed\n"); } /** * i40e_dbg_exit - clean out the driver's debugfs entries **/ void i40e_dbg_exit(void) { debugfs_remove_recursive(i40e_dbg_root); i40e_dbg_root = NULL; } #endif /* CONFIG_DEBUG_FS */
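Every debugfs entry in the i40e code above follows the same pattern: a driver directory from debugfs_create_dir(), per-file file_operations built around simple_open plus read/write handlers that stage data in a kernel buffer before copying it to or from user space, and debugfs_remove_recursive() on teardown. The stand-alone module below is only an illustrative sketch of that pattern, not part of the i40e driver; the demo_* names are hypothetical.

/* Minimal debugfs sketch mirroring the i40e pattern (illustrative only). */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

static struct dentry *demo_dbg_root;   /* hypothetical directory, like i40e_dbg_root */
static int demo_value = 42;            /* datum exposed read-only through debugfs */

static ssize_t demo_dbg_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	int len;

	/* format into a kernel buffer, then let the helper copy it out and
	 * keep *ppos consistent (the i40e handlers above do this by hand) */
	len = snprintf(buf, sizeof(buf), "demo_value = %d\n", demo_value);
	return simple_read_from_buffer(buffer, count, ppos, buf, len);
}

static const struct file_operations demo_dbg_fops = {
	.owner = THIS_MODULE,
	.open  = simple_open,
	.read  = demo_dbg_read,
};

static int __init demo_dbg_init(void)
{
	demo_dbg_root = debugfs_create_dir("demo", NULL);
	if (!demo_dbg_root)
		return -ENOMEM;
	if (!debugfs_create_file("value", 0400, demo_dbg_root, NULL,
				 &demo_dbg_fops)) {
		debugfs_remove_recursive(demo_dbg_root);
		return -ENOMEM;
	}
	return 0;
}

static void __exit demo_dbg_exit(void)
{
	debugfs_remove_recursive(demo_dbg_root);
}

module_init(demo_dbg_init);
module_exit(demo_dbg_exit);
MODULE_LICENSE("GPL");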
360051.c
//
// Created by kamil on 15.05.18.
//
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <zconf.h>
#include <sys/wait.h>
#include <sys/sem.h>
#include <sys/shm.h>
#include "common.h"

enum Client_status status;
int shared_memory_id;
int semaphore_id;

/* Attach to the barbershop shared memory segment and semaphore set
 * created by the barber process. */
void init() {
    key_t project_key = ftok(PROJECT_PATH, PROJECT_ID);
    if (project_key == -1) FAILURE_EXIT("Couldn't obtain a project key\n")

    shared_memory_id = shmget(project_key, sizeof(struct Barbershop), 0);
    if (shared_memory_id == -1) FAILURE_EXIT("Couldn't create shared memory\n")

    barbershop = shmat(shared_memory_id, 0, 0);
    if (barbershop == (void*) -1) FAILURE_EXIT("Couldn't access shared memory\n")

    semaphore_id = semget(project_key, 0, 0);
    if (semaphore_id == -1) FAILURE_EXIT("Couldn't create semaphore\n")
}

/* Sit in the barber's chair, either straight from the queue (INVITED)
 * or after waking the barber up (NEWCOMER). */
void claim_chair() {
    pid_t pid = getpid();
    if (status == INVITED) {
        pop_queue();
    } else if (status == NEWCOMER) {
        while (1) {
            release_semaphore(semaphore_id);
            get_semaphore(semaphore_id);
            if (barbershop->barber_status == READY) break;
        }
        status = INVITED;
    }
    barbershop->selected_client = pid;
    printf("%lo CLIENT %i: sat in the chair \r\n", get_time(), pid);
}

/* Get S haircuts, queueing up or waking the barber as needed. */
void run_client(int S) {
    pid_t pid = getpid();
    int cuts = 0;
    while (cuts < S) {
        status = NEWCOMER;
        get_semaphore(semaphore_id);
        if (barbershop->barber_status == SLEEPING) {
            printf("%lo CLIENT %i: woke up the barber \r\n", get_time(), pid);
            barbershop->barber_status = AWOKEN;
            claim_chair();
            barbershop->barber_status = BUSY;
        } else if (!is_queue_full()) {
            enter_queue(pid);
            printf("%lo CLIENT %i: entering the queue \r\n", get_time(), pid);
        } else {
            printf("%lo CLIENT %i: the queue is full \r\n", get_time(), pid);
            release_semaphore(semaphore_id);
            return;
        }
        release_semaphore(semaphore_id);

        while (status == NEWCOMER) {
            get_semaphore(semaphore_id);
            if (barbershop->selected_client == pid) {
                status = INVITED;
                claim_chair();
                barbershop->barber_status = BUSY;
            }
            release_semaphore(semaphore_id);
        }

        while (status == INVITED) {
            get_semaphore(semaphore_id);
            if (barbershop->selected_client != pid) {
                status = SHAVED;
                printf("%lo CLIENT %i: shaved \r\n", get_time(), pid);
                barbershop->barber_status = IDLE;
                cuts++;
            }
            release_semaphore(semaphore_id);
        }
    }
    printf("%lo CLIENT %i: left barbershop after %i cuts \r\n", get_time(), pid, S);
}

int main(int argc, char** argv) {
    if (argc < 3) FAILURE_EXIT("Not enough arguments \r\n")

    int clients_number = (int) strtol(argv[1], 0, 10);
    int S = (int) strtol(argv[2], 0, 10);

    init();

    for (int i = 0; i < clients_number; ++i) {
        if (fork() == 0) {
            run_client(S);
            exit(0);
        }
    }

    /* Reap every child; the previous check broke out of the loop after the
     * first successful wait(), because errno is not updated on success. */
    while (wait(0) > 0)
        ;
}
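/*
 * Illustrative sketch only -- get_semaphore()/release_semaphore() and the
 * queue helpers used above come from common.h, which is not shown here.  This
 * is one plausible System V implementation of the two semaphore helpers as a
 * single binary semaphore at index 0, not necessarily the project's actual
 * code.
 */
#include <stdio.h>
#include <sys/sem.h>

static void semaphore_op(int sem_id, short delta) {
    struct sembuf op = { .sem_num = 0, .sem_op = delta, .sem_flg = 0 };
    if (semop(sem_id, &op, 1) == -1)
        perror("semop");
}

/* get_semaphore(id)     would then be semaphore_op(id, -1);   wait / P   */
/* release_semaphore(id) would then be semaphore_op(id, +1);   signal / V */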
990433.c
#include <stdio.h>

long long f(long long n);

/* Reads m and n and prints the binomial coefficient
 * C(m, n) = m! / (n! * (m-n)!).  Note that the factorials overflow
 * long long once m exceeds about 20. */
int main()
{
    long long n, m;
    long long n1, m1, o1;

    scanf("%lld %lld", &m, &n);
    n1 = f(m);        /* m!     */
    m1 = f(n);        /* n!     */
    o1 = f(m - n);    /* (m-n)! */
    printf("%lld", n1 / (m1 * o1));
}

/* Iterative factorial */
long long f(long long n)
{
    long long i;
    long long p = 1;
    for (i = 1; i <= n; i++) {
        p = p * i;
    }
    return p;
}
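/*
 * Sketch, not part of the program above: the same binomial coefficient
 * C(m, n) = m! / (n! * (m-n)!) computed with the multiplicative formula
 * C(m, n) = prod_{i=1..n} (m - n + i) / i.  Dividing at every step keeps the
 * intermediate values exact and within long long for far larger m than the
 * plain factorial approach used above.
 */
long long binomial(long long m, long long n)
{
    if (n < 0 || n > m)
        return 0;
    if (n > m - n)              /* use the smaller of n and m - n */
        n = m - n;
    long long c = 1;
    for (long long i = 1; i <= n; i++) {
        /* c * (m - n + i) is always divisible by i at this point */
        c = c * (m - n + i) / i;
    }
    return c;
}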
683391.c
#include "testharness.h" int g; void testg(int x, int err) { if(g != 6) E(err); } int main() { int x = 7; /* * Strictly speaking, the order of increment versus assignment here * is not well defined: see ANSI C standard section 6.5.2.4 [Postfix * increment and decrement operators], paragraph 2. However, both * GCC and VC do the assignment before the side effect. For maximum * compatibility, then, the ending value of x should be 8, not 7. */ x = x++; if (x != 8) E(1); // Both postincrements happen after the assignment ! // x == 8 x = x++ + x++; if(x != 18) E(2); // The postincrement happens BEFORE the function call ! g = 5; testg(g ++, 5); SUCCESS; }
382844.c
// Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stddef.h> #include <stdlib.h> #include <string.h> #include "esp_err.h" #include "esp_wifi.h" #include "esp_wifi_internal.h" #include "esp_event.h" #include "esp_event_loop.h" #include "esp_task.h" #include "esp_eth.h" #include "esp_system.h" #include "rom/ets_sys.h" #include "freertos/FreeRTOS.h" #include "freertos/task.h" #include "freertos/queue.h" #include "freertos/semphr.h" #include "tcpip_adapter.h" #include "esp_log.h" static const char* TAG = "event"; #define WIFI_API_CALL_CHECK(info, api_call, ret) \ do{\ esp_err_t __err = (api_call);\ if ((ret) != __err) {\ ESP_LOGE(TAG, "%s %d %s ret=0x%X", __FUNCTION__, __LINE__, (info), __err);\ return __err;\ }\ } while(0) typedef struct { int err; const char *reason; } wifi_reason_t; static const wifi_reason_t wifi_reason[] = { {0, "wifi reason: other reason"}, {WIFI_REASON_UNSPECIFIED, "wifi reason: unspecified"}, {WIFI_REASON_AUTH_EXPIRE, "wifi reason: auth expire"}, {WIFI_REASON_AUTH_LEAVE, "wifi reason: auth leave"}, {WIFI_REASON_ASSOC_EXPIRE, "wifi reason: assoc expire"}, {WIFI_REASON_ASSOC_TOOMANY, "wifi reason: assoc too many"}, {WIFI_REASON_NOT_AUTHED, "wifi reason: not authed"}, {WIFI_REASON_NOT_ASSOCED, "wifi reason: not assoced"}, {WIFI_REASON_ASSOC_LEAVE, "wifi reason: assoc leave"}, {WIFI_REASON_ASSOC_NOT_AUTHED, "wifi reason: assoc not authed"}, {WIFI_REASON_BEACON_TIMEOUT, "wifi reason: beacon timeout"}, {WIFI_REASON_NO_AP_FOUND, "wifi reason: no ap found"}, {WIFI_REASON_AUTH_FAIL, "wifi reason: auth fail"}, {WIFI_REASON_ASSOC_FAIL, "wifi reason: assoc fail"}, {WIFI_REASON_HANDSHAKE_TIMEOUT, "wifi reason: hanshake timeout"}, {WIFI_REASON_DISASSOC_PWRCAP_BAD, "wifi reason: bad Power Capability, disassoc"}, {WIFI_REASON_DISASSOC_SUPCHAN_BAD, "wifi reason: bad Supported Channels, disassoc"}, {WIFI_REASON_IE_INVALID, "wifi reason: invalid IE"}, {WIFI_REASON_MIC_FAILURE, "wifi reason: MIC failure"}, {WIFI_REASON_4WAY_HANDSHAKE_TIMEOUT, "wifi reason: 4-way keying handshake timeout"}, {WIFI_REASON_GROUP_KEY_UPDATE_TIMEOUT, "wifi reason: Group key handshake"}, {WIFI_REASON_IE_IN_4WAY_DIFFERS, "wifi reason: IE in 4-way differs"}, {WIFI_REASON_GROUP_CIPHER_INVALID, "wifi reason: invalid group cipher"}, {WIFI_REASON_PAIRWISE_CIPHER_INVALID, "wifi reason: invalid pairwise cipher"}, {WIFI_REASON_AKMP_INVALID, "wifi reason: invalid AKMP"}, {WIFI_REASON_UNSUPP_RSN_IE_VERSION, "wifi reason: unsupported RSN IE version"}, {WIFI_REASON_INVALID_RSN_IE_CAP, "wifi reason: invalid RSN IE capability"}, {WIFI_REASON_802_1X_AUTH_FAILED, "wifi reason: 802.1x auth failed"}, {WIFI_REASON_CIPHER_SUITE_REJECTED, "wifi reason: cipher suite rejected"} }; const char* wifi_get_reason(int err) { int i=0; for (i=0; i< sizeof(wifi_reason)/sizeof(wifi_reason_t); i++){ if (err == wifi_reason[i].err){ return wifi_reason[i].reason; } } return wifi_reason[0].reason; } typedef esp_err_t (*system_event_handler_t)(system_event_t *e); static esp_err_t 
system_event_ap_start_handle_default(system_event_t *event); static esp_err_t system_event_ap_stop_handle_default(system_event_t *event); static esp_err_t system_event_sta_start_handle_default(system_event_t *event); static esp_err_t system_event_sta_stop_handle_default(system_event_t *event); static esp_err_t system_event_sta_connected_handle_default(system_event_t *event); static esp_err_t system_event_sta_disconnected_handle_default(system_event_t *event); static esp_err_t system_event_sta_got_ip_default(system_event_t *event); static esp_err_t system_event_sta_lost_ip_default(system_event_t *event); static esp_err_t system_event_eth_start_handle_default(system_event_t *event); static esp_err_t system_event_eth_stop_handle_default(system_event_t *event); static esp_err_t system_event_eth_connected_handle_default(system_event_t *event); static esp_err_t system_event_eth_disconnected_handle_default(system_event_t *event); static esp_err_t system_event_eth_got_ip_default(system_event_t *event); /* Default event handler functions Any entry in this table which is disabled by config will have a NULL handler. */ static system_event_handler_t default_event_handlers[SYSTEM_EVENT_MAX] = { 0 }; esp_err_t system_event_eth_start_handle_default(system_event_t *event) { tcpip_adapter_ip_info_t eth_ip; uint8_t eth_mac[6]; esp_eth_get_mac(eth_mac); tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_ETH, &eth_ip); tcpip_adapter_eth_start(eth_mac, &eth_ip); return ESP_OK; } esp_err_t system_event_eth_stop_handle_default(system_event_t *event) { tcpip_adapter_stop(TCPIP_ADAPTER_IF_ETH); return ESP_OK; } esp_err_t system_event_eth_connected_handle_default(system_event_t *event) { tcpip_adapter_dhcp_status_t status; tcpip_adapter_up(TCPIP_ADAPTER_IF_ETH); tcpip_adapter_dhcpc_get_status(TCPIP_ADAPTER_IF_ETH, &status); if (status == TCPIP_ADAPTER_DHCP_INIT) { tcpip_adapter_dhcpc_start(TCPIP_ADAPTER_IF_ETH); } else if (status == TCPIP_ADAPTER_DHCP_STOPPED) { tcpip_adapter_ip_info_t eth_ip; tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_ETH, &eth_ip); if (!(ip4_addr_isany_val(eth_ip.ip) || ip4_addr_isany_val(eth_ip.netmask))) { system_event_t evt; //notify event evt.event_id = SYSTEM_EVENT_ETH_GOT_IP; memcpy(&evt.event_info.got_ip.ip_info, &eth_ip, sizeof(tcpip_adapter_ip_info_t)); esp_event_send(&evt); } else { ESP_LOGE(TAG, "invalid static ip"); } } return ESP_OK; } esp_err_t system_event_eth_disconnected_handle_default(system_event_t *event) { tcpip_adapter_down(TCPIP_ADAPTER_IF_ETH); return ESP_OK; } static esp_err_t system_event_eth_got_ip_default(system_event_t *event) { ESP_LOGI(TAG, "eth ip: " IPSTR ", mask: " IPSTR ", gw: " IPSTR, IP2STR(&event->event_info.got_ip.ip_info.ip), IP2STR(&event->event_info.got_ip.ip_info.netmask), IP2STR(&event->event_info.got_ip.ip_info.gw)); return ESP_OK; } static esp_err_t system_event_sta_got_ip_default(system_event_t *event) { WIFI_API_CALL_CHECK("esp_wifi_internal_set_sta_ip", esp_wifi_internal_set_sta_ip(), ESP_OK); ESP_LOGI(TAG, "sta ip: " IPSTR ", mask: " IPSTR ", gw: " IPSTR, IP2STR(&event->event_info.got_ip.ip_info.ip), IP2STR(&event->event_info.got_ip.ip_info.netmask), IP2STR(&event->event_info.got_ip.ip_info.gw)); return ESP_OK; } static esp_err_t system_event_sta_lost_ip_default(system_event_t *event) { ESP_LOGI(TAG, "station ip lost"); return ESP_OK; } esp_err_t system_event_ap_start_handle_default(system_event_t *event) { tcpip_adapter_ip_info_t ap_ip; uint8_t ap_mac[6]; WIFI_API_CALL_CHECK("esp_wifi_internal_reg_rxcb", esp_wifi_internal_reg_rxcb(ESP_IF_WIFI_AP, 
(wifi_rxcb_t)tcpip_adapter_ap_input), ESP_OK); WIFI_API_CALL_CHECK("esp_wifi_mac_get", esp_wifi_get_mac(ESP_IF_WIFI_AP, ap_mac), ESP_OK); tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_AP, &ap_ip); tcpip_adapter_ap_start(ap_mac, &ap_ip); return ESP_OK; } esp_err_t system_event_ap_stop_handle_default(system_event_t *event) { WIFI_API_CALL_CHECK("esp_wifi_internal_reg_rxcb", esp_wifi_internal_reg_rxcb(ESP_IF_WIFI_AP, NULL), ESP_OK); tcpip_adapter_stop(TCPIP_ADAPTER_IF_AP); return ESP_OK; } esp_err_t system_event_sta_start_handle_default(system_event_t *event) { tcpip_adapter_ip_info_t sta_ip; uint8_t sta_mac[6]; WIFI_API_CALL_CHECK("esp_wifi_mac_get", esp_wifi_get_mac(ESP_IF_WIFI_STA, sta_mac), ESP_OK); tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &sta_ip); tcpip_adapter_sta_start(sta_mac, &sta_ip); return ESP_OK; } esp_err_t system_event_sta_stop_handle_default(system_event_t *event) { tcpip_adapter_stop(TCPIP_ADAPTER_IF_STA); return ESP_OK; } esp_err_t system_event_sta_connected_handle_default(system_event_t *event) { tcpip_adapter_dhcp_status_t status; WIFI_API_CALL_CHECK("esp_wifi_internal_reg_rxcb", esp_wifi_internal_reg_rxcb(ESP_IF_WIFI_STA, (wifi_rxcb_t)tcpip_adapter_sta_input), ESP_OK); tcpip_adapter_up(TCPIP_ADAPTER_IF_STA); tcpip_adapter_dhcpc_get_status(TCPIP_ADAPTER_IF_STA, &status); if (status == TCPIP_ADAPTER_DHCP_INIT) { tcpip_adapter_dhcpc_start(TCPIP_ADAPTER_IF_STA); } else if (status == TCPIP_ADAPTER_DHCP_STOPPED) { tcpip_adapter_ip_info_t sta_ip; tcpip_adapter_ip_info_t sta_old_ip; tcpip_adapter_get_ip_info(TCPIP_ADAPTER_IF_STA, &sta_ip); tcpip_adapter_get_old_ip_info(TCPIP_ADAPTER_IF_STA, &sta_old_ip); if (!(ip4_addr_isany_val(sta_ip.ip) || ip4_addr_isany_val(sta_ip.netmask))) { system_event_t evt; evt.event_id = SYSTEM_EVENT_STA_GOT_IP; evt.event_info.got_ip.ip_changed = false; if (memcmp(&sta_ip, &sta_old_ip, sizeof(sta_ip))) { evt.event_info.got_ip.ip_changed = true; } memcpy(&evt.event_info.got_ip.ip_info, &sta_ip, sizeof(tcpip_adapter_ip_info_t)); tcpip_adapter_set_old_ip_info(TCPIP_ADAPTER_IF_STA, &sta_ip); esp_event_send(&evt); ESP_LOGD(TAG, "static ip: ip changed=%d", evt.event_info.got_ip.ip_changed); } else { ESP_LOGE(TAG, "invalid static ip"); } } return ESP_OK; } esp_err_t system_event_sta_disconnected_handle_default(system_event_t *event) { tcpip_adapter_down(TCPIP_ADAPTER_IF_STA); WIFI_API_CALL_CHECK("esp_wifi_internal_reg_rxcb", esp_wifi_internal_reg_rxcb(ESP_IF_WIFI_STA, NULL), ESP_OK); return ESP_OK; } static esp_err_t esp_system_event_debug(system_event_t *event) { if (event == NULL) { ESP_LOGE(TAG, "event is null!"); return ESP_FAIL; } switch (event->event_id) { case SYSTEM_EVENT_WIFI_READY: { ESP_LOGD(TAG, "SYSTEM_EVENT_WIFI_READY"); break; } case SYSTEM_EVENT_SCAN_DONE: { system_event_sta_scan_done_t *scan_done = &event->event_info.scan_done; ESP_LOGD(TAG, "SYSTEM_EVENT_SCAN_DONE, status:%d, number:%d", scan_done->status, scan_done->number); break; } case SYSTEM_EVENT_STA_START: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_START"); break; } case SYSTEM_EVENT_STA_STOP: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_STOP"); break; } case SYSTEM_EVENT_STA_CONNECTED: { system_event_sta_connected_t *connected = &event->event_info.connected; ESP_LOGD(TAG, "SYSTEM_EVENT_STA_CONNECTED, ssid:%s, ssid_len:%d, bssid:" MACSTR ", channel:%d, authmode:%d", \ connected->ssid, connected->ssid_len, MAC2STR(connected->bssid), connected->channel, connected->authmode); break; } case SYSTEM_EVENT_STA_DISCONNECTED: { system_event_sta_disconnected_t *disconnected = &event->event_info.disconnected; 
ESP_LOGD(TAG, "SYSTEM_EVENT_STA_DISCONNECTED, ssid:%s, ssid_len:%d, bssid:" MACSTR ", reason:%d,%s", \ disconnected->ssid, disconnected->ssid_len, MAC2STR(disconnected->bssid), disconnected->reason, wifi_get_reason(disconnected->reason)); break; } case SYSTEM_EVENT_STA_AUTHMODE_CHANGE: { system_event_sta_authmode_change_t *auth_change = &event->event_info.auth_change; ESP_LOGD(TAG, "SYSTEM_EVENT_STA_AUTHMODE_CHNAGE, old_mode:%d, new_mode:%d", auth_change->old_mode, auth_change->new_mode); break; } case SYSTEM_EVENT_STA_GOT_IP: { system_event_sta_got_ip_t *got_ip = &event->event_info.got_ip; ESP_LOGD(TAG, "SYSTEM_EVENT_STA_GOT_IP, ip:" IPSTR ", mask:" IPSTR ", gw:" IPSTR, IP2STR(&got_ip->ip_info.ip), IP2STR(&got_ip->ip_info.netmask), IP2STR(&got_ip->ip_info.gw)); break; } case SYSTEM_EVENT_STA_LOST_IP: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_LOST_IP"); break; } case SYSTEM_EVENT_STA_WPS_ER_SUCCESS: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_WPS_ER_SUCCESS"); break; } case SYSTEM_EVENT_STA_WPS_ER_FAILED: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_WPS_ER_FAILED"); break; } case SYSTEM_EVENT_STA_WPS_ER_TIMEOUT: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_WPS_ER_TIMEOUT"); break; } case SYSTEM_EVENT_STA_WPS_ER_PIN: { ESP_LOGD(TAG, "SYSTEM_EVENT_STA_WPS_ER_PIN"); break; } case SYSTEM_EVENT_AP_START: { ESP_LOGD(TAG, "SYSTEM_EVENT_AP_START"); break; } case SYSTEM_EVENT_AP_STOP: { ESP_LOGD(TAG, "SYSTEM_EVENT_AP_STOP"); break; } case SYSTEM_EVENT_AP_STACONNECTED: { system_event_ap_staconnected_t *staconnected = &event->event_info.sta_connected; ESP_LOGD(TAG, "SYSTEM_EVENT_AP_STACONNECTED, mac:" MACSTR ", aid:%d", \ MAC2STR(staconnected->mac), staconnected->aid); break; } case SYSTEM_EVENT_AP_STADISCONNECTED: { system_event_ap_stadisconnected_t *stadisconnected = &event->event_info.sta_disconnected; ESP_LOGD(TAG, "SYSTEM_EVENT_AP_STADISCONNECTED, mac:" MACSTR ", aid:%d", \ MAC2STR(stadisconnected->mac), stadisconnected->aid); break; } case SYSTEM_EVENT_AP_STAIPASSIGNED: { ESP_LOGD(TAG, "SYSTEM_EVENT_AP_STAIPASSIGNED"); break; } case SYSTEM_EVENT_AP_PROBEREQRECVED: { system_event_ap_probe_req_rx_t *ap_probereqrecved = &event->event_info.ap_probereqrecved; ESP_LOGD(TAG, "SYSTEM_EVENT_AP_PROBEREQRECVED, rssi:%d, mac:" MACSTR, \ ap_probereqrecved->rssi, \ MAC2STR(ap_probereqrecved->mac)); break; } case SYSTEM_EVENT_GOT_IP6: { ip6_addr_t *addr = &event->event_info.got_ip6.ip6_info.ip; ESP_LOGD(TAG, "SYSTEM_EVENT_AP_STA_GOT_IP6 address %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", IP6_ADDR_BLOCK1(addr), IP6_ADDR_BLOCK2(addr), IP6_ADDR_BLOCK3(addr), IP6_ADDR_BLOCK4(addr), IP6_ADDR_BLOCK5(addr), IP6_ADDR_BLOCK6(addr), IP6_ADDR_BLOCK7(addr), IP6_ADDR_BLOCK8(addr)); break; } case SYSTEM_EVENT_ETH_START: { ESP_LOGD(TAG, "SYSTEM_EVENT_ETH_START"); break; } case SYSTEM_EVENT_ETH_STOP: { ESP_LOGD(TAG, "SYSTEM_EVENT_ETH_STOP"); break; } case SYSTEM_EVENT_ETH_CONNECTED: { ESP_LOGD(TAG, "SYSTEM_EVENT_ETH_CONNECETED"); break; } case SYSTEM_EVENT_ETH_DISCONNECTED: { ESP_LOGD(TAG, "SYSTEM_EVENT_ETH_DISCONNECETED"); break; } case SYSTEM_EVENT_ETH_GOT_IP: { ESP_LOGD(TAG, "SYSTEM_EVENT_ETH_GOT_IP"); break; } default: { ESP_LOGW(TAG, "unexpected system event %d!", event->event_id); break; } } return ESP_OK; } esp_err_t esp_event_process_default(system_event_t *event) { if (event == NULL) { ESP_LOGE(TAG, "Error: event is null!"); return ESP_FAIL; } esp_system_event_debug(event); if ((event->event_id < SYSTEM_EVENT_MAX)) { if (default_event_handlers[event->event_id] != NULL) { ESP_LOGV(TAG, "enter default callback"); 
default_event_handlers[event->event_id](event); ESP_LOGV(TAG, "exit default callback"); } } else { ESP_LOGE(TAG, "mismatch or invalid event, id=%d", event->event_id); return ESP_FAIL; } return ESP_OK; } void esp_event_set_default_wifi_handlers() { default_event_handlers[SYSTEM_EVENT_STA_START] = system_event_sta_start_handle_default; default_event_handlers[SYSTEM_EVENT_STA_STOP] = system_event_sta_stop_handle_default; default_event_handlers[SYSTEM_EVENT_STA_CONNECTED] = system_event_sta_connected_handle_default; default_event_handlers[SYSTEM_EVENT_STA_DISCONNECTED] = system_event_sta_disconnected_handle_default; default_event_handlers[SYSTEM_EVENT_STA_GOT_IP] = system_event_sta_got_ip_default; default_event_handlers[SYSTEM_EVENT_STA_LOST_IP] = system_event_sta_lost_ip_default; default_event_handlers[SYSTEM_EVENT_AP_START] = system_event_ap_start_handle_default; default_event_handlers[SYSTEM_EVENT_AP_STOP] = system_event_ap_stop_handle_default; esp_register_shutdown_handler((shutdown_handler_t)esp_wifi_stop); } void esp_event_set_default_eth_handlers() { default_event_handlers[SYSTEM_EVENT_ETH_START] = system_event_eth_start_handle_default; default_event_handlers[SYSTEM_EVENT_ETH_STOP] = system_event_eth_stop_handle_default; default_event_handlers[SYSTEM_EVENT_ETH_CONNECTED] = system_event_eth_connected_handle_default; default_event_handlers[SYSTEM_EVENT_ETH_DISCONNECTED] = system_event_eth_disconnected_handle_default; default_event_handlers[SYSTEM_EVENT_ETH_GOT_IP] = system_event_eth_got_ip_default; }
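/*
 * Illustrative sketch only, not part of this file: how an application might
 * hook into the same system_event_t stream that esp_event_process_default()
 * dispatches above.  It uses the legacy esp_event_loop_init() API declared in
 * esp_event_loop.h (already included by this file); the default handlers
 * registered above still run, and the callback below is typically invoked in
 * addition to them by the event task.
 */
#include "esp_err.h"
#include "esp_event_loop.h"
#include "esp_log.h"

static const char *APP_TAG = "app_event";   /* tag name chosen for this sketch */

static esp_err_t app_event_handler(void *ctx, system_event_t *event)
{
    switch (event->event_id) {
    case SYSTEM_EVENT_STA_GOT_IP:
        ESP_LOGI(APP_TAG, "got ip: " IPSTR,
                 IP2STR(&event->event_info.got_ip.ip_info.ip));
        break;
    case SYSTEM_EVENT_STA_DISCONNECTED:
        ESP_LOGI(APP_TAG, "disconnected: %s",
                 wifi_get_reason(event->event_info.disconnected.reason));
        break;
    default:
        break;
    }
    return ESP_OK;
}

static void app_register_events(void)
{
    ESP_ERROR_CHECK(esp_event_loop_init(app_event_handler, NULL));
}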
645219.c
/* Use this file as a template to start implementing a module that also declares object types. All occurrences of 'Xxo' should be changed to something reasonable for your objects. After that, all other occurrences of 'xx' should be changed to something reasonable for your module. If your module is named foo your sourcefile should be named foomodule.c. You will probably want to delete all references to 'x_attr' and add your own types of attributes instead. Maybe you want to name your local variables other than 'self'. If your object type is needed in other files, you'll have to create a file "foobarobject.h"; see floatobject.h for an example. */ /* Xxo objects */ #include "Python.h" static PyObject *ErrorObject; typedef struct { PyObject_HEAD PyObject *x_attr; /* Attributes dictionary */ } XxoObject; static PyObject *Xxo_Type; #define XxoObject_Check(v) (Py_TYPE(v) == Xxo_Type) static XxoObject * newXxoObject(PyObject *arg) { XxoObject *self; self = PyObject_GC_New(XxoObject, (PyTypeObject*)Xxo_Type); if (self == NULL) return NULL; self->x_attr = NULL; return self; } /* Xxo methods */ static int Xxo_traverse(XxoObject *self, visitproc visit, void *arg) { Py_VISIT(self->x_attr); return 0; } static void Xxo_finalize(XxoObject *self) { Py_CLEAR(self->x_attr); } static PyObject * Xxo_demo(XxoObject *self, PyObject *args) { PyObject *o = NULL; if (!PyArg_ParseTuple(args, "|O:demo", &o)) return NULL; /* Test availability of fast type checks */ if (o != NULL && PyUnicode_Check(o)) { Py_INCREF(o); return o; } Py_INCREF(Py_None); return Py_None; } static PyMethodDef Xxo_methods[] = { {"demo", (PyCFunction)Xxo_demo, METH_VARARGS, PyDoc_STR("demo() -> None")}, {NULL, NULL} /* sentinel */ }; static PyObject * Xxo_getattro(XxoObject *self, PyObject *name) { if (self->x_attr != NULL) { PyObject *v = PyDict_GetItemRef(self->x_attr, name); if (v != NULL) { return v; } } return PyObject_GenericGetAttr((PyObject *)self, name); } static int Xxo_setattr(XxoObject *self, const char *name, PyObject *v) { if (self->x_attr == NULL) { self->x_attr = PyDict_New(); if (self->x_attr == NULL) return -1; } if (v == NULL) { int rv = PyDict_DelItemString(self->x_attr, name); if (rv < 0) PyErr_SetString(PyExc_AttributeError, "delete non-existing Xxo attribute"); return rv; } else return PyDict_SetItemString(self->x_attr, name, v); } static PyType_Slot Xxo_Type_slots[] = { {Py_tp_doc, "The Xxo type"}, {Py_tp_traverse, Xxo_traverse}, {Py_tp_finalize, Xxo_finalize}, {Py_tp_getattro, Xxo_getattro}, {Py_tp_setattr, Xxo_setattr}, {Py_tp_methods, Xxo_methods}, {0, 0}, }; static PyType_Spec Xxo_Type_spec = { "xxlimited.Xxo", sizeof(XxoObject), 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, Xxo_Type_slots }; /* --------------------------------------------------------------------- */ /* Function of two integers returning integer */ PyDoc_STRVAR(xx_foo_doc, "foo(i,j)\n\ \n\ Return the sum of i and j."); static PyObject * xx_foo(PyObject *self, PyObject *args) { long i, j; long res; if (!PyArg_ParseTuple(args, "ll:foo", &i, &j)) return NULL; res = i+j; /* XXX Do something here */ return PyLong_FromLong(res); } /* Function of no arguments returning new Xxo object */ static PyObject * xx_new(PyObject *self, PyObject *args) { XxoObject *rv; if (!PyArg_ParseTuple(args, ":new")) return NULL; rv = newXxoObject(args); if (rv == NULL) return NULL; return (PyObject *)rv; } /* Test bad format character */ static PyObject * xx_roj(PyObject *self, PyObject *args) { PyObject *a; long b; if (!PyArg_ParseTuple(args, "O#:roj", 
&a, &b)) return NULL; Py_INCREF(Py_None); return Py_None; } /* ---------- */ static PyType_Slot Str_Type_slots[] = { {Py_tp_base, NULL}, /* filled out in module init function */ {0, 0}, }; static PyType_Spec Str_Type_spec = { "xxlimited.Str", 0, 0, Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, Str_Type_slots }; /* ---------- */ static PyObject * null_richcompare(PyObject *self, PyObject *other, int op) { Py_RETURN_NOTIMPLEMENTED; } static PyType_Slot Null_Type_slots[] = { {Py_tp_base, NULL}, /* filled out in module init */ {Py_tp_new, NULL}, {Py_tp_richcompare, null_richcompare}, {0, 0} }; static PyType_Spec Null_Type_spec = { "xxlimited.Null", 0, /* basicsize */ 0, /* itemsize */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, Null_Type_slots }; /* ---------- */ /* List of functions defined in the module */ static PyMethodDef xx_methods[] = { {"roj", xx_roj, METH_VARARGS, PyDoc_STR("roj(a,b) -> None")}, {"foo", xx_foo, METH_VARARGS, xx_foo_doc}, {"new", xx_new, METH_VARARGS, PyDoc_STR("new() -> new Xx object")}, {NULL, NULL} /* sentinel */ }; PyDoc_STRVAR(module_doc, "This is a template module just for instruction."); static int xx_modexec(PyObject *m) { PyObject *o; /* Due to cross platform compiler issues the slots must be filled * here. It's required for portability to Windows without requiring * C++. */ Null_Type_slots[0].pfunc = &PyBaseObject_Type; Null_Type_slots[1].pfunc = PyType_GenericNew; Str_Type_slots[0].pfunc = &PyUnicode_Type; Xxo_Type = PyType_FromSpec(&Xxo_Type_spec); if (Xxo_Type == NULL) goto fail; /* Add some symbolic constants to the module */ if (ErrorObject == NULL) { ErrorObject = PyErr_NewException("xxlimited.error", NULL, NULL); if (ErrorObject == NULL) goto fail; } Py_INCREF(ErrorObject); PyModule_AddObject(m, "error", ErrorObject); /* Add Xxo */ o = PyType_FromSpec(&Xxo_Type_spec); if (o == NULL) goto fail; PyModule_AddObject(m, "Xxo", o); /* Add Str */ o = PyType_FromSpec(&Str_Type_spec); if (o == NULL) goto fail; PyModule_AddObject(m, "Str", o); /* Add Null */ o = PyType_FromSpec(&Null_Type_spec); if (o == NULL) goto fail; PyModule_AddObject(m, "Null", o); return 0; fail: Py_XDECREF(m); return -1; } static PyModuleDef_Slot xx_slots[] = { {Py_mod_exec, xx_modexec}, {0, NULL} }; static struct PyModuleDef xxmodule = { PyModuleDef_HEAD_INIT, "xxlimited", module_doc, 0, xx_methods, xx_slots, NULL, NULL, NULL }; /* Export function for the module (*must* be called PyInit_xx) */ PyMODINIT_FUNC PyInit_xxlimited(void) { return PyModuleDef_Init(&xxmodule); }
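/*
 * Illustrative sketch only, not part of xxlimited itself: importing the module
 * from an embedding C program and calling foo(i, j).  Assumes an interpreter
 * in which the xxlimited extension has been built and is importable.
 */
#include <Python.h>

int main(void)
{
    Py_Initialize();

    PyObject *mod = PyImport_ImportModule("xxlimited");
    if (mod != NULL) {
        /* xx_foo() above returns the sum of its two long arguments */
        PyObject *res = PyObject_CallMethod(mod, "foo", "ll", 2L, 3L);
        if (res != NULL) {
            printf("foo(2, 3) = %ld\n", PyLong_AsLong(res));
            Py_DECREF(res);
        }
        Py_DECREF(mod);
    }
    if (PyErr_Occurred())
        PyErr_Print();

    Py_Finalize();
    return 0;
}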
12984.c
/* ** 2012 April 10 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** This module implements the spellfix1 VIRTUAL TABLE that can be used ** to search a large vocabulary for close matches. See separate ** documentation (http://www.sqlite.org/spellfix1.html) for details. */ #include "sqlite3ext.h" SQLITE_EXTENSION_INIT1 #ifndef SQLITE_AMALGAMATION # include <string.h> # include <stdio.h> # include <stdlib.h> # include <assert.h> # define ALWAYS(X) 1 # define NEVER(X) 0 typedef unsigned char u8; typedef unsigned short u16; # include <ctype.h> #endif #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Character classes for ASCII characters: ** ** 0 '' Silent letters: H W ** 1 'A' Any vowel: A E I O U (Y) ** 2 'B' A bilabeal stop or fricative: B F P V W ** 3 'C' Other fricatives or back stops: C G J K Q S X Z ** 4 'D' Alveolar stops: D T ** 5 'H' Letter H at the beginning of a word ** 6 'L' Glide: L ** 7 'R' Semivowel: R ** 8 'M' Nasals: M N ** 9 'Y' Letter Y at the beginning of a word. ** 10 '9' Digits: 0 1 2 3 4 5 6 7 8 9 ** 11 ' ' White space ** 12 '?' Other. */ #define CCLASS_SILENT 0 #define CCLASS_VOWEL 1 #define CCLASS_B 2 #define CCLASS_C 3 #define CCLASS_D 4 #define CCLASS_H 5 #define CCLASS_L 6 #define CCLASS_R 7 #define CCLASS_M 8 #define CCLASS_Y 9 #define CCLASS_DIGIT 10 #define CCLASS_SPACE 11 #define CCLASS_OTHER 12 /* ** The following table gives the character class for non-initial ASCII ** characters. */ static const unsigned char midClass[] = { /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_SPACE, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_SPACE, /* */ CCLASS_SPACE, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_SPACE, /* ! */ CCLASS_OTHER, /* " */ CCLASS_OTHER, /* # */ CCLASS_OTHER, /* $ */ CCLASS_OTHER, /* % */ CCLASS_OTHER, /* & */ CCLASS_OTHER, /* ' */ CCLASS_SILENT, /* ( */ CCLASS_OTHER, /* ) */ CCLASS_OTHER, /* * */ CCLASS_OTHER, /* + */ CCLASS_OTHER, /* , */ CCLASS_OTHER, /* - */ CCLASS_OTHER, /* . */ CCLASS_OTHER, /* / */ CCLASS_OTHER, /* 0 */ CCLASS_DIGIT, /* 1 */ CCLASS_DIGIT, /* 2 */ CCLASS_DIGIT, /* 3 */ CCLASS_DIGIT, /* 4 */ CCLASS_DIGIT, /* 5 */ CCLASS_DIGIT, /* 6 */ CCLASS_DIGIT, /* 7 */ CCLASS_DIGIT, /* 8 */ CCLASS_DIGIT, /* 9 */ CCLASS_DIGIT, /* : */ CCLASS_OTHER, /* ; */ CCLASS_OTHER, /* < */ CCLASS_OTHER, /* = */ CCLASS_OTHER, /* > */ CCLASS_OTHER, /* ? 
*/ CCLASS_OTHER, /* @ */ CCLASS_OTHER, /* A */ CCLASS_VOWEL, /* B */ CCLASS_B, /* C */ CCLASS_C, /* D */ CCLASS_D, /* E */ CCLASS_VOWEL, /* F */ CCLASS_B, /* G */ CCLASS_C, /* H */ CCLASS_SILENT, /* I */ CCLASS_VOWEL, /* J */ CCLASS_C, /* K */ CCLASS_C, /* L */ CCLASS_L, /* M */ CCLASS_M, /* N */ CCLASS_M, /* O */ CCLASS_VOWEL, /* P */ CCLASS_B, /* Q */ CCLASS_C, /* R */ CCLASS_R, /* S */ CCLASS_C, /* T */ CCLASS_D, /* U */ CCLASS_VOWEL, /* V */ CCLASS_B, /* W */ CCLASS_B, /* X */ CCLASS_C, /* Y */ CCLASS_VOWEL, /* Z */ CCLASS_C, /* [ */ CCLASS_OTHER, /* \ */ CCLASS_OTHER, /* ] */ CCLASS_OTHER, /* ^ */ CCLASS_OTHER, /* _ */ CCLASS_OTHER, /* ` */ CCLASS_OTHER, /* a */ CCLASS_VOWEL, /* b */ CCLASS_B, /* c */ CCLASS_C, /* d */ CCLASS_D, /* e */ CCLASS_VOWEL, /* f */ CCLASS_B, /* g */ CCLASS_C, /* h */ CCLASS_SILENT, /* i */ CCLASS_VOWEL, /* j */ CCLASS_C, /* k */ CCLASS_C, /* l */ CCLASS_L, /* m */ CCLASS_M, /* n */ CCLASS_M, /* o */ CCLASS_VOWEL, /* p */ CCLASS_B, /* q */ CCLASS_C, /* r */ CCLASS_R, /* s */ CCLASS_C, /* t */ CCLASS_D, /* u */ CCLASS_VOWEL, /* v */ CCLASS_B, /* w */ CCLASS_B, /* x */ CCLASS_C, /* y */ CCLASS_VOWEL, /* z */ CCLASS_C, /* { */ CCLASS_OTHER, /* | */ CCLASS_OTHER, /* } */ CCLASS_OTHER, /* ~ */ CCLASS_OTHER, /* */ CCLASS_OTHER, }; /* ** This tables gives the character class for ASCII characters that form the ** initial character of a word. The only difference from midClass is with ** the letters H, W, and Y. */ static const unsigned char initClass[] = { /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_SPACE, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_SPACE, /* */ CCLASS_SPACE, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_OTHER, /* */ CCLASS_SPACE, /* ! */ CCLASS_OTHER, /* " */ CCLASS_OTHER, /* # */ CCLASS_OTHER, /* $ */ CCLASS_OTHER, /* % */ CCLASS_OTHER, /* & */ CCLASS_OTHER, /* ' */ CCLASS_OTHER, /* ( */ CCLASS_OTHER, /* ) */ CCLASS_OTHER, /* * */ CCLASS_OTHER, /* + */ CCLASS_OTHER, /* , */ CCLASS_OTHER, /* - */ CCLASS_OTHER, /* . */ CCLASS_OTHER, /* / */ CCLASS_OTHER, /* 0 */ CCLASS_DIGIT, /* 1 */ CCLASS_DIGIT, /* 2 */ CCLASS_DIGIT, /* 3 */ CCLASS_DIGIT, /* 4 */ CCLASS_DIGIT, /* 5 */ CCLASS_DIGIT, /* 6 */ CCLASS_DIGIT, /* 7 */ CCLASS_DIGIT, /* 8 */ CCLASS_DIGIT, /* 9 */ CCLASS_DIGIT, /* : */ CCLASS_OTHER, /* ; */ CCLASS_OTHER, /* < */ CCLASS_OTHER, /* = */ CCLASS_OTHER, /* > */ CCLASS_OTHER, /* ? 
*/ CCLASS_OTHER, /* @ */ CCLASS_OTHER, /* A */ CCLASS_VOWEL, /* B */ CCLASS_B, /* C */ CCLASS_C, /* D */ CCLASS_D, /* E */ CCLASS_VOWEL, /* F */ CCLASS_B, /* G */ CCLASS_C, /* H */ CCLASS_SILENT, /* I */ CCLASS_VOWEL, /* J */ CCLASS_C, /* K */ CCLASS_C, /* L */ CCLASS_L, /* M */ CCLASS_M, /* N */ CCLASS_M, /* O */ CCLASS_VOWEL, /* P */ CCLASS_B, /* Q */ CCLASS_C, /* R */ CCLASS_R, /* S */ CCLASS_C, /* T */ CCLASS_D, /* U */ CCLASS_VOWEL, /* V */ CCLASS_B, /* W */ CCLASS_B, /* X */ CCLASS_C, /* Y */ CCLASS_Y, /* Z */ CCLASS_C, /* [ */ CCLASS_OTHER, /* \ */ CCLASS_OTHER, /* ] */ CCLASS_OTHER, /* ^ */ CCLASS_OTHER, /* _ */ CCLASS_OTHER, /* ` */ CCLASS_OTHER, /* a */ CCLASS_VOWEL, /* b */ CCLASS_B, /* c */ CCLASS_C, /* d */ CCLASS_D, /* e */ CCLASS_VOWEL, /* f */ CCLASS_B, /* g */ CCLASS_C, /* h */ CCLASS_SILENT, /* i */ CCLASS_VOWEL, /* j */ CCLASS_C, /* k */ CCLASS_C, /* l */ CCLASS_L, /* m */ CCLASS_M, /* n */ CCLASS_M, /* o */ CCLASS_VOWEL, /* p */ CCLASS_B, /* q */ CCLASS_C, /* r */ CCLASS_R, /* s */ CCLASS_C, /* t */ CCLASS_D, /* u */ CCLASS_VOWEL, /* v */ CCLASS_B, /* w */ CCLASS_B, /* x */ CCLASS_C, /* y */ CCLASS_Y, /* z */ CCLASS_C, /* { */ CCLASS_OTHER, /* | */ CCLASS_OTHER, /* } */ CCLASS_OTHER, /* ~ */ CCLASS_OTHER, /* */ CCLASS_OTHER, }; /* ** Mapping from the character class number (0-13) to a symbol for each ** character class. Note that initClass[] can be used to map the class ** symbol back into the class number. */ static const unsigned char className[] = ".ABCDHLRMY9 ?"; /* ** Generate a "phonetic hash" from a string of ASCII characters ** in zIn[0..nIn-1]. ** ** * Map characters by character class as defined above. ** * Omit double-letters ** * Omit vowels beside R and L ** * Omit T when followed by CH ** * Omit W when followed by R ** * Omit D when followed by J or G ** * Omit K in KN or G in GN at the beginning of a word ** ** Space to hold the result is obtained from sqlite3_malloc() ** ** Return NULL if memory allocation fails. */ static unsigned char *phoneticHash(const unsigned char *zIn, int nIn){ unsigned char *zOut = sqlite3_malloc( nIn + 1 ); int i; int nOut = 0; char cPrev = 0x77; char cPrevX = 0x77; const unsigned char *aClass = initClass; if( zOut==0 ) return 0; if( nIn>2 ){ switch( zIn[0] ){ case 'g': case 'k': { if( zIn[1]=='n' ){ zIn++; nIn--; } break; } } } for(i=0; i<nIn; i++){ unsigned char c = zIn[i]; if( i+1<nIn ){ if( c=='w' && zIn[i+1]=='r' ) continue; if( c=='d' && (zIn[i+1]=='j' || zIn[i+1]=='g') ) continue; if( i+2<nIn ){ if( c=='t' && zIn[i+1]=='c' && zIn[i+2]=='h' ) continue; } } c = aClass[c&0x7f]; if( c==CCLASS_SPACE ) continue; if( c==CCLASS_OTHER && cPrev!=CCLASS_DIGIT ) continue; aClass = midClass; if( c==CCLASS_VOWEL && (cPrevX==CCLASS_R || cPrevX==CCLASS_L) ){ continue; /* No vowels beside L or R */ } if( (c==CCLASS_R || c==CCLASS_L) && cPrevX==CCLASS_VOWEL ){ nOut--; /* No vowels beside L or R */ } cPrev = c; if( c==CCLASS_SILENT ) continue; cPrevX = c; c = className[c]; assert( nOut>=0 ); if( nOut==0 || c!=zOut[nOut-1] ) zOut[nOut++] = c; } zOut[nOut] = 0; return zOut; } /* ** This is an SQL function wrapper around phoneticHash(). See ** the description of phoneticHash() for additional information. 
*/ static void phoneticHashSqlFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zIn; unsigned char *zOut; zIn = sqlite3_value_text(argv[0]); if( zIn==0 ) return; zOut = phoneticHash(zIn, sqlite3_value_bytes(argv[0])); if( zOut==0 ){ sqlite3_result_error_nomem(context); }else{ sqlite3_result_text(context, (char*)zOut, -1, sqlite3_free); } } /* ** Return the character class number for a character given its ** context. */ static char characterClass(char cPrev, char c){ return cPrev==0 ? initClass[c&0x7f] : midClass[c&0x7f]; } /* ** Return the cost of inserting or deleting character c immediately ** following character cPrev. If cPrev==0, that means c is the first ** character of the word. */ static int insertOrDeleteCost(char cPrev, char c, char cNext){ char classC = characterClass(cPrev, c); char classCprev; if( classC==CCLASS_SILENT ){ /* Insert or delete "silent" characters such as H or W */ return 1; } if( cPrev==c ){ /* Repeated characters, or miss a repeat */ return 10; } if( classC==CCLASS_VOWEL && (cPrev=='r' || cNext=='r') ){ return 20; /* Insert a vowel before or after 'r' */ } classCprev = characterClass(cPrev, cPrev); if( classC==classCprev ){ if( classC==CCLASS_VOWEL ){ /* Remove or add a new vowel to a vowel cluster */ return 15; }else{ /* Remove or add a consonant not in the same class */ return 50; } } /* any other character insertion or deletion */ return 100; } /* ** Divide the insertion cost by this factor when appending to the ** end of the word. */ #define FINAL_INS_COST_DIV 4 /* ** Return the cost of substituting cTo in place of cFrom assuming ** the previous character is cPrev. If cPrev==0 then cTo is the first ** character of the word. */ static int substituteCost(char cPrev, char cFrom, char cTo){ char classFrom, classTo; if( cFrom==cTo ){ /* Exact match */ return 0; } if( cFrom==(cTo^0x20) && ((cTo>='A' && cTo<='Z') || (cTo>='a' && cTo<='z')) ){ /* differ only in case */ return 0; } classFrom = characterClass(cPrev, cFrom); classTo = characterClass(cPrev, cTo); if( classFrom==classTo ){ /* Same character class */ return 40; } if( classFrom>=CCLASS_B && classFrom<=CCLASS_Y && classTo>=CCLASS_B && classTo<=CCLASS_Y ){ /* Convert from one consonant to another, but in a different class */ return 75; } /* Any other subsitution */ return 100; } /* ** Given two strings zA and zB which are pure ASCII, return the cost ** of transforming zA into zB. If zA ends with '*' assume that it is ** a prefix of zB and give only minimal penalty for extra characters ** on the end of zB. ** ** Smaller numbers mean a closer match. ** ** Negative values indicate an error: ** -1 One of the inputs is NULL ** -2 Non-ASCII characters on input ** -3 Unable to allocate memory ** ** If pnMatch is not NULL, then *pnMatch is set to the number of bytes ** of zB that matched the pattern in zA. If zA does not end with a '*', ** then this value is always the number of bytes in zB (i.e. strlen(zB)). ** If zA does end in a '*', then it is the number of bytes in the prefix ** of zB that was deemed to match zA. 
*/ static int editdist1(const char *zA, const char *zB, int *pnMatch){ int nA, nB; /* Number of characters in zA[] and zB[] */ int xA, xB; /* Loop counters for zA[] and zB[] */ char cA, cB; /* Current character of zA and zB */ char cAprev, cBprev; /* Previous character of zA and zB */ char cAnext, cBnext; /* Next character in zA and zB */ int d; /* North-west cost value */ int dc = 0; /* North-west character value */ int res; /* Final result */ int *m; /* The cost matrix */ char *cx; /* Corresponding character values */ int *toFree = 0; /* Malloced space */ int mStack[60+15]; /* Stack space to use if not too much is needed */ int nMatch = 0; /* Early out if either input is NULL */ if( zA==0 || zB==0 ) return -1; /* Skip any common prefix */ while( zA[0] && zA[0]==zB[0] ){ dc = zA[0]; zA++; zB++; nMatch++; } if( pnMatch ) *pnMatch = nMatch; if( zA[0]==0 && zB[0]==0 ) return 0; #if 0 printf("A=\"%s\" B=\"%s\" dc=%c\n", zA, zB, dc?dc:' '); #endif /* Verify input strings and measure their lengths */ for(nA=0; zA[nA]; nA++){ if( zA[nA]&0x80 ) return -2; } for(nB=0; zB[nB]; nB++){ if( zB[nB]&0x80 ) return -2; } /* Special processing if either string is empty */ if( nA==0 ){ cBprev = dc; for(xB=res=0; (cB = zB[xB])!=0; xB++){ res += insertOrDeleteCost(cBprev, cB, zB[xB+1])/FINAL_INS_COST_DIV; cBprev = cB; } return res; } if( nB==0 ){ cAprev = dc; for(xA=res=0; (cA = zA[xA])!=0; xA++){ res += insertOrDeleteCost(cAprev, cA, zA[xA+1]); cAprev = cA; } return res; } /* A is a prefix of B */ if( zA[0]=='*' && zA[1]==0 ) return 0; /* Allocate and initialize the Wagner matrix */ if( nB<(sizeof(mStack)*4)/(sizeof(mStack[0])*5) ){ m = mStack; }else{ m = toFree = sqlite3_malloc( (nB+1)*5*sizeof(m[0])/4 ); if( m==0 ) return -3; } cx = (char*)&m[nB+1]; /* Compute the Wagner edit distance */ m[0] = 0; cx[0] = dc; cBprev = dc; for(xB=1; xB<=nB; xB++){ cBnext = zB[xB]; cB = zB[xB-1]; cx[xB] = cB; m[xB] = m[xB-1] + insertOrDeleteCost(cBprev, cB, cBnext); cBprev = cB; } cAprev = dc; for(xA=1; xA<=nA; xA++){ int lastA = (xA==nA); cA = zA[xA-1]; cAnext = zA[xA]; if( cA=='*' && lastA ) break; d = m[0]; dc = cx[0]; m[0] = d + insertOrDeleteCost(cAprev, cA, cAnext); cBprev = 0; for(xB=1; xB<=nB; xB++){ int totalCost, insCost, delCost, subCost, ncx; cB = zB[xB-1]; cBnext = zB[xB]; /* Cost to insert cB */ insCost = insertOrDeleteCost(cx[xB-1], cB, cBnext); if( lastA ) insCost /= FINAL_INS_COST_DIV; /* Cost to delete cA */ delCost = insertOrDeleteCost(cx[xB], cA, cBnext); /* Cost to substitute cA->cB */ subCost = substituteCost(cx[xB-1], cA, cB); /* Best cost */ totalCost = insCost + m[xB-1]; ncx = cB; if( (delCost + m[xB])<totalCost ){ totalCost = delCost + m[xB]; ncx = cA; } if( (subCost + d)<totalCost ){ totalCost = subCost + d; } #if 0 printf("%d,%d d=%4d u=%4d r=%4d dc=%c cA=%c cB=%c" " ins=%4d del=%4d sub=%4d t=%4d ncx=%c\n", xA, xB, d, m[xB], m[xB-1], dc?dc:' ', cA, cB, insCost, delCost, subCost, totalCost, ncx?ncx:' '); #endif /* Update the matrix */ d = m[xB]; dc = cx[xB]; m[xB] = totalCost; cx[xB] = ncx; cBprev = cB; } cAprev = cA; } /* Free the wagner matrix and return the result */ if( cA=='*' ){ res = m[1]; for(xB=1; xB<=nB; xB++){ if( m[xB]<res ){ res = m[xB]; if( pnMatch ) *pnMatch = xB+nMatch; } } }else{ res = m[nB]; /* In the current implementation, pnMatch is always NULL if zA does ** not end in "*" */ assert( pnMatch==0 ); } sqlite3_free(toFree); return res; } /* ** Function: editdist(A,B) ** ** Return the cost of transforming string A into string B. Both strings ** must be pure ASCII text. 
If A ends with '*' then it is assumed to be ** a prefix of B and extra characters on the end of B have minimal additional ** cost. */ static void editdistSqlFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ int res = editdist1( (const char*)sqlite3_value_text(argv[0]), (const char*)sqlite3_value_text(argv[1]), 0); if( res<0 ){ if( res==(-3) ){ sqlite3_result_error_nomem(context); }else if( res==(-2) ){ sqlite3_result_error(context, "non-ASCII input to editdist()", -1); }else{ sqlite3_result_error(context, "NULL input to editdist()", -1); } }else{ sqlite3_result_int(context, res); } } /* End of the fixed-cost edit distance implementation ****************************************************************************** ***************************************************************************** ** Begin: Configurable cost unicode edit distance routines */ /* Forward declaration of structures */ typedef struct EditDist3Cost EditDist3Cost; typedef struct EditDist3Config EditDist3Config; typedef struct EditDist3Point EditDist3Point; typedef struct EditDist3From EditDist3From; typedef struct EditDist3FromString EditDist3FromString; typedef struct EditDist3To EditDist3To; typedef struct EditDist3ToString EditDist3ToString; typedef struct EditDist3Lang EditDist3Lang; /* ** An entry in the edit cost table */ struct EditDist3Cost { EditDist3Cost *pNext; /* Next cost element */ u8 nFrom; /* Number of bytes in aFrom */ u8 nTo; /* Number of bytes in aTo */ u16 iCost; /* Cost of this transformation */ char a[4] ; /* FROM string followed by TO string */ /* Additional TO and FROM string bytes appended as necessary */ }; /* ** Edit costs for a particular language ID */ struct EditDist3Lang { int iLang; /* Language ID */ int iInsCost; /* Default insertion cost */ int iDelCost; /* Default deletion cost */ int iSubCost; /* Default substitution cost */ EditDist3Cost *pCost; /* Costs */ }; /* ** The default EditDist3Lang object, with default costs. */ static const EditDist3Lang editDist3Lang = { 0, 100, 100, 150, 0 }; /* ** Complete configuration */ struct EditDist3Config { int nLang; /* Number of language IDs. Size of a[] */ EditDist3Lang *a; /* One for each distinct language ID */ }; /* ** Extra information about each character in the FROM string. */ struct EditDist3From { int nSubst; /* Number of substitution cost entries */ int nDel; /* Number of deletion cost entries */ int nByte; /* Number of bytes in this character */ EditDist3Cost **apSubst; /* Array of substitution costs for this element */ EditDist3Cost **apDel; /* Array of deletion cost entries */ }; /* ** A precompiled FROM string. * ** In the common case we expect the FROM string to be reused multiple times. ** In other words, the common case will be to measure the edit distance ** from a single origin string to multiple target strings. */ struct EditDist3FromString { char *z; /* The complete text of the FROM string */ int n; /* Number of characters in the FROM string */ int isPrefix; /* True if ends with '*' character */ EditDist3From *a; /* Extra info about each char of the FROM string */ }; /* ** Extra information about each character in the TO string. 
*/ struct EditDist3To { int nIns; /* Number of insertion cost entries */ int nByte; /* Number of bytes in this character */ EditDist3Cost **apIns; /* Array of deletion cost entries */ }; /* ** A precompiled FROM string */ struct EditDist3ToString { char *z; /* The complete text of the TO string */ int n; /* Number of characters in the TO string */ EditDist3To *a; /* Extra info about each char of the TO string */ }; /* ** Clear or delete an instance of the object that records all edit-distance ** weights. */ static void editDist3ConfigClear(EditDist3Config *p){ int i; if( p==0 ) return; for(i=0; i<p->nLang; i++){ EditDist3Cost *pCost, *pNext; pCost = p->a[i].pCost; while( pCost ){ pNext = pCost->pNext; sqlite3_free(pCost); pCost = pNext; } } sqlite3_free(p->a); memset(p, 0, sizeof(*p)); } static void editDist3ConfigDelete(void *pIn){ EditDist3Config *p = (EditDist3Config*)pIn; editDist3ConfigClear(p); sqlite3_free(p); } /* ** Load all edit-distance weights from a table. */ static int editDist3ConfigLoad( EditDist3Config *p, /* The edit distance configuration to load */ sqlite3 *db, /* Load from this database */ const char *zTable /* Name of the table from which to load */ ){ sqlite3_stmt *pStmt; int rc, rc2; char *zSql; int iLangPrev = -9999; EditDist3Lang *pLang = 0; zSql = sqlite3_mprintf("SELECT iLang, cFrom, cTo, iCost" " FROM \"%w\" WHERE iLang>=0 ORDER BY iLang", zTable); if( zSql==0 ) return SQLITE_NOMEM; rc = sqlite3_prepare(db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); if( rc ) return rc; editDist3ConfigClear(p); while( sqlite3_step(pStmt)==SQLITE_ROW ){ int iLang = sqlite3_column_int(pStmt, 0); const char *zFrom = (const char*)sqlite3_column_text(pStmt, 1); int nFrom = zFrom ? sqlite3_column_bytes(pStmt, 1) : 0; const char *zTo = (const char*)sqlite3_column_text(pStmt, 2); int nTo = zTo ? sqlite3_column_bytes(pStmt, 2) : 0; int iCost = sqlite3_column_int(pStmt, 3); assert( zFrom!=0 || nFrom==0 ); assert( zTo!=0 || nTo==0 ); if( nFrom>100 || nTo>100 ) continue; if( iCost<0 ) continue; if( pLang==0 || iLang!=iLangPrev ){ EditDist3Lang *pNew; pNew = sqlite3_realloc(p->a, (p->nLang+1)*sizeof(p->a[0])); if( pNew==0 ){ rc = SQLITE_NOMEM; break; } p->a = pNew; pLang = &p->a[p->nLang]; p->nLang++; pLang->iLang = iLang; pLang->iInsCost = 100; pLang->iDelCost = 100; pLang->iSubCost = 150; pLang->pCost = 0; iLangPrev = iLang; } if( nFrom==1 && zFrom[0]=='?' && nTo==0 ){ pLang->iDelCost = iCost; }else if( nFrom==0 && nTo==1 && zTo[0]=='?' ){ pLang->iInsCost = iCost; }else if( nFrom==1 && nTo==1 && zFrom[0]=='?' && zTo[0]=='?' ){ pLang->iSubCost = iCost; }else{ EditDist3Cost *pCost; int nExtra = nFrom + nTo - 4; if( nExtra<0 ) nExtra = 0; pCost = sqlite3_malloc( sizeof(*pCost) + nExtra ); if( pCost==0 ){ rc = SQLITE_NOMEM; break; } pCost->nFrom = nFrom; pCost->nTo = nTo; pCost->iCost = iCost; memcpy(pCost->a, zFrom, nFrom); memcpy(pCost->a + nFrom, zTo, nTo); pCost->pNext = pLang->pCost; pLang->pCost = pCost; } } rc2 = sqlite3_finalize(pStmt); if( rc==SQLITE_OK ) rc = rc2; return rc; } /* ** Return the length (in bytes) of a utf-8 character. Or return a maximum ** of N. */ static int utf8Len(unsigned char c, int N){ int len = 1; if( c>0x7f ){ if( (c&0xe0)==0xc0 ){ len = 2; }else if( (c&0xf0)==0xe0 ){ len = 3; }else{ len = 4; } } if( len>N ) len = N; return len; } /* ** Return TRUE (non-zero) if the To side of the given cost matches ** the given string. 
*/ static int matchTo(EditDist3Cost *p, const char *z, int n){ if( p->nTo>n ) return 0; if( strncmp(p->a+p->nFrom, z, p->nTo)!=0 ) return 0; return 1; } /* ** Return TRUE (non-zero) if the From side of the given cost matches ** the given string. */ static int matchFrom(EditDist3Cost *p, const char *z, int n){ assert( p->nFrom<=n ); if( strncmp(p->a, z, p->nFrom)!=0 ) return 0; return 1; } /* ** Return TRUE (non-zero) of the next FROM character and the next TO ** character are the same. */ static int matchFromTo( EditDist3FromString *pStr, /* Left hand string */ int n1, /* Index of comparison character on the left */ const char *z2, /* Right-handl comparison character */ int n2 /* Bytes remaining in z2[] */ ){ int b1 = pStr->a[n1].nByte; if( b1>n2 ) return 0; if( memcmp(pStr->z+n1, z2, b1)!=0 ) return 0; return 1; } /* ** Delete an EditDist3FromString objecct */ static void editDist3FromStringDelete(EditDist3FromString *p){ int i; if( p ){ for(i=0; i<p->n; i++){ sqlite3_free(p->a[i].apDel); sqlite3_free(p->a[i].apSubst); } sqlite3_free(p); } } /* ** Create a EditDist3FromString object. */ static EditDist3FromString *editDist3FromStringNew( const EditDist3Lang *pLang, const char *z, int n ){ EditDist3FromString *pStr; EditDist3Cost *p; int i; if( z==0 ) return 0; if( n<0 ) n = (int)strlen(z); pStr = sqlite3_malloc( sizeof(*pStr) + sizeof(pStr->a[0])*n + n + 1 ); if( pStr==0 ) return 0; pStr->a = (EditDist3From*)&pStr[1]; memset(pStr->a, 0, sizeof(pStr->a[0])*n); pStr->n = n; pStr->z = (char*)&pStr->a[n]; memcpy(pStr->z, z, n+1); if( n && z[n-1]=='*' ){ pStr->isPrefix = 1; n--; pStr->n--; pStr->z[n] = 0; }else{ pStr->isPrefix = 0; } for(i=0; i<n; i++){ EditDist3From *pFrom = &pStr->a[i]; memset(pFrom, 0, sizeof(*pFrom)); pFrom->nByte = utf8Len((unsigned char)z[i], n-i); for(p=pLang->pCost; p; p=p->pNext){ EditDist3Cost **apNew; if( i+p->nFrom>n ) continue; if( matchFrom(p, z+i, n-i)==0 ) continue; if( p->nTo==0 ){ apNew = sqlite3_realloc(pFrom->apDel, sizeof(*apNew)*(pFrom->nDel+1)); if( apNew==0 ) break; pFrom->apDel = apNew; apNew[pFrom->nDel++] = p; }else{ apNew = sqlite3_realloc(pFrom->apSubst, sizeof(*apNew)*(pFrom->nSubst+1)); if( apNew==0 ) break; pFrom->apSubst = apNew; apNew[pFrom->nSubst++] = p; } } if( p ){ editDist3FromStringDelete(pStr); pStr = 0; break; } } return pStr; } /* ** Update entry m[i] such that it is the minimum of its current value ** and m[j]+iCost. ** ** If the iCost is 1,000,000 or greater, then consider the cost to be ** infinite and skip the update. */ static void updateCost( unsigned int *m, int i, int j, int iCost ){ assert( iCost>=0 ); if( iCost<10000 ){ unsigned int b = m[j] + iCost; if( b<m[i] ) m[i] = b; } } /* Compute the edit distance between two strings. ** ** If an error occurs, return a negative number which is the error code. ** ** If pnMatch is not NULL, then *pnMatch is set to the number of characters ** (not bytes) in z2 that matched the search pattern in *pFrom. If pFrom does ** not contain the pattern for a prefix-search, then this is always the number ** of characters in z2. If pFrom does contain a prefix search pattern, then ** it is the number of characters in the prefix of z2 that was deemed to ** match pFrom. 
*/ static int editDist3Core( EditDist3FromString *pFrom, /* The FROM string */ const char *z2, /* The TO string */ int n2, /* Length of the TO string */ const EditDist3Lang *pLang, /* Edit weights for a particular language ID */ int *pnMatch /* OUT: Characters in matched prefix */ ){ int k, n; int i1, b1; int i2, b2; EditDist3FromString f = *pFrom; EditDist3To *a2; unsigned int *m; int szRow; EditDist3Cost *p; int res; /* allocate the Wagner matrix and the aTo[] array for the TO string */ n = (f.n+1)*(n2+1); n = (n+1)&~1; m = sqlite3_malloc( n*sizeof(m[0]) + sizeof(a2[0])*n2 ); if( m==0 ) return -1; /* Out of memory */ a2 = (EditDist3To*)&m[n]; memset(a2, 0, sizeof(a2[0])*n2); /* Fill in the a1[] matrix for all characters of the TO string */ for(i2=0; i2<n2; i2++){ a2[i2].nByte = utf8Len((unsigned char)z2[i2], n2-i2); for(p=pLang->pCost; p; p=p->pNext){ EditDist3Cost **apNew; if( p->nFrom>0 ) continue; if( i2+p->nTo>n2 ) continue; if( matchTo(p, z2+i2, n2-i2)==0 ) continue; a2[i2].nIns++; apNew = sqlite3_realloc(a2[i2].apIns, sizeof(*apNew)*a2[i2].nIns); if( apNew==0 ){ res = -1; /* Out of memory */ goto editDist3Abort; } a2[i2].apIns = apNew; a2[i2].apIns[a2[i2].nIns-1] = p; } } /* Prepare to compute the minimum edit distance */ szRow = f.n+1; memset(m, 0x01, (n2+1)*szRow*sizeof(m[0])); m[0] = 0; /* First fill in the top-row of the matrix with FROM deletion costs */ for(i1=0; i1<f.n; i1 += b1){ b1 = f.a[i1].nByte; updateCost(m, i1+b1, i1, pLang->iDelCost); for(k=0; k<f.a[i1].nDel; k++){ p = f.a[i1].apDel[k]; updateCost(m, i1+p->nFrom, i1, p->iCost); } } /* Fill in all subsequent rows, top-to-bottom, left-to-right */ for(i2=0; i2<n2; i2 += b2){ int rx; /* Starting index for current row */ int rxp; /* Starting index for previous row */ b2 = a2[i2].nByte; rx = szRow*(i2+b2); rxp = szRow*i2; updateCost(m, rx, rxp, pLang->iInsCost); for(k=0; k<a2[i2].nIns; k++){ p = a2[i2].apIns[k]; updateCost(m, szRow*(i2+p->nTo), rxp, p->iCost); } for(i1=0; i1<f.n; i1+=b1){ int cx; /* Index of current cell */ int cxp; /* Index of cell immediately to the left */ int cxd; /* Index of cell to the left and one row above */ int cxu; /* Index of cell immediately above */ b1 = f.a[i1].nByte; cxp = rx + i1; cx = cxp + b1; cxd = rxp + i1; cxu = cxd + b1; updateCost(m, cx, cxp, pLang->iDelCost); for(k=0; k<f.a[i1].nDel; k++){ p = f.a[i1].apDel[k]; updateCost(m, cxp+p->nFrom, cxp, p->iCost); } updateCost(m, cx, cxu, pLang->iInsCost); if( matchFromTo(&f, i1, z2+i2, n2-i2) ){ updateCost(m, cx, cxd, 0); } updateCost(m, cx, cxd, pLang->iSubCost); for(k=0; k<f.a[i1].nSubst; k++){ p = f.a[i1].apSubst[k]; if( matchTo(p, z2+i2, n2-i2) ){ updateCost(m, cxd+p->nFrom+szRow*p->nTo, cxd, p->iCost); } } } } #if 0 /* Enable for debugging */ printf(" ^"); for(i1=0; i1<f.n; i1++) printf(" %c-%2x", f.z[i1], f.z[i1]&0xff); printf("\n ^:"); for(i1=0; i1<szRow; i1++){ int v = m[i1]; if( v>9999 ) printf(" ****"); else printf(" %4d", v); } printf("\n"); for(i2=0; i2<n2; i2++){ printf("%c-%02x:", z2[i2], z2[i2]&0xff); for(i1=0; i1<szRow; i1++){ int v = m[(i2+1)*szRow+i1]; if( v>9999 ) printf(" ****"); else printf(" %4d", v); } printf("\n"); } #endif /* Free memory allocations and return the result */ res = (int)m[szRow*(n2+1)-1]; n = n2; if( f.isPrefix ){ for(i2=1; i2<=n2; i2++){ int b = m[szRow*i2-1]; if( b<=res ){ res = b; n = i2 - 1; } } } if( pnMatch ){ int nExtra = 0; for(k=0; k<n; k++){ if( (z2[k] & 0xc0)==0x80 ) nExtra++; } *pnMatch = n - nExtra; } editDist3Abort: for(i2=0; i2<n2; i2++) sqlite3_free(a2[i2].apIns); sqlite3_free(m); 
return res; } /* ** Get an appropriate EditDist3Lang object. */ static const EditDist3Lang *editDist3FindLang( EditDist3Config *pConfig, int iLang ){ int i; for(i=0; i<pConfig->nLang; i++){ if( pConfig->a[i].iLang==iLang ) return &pConfig->a[i]; } return &editDist3Lang; } /* ** Function: editdist3(A,B,iLang) ** editdist3(tablename) ** ** Return the cost of transforming string A into string B using edit ** weights for iLang. ** ** The second form loads edit weights into memory from a table. */ static void editDist3SqlFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ EditDist3Config *pConfig = (EditDist3Config*)sqlite3_user_data(context); sqlite3 *db = sqlite3_context_db_handle(context); int rc; if( argc==1 ){ const char *zTable = (const char*)sqlite3_value_text(argv[0]); rc = editDist3ConfigLoad(pConfig, db, zTable); if( rc ) sqlite3_result_error_code(context, rc); }else{ const char *zA = (const char*)sqlite3_value_text(argv[0]); const char *zB = (const char*)sqlite3_value_text(argv[1]); int nA = sqlite3_value_bytes(argv[0]); int nB = sqlite3_value_bytes(argv[1]); int iLang = argc==3 ? sqlite3_value_int(argv[2]) : 0; const EditDist3Lang *pLang = editDist3FindLang(pConfig, iLang); EditDist3FromString *pFrom; int dist; pFrom = editDist3FromStringNew(pLang, zA, nA); if( pFrom==0 ){ sqlite3_result_error_nomem(context); return; } dist = editDist3Core(pFrom, zB, nB, pLang, 0); editDist3FromStringDelete(pFrom); if( dist==(-1) ){ sqlite3_result_error_nomem(context); }else{ sqlite3_result_int(context, dist); } } } /* ** Register the editDist3 function with SQLite */ static int editDist3Install(sqlite3 *db){ int rc; EditDist3Config *pConfig = sqlite3_malloc( sizeof(*pConfig) ); if( pConfig==0 ) return SQLITE_NOMEM; memset(pConfig, 0, sizeof(*pConfig)); rc = sqlite3_create_function_v2(db, "editdist3", 2, SQLITE_UTF8, pConfig, editDist3SqlFunc, 0, 0, 0); if( rc==SQLITE_OK ){ rc = sqlite3_create_function_v2(db, "editdist3", 3, SQLITE_UTF8, pConfig, editDist3SqlFunc, 0, 0, 0); } if( rc==SQLITE_OK ){ rc = sqlite3_create_function_v2(db, "editdist3", 1, SQLITE_UTF8, pConfig, editDist3SqlFunc, 0, 0, editDist3ConfigDelete); }else{ sqlite3_free(pConfig); } return rc; } /* End configurable cost unicode edit distance routines ****************************************************************************** ****************************************************************************** ** Begin transliterate unicode-to-ascii implementation */ #if !SQLITE_AMALGAMATION /* ** This lookup table is used to help decode the first byte of ** a multi-byte UTF8 character. */ static const unsigned char sqlite3Utf8Trans1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, 0x00, 0x01, 0x00, 0x00, }; #endif /* ** Return the value of the first UTF-8 character in the string. */ static int utf8Read(const unsigned char *z, int n, int *pSize){ int c, i; /* All callers to this routine (in the current implementation) ** always have n>0. 
*/ if( NEVER(n==0) ){ c = i = 0; }else{ c = z[0]; i = 1; if( c>=0xc0 ){ c = sqlite3Utf8Trans1[c-0xc0]; while( i<n && (z[i] & 0xc0)==0x80 ){ c = (c<<6) + (0x3f & z[i++]); } } } *pSize = i; return c; } /* ** Return the number of characters in the utf-8 string in the nIn byte ** buffer pointed to by zIn. */ static int utf8Charlen(const char *zIn, int nIn){ int i; int nChar = 0; for(i=0; i<nIn; nChar++){ int sz; utf8Read((const unsigned char *)&zIn[i], nIn-i, &sz); i += sz; } return nChar; } /* ** Table of translations from unicode characters into ASCII. */ static const struct { unsigned short int cFrom; unsigned char cTo0, cTo1; } translit[] = { { 0x00A0, 0x20, 0x00 }, /*   to */ { 0x00B5, 0x75, 0x00 }, /* µ to u */ { 0x00C0, 0x41, 0x00 }, /* À to A */ { 0x00C1, 0x41, 0x00 }, /* Á to A */ { 0x00C2, 0x41, 0x00 }, /*  to A */ { 0x00C3, 0x41, 0x00 }, /* à to A */ { 0x00C4, 0x41, 0x65 }, /* Ä to Ae */ { 0x00C5, 0x41, 0x61 }, /* Å to Aa */ { 0x00C6, 0x41, 0x45 }, /* Æ to AE */ { 0x00C7, 0x43, 0x00 }, /* Ç to C */ { 0x00C8, 0x45, 0x00 }, /* È to E */ { 0x00C9, 0x45, 0x00 }, /* É to E */ { 0x00CA, 0x45, 0x00 }, /* Ê to E */ { 0x00CB, 0x45, 0x00 }, /* Ë to E */ { 0x00CC, 0x49, 0x00 }, /* Ì to I */ { 0x00CD, 0x49, 0x00 }, /* Í to I */ { 0x00CE, 0x49, 0x00 }, /* Î to I */ { 0x00CF, 0x49, 0x00 }, /* Ï to I */ { 0x00D0, 0x44, 0x00 }, /* Ð to D */ { 0x00D1, 0x4E, 0x00 }, /* Ñ to N */ { 0x00D2, 0x4F, 0x00 }, /* Ò to O */ { 0x00D3, 0x4F, 0x00 }, /* Ó to O */ { 0x00D4, 0x4F, 0x00 }, /* Ô to O */ { 0x00D5, 0x4F, 0x00 }, /* Õ to O */ { 0x00D6, 0x4F, 0x65 }, /* Ö to Oe */ { 0x00D7, 0x78, 0x00 }, /* × to x */ { 0x00D8, 0x4F, 0x00 }, /* Ø to O */ { 0x00D9, 0x55, 0x00 }, /* Ù to U */ { 0x00DA, 0x55, 0x00 }, /* Ú to U */ { 0x00DB, 0x55, 0x00 }, /* Û to U */ { 0x00DC, 0x55, 0x65 }, /* Ü to Ue */ { 0x00DD, 0x59, 0x00 }, /* Ý to Y */ { 0x00DE, 0x54, 0x68 }, /* Þ to Th */ { 0x00DF, 0x73, 0x73 }, /* ß to ss */ { 0x00E0, 0x61, 0x00 }, /* à to a */ { 0x00E1, 0x61, 0x00 }, /* á to a */ { 0x00E2, 0x61, 0x00 }, /* â to a */ { 0x00E3, 0x61, 0x00 }, /* ã to a */ { 0x00E4, 0x61, 0x65 }, /* ä to ae */ { 0x00E5, 0x61, 0x61 }, /* å to aa */ { 0x00E6, 0x61, 0x65 }, /* æ to ae */ { 0x00E7, 0x63, 0x00 }, /* ç to c */ { 0x00E8, 0x65, 0x00 }, /* è to e */ { 0x00E9, 0x65, 0x00 }, /* é to e */ { 0x00EA, 0x65, 0x00 }, /* ê to e */ { 0x00EB, 0x65, 0x00 }, /* ë to e */ { 0x00EC, 0x69, 0x00 }, /* ì to i */ { 0x00ED, 0x69, 0x00 }, /* í to i */ { 0x00EE, 0x69, 0x00 }, /* î to i */ { 0x00EF, 0x69, 0x00 }, /* ï to i */ { 0x00F0, 0x64, 0x00 }, /* ð to d */ { 0x00F1, 0x6E, 0x00 }, /* ñ to n */ { 0x00F2, 0x6F, 0x00 }, /* ò to o */ { 0x00F3, 0x6F, 0x00 }, /* ó to o */ { 0x00F4, 0x6F, 0x00 }, /* ô to o */ { 0x00F5, 0x6F, 0x00 }, /* õ to o */ { 0x00F6, 0x6F, 0x65 }, /* ö to oe */ { 0x00F7, 0x3A, 0x00 }, /* ÷ to : */ { 0x00F8, 0x6F, 0x00 }, /* ø to o */ { 0x00F9, 0x75, 0x00 }, /* ù to u */ { 0x00FA, 0x75, 0x00 }, /* ú to u */ { 0x00FB, 0x75, 0x00 }, /* û to u */ { 0x00FC, 0x75, 0x65 }, /* ü to ue */ { 0x00FD, 0x79, 0x00 }, /* ý to y */ { 0x00FE, 0x74, 0x68 }, /* þ to th */ { 0x00FF, 0x79, 0x00 }, /* ÿ to y */ { 0x0100, 0x41, 0x00 }, /* Ā to A */ { 0x0101, 0x61, 0x00 }, /* ā to a */ { 0x0102, 0x41, 0x00 }, /* Ă to A */ { 0x0103, 0x61, 0x00 }, /* ă to a */ { 0x0104, 0x41, 0x00 }, /* Ą to A */ { 0x0105, 0x61, 0x00 }, /* ą to a */ { 0x0106, 0x43, 0x00 }, /* Ć to C */ { 0x0107, 0x63, 0x00 }, /* ć to c */ { 0x0108, 0x43, 0x68 }, /* Ĉ to Ch */ { 0x0109, 0x63, 0x68 }, /* ĉ to ch */ { 0x010A, 0x43, 0x00 }, /* Ċ to C */ { 0x010B, 0x63, 0x00 }, /* ċ to c */ { 
0x010C, 0x43, 0x00 }, /* Č to C */ { 0x010D, 0x63, 0x00 }, /* č to c */ { 0x010E, 0x44, 0x00 }, /* Ď to D */ { 0x010F, 0x64, 0x00 }, /* ď to d */ { 0x0110, 0x44, 0x00 }, /* Đ to D */ { 0x0111, 0x64, 0x00 }, /* đ to d */ { 0x0112, 0x45, 0x00 }, /* Ē to E */ { 0x0113, 0x65, 0x00 }, /* ē to e */ { 0x0114, 0x45, 0x00 }, /* Ĕ to E */ { 0x0115, 0x65, 0x00 }, /* ĕ to e */ { 0x0116, 0x45, 0x00 }, /* Ė to E */ { 0x0117, 0x65, 0x00 }, /* ė to e */ { 0x0118, 0x45, 0x00 }, /* Ę to E */ { 0x0119, 0x65, 0x00 }, /* ę to e */ { 0x011A, 0x45, 0x00 }, /* Ě to E */ { 0x011B, 0x65, 0x00 }, /* ě to e */ { 0x011C, 0x47, 0x68 }, /* Ĝ to Gh */ { 0x011D, 0x67, 0x68 }, /* ĝ to gh */ { 0x011E, 0x47, 0x00 }, /* Ğ to G */ { 0x011F, 0x67, 0x00 }, /* ğ to g */ { 0x0120, 0x47, 0x00 }, /* Ġ to G */ { 0x0121, 0x67, 0x00 }, /* ġ to g */ { 0x0122, 0x47, 0x00 }, /* Ģ to G */ { 0x0123, 0x67, 0x00 }, /* ģ to g */ { 0x0124, 0x48, 0x68 }, /* Ĥ to Hh */ { 0x0125, 0x68, 0x68 }, /* ĥ to hh */ { 0x0126, 0x48, 0x00 }, /* Ħ to H */ { 0x0127, 0x68, 0x00 }, /* ħ to h */ { 0x0128, 0x49, 0x00 }, /* Ĩ to I */ { 0x0129, 0x69, 0x00 }, /* ĩ to i */ { 0x012A, 0x49, 0x00 }, /* Ī to I */ { 0x012B, 0x69, 0x00 }, /* ī to i */ { 0x012C, 0x49, 0x00 }, /* Ĭ to I */ { 0x012D, 0x69, 0x00 }, /* ĭ to i */ { 0x012E, 0x49, 0x00 }, /* Į to I */ { 0x012F, 0x69, 0x00 }, /* į to i */ { 0x0130, 0x49, 0x00 }, /* İ to I */ { 0x0131, 0x69, 0x00 }, /* ı to i */ { 0x0132, 0x49, 0x4A }, /* IJ to IJ */ { 0x0133, 0x69, 0x6A }, /* ij to ij */ { 0x0134, 0x4A, 0x68 }, /* Ĵ to Jh */ { 0x0135, 0x6A, 0x68 }, /* ĵ to jh */ { 0x0136, 0x4B, 0x00 }, /* Ķ to K */ { 0x0137, 0x6B, 0x00 }, /* ķ to k */ { 0x0138, 0x6B, 0x00 }, /* ĸ to k */ { 0x0139, 0x4C, 0x00 }, /* Ĺ to L */ { 0x013A, 0x6C, 0x00 }, /* ĺ to l */ { 0x013B, 0x4C, 0x00 }, /* Ļ to L */ { 0x013C, 0x6C, 0x00 }, /* ļ to l */ { 0x013D, 0x4C, 0x00 }, /* Ľ to L */ { 0x013E, 0x6C, 0x00 }, /* ľ to l */ { 0x013F, 0x4C, 0x2E }, /* Ŀ to L. */ { 0x0140, 0x6C, 0x2E }, /* ŀ to l. 
*/ { 0x0141, 0x4C, 0x00 }, /* Ł to L */ { 0x0142, 0x6C, 0x00 }, /* ł to l */ { 0x0143, 0x4E, 0x00 }, /* Ń to N */ { 0x0144, 0x6E, 0x00 }, /* ń to n */ { 0x0145, 0x4E, 0x00 }, /* Ņ to N */ { 0x0146, 0x6E, 0x00 }, /* ņ to n */ { 0x0147, 0x4E, 0x00 }, /* Ň to N */ { 0x0148, 0x6E, 0x00 }, /* ň to n */ { 0x0149, 0x27, 0x6E }, /* ʼn to 'n */ { 0x014A, 0x4E, 0x47 }, /* Ŋ to NG */ { 0x014B, 0x6E, 0x67 }, /* ŋ to ng */ { 0x014C, 0x4F, 0x00 }, /* Ō to O */ { 0x014D, 0x6F, 0x00 }, /* ō to o */ { 0x014E, 0x4F, 0x00 }, /* Ŏ to O */ { 0x014F, 0x6F, 0x00 }, /* ŏ to o */ { 0x0150, 0x4F, 0x00 }, /* Ő to O */ { 0x0151, 0x6F, 0x00 }, /* ő to o */ { 0x0152, 0x4F, 0x45 }, /* Œ to OE */ { 0x0153, 0x6F, 0x65 }, /* œ to oe */ { 0x0154, 0x52, 0x00 }, /* Ŕ to R */ { 0x0155, 0x72, 0x00 }, /* ŕ to r */ { 0x0156, 0x52, 0x00 }, /* Ŗ to R */ { 0x0157, 0x72, 0x00 }, /* ŗ to r */ { 0x0158, 0x52, 0x00 }, /* Ř to R */ { 0x0159, 0x72, 0x00 }, /* ř to r */ { 0x015A, 0x53, 0x00 }, /* Ś to S */ { 0x015B, 0x73, 0x00 }, /* ś to s */ { 0x015C, 0x53, 0x68 }, /* Ŝ to Sh */ { 0x015D, 0x73, 0x68 }, /* ŝ to sh */ { 0x015E, 0x53, 0x00 }, /* Ş to S */ { 0x015F, 0x73, 0x00 }, /* ş to s */ { 0x0160, 0x53, 0x00 }, /* Š to S */ { 0x0161, 0x73, 0x00 }, /* š to s */ { 0x0162, 0x54, 0x00 }, /* Ţ to T */ { 0x0163, 0x74, 0x00 }, /* ţ to t */ { 0x0164, 0x54, 0x00 }, /* Ť to T */ { 0x0165, 0x74, 0x00 }, /* ť to t */ { 0x0166, 0x54, 0x00 }, /* Ŧ to T */ { 0x0167, 0x74, 0x00 }, /* ŧ to t */ { 0x0168, 0x55, 0x00 }, /* Ũ to U */ { 0x0169, 0x75, 0x00 }, /* ũ to u */ { 0x016A, 0x55, 0x00 }, /* Ū to U */ { 0x016B, 0x75, 0x00 }, /* ū to u */ { 0x016C, 0x55, 0x00 }, /* Ŭ to U */ { 0x016D, 0x75, 0x00 }, /* ŭ to u */ { 0x016E, 0x55, 0x00 }, /* Ů to U */ { 0x016F, 0x75, 0x00 }, /* ů to u */ { 0x0170, 0x55, 0x00 }, /* Ű to U */ { 0x0171, 0x75, 0x00 }, /* ű to u */ { 0x0172, 0x55, 0x00 }, /* Ų to U */ { 0x0173, 0x75, 0x00 }, /* ų to u */ { 0x0174, 0x57, 0x00 }, /* Ŵ to W */ { 0x0175, 0x77, 0x00 }, /* ŵ to w */ { 0x0176, 0x59, 0x00 }, /* Ŷ to Y */ { 0x0177, 0x79, 0x00 }, /* ŷ to y */ { 0x0178, 0x59, 0x00 }, /* Ÿ to Y */ { 0x0179, 0x5A, 0x00 }, /* Ź to Z */ { 0x017A, 0x7A, 0x00 }, /* ź to z */ { 0x017B, 0x5A, 0x00 }, /* Ż to Z */ { 0x017C, 0x7A, 0x00 }, /* ż to z */ { 0x017D, 0x5A, 0x00 }, /* Ž to Z */ { 0x017E, 0x7A, 0x00 }, /* ž to z */ { 0x017F, 0x73, 0x00 }, /* ſ to s */ { 0x0192, 0x66, 0x00 }, /* ƒ to f */ { 0x0218, 0x53, 0x00 }, /* Ș to S */ { 0x0219, 0x73, 0x00 }, /* ș to s */ { 0x021A, 0x54, 0x00 }, /* Ț to T */ { 0x021B, 0x74, 0x00 }, /* ț to t */ { 0x0386, 0x41, 0x00 }, /* Ά to A */ { 0x0388, 0x45, 0x00 }, /* Έ to E */ { 0x0389, 0x49, 0x00 }, /* Ή to I */ { 0x038A, 0x49, 0x00 }, /* Ί to I */ { 0x038C, 0x4f, 0x00 }, /* Ό to O */ { 0x038E, 0x59, 0x00 }, /* Ύ to Y */ { 0x038F, 0x4f, 0x00 }, /* Ώ to O */ { 0x0390, 0x69, 0x00 }, /* ΐ to i */ { 0x0391, 0x41, 0x00 }, /* Α to A */ { 0x0392, 0x42, 0x00 }, /* Β to B */ { 0x0393, 0x47, 0x00 }, /* Γ to G */ { 0x0394, 0x44, 0x00 }, /* Δ to D */ { 0x0395, 0x45, 0x00 }, /* Ε to E */ { 0x0396, 0x5a, 0x00 }, /* Ζ to Z */ { 0x0397, 0x49, 0x00 }, /* Η to I */ { 0x0398, 0x54, 0x68 }, /* Θ to Th */ { 0x0399, 0x49, 0x00 }, /* Ι to I */ { 0x039A, 0x4b, 0x00 }, /* Κ to K */ { 0x039B, 0x4c, 0x00 }, /* Λ to L */ { 0x039C, 0x4d, 0x00 }, /* Μ to M */ { 0x039D, 0x4e, 0x00 }, /* Ν to N */ { 0x039E, 0x58, 0x00 }, /* Ξ to X */ { 0x039F, 0x4f, 0x00 }, /* Ο to O */ { 0x03A0, 0x50, 0x00 }, /* Π to P */ { 0x03A1, 0x52, 0x00 }, /* Ρ to R */ { 0x03A3, 0x53, 0x00 }, /* Σ to S */ { 0x03A4, 0x54, 0x00 }, /* Τ to T */ { 0x03A5, 0x59, 0x00 }, /* 
Υ to Y */ { 0x03A6, 0x46, 0x00 }, /* Φ to F */ { 0x03A7, 0x43, 0x68 }, /* Χ to Ch */ { 0x03A8, 0x50, 0x73 }, /* Ψ to Ps */ { 0x03A9, 0x4f, 0x00 }, /* Ω to O */ { 0x03AA, 0x49, 0x00 }, /* Ϊ to I */ { 0x03AB, 0x59, 0x00 }, /* Ϋ to Y */ { 0x03AC, 0x61, 0x00 }, /* ά to a */ { 0x03AD, 0x65, 0x00 }, /* έ to e */ { 0x03AE, 0x69, 0x00 }, /* ή to i */ { 0x03AF, 0x69, 0x00 }, /* ί to i */ { 0x03B1, 0x61, 0x00 }, /* α to a */ { 0x03B2, 0x62, 0x00 }, /* β to b */ { 0x03B3, 0x67, 0x00 }, /* γ to g */ { 0x03B4, 0x64, 0x00 }, /* δ to d */ { 0x03B5, 0x65, 0x00 }, /* ε to e */ { 0x03B6, 0x7a, 0x00 }, /* ζ to z */ { 0x03B7, 0x69, 0x00 }, /* η to i */ { 0x03B8, 0x74, 0x68 }, /* θ to th */ { 0x03B9, 0x69, 0x00 }, /* ι to i */ { 0x03BA, 0x6b, 0x00 }, /* κ to k */ { 0x03BB, 0x6c, 0x00 }, /* λ to l */ { 0x03BC, 0x6d, 0x00 }, /* μ to m */ { 0x03BD, 0x6e, 0x00 }, /* ν to n */ { 0x03BE, 0x78, 0x00 }, /* ξ to x */ { 0x03BF, 0x6f, 0x00 }, /* ο to o */ { 0x03C0, 0x70, 0x00 }, /* π to p */ { 0x03C1, 0x72, 0x00 }, /* ρ to r */ { 0x03C3, 0x73, 0x00 }, /* σ to s */ { 0x03C4, 0x74, 0x00 }, /* τ to t */ { 0x03C5, 0x79, 0x00 }, /* υ to y */ { 0x03C6, 0x66, 0x00 }, /* φ to f */ { 0x03C7, 0x63, 0x68 }, /* χ to ch */ { 0x03C8, 0x70, 0x73 }, /* ψ to ps */ { 0x03C9, 0x6f, 0x00 }, /* ω to o */ { 0x03CA, 0x69, 0x00 }, /* ϊ to i */ { 0x03CB, 0x79, 0x00 }, /* ϋ to y */ { 0x03CC, 0x6f, 0x00 }, /* ό to o */ { 0x03CD, 0x79, 0x00 }, /* ύ to y */ { 0x03CE, 0x69, 0x00 }, /* ώ to i */ { 0x0400, 0x45, 0x00 }, /* Ѐ to E */ { 0x0401, 0x45, 0x00 }, /* Ё to E */ { 0x0402, 0x44, 0x00 }, /* Ђ to D */ { 0x0403, 0x47, 0x00 }, /* Ѓ to G */ { 0x0404, 0x45, 0x00 }, /* Є to E */ { 0x0405, 0x5a, 0x00 }, /* Ѕ to Z */ { 0x0406, 0x49, 0x00 }, /* І to I */ { 0x0407, 0x49, 0x00 }, /* Ї to I */ { 0x0408, 0x4a, 0x00 }, /* Ј to J */ { 0x0409, 0x49, 0x00 }, /* Љ to I */ { 0x040A, 0x4e, 0x00 }, /* Њ to N */ { 0x040B, 0x44, 0x00 }, /* Ћ to D */ { 0x040C, 0x4b, 0x00 }, /* Ќ to K */ { 0x040D, 0x49, 0x00 }, /* Ѝ to I */ { 0x040E, 0x55, 0x00 }, /* Ў to U */ { 0x040F, 0x44, 0x00 }, /* Џ to D */ { 0x0410, 0x41, 0x00 }, /* А to A */ { 0x0411, 0x42, 0x00 }, /* Б to B */ { 0x0412, 0x56, 0x00 }, /* В to V */ { 0x0413, 0x47, 0x00 }, /* Г to G */ { 0x0414, 0x44, 0x00 }, /* Д to D */ { 0x0415, 0x45, 0x00 }, /* Е to E */ { 0x0416, 0x5a, 0x68 }, /* Ж to Zh */ { 0x0417, 0x5a, 0x00 }, /* З to Z */ { 0x0418, 0x49, 0x00 }, /* И to I */ { 0x0419, 0x49, 0x00 }, /* Й to I */ { 0x041A, 0x4b, 0x00 }, /* К to K */ { 0x041B, 0x4c, 0x00 }, /* Л to L */ { 0x041C, 0x4d, 0x00 }, /* М to M */ { 0x041D, 0x4e, 0x00 }, /* Н to N */ { 0x041E, 0x4f, 0x00 }, /* О to O */ { 0x041F, 0x50, 0x00 }, /* П to P */ { 0x0420, 0x52, 0x00 }, /* Р to R */ { 0x0421, 0x53, 0x00 }, /* С to S */ { 0x0422, 0x54, 0x00 }, /* Т to T */ { 0x0423, 0x55, 0x00 }, /* У to U */ { 0x0424, 0x46, 0x00 }, /* Ф to F */ { 0x0425, 0x4b, 0x68 }, /* Х to Kh */ { 0x0426, 0x54, 0x63 }, /* Ц to Tc */ { 0x0427, 0x43, 0x68 }, /* Ч to Ch */ { 0x0428, 0x53, 0x68 }, /* Ш to Sh */ { 0x0429, 0x53, 0x68 }, /* Щ to Shch */ { 0x042A, 0x61, 0x00 }, /* to A */ { 0x042B, 0x59, 0x00 }, /* Ы to Y */ { 0x042C, 0x59, 0x00 }, /* to Y */ { 0x042D, 0x45, 0x00 }, /* Э to E */ { 0x042E, 0x49, 0x75 }, /* Ю to Iu */ { 0x042F, 0x49, 0x61 }, /* Я to Ia */ { 0x0430, 0x61, 0x00 }, /* а to a */ { 0x0431, 0x62, 0x00 }, /* б to b */ { 0x0432, 0x76, 0x00 }, /* в to v */ { 0x0433, 0x67, 0x00 }, /* г to g */ { 0x0434, 0x64, 0x00 }, /* д to d */ { 0x0435, 0x65, 0x00 }, /* е to e */ { 0x0436, 0x7a, 0x68 }, /* ж to zh */ { 0x0437, 0x7a, 0x00 }, /* з to z */ { 0x0438, 0x69, 
0x00 }, /* и to i */ { 0x0439, 0x69, 0x00 }, /* й to i */ { 0x043A, 0x6b, 0x00 }, /* к to k */ { 0x043B, 0x6c, 0x00 }, /* л to l */ { 0x043C, 0x6d, 0x00 }, /* м to m */ { 0x043D, 0x6e, 0x00 }, /* н to n */ { 0x043E, 0x6f, 0x00 }, /* о to o */ { 0x043F, 0x70, 0x00 }, /* п to p */ { 0x0440, 0x72, 0x00 }, /* р to r */ { 0x0441, 0x73, 0x00 }, /* с to s */ { 0x0442, 0x74, 0x00 }, /* т to t */ { 0x0443, 0x75, 0x00 }, /* у to u */ { 0x0444, 0x66, 0x00 }, /* ф to f */ { 0x0445, 0x6b, 0x68 }, /* х to kh */ { 0x0446, 0x74, 0x63 }, /* ц to tc */ { 0x0447, 0x63, 0x68 }, /* ч to ch */ { 0x0448, 0x73, 0x68 }, /* ш to sh */ { 0x0449, 0x73, 0x68 }, /* щ to shch */ { 0x044A, 0x61, 0x00 }, /* to a */ { 0x044B, 0x79, 0x00 }, /* ы to y */ { 0x044C, 0x79, 0x00 }, /* to y */ { 0x044D, 0x65, 0x00 }, /* э to e */ { 0x044E, 0x69, 0x75 }, /* ю to iu */ { 0x044F, 0x69, 0x61 }, /* я to ia */ { 0x0450, 0x65, 0x00 }, /* ѐ to e */ { 0x0451, 0x65, 0x00 }, /* ё to e */ { 0x0452, 0x64, 0x00 }, /* ђ to d */ { 0x0453, 0x67, 0x00 }, /* ѓ to g */ { 0x0454, 0x65, 0x00 }, /* є to e */ { 0x0455, 0x7a, 0x00 }, /* ѕ to z */ { 0x0456, 0x69, 0x00 }, /* і to i */ { 0x0457, 0x69, 0x00 }, /* ї to i */ { 0x0458, 0x6a, 0x00 }, /* ј to j */ { 0x0459, 0x69, 0x00 }, /* љ to i */ { 0x045A, 0x6e, 0x00 }, /* њ to n */ { 0x045B, 0x64, 0x00 }, /* ћ to d */ { 0x045C, 0x6b, 0x00 }, /* ќ to k */ { 0x045D, 0x69, 0x00 }, /* ѝ to i */ { 0x045E, 0x75, 0x00 }, /* ў to u */ { 0x045F, 0x64, 0x00 }, /* џ to d */ { 0x1E02, 0x42, 0x00 }, /* Ḃ to B */ { 0x1E03, 0x62, 0x00 }, /* ḃ to b */ { 0x1E0A, 0x44, 0x00 }, /* Ḋ to D */ { 0x1E0B, 0x64, 0x00 }, /* ḋ to d */ { 0x1E1E, 0x46, 0x00 }, /* Ḟ to F */ { 0x1E1F, 0x66, 0x00 }, /* ḟ to f */ { 0x1E40, 0x4D, 0x00 }, /* Ṁ to M */ { 0x1E41, 0x6D, 0x00 }, /* ṁ to m */ { 0x1E56, 0x50, 0x00 }, /* Ṗ to P */ { 0x1E57, 0x70, 0x00 }, /* ṗ to p */ { 0x1E60, 0x53, 0x00 }, /* Ṡ to S */ { 0x1E61, 0x73, 0x00 }, /* ṡ to s */ { 0x1E6A, 0x54, 0x00 }, /* Ṫ to T */ { 0x1E6B, 0x74, 0x00 }, /* ṫ to t */ { 0x1E80, 0x57, 0x00 }, /* Ẁ to W */ { 0x1E81, 0x77, 0x00 }, /* ẁ to w */ { 0x1E82, 0x57, 0x00 }, /* Ẃ to W */ { 0x1E83, 0x77, 0x00 }, /* ẃ to w */ { 0x1E84, 0x57, 0x00 }, /* Ẅ to W */ { 0x1E85, 0x77, 0x00 }, /* ẅ to w */ { 0x1EF2, 0x59, 0x00 }, /* Ỳ to Y */ { 0x1EF3, 0x79, 0x00 }, /* ỳ to y */ { 0xFB00, 0x66, 0x66 }, /* ff to ff */ { 0xFB01, 0x66, 0x69 }, /* fi to fi */ { 0xFB02, 0x66, 0x6C }, /* fl to fl */ { 0xFB05, 0x73, 0x74 }, /* ſt to st */ { 0xFB06, 0x73, 0x74 }, /* st to st */ }; /* ** Convert the input string from UTF-8 into pure ASCII by converting ** all non-ASCII characters to some combination of characters in the ** ASCII subset. ** ** The returned string might contain more characters than the input. ** ** Space to hold the returned string comes from sqlite3_malloc() and ** should be freed by the caller. 
*/ static unsigned char *transliterate(const unsigned char *zIn, int nIn){ unsigned char *zOut = sqlite3_malloc( nIn*4 + 1 ); int c, sz, nOut; if( zOut==0 ) return 0; nOut = 0; while( nIn>0 ){ c = utf8Read(zIn, nIn, &sz); zIn += sz; nIn -= sz; if( c<=127 ){ zOut[nOut++] = c; }else{ int xTop, xBtm, x; xTop = sizeof(translit)/sizeof(translit[0]) - 1; xBtm = 0; while( xTop>=xBtm ){ x = (xTop + xBtm)/2; if( translit[x].cFrom==c ){ zOut[nOut++] = translit[x].cTo0; if( translit[x].cTo1 ){ zOut[nOut++] = translit[x].cTo1; /* Add an extra "ch" after the "sh" for Щ and щ */ if( c==0x0429 || c== 0x0449 ){ zOut[nOut++] = 'c'; zOut[nOut++] = 'h'; } } c = 0; break; }else if( translit[x].cFrom>c ){ xTop = x-1; }else{ xBtm = x+1; } } if( c ) zOut[nOut++] = '?'; } } zOut[nOut] = 0; return zOut; } /* ** Return the number of characters in the shortest prefix of the input ** string that transliterates to an ASCII string nTrans bytes or longer. ** Or, if the transliteration of the input string is less than nTrans ** bytes in size, return the number of characters in the input string. */ static int translen_to_charlen(const char *zIn, int nIn, int nTrans){ int i, c, sz, nOut; int nChar; i = nOut = 0; for(nChar=0; i<nIn && nOut<nTrans; nChar++){ c = utf8Read((const unsigned char *)&zIn[i], nIn-i, &sz); i += sz; nOut++; if( c>=128 ){ int xTop, xBtm, x; xTop = sizeof(translit)/sizeof(translit[0]) - 1; xBtm = 0; while( xTop>=xBtm ){ x = (xTop + xBtm)/2; if( translit[x].cFrom==c ){ if( translit[x].cTo1 ) nOut++; if( c==0x0429 || c== 0x0449 ) nOut += 2; break; }else if( translit[x].cFrom>c ){ xTop = x-1; }else{ xBtm = x+1; } } } } return nChar; } /* ** spellfix1_translit(X) ** ** Convert a string that contains non-ASCII Roman characters into ** pure ASCII. */ static void transliterateSqlFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zIn = sqlite3_value_text(argv[0]); int nIn = sqlite3_value_bytes(argv[0]); unsigned char *zOut = transliterate(zIn, nIn); if( zOut==0 ){ sqlite3_result_error_nomem(context); }else{ sqlite3_result_text(context, (char*)zOut, -1, sqlite3_free); } } /* ** spellfix1_scriptcode(X) ** ** Try to determine the dominant script used by the word X and return ** its ISO 15924 numeric code. ** ** The current implementation only understands the following scripts: ** ** 215 (Latin) ** 220 (Cyrillic) ** 200 (Greek) ** ** This routine will return 998 if the input X contains characters from ** two or more of the above scripts or 999 if X contains no characters ** from any of the above scripts. 
*/ static void scriptCodeSqlFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const unsigned char *zIn = sqlite3_value_text(argv[0]); int nIn = sqlite3_value_bytes(argv[0]); int c, sz; int scriptMask = 0; int res; # define SCRIPT_LATIN 0x0001 # define SCRIPT_CYRILLIC 0x0002 # define SCRIPT_GREEK 0x0004 while( nIn>0 ){ c = utf8Read(zIn, nIn, &sz); zIn += sz; nIn -= sz; if( c<0x02af ){ scriptMask |= SCRIPT_LATIN; }else if( c>=0x0400 && c<=0x04ff ){ scriptMask |= SCRIPT_CYRILLIC; }else if( c>=0x0386 && c<=0x03ce ){ scriptMask |= SCRIPT_GREEK; } } switch( scriptMask ){ case 0: res = 999; break; case SCRIPT_LATIN: res = 215; break; case SCRIPT_CYRILLIC: res = 220; break; case SCRIPT_GREEK: res = 200; break; default: res = 998; break; } sqlite3_result_int(context, res); } /* End transliterate ****************************************************************************** ****************************************************************************** ** Begin spellfix1 virtual table. */ /* Maximum length of a phonehash used for querying the shadow table */ #define SPELLFIX_MX_HASH 8 /* Maximum number of hash strings to examine per query */ #define SPELLFIX_MX_RUN 1 typedef struct spellfix1_vtab spellfix1_vtab; typedef struct spellfix1_cursor spellfix1_cursor; /* Fuzzy-search virtual table object */ struct spellfix1_vtab { sqlite3_vtab base; /* Base class - must be first */ sqlite3 *db; /* Database connection */ char *zDbName; /* Name of database holding this table */ char *zTableName; /* Name of the virtual table */ char *zCostTable; /* Table holding edit-distance cost numbers */ EditDist3Config *pConfig3; /* Parsed edit distance costs */ }; /* Fuzzy-search cursor object */ struct spellfix1_cursor { sqlite3_vtab_cursor base; /* Base class - must be first */ spellfix1_vtab *pVTab; /* The table to which this cursor belongs */ char *zPattern; /* rhs of MATCH clause */ int nRow; /* Number of rows of content */ int nAlloc; /* Number of allocated rows */ int iRow; /* Current row of content */ int iLang; /* Value of the langid= constraint */ int iTop; /* Value of the top= constraint */ int iScope; /* Value of the scope= constraint */ int nSearch; /* Number of vocabulary items checked */ sqlite3_stmt *pFullScan; /* Shadow query for a full table scan */ struct spellfix1_row { /* For each row of content */ sqlite3_int64 iRowid; /* Rowid for this row */ char *zWord; /* Text for this row */ int iRank; /* Rank for this row */ int iDistance; /* Distance from pattern for this row */ int iScore; /* Score for sorting */ int iMatchlen; /* Value of matchlen column (or -1) */ char zHash[SPELLFIX_MX_HASH]; /* the phonehash used for this match */ } *a; }; /* ** Construct one or more SQL statements from the format string given ** and then evaluate those statements. The success code is written ** into *pRc. ** ** If *pRc is initially non-zero then this routine is a no-op. */ static void spellfix1DbExec( int *pRc, /* Success code */ sqlite3 *db, /* Database in which to run SQL */ const char *zFormat, /* Format string for SQL */ ... /* Arguments to the format string */ ){ va_list ap; char *zSql; if( *pRc ) return; va_start(ap, zFormat); zSql = sqlite3_vmprintf(zFormat, ap); va_end(ap); if( zSql==0 ){ *pRc = SQLITE_NOMEM; }else{ *pRc = sqlite3_exec(db, zSql, 0, 0, 0); sqlite3_free(zSql); } } /* ** xDisconnect/xDestroy method for the fuzzy-search module. 
*/ static int spellfix1Uninit(int isDestroy, sqlite3_vtab *pVTab){ spellfix1_vtab *p = (spellfix1_vtab*)pVTab; int rc = SQLITE_OK; if( isDestroy ){ sqlite3 *db = p->db; spellfix1DbExec(&rc, db, "DROP TABLE IF EXISTS \"%w\".\"%w_vocab\"", p->zDbName, p->zTableName); } if( rc==SQLITE_OK ){ sqlite3_free(p->zTableName); editDist3ConfigDelete(p->pConfig3); sqlite3_free(p->zCostTable); sqlite3_free(p); } return rc; } static int spellfix1Disconnect(sqlite3_vtab *pVTab){ return spellfix1Uninit(0, pVTab); } static int spellfix1Destroy(sqlite3_vtab *pVTab){ return spellfix1Uninit(1, pVTab); } /* ** Make a copy of a string. Remove leading and trailing whitespace ** and dequote it. */ static char *spellfix1Dequote(const char *zIn){ char *zOut; int i, j; char c; while( isspace(zIn[0]) ) zIn++; zOut = sqlite3_mprintf("%s", zIn); if( zOut==0 ) return 0; i = (int)strlen(zOut); #if 0 /* The parser will never leave spaces at the end */ while( i>0 && isspace(zOut[i-1]) ){ i--; } #endif zOut[i] = 0; c = zOut[0]; if( c=='\'' || c=='"' ){ for(i=1, j=0; ALWAYS(zOut[i]); i++){ zOut[j++] = zOut[i]; if( zOut[i]==c ){ if( zOut[i+1]==c ){ i++; }else{ zOut[j-1] = 0; break; } } } } return zOut; } /* ** xConnect/xCreate method for the spellfix1 module. Arguments are: ** ** argv[0] -> module name ("spellfix1") ** argv[1] -> database name ** argv[2] -> table name ** argv[3].. -> optional arguments (i.e. "edit_cost_table" parameter) */ static int spellfix1Init( int isCreate, sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr ){ spellfix1_vtab *pNew = 0; const char *zModule = argv[0]; const char *zDbName = argv[1]; const char *zTableName = argv[2]; int nDbName; int rc = SQLITE_OK; int i; nDbName = (int)strlen(zDbName); pNew = sqlite3_malloc( sizeof(*pNew) + nDbName + 1); if( pNew==0 ){ rc = SQLITE_NOMEM; }else{ memset(pNew, 0, sizeof(*pNew)); pNew->zDbName = (char*)&pNew[1]; memcpy(pNew->zDbName, zDbName, nDbName+1); pNew->zTableName = sqlite3_mprintf("%s", zTableName); pNew->db = db; if( pNew->zTableName==0 ){ rc = SQLITE_NOMEM; }else{ rc = sqlite3_declare_vtab(db, "CREATE TABLE x(word,rank,distance,langid, " "score, matchlen, phonehash HIDDEN, " "top HIDDEN, scope HIDDEN, srchcnt HIDDEN, " "soundslike HIDDEN, command HIDDEN)" ); #define SPELLFIX_COL_WORD 0 #define SPELLFIX_COL_RANK 1 #define SPELLFIX_COL_DISTANCE 2 #define SPELLFIX_COL_LANGID 3 #define SPELLFIX_COL_SCORE 4 #define SPELLFIX_COL_MATCHLEN 5 #define SPELLFIX_COL_PHONEHASH 6 #define SPELLFIX_COL_TOP 7 #define SPELLFIX_COL_SCOPE 8 #define SPELLFIX_COL_SRCHCNT 9 #define SPELLFIX_COL_SOUNDSLIKE 10 #define SPELLFIX_COL_COMMAND 11 } if( rc==SQLITE_OK && isCreate ){ sqlite3_uint64 r; spellfix1DbExec(&rc, db, "CREATE TABLE IF NOT EXISTS \"%w\".\"%w_vocab\"(\n" " id INTEGER PRIMARY KEY,\n" " rank INT,\n" " langid INT,\n" " word TEXT,\n" " k1 TEXT,\n" " k2 TEXT\n" ");\n", zDbName, zTableName ); sqlite3_randomness(sizeof(r), &r); spellfix1DbExec(&rc, db, "CREATE INDEX IF NOT EXISTS \"%w\".\"%w_index_%llx\" " "ON \"%w_vocab\"(langid,k2);", zDbName, zModule, r, zTableName ); } for(i=3; rc==SQLITE_OK && i<argc; i++){ if( strncmp(argv[i],"edit_cost_table=",16)==0 && pNew->zCostTable==0 ){ pNew->zCostTable = spellfix1Dequote(&argv[i][16]); if( pNew->zCostTable==0 ) rc = SQLITE_NOMEM; continue; } *pzErr = sqlite3_mprintf("bad argument to spellfix1(): \"%s\"", argv[i]); rc = SQLITE_ERROR; } } if( rc && pNew ){ *ppVTab = 0; spellfix1Uninit(0, &pNew->base); }else{ *ppVTab = (sqlite3_vtab *)pNew; } return rc; } /* ** The xConnect and 
xCreate methods */ static int spellfix1Connect( sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr ){ return spellfix1Init(0, db, pAux, argc, argv, ppVTab, pzErr); } static int spellfix1Create( sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr ){ return spellfix1Init(1, db, pAux, argc, argv, ppVTab, pzErr); } /* ** Clear all of the content from a cursor. */ static void spellfix1ResetCursor(spellfix1_cursor *pCur){ int i; for(i=0; i<pCur->nRow; i++){ sqlite3_free(pCur->a[i].zWord); } pCur->nRow = 0; pCur->iRow = 0; pCur->nSearch = 0; if( pCur->pFullScan ){ sqlite3_finalize(pCur->pFullScan); pCur->pFullScan = 0; } } /* ** Resize the cursor to hold up to N rows of content */ static void spellfix1ResizeCursor(spellfix1_cursor *pCur, int N){ struct spellfix1_row *aNew; assert( N>=pCur->nRow ); aNew = sqlite3_realloc(pCur->a, sizeof(pCur->a[0])*N); if( aNew==0 && N>0 ){ spellfix1ResetCursor(pCur); sqlite3_free(pCur->a); pCur->nAlloc = 0; pCur->a = 0; }else{ pCur->nAlloc = N; pCur->a = aNew; } } /* ** Close a fuzzy-search cursor. */ static int spellfix1Close(sqlite3_vtab_cursor *cur){ spellfix1_cursor *pCur = (spellfix1_cursor *)cur; spellfix1ResetCursor(pCur); spellfix1ResizeCursor(pCur, 0); sqlite3_free(pCur->zPattern); sqlite3_free(pCur); return SQLITE_OK; } /* ** Search for terms of these forms: ** ** (A) word MATCH $str ** (B) langid == $langid ** (C) top = $top ** (D) scope = $scope ** (E) distance < $distance ** (F) distance <= $distance ** ** The plan number is a bit mask formed with these bits: ** ** 0x01 (A) is found ** 0x02 (B) is found ** 0x04 (C) is found ** 0x08 (D) is found ** 0x10 (E) is found ** 0x20 (F) is found ** ** filter.argv[*] values contains $str, $langid, $top, and $scope, ** if specified and in that order. */ static int spellfix1BestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ int iPlan = 0; int iLangTerm = -1; int iTopTerm = -1; int iScopeTerm = -1; int iDistTerm = -1; int i; const struct sqlite3_index_constraint *pConstraint; pConstraint = pIdxInfo->aConstraint; for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){ if( pConstraint->usable==0 ) continue; /* Terms of the form: word MATCH $str */ if( (iPlan & 1)==0 && pConstraint->iColumn==SPELLFIX_COL_WORD && pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ iPlan |= 1; pIdxInfo->aConstraintUsage[i].argvIndex = 1; pIdxInfo->aConstraintUsage[i].omit = 1; } /* Terms of the form: langid = $langid */ if( (iPlan & 2)==0 && pConstraint->iColumn==SPELLFIX_COL_LANGID && pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ iPlan |= 2; iLangTerm = i; } /* Terms of the form: top = $top */ if( (iPlan & 4)==0 && pConstraint->iColumn==SPELLFIX_COL_TOP && pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ iPlan |= 4; iTopTerm = i; } /* Terms of the form: scope = $scope */ if( (iPlan & 8)==0 && pConstraint->iColumn==SPELLFIX_COL_SCOPE && pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ iPlan |= 8; iScopeTerm = i; } /* Terms of the form: distance < $dist or distance <= $dist */ if( (iPlan & (16|32))==0 && pConstraint->iColumn==SPELLFIX_COL_DISTANCE && (pConstraint->op==SQLITE_INDEX_CONSTRAINT_LT || pConstraint->op==SQLITE_INDEX_CONSTRAINT_LE) ){ iPlan |= pConstraint->op==SQLITE_INDEX_CONSTRAINT_LT ? 
16 : 32; iDistTerm = i; } } if( iPlan&1 ){ int idx = 2; pIdxInfo->idxNum = iPlan; if( pIdxInfo->nOrderBy==1 && pIdxInfo->aOrderBy[0].iColumn==SPELLFIX_COL_SCORE && pIdxInfo->aOrderBy[0].desc==0 ){ pIdxInfo->orderByConsumed = 1; /* Default order by iScore */ } if( iPlan&2 ){ pIdxInfo->aConstraintUsage[iLangTerm].argvIndex = idx++; pIdxInfo->aConstraintUsage[iLangTerm].omit = 1; } if( iPlan&4 ){ pIdxInfo->aConstraintUsage[iTopTerm].argvIndex = idx++; pIdxInfo->aConstraintUsage[iTopTerm].omit = 1; } if( iPlan&8 ){ pIdxInfo->aConstraintUsage[iScopeTerm].argvIndex = idx++; pIdxInfo->aConstraintUsage[iScopeTerm].omit = 1; } if( iPlan&(16|32) ){ pIdxInfo->aConstraintUsage[iDistTerm].argvIndex = idx++; pIdxInfo->aConstraintUsage[iDistTerm].omit = 1; } pIdxInfo->estimatedCost = (double)10000; }else{ pIdxInfo->idxNum = 0; pIdxInfo->estimatedCost = (double)10000000; } return SQLITE_OK; } /* ** Open a new fuzzy-search cursor. */ static int spellfix1Open(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ spellfix1_vtab *p = (spellfix1_vtab*)pVTab; spellfix1_cursor *pCur; pCur = sqlite3_malloc( sizeof(*pCur) ); if( pCur==0 ) return SQLITE_NOMEM; memset(pCur, 0, sizeof(*pCur)); pCur->pVTab = p; *ppCursor = &pCur->base; return SQLITE_OK; } /* ** Adjust a distance measurement by the words rank in order to show ** preference to common words. */ static int spellfix1Score(int iDistance, int iRank){ int iLog2; for(iLog2=0; iRank>0; iLog2++, iRank>>=1){} return iDistance + 32 - iLog2; } /* ** Compare two spellfix1_row objects for sorting purposes in qsort() such ** that they sort in order of increasing distance. */ static int spellfix1RowCompare(const void *A, const void *B){ const struct spellfix1_row *a = (const struct spellfix1_row*)A; const struct spellfix1_row *b = (const struct spellfix1_row*)B; return a->iScore - b->iScore; } /* ** A structure used to pass information from spellfix1FilterForMatch() ** into spellfix1RunQuery(). */ typedef struct MatchQuery { spellfix1_cursor *pCur; /* The cursor being queried */ sqlite3_stmt *pStmt; /* shadow table query statment */ char zHash[SPELLFIX_MX_HASH]; /* The current phonehash for zPattern */ const char *zPattern; /* Transliterated input string */ int nPattern; /* Length of zPattern */ EditDist3FromString *pMatchStr3; /* Original unicode string */ EditDist3Config *pConfig3; /* Edit-distance cost coefficients */ const EditDist3Lang *pLang; /* The selected language coefficients */ int iLang; /* The language id */ int iScope; /* Default scope */ int iMaxDist; /* Maximum allowed edit distance, or -1 */ int rc; /* Error code */ int nRun; /* Number of prior runs for the same zPattern */ char azPrior[SPELLFIX_MX_RUN][SPELLFIX_MX_HASH]; /* Prior hashes */ } MatchQuery; /* ** Run a query looking for the best matches against zPattern using ** zHash as the character class seed hash. 
*/ static void spellfix1RunQuery(MatchQuery *p, const char *zQuery, int nQuery){ const char *zK1; const char *zWord; int iDist; int iRank; int iScore; int iWorst = 0; int idx; int idxWorst = -1; int i; int iScope = p->iScope; spellfix1_cursor *pCur = p->pCur; sqlite3_stmt *pStmt = p->pStmt; char zHash1[SPELLFIX_MX_HASH]; char zHash2[SPELLFIX_MX_HASH]; char *zClass; int nClass; int rc; if( pCur->a==0 || p->rc ) return; /* Prior memory allocation failure */ zClass = (char*)phoneticHash((unsigned char*)zQuery, nQuery); if( zClass==0 ){ p->rc = SQLITE_NOMEM; return; } nClass = (int)strlen(zClass); if( nClass>SPELLFIX_MX_HASH-2 ){ nClass = SPELLFIX_MX_HASH-2; zClass[nClass] = 0; } if( nClass<=iScope ){ if( nClass>2 ){ iScope = nClass-1; }else{ iScope = nClass; } } memcpy(zHash1, zClass, iScope); sqlite3_free(zClass); zHash1[iScope] = 0; memcpy(zHash2, zHash1, iScope); zHash2[iScope] = 'Z'; zHash2[iScope+1] = 0; #if SPELLFIX_MX_RUN>1 for(i=0; i<p->nRun; i++){ if( strcmp(p->azPrior[i], zHash1)==0 ) return; } #endif assert( p->nRun<SPELLFIX_MX_RUN ); memcpy(p->azPrior[p->nRun++], zHash1, iScope+1); if( sqlite3_bind_text(pStmt, 1, zHash1, -1, SQLITE_STATIC)==SQLITE_NOMEM || sqlite3_bind_text(pStmt, 2, zHash2, -1, SQLITE_STATIC)==SQLITE_NOMEM ){ p->rc = SQLITE_NOMEM; return; } #if SPELLFIX_MX_RUN>1 for(i=0; i<pCur->nRow; i++){ if( pCur->a[i].iScore>iWorst ){ iWorst = pCur->a[i].iScore; idxWorst = i; } } #endif while( sqlite3_step(pStmt)==SQLITE_ROW ){ int iMatchlen = -1; iRank = sqlite3_column_int(pStmt, 2); if( p->pMatchStr3 ){ int nWord = sqlite3_column_bytes(pStmt, 1); zWord = (const char*)sqlite3_column_text(pStmt, 1); iDist = editDist3Core(p->pMatchStr3, zWord, nWord, p->pLang, &iMatchlen); }else{ zK1 = (const char*)sqlite3_column_text(pStmt, 3); if( zK1==0 ) continue; iDist = editdist1(p->zPattern, zK1, 0); } if( iDist<0 ){ p->rc = SQLITE_NOMEM; break; } pCur->nSearch++; iScore = spellfix1Score(iDist,iRank); if( p->iMaxDist>=0 ){ if( iDist>p->iMaxDist ) continue; if( pCur->nRow>=pCur->nAlloc-1 ){ spellfix1ResizeCursor(pCur, pCur->nAlloc*2 + 10); if( pCur->a==0 ) break; } idx = pCur->nRow; }else if( pCur->nRow<pCur->nAlloc ){ idx = pCur->nRow; }else if( iScore<iWorst ){ idx = idxWorst; sqlite3_free(pCur->a[idx].zWord); }else{ continue; } pCur->a[idx].zWord = sqlite3_mprintf("%s", sqlite3_column_text(pStmt, 1)); if( pCur->a[idx].zWord==0 ){ p->rc = SQLITE_NOMEM; break; } pCur->a[idx].iRowid = sqlite3_column_int64(pStmt, 0); pCur->a[idx].iRank = iRank; pCur->a[idx].iDistance = iDist; pCur->a[idx].iScore = iScore; pCur->a[idx].iMatchlen = iMatchlen; memcpy(pCur->a[idx].zHash, zHash1, iScope+1); if( pCur->nRow<pCur->nAlloc ) pCur->nRow++; if( pCur->nRow==pCur->nAlloc ){ iWorst = pCur->a[0].iScore; idxWorst = 0; for(i=1; i<pCur->nRow; i++){ iScore = pCur->a[i].iScore; if( iWorst<iScore ){ iWorst = iScore; idxWorst = i; } } } } rc = sqlite3_reset(pStmt); if( rc ) p->rc = rc; } /* ** This version of the xFilter method work if the MATCH term is present ** and we are doing a scan. 
*/ static int spellfix1FilterForMatch( spellfix1_cursor *pCur, int idxNum, int argc, sqlite3_value **argv ){ const unsigned char *zMatchThis; /* RHS of the MATCH operator */ EditDist3FromString *pMatchStr3 = 0; /* zMatchThis as an editdist string */ char *zPattern; /* Transliteration of zMatchThis */ int nPattern; /* Length of zPattern */ int iLimit = 20; /* Max number of rows of output */ int iScope = 3; /* Use this many characters of zClass */ int iLang = 0; /* Language code */ char *zSql; /* SQL of shadow table query */ sqlite3_stmt *pStmt = 0; /* Shadow table query */ int rc; /* Result code */ int idx = 1; /* Next available filter parameter */ spellfix1_vtab *p = pCur->pVTab; /* The virtual table that owns pCur */ MatchQuery x; /* For passing info to RunQuery() */ /* Load the cost table if we have not already done so */ if( p->zCostTable!=0 && p->pConfig3==0 ){ p->pConfig3 = sqlite3_malloc( sizeof(p->pConfig3[0]) ); if( p->pConfig3==0 ) return SQLITE_NOMEM; memset(p->pConfig3, 0, sizeof(p->pConfig3[0])); rc = editDist3ConfigLoad(p->pConfig3, p->db, p->zCostTable); if( rc ) return rc; } memset(&x, 0, sizeof(x)); x.iScope = 3; /* Default scope if none specified by "WHERE scope=N" */ x.iMaxDist = -1; /* Maximum allowed edit distance */ if( idxNum&2 ){ iLang = sqlite3_value_int(argv[idx++]); } if( idxNum&4 ){ iLimit = sqlite3_value_int(argv[idx++]); if( iLimit<1 ) iLimit = 1; } if( idxNum&8 ){ x.iScope = sqlite3_value_int(argv[idx++]); if( x.iScope<1 ) x.iScope = 1; if( x.iScope>SPELLFIX_MX_HASH-2 ) x.iScope = SPELLFIX_MX_HASH-2; } if( idxNum&(16|32) ){ x.iMaxDist = sqlite3_value_int(argv[idx++]); if( idxNum&16 ) x.iMaxDist--; if( x.iMaxDist<0 ) x.iMaxDist = 0; } spellfix1ResetCursor(pCur); spellfix1ResizeCursor(pCur, iLimit); zMatchThis = sqlite3_value_text(argv[0]); if( zMatchThis==0 ) return SQLITE_OK; if( p->pConfig3 ){ x.pLang = editDist3FindLang(p->pConfig3, iLang); pMatchStr3 = editDist3FromStringNew(x.pLang, (const char*)zMatchThis, -1); if( pMatchStr3==0 ){ x.rc = SQLITE_NOMEM; goto filter_exit; } }else{ x.pLang = 0; } zPattern = (char*)transliterate(zMatchThis, sqlite3_value_bytes(argv[0])); sqlite3_free(pCur->zPattern); pCur->zPattern = zPattern; if( zPattern==0 ){ x.rc = SQLITE_NOMEM; goto filter_exit; } nPattern = (int)strlen(zPattern); if( zPattern[nPattern-1]=='*' ) nPattern--; zSql = sqlite3_mprintf( "SELECT id, word, rank, k1" " FROM \"%w\".\"%w_vocab\"" " WHERE langid=%d AND k2>=?1 AND k2<?2", p->zDbName, p->zTableName, iLang ); if( zSql==0 ){ x.rc = SQLITE_NOMEM; pStmt = 0; goto filter_exit; } rc = sqlite3_prepare_v2(p->db, zSql, -1, &pStmt, 0); sqlite3_free(zSql); pCur->iLang = iLang; x.pCur = pCur; x.pStmt = pStmt; x.zPattern = zPattern; x.nPattern = nPattern; x.pMatchStr3 = pMatchStr3; x.iLang = iLang; x.rc = rc; x.pConfig3 = p->pConfig3; if( x.rc==SQLITE_OK ){ spellfix1RunQuery(&x, zPattern, nPattern); } if( pCur->a ){ qsort(pCur->a, pCur->nRow, sizeof(pCur->a[0]), spellfix1RowCompare); pCur->iTop = iLimit; pCur->iScope = iScope; }else{ x.rc = SQLITE_NOMEM; } filter_exit: sqlite3_finalize(pStmt); editDist3FromStringDelete(pMatchStr3); return x.rc; } /* ** This version of xFilter handles a full-table scan case */ static int spellfix1FilterForFullScan( spellfix1_cursor *pCur, int idxNum, int argc, sqlite3_value **argv ){ int rc; char *zSql; spellfix1_vtab *pVTab = pCur->pVTab; spellfix1ResetCursor(pCur); zSql = sqlite3_mprintf( "SELECT word, rank, NULL, langid, id FROM \"%w\".\"%w_vocab\"", pVTab->zDbName, pVTab->zTableName); if( zSql==0 ) return SQLITE_NOMEM; rc = 
sqlite3_prepare_v2(pVTab->db, zSql, -1, &pCur->pFullScan, 0); sqlite3_free(zSql); pCur->nRow = pCur->iRow = 0; if( rc==SQLITE_OK ){ rc = sqlite3_step(pCur->pFullScan); if( rc==SQLITE_ROW ){ pCur->iRow = -1; rc = SQLITE_OK; } if( rc==SQLITE_DONE ){ rc = SQLITE_OK; } }else{ pCur->iRow = 0; } return rc; } /* ** Called to "rewind" a cursor back to the beginning so that ** it starts its output over again. Always called at least once ** prior to any spellfix1Column, spellfix1Rowid, or spellfix1Eof call. */ static int spellfix1Filter( sqlite3_vtab_cursor *cur, int idxNum, const char *idxStr, int argc, sqlite3_value **argv ){ spellfix1_cursor *pCur = (spellfix1_cursor *)cur; int rc; if( idxNum & 1 ){ rc = spellfix1FilterForMatch(pCur, idxNum, argc, argv); }else{ rc = spellfix1FilterForFullScan(pCur, idxNum, argc, argv); } return rc; } /* ** Advance a cursor to its next row of output */ static int spellfix1Next(sqlite3_vtab_cursor *cur){ spellfix1_cursor *pCur = (spellfix1_cursor *)cur; int rc = SQLITE_OK; if( pCur->iRow < pCur->nRow ){ if( pCur->pFullScan ){ rc = sqlite3_step(pCur->pFullScan); if( rc!=SQLITE_ROW ) pCur->iRow = pCur->nRow; if( rc==SQLITE_ROW || rc==SQLITE_DONE ) rc = SQLITE_OK; }else{ pCur->iRow++; } } return rc; } /* ** Return TRUE if we are at the end-of-file */ static int spellfix1Eof(sqlite3_vtab_cursor *cur){ spellfix1_cursor *pCur = (spellfix1_cursor *)cur; return pCur->iRow>=pCur->nRow; } /* ** Return columns from the current row. */ static int spellfix1Column( sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i ){ spellfix1_cursor *pCur = (spellfix1_cursor*)cur; if( pCur->pFullScan ){ if( i<=SPELLFIX_COL_LANGID ){ sqlite3_result_value(ctx, sqlite3_column_value(pCur->pFullScan, i)); }else{ sqlite3_result_null(ctx); } return SQLITE_OK; } switch( i ){ case SPELLFIX_COL_WORD: { sqlite3_result_text(ctx, pCur->a[pCur->iRow].zWord, -1, SQLITE_STATIC); break; } case SPELLFIX_COL_RANK: { sqlite3_result_int(ctx, pCur->a[pCur->iRow].iRank); break; } case SPELLFIX_COL_DISTANCE: { sqlite3_result_int(ctx, pCur->a[pCur->iRow].iDistance); break; } case SPELLFIX_COL_LANGID: { sqlite3_result_int(ctx, pCur->iLang); break; } case SPELLFIX_COL_SCORE: { sqlite3_result_int(ctx, pCur->a[pCur->iRow].iScore); break; } case SPELLFIX_COL_MATCHLEN: { int iMatchlen = pCur->a[pCur->iRow].iMatchlen; if( iMatchlen<0 ){ int nPattern = (int)strlen(pCur->zPattern); char *zWord = pCur->a[pCur->iRow].zWord; int nWord = (int)strlen(zWord); if( nPattern>0 && pCur->zPattern[nPattern-1]=='*' ){ char *zTranslit; int res; zTranslit = (char *)transliterate((unsigned char *)zWord, nWord); if( !zTranslit ) return SQLITE_NOMEM; res = editdist1(pCur->zPattern, zTranslit, &iMatchlen); sqlite3_free(zTranslit); if( res<0 ) return SQLITE_NOMEM; iMatchlen = translen_to_charlen(zWord, nWord, iMatchlen); }else{ iMatchlen = utf8Charlen(zWord, nWord); } } sqlite3_result_int(ctx, iMatchlen); break; } case SPELLFIX_COL_PHONEHASH: { sqlite3_result_text(ctx, pCur->a[pCur->iRow].zHash, -1, SQLITE_STATIC); break; } case SPELLFIX_COL_TOP: { sqlite3_result_int(ctx, pCur->iTop); break; } case SPELLFIX_COL_SCOPE: { sqlite3_result_int(ctx, pCur->iScope); break; } case SPELLFIX_COL_SRCHCNT: { sqlite3_result_int(ctx, pCur->nSearch); break; } default: { sqlite3_result_null(ctx); break; } } return SQLITE_OK; } /* ** The rowid. 
*/ static int spellfix1Rowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){ spellfix1_cursor *pCur = (spellfix1_cursor*)cur; if( pCur->pFullScan ){ *pRowid = sqlite3_column_int64(pCur->pFullScan, 4); }else{ *pRowid = pCur->a[pCur->iRow].iRowid; } return SQLITE_OK; } /* ** The xUpdate() method. */ static int spellfix1Update( sqlite3_vtab *pVTab, int argc, sqlite3_value **argv, sqlite_int64 *pRowid ){ int rc = SQLITE_OK; sqlite3_int64 rowid, newRowid; spellfix1_vtab *p = (spellfix1_vtab*)pVTab; sqlite3 *db = p->db; if( argc==1 ){ /* A delete operation on the rowid given by argv[0] */ rowid = *pRowid = sqlite3_value_int64(argv[0]); spellfix1DbExec(&rc, db, "DELETE FROM \"%w\".\"%w_vocab\" " " WHERE id=%lld", p->zDbName, p->zTableName, rowid); }else{ const unsigned char *zWord = sqlite3_value_text(argv[SPELLFIX_COL_WORD+2]); int nWord = sqlite3_value_bytes(argv[SPELLFIX_COL_WORD+2]); int iLang = sqlite3_value_int(argv[SPELLFIX_COL_LANGID+2]); int iRank = sqlite3_value_int(argv[SPELLFIX_COL_RANK+2]); const unsigned char *zSoundslike = sqlite3_value_text(argv[SPELLFIX_COL_SOUNDSLIKE+2]); int nSoundslike = sqlite3_value_bytes(argv[SPELLFIX_COL_SOUNDSLIKE+2]); char *zK1, *zK2; int i; char c; if( zWord==0 ){ /* Inserts of the form: INSERT INTO table(command) VALUES('xyzzy'); ** cause zWord to be NULL, so we look at the "command" column to see ** what special actions to take */ const char *zCmd = (const char*)sqlite3_value_text(argv[SPELLFIX_COL_COMMAND+2]); if( zCmd==0 ){ pVTab->zErrMsg = sqlite3_mprintf("%s.word may not be NULL", p->zTableName); return SQLITE_CONSTRAINT_NOTNULL; } if( strcmp(zCmd,"reset")==0 ){ /* Reset the edit cost table (if there is one). */ editDist3ConfigDelete(p->pConfig3); p->pConfig3 = 0; return SQLITE_OK; } if( strncmp(zCmd,"edit_cost_table=",16)==0 ){ editDist3ConfigDelete(p->pConfig3); p->pConfig3 = 0; sqlite3_free(p->zCostTable); p->zCostTable = spellfix1Dequote(zCmd+16); if( p->zCostTable==0 ) return SQLITE_NOMEM; if( p->zCostTable[0]==0 || sqlite3_stricmp(p->zCostTable,"null")==0 ){ sqlite3_free(p->zCostTable); p->zCostTable = 0; } return SQLITE_OK; } pVTab->zErrMsg = sqlite3_mprintf("unknown value for %s.command: \"%w\"", p->zTableName, zCmd); return SQLITE_ERROR; } if( iRank<1 ) iRank = 1; if( zSoundslike ){ zK1 = (char*)transliterate(zSoundslike, nSoundslike); }else{ zK1 = (char*)transliterate(zWord, nWord); } if( zK1==0 ) return SQLITE_NOMEM; for(i=0; (c = zK1[i])!=0; i++){ if( c>='A' && c<='Z' ) zK1[i] += 'a' - 'A'; } zK2 = (char*)phoneticHash((const unsigned char*)zK1, i); if( zK2==0 ){ sqlite3_free(zK1); return SQLITE_NOMEM; } if( sqlite3_value_type(argv[0])==SQLITE_NULL ){ spellfix1DbExec(&rc, db, "INSERT INTO \"%w\".\"%w_vocab\"(rank,langid,word,k1,k2) " "VALUES(%d,%d,%Q,%Q,%Q)", p->zDbName, p->zTableName, iRank, iLang, zWord, zK1, zK2 ); *pRowid = sqlite3_last_insert_rowid(db); }else{ rowid = sqlite3_value_int64(argv[0]); newRowid = *pRowid = sqlite3_value_int64(argv[1]); spellfix1DbExec(&rc, db, "UPDATE \"%w\".\"%w_vocab\" SET id=%lld, rank=%d, langid=%d," " word=%Q, k1=%Q, k2=%Q WHERE id=%lld", p->zDbName, p->zTableName, newRowid, iRank, iLang, zWord, zK1, zK2, rowid ); } sqlite3_free(zK1); sqlite3_free(zK2); } return rc; } /* ** Rename the spellfix1 table. 
*/ static int spellfix1Rename(sqlite3_vtab *pVTab, const char *zNew){ spellfix1_vtab *p = (spellfix1_vtab*)pVTab; sqlite3 *db = p->db; int rc = SQLITE_OK; char *zNewName = sqlite3_mprintf("%s", zNew); if( zNewName==0 ){ return SQLITE_NOMEM; } spellfix1DbExec(&rc, db, "ALTER TABLE \"%w\".\"%w_vocab\" RENAME TO \"%w_vocab\"", p->zDbName, p->zTableName, zNewName ); if( rc==SQLITE_OK ){ sqlite3_free(p->zTableName); p->zTableName = zNewName; }else{ sqlite3_free(zNewName); } return rc; } /* ** A virtual table module that provides fuzzy search. */ static sqlite3_module spellfix1Module = { 0, /* iVersion */ spellfix1Create, /* xCreate - handle CREATE VIRTUAL TABLE */ spellfix1Connect, /* xConnect - reconnected to an existing table */ spellfix1BestIndex, /* xBestIndex - figure out how to do a query */ spellfix1Disconnect, /* xDisconnect - close a connection */ spellfix1Destroy, /* xDestroy - handle DROP TABLE */ spellfix1Open, /* xOpen - open a cursor */ spellfix1Close, /* xClose - close a cursor */ spellfix1Filter, /* xFilter - configure scan constraints */ spellfix1Next, /* xNext - advance a cursor */ spellfix1Eof, /* xEof - check for end of scan */ spellfix1Column, /* xColumn - read data */ spellfix1Rowid, /* xRowid - read data */ spellfix1Update, /* xUpdate */ 0, /* xBegin */ 0, /* xSync */ 0, /* xCommit */ 0, /* xRollback */ 0, /* xFindMethod */ spellfix1Rename, /* xRename */ }; /* ** Register the various functions and the virtual table. */ static int spellfix1Register(sqlite3 *db){ int rc = SQLITE_OK; int i; rc = sqlite3_create_function(db, "spellfix1_translit", 1, SQLITE_UTF8, 0, transliterateSqlFunc, 0, 0); if( rc==SQLITE_OK ){ rc = sqlite3_create_function(db, "spellfix1_editdist", 2, SQLITE_UTF8, 0, editdistSqlFunc, 0, 0); } if( rc==SQLITE_OK ){ rc = sqlite3_create_function(db, "spellfix1_phonehash", 1, SQLITE_UTF8, 0, phoneticHashSqlFunc, 0, 0); } if( rc==SQLITE_OK ){ rc = sqlite3_create_function(db, "spellfix1_scriptcode", 1, SQLITE_UTF8, 0, scriptCodeSqlFunc, 0, 0); } if( rc==SQLITE_OK ){ rc = sqlite3_create_module(db, "spellfix1", &spellfix1Module, 0); } if( rc==SQLITE_OK ){ rc = editDist3Install(db); } /* Verify sanity of the translit[] table */ for(i=0; i<sizeof(translit)/sizeof(translit[0])-1; i++){ assert( translit[i].cFrom<translit[i+1].cFrom ); } return rc; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ /* ** Extension load function. */ #ifdef _WIN32 __declspec(dllexport) #endif int sqlite3_spellfix_init( sqlite3 *db, char **pzErrMsg, const sqlite3_api_routines *pApi ){ SQLITE_EXTENSION_INIT2(pApi); #ifndef SQLITE_OMIT_VIRTUALTABLE return spellfix1Register(db); #endif return SQLITE_OK; }
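A minimal usage sketch for the extension above, assuming the spellfix source is linked statically into an application together with the SQLite library; the "demo" table name and the word list are hypothetical illustration values, not part of the original source. Registering the entry point with sqlite3_auto_extension() lets SQLite hand a valid API pointer to sqlite3_spellfix_init() on every new connection, avoiding any SQLITE_CORE build trickery.

#include <stdio.h>
#include <sqlite3.h>

/* Entry point defined at the end of the spellfix source above. */
extern int sqlite3_spellfix_init(sqlite3*, char**, const sqlite3_api_routines*);

int main(void){
  sqlite3 *db = 0;
  sqlite3_stmt *pStmt = 0;

  /* Register the extension for every connection, then open one. */
  sqlite3_auto_extension((void(*)(void))sqlite3_spellfix_init);
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  /* Build a tiny vocabulary and run a fuzzy MATCH query against it. */
  sqlite3_exec(db,
     "CREATE VIRTUAL TABLE demo USING spellfix1;"
     "INSERT INTO demo(word) VALUES('kennesaw'),('kennebec'),('keratin');",
     0, 0, 0);
  sqlite3_prepare_v2(db,
     "SELECT word, distance FROM demo WHERE word MATCH 'kenesaw'",
     -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s  distance=%d\n",
           (const char*)sqlite3_column_text(pStmt, 0),
           sqlite3_column_int(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}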
4152.c
/* $OpenBSD$ */ /* * Copyright (c) 2007 Nicholas Marriott <[email protected]> * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <sys/types.h> #include "tmux.h" /* * Destroy window. */ static enum cmd_retval cmd_kill_window_exec(struct cmd *, struct cmdq_item *); const struct cmd_entry cmd_kill_window_entry = { .name = "kill-window", .alias = "killw", .args = { "at:", 0, 0, NULL }, .usage = "[-a] " CMD_TARGET_WINDOW_USAGE, .target = { 't', CMD_FIND_WINDOW, 0 }, .flags = 0, .exec = cmd_kill_window_exec }; const struct cmd_entry cmd_unlink_window_entry = { .name = "unlink-window", .alias = "unlinkw", .args = { "kt:", 0, 0, NULL }, .usage = "[-k] " CMD_TARGET_WINDOW_USAGE, .target = { 't', CMD_FIND_WINDOW, 0 }, .flags = 0, .exec = cmd_kill_window_exec }; static enum cmd_retval cmd_kill_window_exec(struct cmd *self, struct cmdq_item *item) { struct args *args = cmd_get_args(self); struct cmd_find_state *target = cmdq_get_target(item); struct winlink *wl = target->wl, *loop; struct window *w = wl->window; struct session *s = target->s; u_int found; if (cmd_get_entry(self) == &cmd_unlink_window_entry) { if (!args_has(args, 'k') && !session_is_linked(s, w)) { cmdq_error(item, "window only linked to one session"); return (CMD_RETURN_ERROR); } server_unlink_window(s, wl); recalculate_sizes(); return (CMD_RETURN_NORMAL); } if (args_has(args, 'a')) { if (RB_PREV(winlinks, &s->windows, wl) == NULL && RB_NEXT(winlinks, &s->windows, wl) == NULL) return (CMD_RETURN_NORMAL); /* Kill all windows except the current one. */ do { found = 0; RB_FOREACH(loop, winlinks, &s->windows) { if (loop->window != wl->window) { server_kill_window(loop->window, 0); found++; break; } } } while (found != 0); /* * If the current window appears in the session more than once, * kill it as well. */ found = 0; RB_FOREACH(loop, winlinks, &s->windows) { if (loop->window == wl->window) found++; } if (found > 1) server_kill_window(wl->window, 0); server_renumber_all(); return (CMD_RETURN_NORMAL); } server_kill_window(wl->window, 1); return (CMD_RETURN_NORMAL); }
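In the -a branch above, cmd_kill_window_exec() kills at most one window per pass over the session's winlink tree and then rescans from the top, because server_kill_window() can remove entries from the very tree that RB_FOREACH is walking. Below is a standalone sketch of the same restart-after-each-removal idea on a plain singly linked list; none of it is tmux API, and for a simple list the restart is not strictly required — it is shown only to mirror the pattern used with the RB tree.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int		 value;
	struct node	*next;
};

static void
remove_all_except(struct node **head, int keep)
{
	struct node	**pp, *np;
	int		 found;

	/* Delete at most one node per pass, then restart the scan, exactly
	 * like the do/while + RB_FOREACH loop in cmd_kill_window_exec(). */
	do {
		found = 0;
		for (pp = head; (np = *pp) != NULL; pp = &np->next) {
			if (np->value != keep) {
				*pp = np->next;
				free(np);
				found = 1;
				break;
			}
		}
	} while (found != 0);
}

int
main(void)
{
	struct node	*head = NULL, *np;
	int		 i;

	for (i = 1; i <= 5; i++) {
		np = malloc(sizeof *np);
		if (np == NULL)
			return (1);
		np->value = i;
		np->next = head;
		head = np;
	}
	remove_all_except(&head, 3);
	for (np = head; np != NULL; np = np->next)
		printf("%d\n", np->value);	/* only 3 remains */
	return (0);
}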
878288.c
// SPDX-License-Identifier: BSD-2-Clause /* LibTomCrypt, modular cryptographic library -- Tom St Denis * * LibTomCrypt is a library that provides various cryptographic * algorithms in a highly modular and flexible manner. * * The library is free for all purposes without any express * guarantee it works. */ /**********************************************************************\ * To commemorate the 1996 RSA Data Security Conference, the following * * code is released into the public domain by its author. Prost! * * * * This cipher uses 16-bit words and little-endian byte ordering. * * I wonder which processor it was optimized for? * * * * Thanks to CodeView, SoftIce, and D86 for helping bring this code to * * the public. * \**********************************************************************/ #include "tomcrypt_private.h" /** @file rc2.c Implementation of RC2 with fixed effective key length of 64bits */ #ifdef LTC_RC2 const struct ltc_cipher_descriptor rc2_desc = { "rc2", 12, 8, 128, 8, 16, &rc2_setup, &rc2_ecb_encrypt, &rc2_ecb_decrypt, &rc2_test, &rc2_done, &rc2_keysize, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL }; /* 256-entry permutation table, probably derived somehow from pi */ static const unsigned char permute[256] = { 217,120,249,196, 25,221,181,237, 40,233,253,121, 74,160,216,157, 198,126, 55,131, 43,118, 83,142, 98, 76,100,136, 68,139,251,162, 23,154, 89,245,135,179, 79, 19, 97, 69,109,141, 9,129,125, 50, 189,143, 64,235,134,183,123, 11,240,149, 33, 34, 92,107, 78,130, 84,214,101,147,206, 96,178, 28,115, 86,192, 20,167,140,241,220, 18,117,202, 31, 59,190,228,209, 66, 61,212, 48,163, 60,182, 38, 111,191, 14,218, 70,105, 7, 87, 39,242, 29,155,188,148, 67, 3, 248, 17,199,246,144,239, 62,231, 6,195,213, 47,200,102, 30,215, 8,232,234,222,128, 82,238,247,132,170,114,172, 53, 77,106, 42, 150, 26,210,113, 90, 21, 73,116, 75,159,208, 94, 4, 24,164,236, 194,224, 65,110, 15, 81,203,204, 36,145,175, 80,161,244,112, 57, 153,124, 58,133, 35,184,180,122,252, 2, 54, 91, 37, 85,151, 49, 45, 93,250,152,227,138,146,174, 5,223, 41, 16,103,108,186,201, 211, 0,230,207,225,158,168, 44, 99, 22, 1, 63, 88,226,137,169, 13, 56, 52, 27,171, 51,255,176,187, 72, 12, 95,185,177,205, 46, 197,243,219, 71,229,165,156,119, 10,166, 32,104,254,127,193,173 }; /** Initialize the RC2 block cipher @param key The symmetric key you wish to pass @param keylen The key length in bytes @param bits The effective key length in bits @param num_rounds The number of rounds desired (0 for default) @param skey The key in as scheduled by this function. 
@return CRYPT_OK if successful */ int rc2_setup_ex(const unsigned char *key, int keylen, int bits, int num_rounds, symmetric_key *skey) { unsigned *xkey = skey->rc2.xkey; unsigned char tmp[128]; unsigned T8, TM; int i; LTC_ARGCHK(key != NULL); LTC_ARGCHK(skey != NULL); if (keylen == 0 || keylen > 128 || bits > 1024) { return CRYPT_INVALID_KEYSIZE; } if (bits == 0) { bits = 1024; } if (num_rounds != 0 && num_rounds != 16) { return CRYPT_INVALID_ROUNDS; } for (i = 0; i < keylen; i++) { tmp[i] = key[i] & 255; } /* Phase 1: Expand input key to 128 bytes */ if (keylen < 128) { for (i = keylen; i < 128; i++) { tmp[i] = permute[(tmp[i - 1] + tmp[i - keylen]) & 255]; } } /* Phase 2 - reduce effective key size to "bits" */ T8 = (unsigned)(bits+7)>>3; TM = (255 >> (unsigned)(7 & -bits)); tmp[128 - T8] = permute[tmp[128 - T8] & TM]; for (i = 127 - T8; i >= 0; i--) { tmp[i] = permute[tmp[i + 1] ^ tmp[i + T8]]; } /* Phase 3 - copy to xkey in little-endian order */ for (i = 0; i < 64; i++) { xkey[i] = (unsigned)tmp[2*i] + ((unsigned)tmp[2*i+1] << 8); } #ifdef LTC_CLEAN_STACK zeromem(tmp, sizeof(tmp)); #endif return CRYPT_OK; } /** Initialize the RC2 block cipher The effective key length is here always keylen * 8 @param key The symmetric key you wish to pass @param keylen The key length in bytes @param num_rounds The number of rounds desired (0 for default) @param skey The key in as scheduled by this function. @return CRYPT_OK if successful */ int rc2_setup(const unsigned char *key, int keylen, int num_rounds, symmetric_key *skey) { return rc2_setup_ex(key, keylen, keylen * 8, num_rounds, skey); } /**********************************************************************\ * Encrypt an 8-byte block of plaintext using the given key. * \**********************************************************************/ /** Encrypts a block of text with RC2 @param pt The input plaintext (8 bytes) @param ct The output ciphertext (8 bytes) @param skey The key as scheduled @return CRYPT_OK if successful */ #ifdef LTC_CLEAN_STACK static int _rc2_ecb_encrypt( const unsigned char *pt, unsigned char *ct, const symmetric_key *skey) #else int rc2_ecb_encrypt( const unsigned char *pt, unsigned char *ct, const symmetric_key *skey) #endif { const unsigned *xkey; unsigned x76, x54, x32, x10, i; LTC_ARGCHK(pt != NULL); LTC_ARGCHK(ct != NULL); LTC_ARGCHK(skey != NULL); xkey = skey->rc2.xkey; x76 = ((unsigned)pt[7] << 8) + (unsigned)pt[6]; x54 = ((unsigned)pt[5] << 8) + (unsigned)pt[4]; x32 = ((unsigned)pt[3] << 8) + (unsigned)pt[2]; x10 = ((unsigned)pt[1] << 8) + (unsigned)pt[0]; for (i = 0; i < 16; i++) { x10 = (x10 + (x32 & ~x76) + (x54 & x76) + xkey[4*i+0]) & 0xFFFF; x10 = ((x10 << 1) | (x10 >> 15)); x32 = (x32 + (x54 & ~x10) + (x76 & x10) + xkey[4*i+1]) & 0xFFFF; x32 = ((x32 << 2) | (x32 >> 14)); x54 = (x54 + (x76 & ~x32) + (x10 & x32) + xkey[4*i+2]) & 0xFFFF; x54 = ((x54 << 3) | (x54 >> 13)); x76 = (x76 + (x10 & ~x54) + (x32 & x54) + xkey[4*i+3]) & 0xFFFF; x76 = ((x76 << 5) | (x76 >> 11)); if (i == 4 || i == 10) { x10 = (x10 + xkey[x76 & 63]) & 0xFFFF; x32 = (x32 + xkey[x10 & 63]) & 0xFFFF; x54 = (x54 + xkey[x32 & 63]) & 0xFFFF; x76 = (x76 + xkey[x54 & 63]) & 0xFFFF; } } ct[0] = (unsigned char)x10; ct[1] = (unsigned char)(x10 >> 8); ct[2] = (unsigned char)x32; ct[3] = (unsigned char)(x32 >> 8); ct[4] = (unsigned char)x54; ct[5] = (unsigned char)(x54 >> 8); ct[6] = (unsigned char)x76; ct[7] = (unsigned char)(x76 >> 8); return CRYPT_OK; } #ifdef LTC_CLEAN_STACK int rc2_ecb_encrypt( const unsigned char *pt, unsigned char *ct, const 
symmetric_key *skey) { int err = _rc2_ecb_encrypt(pt, ct, skey); burn_stack(sizeof(unsigned *) + sizeof(unsigned) * 5); return err; } #endif /**********************************************************************\ * Decrypt an 8-byte block of ciphertext using the given key. * \**********************************************************************/ /** Decrypts a block of text with RC2 @param ct The input ciphertext (8 bytes) @param pt The output plaintext (8 bytes) @param skey The key as scheduled @return CRYPT_OK if successful */ #ifdef LTC_CLEAN_STACK static int _rc2_ecb_decrypt( const unsigned char *ct, unsigned char *pt, const symmetric_key *skey) #else int rc2_ecb_decrypt( const unsigned char *ct, unsigned char *pt, const symmetric_key *skey) #endif { unsigned x76, x54, x32, x10; const unsigned *xkey; int i; LTC_ARGCHK(pt != NULL); LTC_ARGCHK(ct != NULL); LTC_ARGCHK(skey != NULL); xkey = skey->rc2.xkey; x76 = ((unsigned)ct[7] << 8) + (unsigned)ct[6]; x54 = ((unsigned)ct[5] << 8) + (unsigned)ct[4]; x32 = ((unsigned)ct[3] << 8) + (unsigned)ct[2]; x10 = ((unsigned)ct[1] << 8) + (unsigned)ct[0]; for (i = 15; i >= 0; i--) { if (i == 4 || i == 10) { x76 = (x76 - xkey[x54 & 63]) & 0xFFFF; x54 = (x54 - xkey[x32 & 63]) & 0xFFFF; x32 = (x32 - xkey[x10 & 63]) & 0xFFFF; x10 = (x10 - xkey[x76 & 63]) & 0xFFFF; } x76 = ((x76 << 11) | (x76 >> 5)); x76 = (x76 - ((x10 & ~x54) + (x32 & x54) + xkey[4*i+3])) & 0xFFFF; x54 = ((x54 << 13) | (x54 >> 3)); x54 = (x54 - ((x76 & ~x32) + (x10 & x32) + xkey[4*i+2])) & 0xFFFF; x32 = ((x32 << 14) | (x32 >> 2)); x32 = (x32 - ((x54 & ~x10) + (x76 & x10) + xkey[4*i+1])) & 0xFFFF; x10 = ((x10 << 15) | (x10 >> 1)); x10 = (x10 - ((x32 & ~x76) + (x54 & x76) + xkey[4*i+0])) & 0xFFFF; } pt[0] = (unsigned char)x10; pt[1] = (unsigned char)(x10 >> 8); pt[2] = (unsigned char)x32; pt[3] = (unsigned char)(x32 >> 8); pt[4] = (unsigned char)x54; pt[5] = (unsigned char)(x54 >> 8); pt[6] = (unsigned char)x76; pt[7] = (unsigned char)(x76 >> 8); return CRYPT_OK; } #ifdef LTC_CLEAN_STACK int rc2_ecb_decrypt( const unsigned char *ct, unsigned char *pt, const symmetric_key *skey) { int err = _rc2_ecb_decrypt(ct, pt, skey); burn_stack(sizeof(unsigned *) + sizeof(unsigned) * 4 + sizeof(int)); return err; } #endif /** Performs a self-test of the RC2 block cipher @return CRYPT_OK if functional, CRYPT_NOP if self-test has been disabled */ int rc2_test(void) { #ifndef LTC_TEST return CRYPT_NOP; #else static const struct { int keylen, bits; unsigned char key[16], pt[8], ct[8]; } tests[] = { { 8, 63, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0xeb, 0xb7, 0x73, 0xf9, 0x93, 0x27, 0x8e, 0xff } }, { 8, 64, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, { 0x27, 0x8b, 0x27, 0xe4, 0x2e, 0x2f, 0x0d, 0x49 } }, { 8, 64, { 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }, { 0x30, 0x64, 0x9e, 0xdf, 0x9b, 0xe7, 0xd2, 0xc2 } }, { 1, 64, { 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x61, 0xa8, 0xa2, 0x44, 0xad, 0xac, 0xcc, 0xf0 } }, { 7, 64, { 0x88, 0xbc, 0xa9, 0x0e, 0x90, 0x87, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 
0x6c, 0xcf, 0x43, 0x08, 0x97, 0x4c, 0x26, 0x7f } }, { 16, 64, { 0x88, 0xbc, 0xa9, 0x0e, 0x90, 0x87, 0x5a, 0x7f, 0x0f, 0x79, 0xc3, 0x84, 0x62, 0x7b, 0xaf, 0xb2 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x1a, 0x80, 0x7d, 0x27, 0x2b, 0xbe, 0x5d, 0xb1 } }, { 16, 128, { 0x88, 0xbc, 0xa9, 0x0e, 0x90, 0x87, 0x5a, 0x7f, 0x0f, 0x79, 0xc3, 0x84, 0x62, 0x7b, 0xaf, 0xb2 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, { 0x22, 0x69, 0x55, 0x2a, 0xb0, 0xf8, 0x5c, 0xa6 } } }; int x, y, err; symmetric_key skey; unsigned char tmp[2][8]; for (x = 0; x < (int)(sizeof(tests) / sizeof(tests[0])); x++) { zeromem(tmp, sizeof(tmp)); if (tests[x].bits == (tests[x].keylen * 8)) { if ((err = rc2_setup(tests[x].key, tests[x].keylen, 0, &skey)) != CRYPT_OK) { return err; } } else { if ((err = rc2_setup_ex(tests[x].key, tests[x].keylen, tests[x].bits, 0, &skey)) != CRYPT_OK) { return err; } } rc2_ecb_encrypt(tests[x].pt, tmp[0], &skey); rc2_ecb_decrypt(tmp[0], tmp[1], &skey); if (compare_testvector(tmp[0], 8, tests[x].ct, 8, "RC2 CT", x) || compare_testvector(tmp[1], 8, tests[x].pt, 8, "RC2 PT", x)) { return CRYPT_FAIL_TESTVECTOR; } /* now see if we can encrypt all zero bytes 1000 times, decrypt and come back where we started */ for (y = 0; y < 8; y++) tmp[0][y] = 0; for (y = 0; y < 1000; y++) rc2_ecb_encrypt(tmp[0], tmp[0], &skey); for (y = 0; y < 1000; y++) rc2_ecb_decrypt(tmp[0], tmp[0], &skey); for (y = 0; y < 8; y++) if (tmp[0][y] != 0) return CRYPT_FAIL_TESTVECTOR; } return CRYPT_OK; #endif } /** Terminate the context @param skey The scheduled key */ void rc2_done(symmetric_key *skey) { LTC_UNUSED_PARAM(skey); } /** Gets suitable key size @param keysize [in/out] The length of the recommended key (in bytes). This function will store the suitable size back in this variable. @return CRYPT_OK if the input key size is acceptable. */ int rc2_keysize(int *keysize) { LTC_ARGCHK(keysize != NULL); if (*keysize < 1) { return CRYPT_INVALID_KEYSIZE; } if (*keysize > 128) { *keysize = 128; } return CRYPT_OK; } #endif /* ref: $Format:%D$ */ /* git commit: $Format:%H$ */ /* commit time: $Format:%ai$ */
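/* Illustrative usage sketch (not part of the original source): a single-block
 * ECB round trip with the RC2 API above.  The function name rc2_example is
 * hypothetical and the key/effective-bit values are borrowed from the test
 * vectors purely for illustration; the block is guarded out so it is never
 * compiled into the library.
 */
#if 0
static int rc2_example(void)
{
   symmetric_key skey;
   const unsigned char key[16] = {
      0x88, 0xbc, 0xa9, 0x0e, 0x90, 0x87, 0x5a, 0x7f,
      0x0f, 0x79, 0xc3, 0x84, 0x62, 0x7b, 0xaf, 0xb2 };
   unsigned char pt[8] = { 0 }, ct[8], rt[8];
   int err, i;

   /* Schedule a 16-byte key with a reduced effective key size of 64 bits */
   if ((err = rc2_setup_ex(key, 16, 64, 0, &skey)) != CRYPT_OK) {
      return err;
   }

   rc2_ecb_encrypt(pt, ct, &skey);  /* encrypt one 8-byte block */
   rc2_ecb_decrypt(ct, rt, &skey);  /* decrypt it again */
   rc2_done(&skey);

   /* The round trip must reproduce the original plaintext */
   for (i = 0; i < 8; i++) {
      if (rt[i] != pt[i]) {
         return CRYPT_FAIL_TESTVECTOR;
      }
   }
   return CRYPT_OK;
}
#endif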
899446.c
/**************************************************************************** * arch/arm/src/stm32/stm32_sdio.c * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. The * ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <stdint.h> #include <stdbool.h> #include <string.h> #include <assert.h> #include <debug.h> #include <errno.h> #include <nuttx/arch.h> #include <nuttx/wdog.h> #include <nuttx/clock.h> #include <nuttx/sdio.h> #include <nuttx/wqueue.h> #include <nuttx/semaphore.h> #include <nuttx/mmcsd.h> #include <nuttx/irq.h> #include <arch/board/board.h> #include "chip.h" #include "arm_arch.h" #include "stm32.h" #include "stm32_dma.h" #include "stm32_sdio.h" #ifdef CONFIG_STM32_SDIO /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /* Configuration ************************************************************/ /* Required system configuration options: * * CONFIG_ARCH_DMA - Enable architecture-specific DMA subsystem * initialization. Required if CONFIG_STM32_SDIO_DMA is enabled. * CONFIG_STM32_DMA2 - Enable STM32 DMA2 support. Required if * CONFIG_STM32_SDIO_DMA is enabled * CONFIG_SCHED_WORKQUEUE -- Callback support requires work queue support. * * Driver-specific configuration options: * * CONFIG_SDIO_MUXBUS - Setting this configuration enables some locking * APIs to manage concurrent accesses on the SDIO bus. This is not * needed for the simple case of a single SD card, for example. * CONFIG_STM32_SDIO_DMA - Enable SDIO. This is a marginally optional. * For most usages, SDIO will cause data overruns if used without DMA. * NOTE the above system DMA configuration options. * CONFIG_STM32_SDIO_WIDTH_D1_ONLY - This may be selected to force the * driver operate with only a single data line (the default is to use * all 4 SD data lines). * CONFIG_SDM_DMAPRIO - SDIO DMA priority. This can be selected if * CONFIG_STM32_SDIO_DMA is enabled. 
* CONFIG_SDIO_XFRDEBUG - Enables some very low-level debug output * This also requires CONFIG_DEBUG_FS and CONFIG_DEBUG_INFO */ #if !defined(CONFIG_STM32_SDIO_DMA) # warning "Large Non-DMA transfer may result in RX overrun failures" #else # ifndef CONFIG_STM32_DMA2 # error "CONFIG_STM32_SDIO_DMA support requires CONFIG_STM32_DMA2" # endif # ifndef CONFIG_SDIO_DMA # error CONFIG_SDIO_DMA must be defined with CONFIG_STM32_SDIO_DMA # endif #endif #ifndef CONFIG_STM32_SDIO_DMA # warning "Large Non-DMA transfer may result in RX overrun failures" #endif #ifndef CONFIG_SCHED_WORKQUEUE # error "Callback support requires CONFIG_SCHED_WORKQUEUE" #endif #ifdef CONFIG_STM32_SDIO_DMA # ifndef CONFIG_STM32_SDIO_DMAPRIO # if defined(CONFIG_STM32_STM32F10XX) # define CONFIG_STM32_SDIO_DMAPRIO DMA_CCR_PRIMED # elif defined(CONFIG_STM32_STM32F20XX) || defined(CONFIG_STM32_STM32F4XXX) # define CONFIG_STM32_SDIO_DMAPRIO DMA_SCR_PRIVERYHI # else # error "Unknown STM32 DMA" # endif # endif # if defined(CONFIG_STM32_STM32F10XX) # if (CONFIG_STM32_SDIO_DMAPRIO & ~DMA_CCR_PL_MASK) != 0 # error "Illegal value for CONFIG_STM32_SDIO_DMAPRIO" # endif # elif defined(CONFIG_STM32_STM32F20XX) || defined(CONFIG_STM32_STM32F4XXX) # if (CONFIG_STM32_SDIO_DMAPRIO & ~DMA_SCR_PL_MASK) != 0 # error "Illegal value for CONFIG_STM32_SDIO_DMAPRIO" # endif # else # error "Unknown STM32 DMA" # endif #else # undef CONFIG_STM32_SDIO_DMAPRIO #endif #ifndef CONFIG_DEBUG_MEMCARD_INFO # undef CONFIG_SDIO_XFRDEBUG #endif /* Enable the SDIO pull-up resistors if needed */ #ifdef CONFIG_STM32_SDIO_PULLUP # define SDIO_PULLUP_ENABLE GPIO_PULLUP #else # define SDIO_PULLUP_ENABLE 0 #endif /* Friendly CLKCR bit re-definitions ****************************************/ #define SDIO_CLKCR_RISINGEDGE (0) #define SDIO_CLKCR_FALLINGEDGE SDIO_CLKCR_NEGEDGE /* Use the default of the rising edge but allow a configuration, * that does not have the errata, to override the edge the SDIO * command and data is changed on. */ #if !defined(SDIO_CLKCR_EDGE) # define SDIO_CLKCR_EDGE SDIO_CLKCR_RISINGEDGE #endif /* Mode dependent settings. These depend on clock divisor settings that must * be defined in the board-specific board.h header file: SDIO_INIT_CLKDIV, * SDIO_MMCXFR_CLKDIV, and SDIO_SDXFR_CLKDIV. */ #define STM32_CLCKCR_INIT (SDIO_INIT_CLKDIV | SDIO_CLKCR_EDGE | \ SDIO_CLKCR_WIDBUS_D1) #define SDIO_CLKCR_MMCXFR (SDIO_MMCXFR_CLKDIV | SDIO_CLKCR_EDGE | \ SDIO_CLKCR_WIDBUS_D1) #define SDIO_CLCKR_SDXFR (SDIO_SDXFR_CLKDIV | SDIO_CLKCR_EDGE | \ SDIO_CLKCR_WIDBUS_D1) #define SDIO_CLCKR_SDWIDEXFR (SDIO_SDXFR_CLKDIV | SDIO_CLKCR_EDGE | \ SDIO_CLKCR_WIDBUS_D4) /* Timing */ #define SDIO_CMDTIMEOUT (100000) #define SDIO_LONGTIMEOUT (0x7fffffff) /* DTIMER setting */ /* Assuming Max timeout in bypass 48 Mhz */ #define IP_CLCK_FREQ UINT32_C(48000000) #define SDIO_DTIMER_DATATIMEOUT_MS 250 /* DMA channel/stream configuration register settings. The following * must be selected. The DMA driver will select the remaining fields. * * - 32-bit DMA * - Memory increment * - Direction (memory-to-peripheral, peripheral-to-memory) * - Memory burst size (F4 only) */ /* STM32 F1 channel configuration register (CCR) settings */ #if defined(CONFIG_STM32_STM32F10XX) # define SDIO_RXDMA32_CONFIG (CONFIG_STM32_SDIO_DMAPRIO | DMA_CCR_MSIZE_32BITS | \ DMA_CCR_PSIZE_32BITS | DMA_CCR_MINC) # define SDIO_TXDMA32_CONFIG (CONFIG_STM32_SDIO_DMAPRIO | DMA_CCR_MSIZE_32BITS | \ DMA_CCR_PSIZE_32BITS | DMA_CCR_MINC | DMA_CCR_DIR) /* STM32 F4 stream configuration register (SCR) settings. 
*/ #elif defined(CONFIG_STM32_STM32F20XX) || defined(CONFIG_STM32_STM32F4XXX) # define SDIO_RXDMA32_CONFIG (DMA_SCR_PFCTRL | DMA_SCR_DIR_P2M|DMA_SCR_MINC | \ DMA_SCR_PSIZE_32BITS | DMA_SCR_MSIZE_32BITS | \ CONFIG_STM32_SDIO_DMAPRIO | DMA_SCR_PBURST_INCR4 | \ DMA_SCR_MBURST_INCR4) # define SDIO_TXDMA32_CONFIG (DMA_SCR_PFCTRL | DMA_SCR_DIR_M2P | DMA_SCR_MINC | \ DMA_SCR_PSIZE_32BITS | DMA_SCR_MSIZE_32BITS | \ CONFIG_STM32_SDIO_DMAPRIO | DMA_SCR_PBURST_INCR4 | \ DMA_SCR_MBURST_INCR4) #else # error "Unknown STM32 DMA" #endif /* SDIO DMA Channel/Stream selection. For the case of the STM32 F4, there * are multiple DMA stream options that must be dis-ambiguated in the board.h * file. */ #if defined(CONFIG_STM32_STM32F10XX) # define SDIO_DMACHAN DMACHAN_SDIO #elif defined(CONFIG_STM32_STM32F20XX) || defined(CONFIG_STM32_STM32F4XXX) # define SDIO_DMACHAN DMAMAP_SDIO #else # error "Unknown STM32 DMA" #endif /* FIFO sizes */ #define SDIO_HALFFIFO_WORDS (8) #define SDIO_HALFFIFO_BYTES (8*4) /* Data transfer interrupt mask bits */ #define SDIO_RECV_MASK (SDIO_MASK_DCRCFAILIE | SDIO_MASK_DTIMEOUTIE | \ SDIO_MASK_DATAENDIE | SDIO_MASK_RXOVERRIE | \ SDIO_MASK_RXFIFOHFIE | SDIO_MASK_STBITERRIE) #define SDIO_SEND_MASK (SDIO_MASK_DCRCFAILIE | SDIO_MASK_DTIMEOUTIE | \ SDIO_MASK_DATAENDIE | SDIO_MASK_TXUNDERRIE | \ SDIO_MASK_TXFIFOHEIE | SDIO_MASK_STBITERRIE) #define SDIO_DMARECV_MASK (SDIO_MASK_DCRCFAILIE | SDIO_MASK_DTIMEOUTIE | \ SDIO_MASK_DATAENDIE | SDIO_MASK_RXOVERRIE | \ SDIO_MASK_STBITERRIE) #define SDIO_DMASEND_MASK (SDIO_MASK_DCRCFAILIE | SDIO_MASK_DTIMEOUTIE | \ SDIO_MASK_DATAENDIE | SDIO_MASK_TXUNDERRIE | \ SDIO_MASK_STBITERRIE) /* Event waiting interrupt mask bits */ #define SDIO_CMDDONE_STA (SDIO_STA_CMDSENT) #define SDIO_RESPDONE_STA (SDIO_STA_CTIMEOUT | SDIO_STA_CCRCFAIL | \ SDIO_STA_CMDREND) #define SDIO_XFRDONE_STA (0) #define SDIO_CMDDONE_MASK (SDIO_MASK_CMDSENTIE) #define SDIO_RESPDONE_MASK (SDIO_MASK_CCRCFAILIE | SDIO_MASK_CTIMEOUTIE | \ SDIO_MASK_CMDRENDIE) #define SDIO_XFRDONE_MASK (0) #define SDIO_CMDDONE_ICR (SDIO_ICR_CMDSENTC | SDIO_ICR_DBCKENDC) #define SDIO_RESPDONE_ICR (SDIO_ICR_CTIMEOUTC | SDIO_ICR_CCRCFAILC | \ SDIO_ICR_CMDRENDC | SDIO_ICR_DBCKENDC) #define SDIO_XFRDONE_ICR (SDIO_ICR_DATAENDC | SDIO_ICR_DCRCFAILC | \ SDIO_ICR_DTIMEOUTC | SDIO_ICR_RXOVERRC | \ SDIO_ICR_TXUNDERRC | SDIO_ICR_STBITERRC | \ SDIO_ICR_DBCKENDC) #define SDIO_WAITALL_ICR (SDIO_CMDDONE_ICR | SDIO_RESPDONE_ICR | \ SDIO_XFRDONE_ICR | SDIO_ICR_DBCKENDC) /* Let's wait until we have both SDIO transfer complete and DMA complete. 
*/ #define SDIO_XFRDONE_FLAG (1) #define SDIO_DMADONE_FLAG (2) #define SDIO_ALLDONE (3) /* Register logging support */ #ifdef CONFIG_SDIO_XFRDEBUG # ifdef CONFIG_STM32_SDIO_DMA # define SAMPLENDX_BEFORE_SETUP 0 # define SAMPLENDX_BEFORE_ENABLE 1 # define SAMPLENDX_AFTER_SETUP 2 # define SAMPLENDX_END_TRANSFER 3 # define SAMPLENDX_DMA_CALLBACK 4 # define DEBUG_NSAMPLES 5 # else # define SAMPLENDX_BEFORE_SETUP 0 # define SAMPLENDX_AFTER_SETUP 1 # define SAMPLENDX_END_TRANSFER 2 # define DEBUG_NSAMPLES 3 # endif #endif #define STM32_SDIO_USE_DEFAULT_BLOCKSIZE ((uint8_t)-1) /**************************************************************************** * Private Types ****************************************************************************/ /* This structure defines the state of the STM32 SDIO interface */ struct stm32_dev_s { struct sdio_dev_s dev; /* Standard, base SDIO interface */ /* STM32-specific extensions */ /* Event support */ sem_t waitsem; /* Implements event waiting */ sdio_eventset_t waitevents; /* Set of events to be waited for */ uint32_t waitmask; /* Interrupt enables for event waiting */ volatile sdio_eventset_t wkupevent; /* The event that caused the wakeup */ struct wdog_s waitwdog; /* Watchdog that handles event timeouts */ /* Callback support */ sdio_statset_t cdstatus; /* Card status */ sdio_eventset_t cbevents; /* Set of events to be cause callbacks */ worker_t callback; /* Registered callback function */ void *cbarg; /* Registered callback argument */ struct work_s cbwork; /* Callback work queue structure */ /* Interrupt mode data transfer support */ uint32_t *buffer; /* Address of current R/W buffer */ size_t remaining; /* Number of bytes remaining in the transfer */ uint32_t xfrmask; /* Interrupt enables for data transfer */ #ifdef CONFIG_STM32_SDIO_CARD /* Interrupt at SDIO_D1 pin, only for SDIO cards */ uint32_t sdiointmask; /* STM32 SDIO register mask */ int (*do_sdio_card)(void *); /* SDIO card ISR */ void *do_sdio_arg; /* arg for SDIO card ISR */ #endif /* Fixed transfer block size support */ #ifdef CONFIG_SDIO_BLOCKSETUP uint8_t block_size; #endif /* DMA data transfer support */ bool widebus; /* Required for DMA support */ #ifdef CONFIG_STM32_SDIO_DMA volatile uint8_t xfrflags; /* Used to synchronize SDIO and * DMA completion events */ bool dmamode; /* true: DMA mode transfer */ DMA_HANDLE dma; /* Handle for DMA channel */ #endif }; /* Register logging support */ #ifdef CONFIG_SDIO_XFRDEBUG struct stm32_sdioregs_s { uint8_t power; uint16_t clkcr; uint16_t dctrl; uint32_t dtimer; uint32_t dlen; uint32_t dcount; uint32_t sta; uint32_t mask; uint32_t fifocnt; }; struct stm32_sampleregs_s { struct stm32_sdioregs_s sdio; #if defined(CONFIG_DEBUG_DMA_INFO) && defined(CONFIG_STM32_SDIO_DMA) struct stm32_dmaregs_s dma; #endif }; #endif /**************************************************************************** * Private Function Prototypes ****************************************************************************/ /* Low-level helpers ********************************************************/ static int stm32_takesem(struct stm32_dev_s *priv); #define stm32_givesem(priv) (nxsem_post(&priv->waitsem)) static inline void stm32_setclkcr(uint32_t clkcr); static void stm32_configwaitints(struct stm32_dev_s *priv, uint32_t waitmask, sdio_eventset_t waitevents, sdio_eventset_t wkupevents); static void stm32_configxfrints(struct stm32_dev_s *priv, uint32_t xfrmask); static void stm32_setpwrctrl(uint32_t pwrctrl); static inline uint32_t stm32_getpwrctrl(void); /* DMA Helpers 
**************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_sampleinit(void); static void stm32_sdiosample(struct stm32_sdioregs_s *regs); static void stm32_sample(struct stm32_dev_s *priv, int index); static void stm32_sdiodump(struct stm32_sdioregs_s *regs, const char *msg); static void stm32_dumpsample(struct stm32_dev_s *priv, struct stm32_sampleregs_s *regs, const char *msg); static void stm32_dumpsamples(struct stm32_dev_s *priv); #else # define stm32_sampleinit() # define stm32_sample(priv,index) # define stm32_dumpsamples(priv) #endif #ifdef CONFIG_STM32_SDIO_DMA static void stm32_dmacallback(DMA_HANDLE handle, uint8_t status, void *arg); #endif /* Data Transfer Helpers ****************************************************/ static uint8_t stm32_log2(uint16_t value); static void stm32_dataconfig(uint32_t timeout, uint32_t dlen, uint32_t dctrl); static void stm32_datadisable(void); static void stm32_sendfifo(struct stm32_dev_s *priv); static void stm32_recvfifo(struct stm32_dev_s *priv); static void stm32_eventtimeout(wdparm_t arg); static void stm32_endwait(struct stm32_dev_s *priv, sdio_eventset_t wkupevent); static void stm32_endtransfer(struct stm32_dev_s *priv, sdio_eventset_t wkupevent); /* Interrupt Handling *******************************************************/ static int stm32_interrupt(int irq, void *context, void *arg); #ifdef CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE static int stm32_rdyinterrupt(int irq, void *context, void *arg); #endif /* SDIO interface methods ***************************************************/ /* Mutual exclusion */ #ifdef CONFIG_SDIO_MUXBUS static int stm32_lock(FAR struct sdio_dev_s *dev, bool lock); #endif /* Initialization/setup */ static void stm32_reset(FAR struct sdio_dev_s *dev); static sdio_capset_t stm32_capabilities(FAR struct sdio_dev_s *dev); static sdio_statset_t stm32_status(FAR struct sdio_dev_s *dev); static void stm32_widebus(FAR struct sdio_dev_s *dev, bool enable); static void stm32_clock(FAR struct sdio_dev_s *dev, enum sdio_clock_e rate); static int stm32_attach(FAR struct sdio_dev_s *dev); /* Command/Status/Data Transfer */ static int stm32_sendcmd(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t arg); #ifdef CONFIG_SDIO_BLOCKSETUP static void stm32_blocksetup(FAR struct sdio_dev_s *dev, unsigned int blocklen, unsigned int nblocks); #endif static int stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer, size_t nbytes); static int stm32_sendsetup(FAR struct sdio_dev_s *dev, FAR const uint8_t *buffer, uint32_t nbytes); static int stm32_cancel(FAR struct sdio_dev_s *dev); static int stm32_waitresponse(FAR struct sdio_dev_s *dev, uint32_t cmd); static int stm32_recvshortcrc(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t *rshort); static int stm32_recvlong(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t rlong[4]); static int stm32_recvshort(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t *rshort); /* EVENT handler */ static void stm32_waitenable(FAR struct sdio_dev_s *dev, sdio_eventset_t eventset, uint32_t timeout); static sdio_eventset_t stm32_eventwait(FAR struct sdio_dev_s *dev); static void stm32_callbackenable(FAR struct sdio_dev_s *dev, sdio_eventset_t eventset); static int stm32_registercallback(FAR struct sdio_dev_s *dev, worker_t callback, void *arg); /* DMA */ #ifdef CONFIG_STM32_SDIO_DMA #ifdef CONFIG_ARCH_HAVE_SDIO_PREFLIGHT static int stm32_dmapreflight(FAR struct sdio_dev_s *dev, FAR const uint8_t *buffer, size_t buflen); #endif static int 
stm32_dmarecvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer, size_t buflen); static int stm32_dmasendsetup(FAR struct sdio_dev_s *dev, FAR const uint8_t *buffer, size_t buflen); #endif /* Initialization/uninitialization/reset ************************************/ static void stm32_callback(void *arg); static void stm32_default(void); /**************************************************************************** * Private Data ****************************************************************************/ struct stm32_dev_s g_sdiodev = { .dev = { #ifdef CONFIG_SDIO_MUXBUS .lock = stm32_lock, #endif .reset = stm32_reset, .capabilities = stm32_capabilities, .status = stm32_status, .widebus = stm32_widebus, .clock = stm32_clock, .attach = stm32_attach, .sendcmd = stm32_sendcmd, #ifdef CONFIG_SDIO_BLOCKSETUP .blocksetup = stm32_blocksetup, #endif .recvsetup = stm32_recvsetup, .sendsetup = stm32_sendsetup, .cancel = stm32_cancel, .waitresponse = stm32_waitresponse, .recv_r1 = stm32_recvshortcrc, .recv_r2 = stm32_recvlong, .recv_r3 = stm32_recvshort, .recv_r4 = stm32_recvshort, .recv_r5 = stm32_recvshortcrc, .recv_r6 = stm32_recvshortcrc, .recv_r7 = stm32_recvshort, .waitenable = stm32_waitenable, .eventwait = stm32_eventwait, .callbackenable = stm32_callbackenable, .registercallback = stm32_registercallback, #ifdef CONFIG_SDIO_DMA #ifdef CONFIG_STM32_SDIO_DMA #ifdef CONFIG_ARCH_HAVE_SDIO_PREFLIGHT .dmapreflight = stm32_dmapreflight, #endif .dmarecvsetup = stm32_dmarecvsetup, .dmasendsetup = stm32_dmasendsetup, #else #ifdef CONFIG_ARCH_HAVE_SDIO_PREFLIGHT .dmapreflight = NULL, #endif .dmarecvsetup = stm32_recvsetup, .dmasendsetup = stm32_sendsetup, #endif #endif }, }; /* Register logging support */ #ifdef CONFIG_SDIO_XFRDEBUG static struct stm32_sampleregs_s g_sampleregs[DEBUG_NSAMPLES]; #endif /**************************************************************************** * Private Functions ****************************************************************************/ /**************************************************************************** * Name: stm32_takesem * * Description: * Take the wait semaphore (handling false alarm wakeups due to the receipt * of signals). * * Input Parameters: * dev - Instance of the SDIO device driver state structure. * * Returned Value: * Normally OK, but may return -ECANCELED in the rare event that the task * has been canceled. * ****************************************************************************/ static int stm32_takesem(struct stm32_dev_s *priv) { return nxsem_wait_uninterruptible(&priv->waitsem); } /**************************************************************************** * Name: stm32_setclkcr * * Description: * Modify oft-changed bits in the CLKCR register. Only the following bit- * fields are changed: * * CLKDIV, PWRSAV, BYPASS, WIDBUS, NEGEDGE, and HWFC_EN * * Input Parameters: * clkcr - A new CLKCR setting for the above mentions bits (other bits * are ignored. 
* * Returned Value: * None * ****************************************************************************/ static inline void stm32_setclkcr(uint32_t clkcr) { uint32_t regval = getreg32(STM32_SDIO_CLKCR); /* Clear CLKDIV, PWRSAV, BYPASS, WIDBUS, NEGEDGE, HWFC_EN bits */ regval &= ~(SDIO_CLKCR_CLKDIV_MASK | SDIO_CLKCR_PWRSAV | SDIO_CLKCR_BYPASS | SDIO_CLKCR_WIDBUS_MASK | SDIO_CLKCR_NEGEDGE | SDIO_CLKCR_HWFC_EN | SDIO_CLKCR_CLKEN); /* Replace with user provided settings */ clkcr &= (SDIO_CLKCR_CLKDIV_MASK | SDIO_CLKCR_PWRSAV | SDIO_CLKCR_BYPASS | SDIO_CLKCR_WIDBUS_MASK | SDIO_CLKCR_NEGEDGE | SDIO_CLKCR_HWFC_EN | SDIO_CLKCR_CLKEN); regval |= clkcr; putreg32(regval, STM32_SDIO_CLKCR); mcinfo("CLKCR: %08x PWR: %08x\n", getreg32(STM32_SDIO_CLKCR), getreg32(STM32_SDIO_POWER)); } /**************************************************************************** * Name: stm32_configwaitints * * Description: * Enable/disable SDIO interrupts needed to support the wait function * * Input Parameters: * priv - A reference to the SDIO device state structure * waitmask - The set of bits in the SDIO MASK register to set * waitevents - Waited for events * wkupevent - Wake-up events * * Returned Value: * None * ****************************************************************************/ static void stm32_configwaitints(struct stm32_dev_s *priv, uint32_t waitmask, sdio_eventset_t waitevents, sdio_eventset_t wkupevent) { irqstate_t flags; #ifdef CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE int pinset; #endif /* Save all of the data and set the new interrupt mask in one, atomic * operation. */ flags = enter_critical_section(); #ifdef CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE if ((waitevents & SDIOWAIT_WRCOMPLETE) != 0) { pinset = GPIO_SDIO_D0 & (GPIO_PORT_MASK | GPIO_PIN_MASK); pinset |= (GPIO_INPUT | GPIO_FLOAT | GPIO_EXTI); /* Arm the SDIO_D Ready and install Isr */ stm32_gpiosetevent(pinset, true, false, false, stm32_rdyinterrupt, priv); } /* Disarm SDIO_D ready */ if ((wkupevent & SDIOWAIT_WRCOMPLETE) != 0) { stm32_gpiosetevent(GPIO_SDIO_D0, false, false, false, NULL, NULL); stm32_configgpio(GPIO_SDIO_D0); } #endif priv->waitevents = waitevents; priv->wkupevent = wkupevent; priv->waitmask = waitmask; #ifdef CONFIG_STM32_SDIO_DMA priv->xfrflags = 0; #endif #ifdef CONFIG_STM32_SDIO_CARD putreg32(priv->xfrmask | priv->waitmask | priv->sdiointmask, STM32_SDIO_MASK); #else putreg32(priv->xfrmask | priv->waitmask, STM32_SDIO_MASK); #endif leave_critical_section(flags); } /**************************************************************************** * Name: stm32_configxfrints * * Description: * Enable SDIO interrupts needed to support the data transfer event * * Input Parameters: * priv - A reference to the SDIO device state structure * xfrmask - The set of bits in the SDIO MASK register to set * * Returned Value: * None * ****************************************************************************/ static void stm32_configxfrints(struct stm32_dev_s *priv, uint32_t xfrmask) { irqstate_t flags; flags = enter_critical_section(); priv->xfrmask = xfrmask; #ifdef CONFIG_STM32_SDIO_CARD putreg32(priv->xfrmask | priv->waitmask | priv->sdiointmask, STM32_SDIO_MASK); #else putreg32(priv->xfrmask | priv->waitmask, STM32_SDIO_MASK); #endif leave_critical_section(flags); } /**************************************************************************** * Name: stm32_setpwrctrl * * Description: * Change the PWRCTRL field of the SDIO POWER register to turn the SDIO * ON or OFF * * Input Parameters: * clkcr - A new PWRCTRL setting * * Returned Value: * 
None * ****************************************************************************/ static void stm32_setpwrctrl(uint32_t pwrctrl) { uint32_t regval; regval = getreg32(STM32_SDIO_POWER); regval &= ~SDIO_POWER_PWRCTRL_MASK; regval |= pwrctrl; putreg32(regval, STM32_SDIO_POWER); } /**************************************************************************** * Name: stm32_getpwrctrl * * Description: * Return the current value of the the PWRCTRL field of the SDIO POWER * register. This function can be used to see if the SDIO is powered ON * or OFF * * Input Parameters: * None * * Returned Value: * The current value of the the PWRCTRL field of the SDIO POWER register. * ****************************************************************************/ static inline uint32_t stm32_getpwrctrl(void) { return getreg32(STM32_SDIO_POWER) & SDIO_POWER_PWRCTRL_MASK; } /**************************************************************************** * Name: stm32_sampleinit * * Description: * Setup prior to collecting DMA samples * ****************************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_sampleinit(void) { memset(g_sampleregs, 0xff, DEBUG_NSAMPLES * sizeof(struct stm32_sampleregs_s)); } #endif /**************************************************************************** * Name: stm32_sdiosample * * Description: * Sample SDIO registers * ****************************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_sdiosample(struct stm32_sdioregs_s *regs) { regs->power = (uint8_t)getreg32(STM32_SDIO_POWER); regs->clkcr = (uint16_t)getreg32(STM32_SDIO_CLKCR); regs->dctrl = (uint16_t)getreg32(STM32_SDIO_DCTRL); regs->dtimer = getreg32(STM32_SDIO_DTIMER); regs->dlen = getreg32(STM32_SDIO_DLEN); regs->dcount = getreg32(STM32_SDIO_DCOUNT); regs->sta = getreg32(STM32_SDIO_STA); regs->mask = getreg32(STM32_SDIO_MASK); regs->fifocnt = getreg32(STM32_SDIO_FIFOCNT); } #endif /**************************************************************************** * Name: stm32_sample * * Description: * Sample SDIO/DMA registers * ****************************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_sample(struct stm32_dev_s *priv, int index) { struct stm32_sampleregs_s *regs = &g_sampleregs[index]; #if defined(CONFIG_DEBUG_DMA_INFO) && defined(CONFIG_STM32_SDIO_DMA) if (priv->dmamode) { stm32_dmasample(priv->dma, &regs->dma); } #endif stm32_sdiosample(&regs->sdio); } #endif /**************************************************************************** * Name: stm32_sdiodump * * Description: * Dump one register sample * ****************************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_sdiodump(struct stm32_sdioregs_s *regs, const char *msg) { mcinfo("SDIO Registers: %s\n", msg); mcinfo(" POWER[%08x]: %08x\n", STM32_SDIO_POWER, regs->power); mcinfo(" CLKCR[%08x]: %08x\n", STM32_SDIO_CLKCR, regs->clkcr); mcinfo(" DCTRL[%08x]: %08x\n", STM32_SDIO_DCTRL, regs->dctrl); mcinfo(" DTIMER[%08x]: %08x\n", STM32_SDIO_DTIMER, regs->dtimer); mcinfo(" DLEN[%08x]: %08x\n", STM32_SDIO_DLEN, regs->dlen); mcinfo(" DCOUNT[%08x]: %08x\n", STM32_SDIO_DCOUNT, regs->dcount); mcinfo(" STA[%08x]: %08x\n", STM32_SDIO_STA, regs->sta); mcinfo(" MASK[%08x]: %08x\n", STM32_SDIO_MASK, regs->mask); mcinfo("FIFOCNT[%08x]: %08x\n", STM32_SDIO_FIFOCNT, regs->fifocnt); } #endif 
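/* Illustrative sketch (not part of the original driver): how the
 * CONFIG_SDIO_XFRDEBUG sampling helpers above are intended to bracket a data
 * transfer.  The driver's own setup and interrupt paths below follow roughly
 * this pattern; the function name stm32_sample_example is hypothetical and
 * the block is guarded out so it is never compiled.
 */

#if 0
static void stm32_sample_example(struct stm32_dev_s *priv)
{
  stm32_sampleinit();                          /* Clear the sample array */
  stm32_sample(priv, SAMPLENDX_BEFORE_SETUP);  /* Snapshot before DPSM/DMA setup */

  /* ... configure the data path and start the transfer ... */

  stm32_sample(priv, SAMPLENDX_AFTER_SETUP);   /* Snapshot after setup */

  /* The interrupt handler records SAMPLENDX_END_TRANSFER (and, in DMA mode,
   * the DMA callback records SAMPLENDX_DMA_CALLBACK).  Once the transfer has
   * completed, the collected register samples can be dumped for comparison:
   */

  stm32_dumpsamples(priv);
}
#endif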
/**************************************************************************** * Name: stm32_dumpsample * * Description: * Dump one register sample * ****************************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_dumpsample(struct stm32_dev_s *priv, struct stm32_sampleregs_s *regs, const char *msg) { #if defined(CONFIG_DEBUG_DMA_INFO) && defined(CONFIG_STM32_SDIO_DMA) if (priv->dmamode) { stm32_dmadump(priv->dma, &regs->dma, msg); } #endif stm32_sdiodump(&regs->sdio, msg); } #endif /**************************************************************************** * Name: stm32_dumpsamples * * Description: * Dump all sampled register data * ****************************************************************************/ #ifdef CONFIG_SDIO_XFRDEBUG static void stm32_dumpsamples(struct stm32_dev_s *priv) { stm32_dumpsample(priv, &g_sampleregs[SAMPLENDX_BEFORE_SETUP], "Before setup"); #if defined(CONFIG_DEBUG_DMA_INFO) && defined(CONFIG_STM32_SDIO_DMA) if (priv->dmamode) { stm32_dumpsample(priv, &g_sampleregs[SAMPLENDX_BEFORE_ENABLE], "Before DMA enable"); } #endif stm32_dumpsample(priv, &g_sampleregs[SAMPLENDX_AFTER_SETUP], "After setup"); stm32_dumpsample(priv, &g_sampleregs[SAMPLENDX_END_TRANSFER], "End of transfer"); #if defined(CONFIG_DEBUG_DMA_INFO) && defined(CONFIG_STM32_SDIO_DMA) if (priv->dmamode) { stm32_dumpsample(priv, &g_sampleregs[SAMPLENDX_DMA_CALLBACK], "DMA Callback"); } #endif } #endif /**************************************************************************** * Name: stm32_dmacallback * * Description: * Called when SDIO DMA completes * ****************************************************************************/ #ifdef CONFIG_STM32_SDIO_DMA static void stm32_dmacallback(DMA_HANDLE handle, uint8_t status, void *arg) { FAR struct stm32_dev_s *priv = (FAR struct stm32_dev_s *)arg; DEBUGASSERT(priv->dmamode); sdio_eventset_t result; /* In the normal case, SDIO appears to handle the End-Of-Transfer interrupt * first with the End-Of-DMA event occurring significantly later. On * transfer errors, however, the DMA error will occur before the End-of- * Transfer. */ stm32_sample((struct stm32_dev_s *)arg, SAMPLENDX_DMA_CALLBACK); /* Get the result of the DMA transfer */ if ((status & DMA_STATUS_ERROR) != 0) { mcerr("ERROR: DMA error %02x, remaining: %d\n", status, priv->remaining); result = SDIOWAIT_ERROR; } else { result = SDIOWAIT_TRANSFERDONE; } /* Then terminate the transfer if this completes all of the steps in the * transfer OR if a DMA error occurred. In the non-error case, we should * already have the SDIO transfer done interrupt. If not, the transfer * will appropriately time out. */ priv->xfrflags |= SDIO_DMADONE_FLAG; if (priv->xfrflags == SDIO_ALLDONE || result == SDIOWAIT_ERROR) { stm32_endtransfer(priv, result); } } #endif /**************************************************************************** * Name: stm32_log2 * * Description: * Take (approximate) log base 2 of the provided number (Only works if the * provided number is a power of 2). * ****************************************************************************/ static uint8_t stm32_log2(uint16_t value) { uint8_t log2 = 0; /* 0000 0000 0000 0001 -> return 0, * 0000 0000 0000 001x -> return 1, * 0000 0000 0000 01xx -> return 2, * 0000 0000 0000 1xxx -> return 3, * ... 
* 1xxx xxxx xxxx xxxx -> return 15, */ DEBUGASSERT(value > 0); while (value != 1) { value >>= 1; log2++; } return log2; } /**************************************************************************** * Name: stm32_dataconfig * * Description: * Configure the SDIO data path for the next data transfer * ****************************************************************************/ static void stm32_dataconfig(uint32_t timeout, uint32_t dlen, uint32_t dctrl) { uint32_t clkdiv; uint32_t regval; uint32_t sdio_clk = IP_CLCK_FREQ; /* Enable data path using a timeout scaled to the SD_CLOCK (the card * clock). */ regval = getreg32(STM32_SDIO_CLKCR); clkdiv = (regval & SDIO_CLKCR_CLKDIV_MASK) >> SDIO_CLKCR_CLKDIV_SHIFT; if ((regval & SDIO_CLKCR_BYPASS) == 0) { sdio_clk = sdio_clk / (2 + clkdiv); } /* Convert Timeout in Ms to SD_CLK counts */ timeout = timeout * (sdio_clk / 1000); putreg32(timeout, STM32_SDIO_DTIMER); /* Set DTIMER */ putreg32(dlen, STM32_SDIO_DLEN); /* Set DLEN */ /* Configure DCTRL DTDIR, DTMODE, and DBLOCKSIZE fields and set the DTEN * field */ regval = getreg32(STM32_SDIO_DCTRL); regval &= ~(SDIO_DCTRL_DTDIR | SDIO_DCTRL_DTMODE | SDIO_DCTRL_DBLOCKSIZE_MASK); dctrl &= (SDIO_DCTRL_DTDIR | SDIO_DCTRL_DTMODE | SDIO_DCTRL_DBLOCKSIZE_MASK); regval |= (dctrl | SDIO_DCTRL_DTEN | SDIO_DCTRL_SDIOEN); putreg32(regval, STM32_SDIO_DCTRL); } /**************************************************************************** * Name: stm32_datadisable * * Description: * Disable the SDIO data path setup by stm32_dataconfig() and * disable DMA. * ****************************************************************************/ static void stm32_datadisable(void) { uint32_t regval; /* Disable the data path */ /* Reset DTIMER */ putreg32(UINT32_MAX, STM32_SDIO_DTIMER); /* Reset DLEN */ putreg32(0, STM32_SDIO_DLEN); /* Reset DCTRL DTEN, DTDIR, DTMODE, DMAEN, and DBLOCKSIZE fields */ regval = getreg32(STM32_SDIO_DCTRL); regval &= ~(SDIO_DCTRL_DTEN | SDIO_DCTRL_DTDIR | SDIO_DCTRL_DTMODE | SDIO_DCTRL_DMAEN | SDIO_DCTRL_DBLOCKSIZE_MASK); putreg32(regval, STM32_SDIO_DCTRL); } /**************************************************************************** * Name: stm32_sendfifo * * Description: * Send SDIO data in interrupt mode * * Input Parameters: * priv - An instance of the SDIO device interface * * Returned Value: * None * ****************************************************************************/ static void stm32_sendfifo(struct stm32_dev_s *priv) { union { uint32_t w; uint8_t b[4]; } data; /* Loop while there is more data to be sent and the RX FIFO is not full */ while (priv->remaining > 0 && (getreg32(STM32_SDIO_STA) & SDIO_STA_TXFIFOF) == 0) { /* Is there a full word remaining in the user buffer? */ if (priv->remaining >= sizeof(uint32_t)) { /* Yes, transfer the word to the TX FIFO */ data.w = *priv->buffer++; priv->remaining -= sizeof(uint32_t); } else { /* No.. transfer just the bytes remaining in the user buffer, * padding with zero as necessary to extend to a full word. 
*/ uint8_t *ptr = (uint8_t *)priv->remaining; int i; data.w = 0; for (i = 0; i < (int)priv->remaining; i++) { data.b[i] = *ptr++; } /* Now the transfer is finished */ priv->remaining = 0; } /* Put the word in the FIFO */ putreg32(data.w, STM32_SDIO_FIFO); } } /**************************************************************************** * Name: stm32_recvfifo * * Description: * Receive SDIO data in interrupt mode * * Input Parameters: * priv - An instance of the SDIO device interface * * Returned Value: * None * ****************************************************************************/ static void stm32_recvfifo(struct stm32_dev_s *priv) { union { uint32_t w; uint8_t b[4]; } data; /* Loop while there is space to store the data and there is more * data available in the RX FIFO. */ while (priv->remaining > 0 && (getreg32(STM32_SDIO_STA) & SDIO_STA_RXDAVL) != 0) { /* Read the next word from the RX FIFO */ data.w = getreg32(STM32_SDIO_FIFO); if (priv->remaining >= sizeof(uint32_t)) { /* Transfer the whole word to the user buffer */ *priv->buffer++ = data.w; priv->remaining -= sizeof(uint32_t); } else { /* Transfer any trailing fractional word */ uint8_t *ptr = (uint8_t *)priv->buffer; int i; for (i = 0; i < (int)priv->remaining; i++) { *ptr++ = data.b[i]; } /* Now the transfer is finished */ priv->remaining = 0; } } } /**************************************************************************** * Name: stm32_eventtimeout * * Description: * The watchdog timeout setup when the event wait start has expired without * any other waited-for event occurring. * * Input Parameters: * arg - The argument * * Returned Value: * None * * Assumptions: * Always called from the interrupt level with interrupts disabled. * ****************************************************************************/ static void stm32_eventtimeout(wdparm_t arg) { struct stm32_dev_s *priv = (struct stm32_dev_s *)arg; /* There is always race conditions with timer expirations. */ DEBUGASSERT((priv->waitevents & SDIOWAIT_TIMEOUT) != 0 || priv->wkupevent != 0); /* Is a data transfer complete event expected? */ if ((priv->waitevents & SDIOWAIT_TIMEOUT) != 0) { /* Yes.. wake up any waiting threads */ #ifdef CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE stm32_endwait(priv, SDIOWAIT_TIMEOUT | (priv->waitevents & SDIOWAIT_WRCOMPLETE)); #else stm32_endwait(priv, SDIOWAIT_TIMEOUT); #endif mcerr("Timeout: remaining: %d\n", priv->remaining); } } /**************************************************************************** * Name: stm32_endwait * * Description: * Wake up a waiting thread if the waited-for event has occurred. * * Input Parameters: * priv - An instance of the SDIO device interface * wkupevent - The event that caused the wait to end * * Returned Value: * None * * Assumptions: * Always called from the interrupt level with interrupts disabled. * ****************************************************************************/ static void stm32_endwait(struct stm32_dev_s *priv, sdio_eventset_t wkupevent) { /* Cancel the watchdog timeout */ wd_cancel(&priv->waitwdog); /* Disable event-related interrupts */ stm32_configwaitints(priv, 0, 0, wkupevent); /* Wake up the waiting thread */ stm32_givesem(priv); } /**************************************************************************** * Name: stm32_endtransfer * * Description: * Terminate a transfer with the provided status. This function is called * only from the SDIO interrupt handler when end-of-transfer conditions * are detected. 
* * Input Parameters: * priv - An instance of the SDIO device interface * wkupevent - The event that caused the transfer to end * * Returned Value: * None * * Assumptions: * Always called from the interrupt level with interrupts disabled. * ****************************************************************************/ static void stm32_endtransfer(struct stm32_dev_s *priv, sdio_eventset_t wkupevent) { /* Disable all transfer related interrupts */ stm32_configxfrints(priv, 0); /* Clearing pending interrupt status on all transfer related interrupts */ putreg32(SDIO_XFRDONE_ICR, STM32_SDIO_ICR); /* If this was a DMA transfer, make sure that DMA is stopped */ #ifdef CONFIG_STM32_SDIO_DMA if (priv->dmamode) { /* DMA debug instrumentation */ stm32_sample(priv, SAMPLENDX_END_TRANSFER); /* Make sure that the DMA is stopped (it will be stopped automatically * on normal transfers, but not necessarily when the transfer * terminates on an error condition). */ stm32_dmastop(priv->dma); } #endif /* Mark the transfer finished */ priv->remaining = 0; /* Is a thread wait for these data transfer complete events? */ if ((priv->waitevents & wkupevent) != 0) { /* Yes.. wake up any waiting threads */ stm32_endwait(priv, wkupevent); } } /**************************************************************************** * Name: stm32_rdyinterrupt * * Description: * SDIO ready interrupt handler * * Input Parameters: * dev - An instance of the SDIO device interface * * Returned Value: * None * ****************************************************************************/ #ifdef CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE static int stm32_rdyinterrupt(int irq, void *context, FAR void *arg) { struct stm32_dev_s *priv = (struct stm32_dev_s *)arg; /* Avoid noise, check the state */ if (stm32_gpioread(GPIO_SDIO_D0)) { stm32_endwait(priv, SDIOWAIT_WRCOMPLETE); } return OK; } #endif /**************************************************************************** * Name: stm32_interrupt * * Description: * SDIO interrupt handler * * Input Parameters: * dev - An instance of the SDIO device interface * * Returned Value: * None * ****************************************************************************/ static int stm32_interrupt(int irq, void *context, FAR void *arg) { struct stm32_dev_s *priv = &g_sdiodev; uint32_t enabled; uint32_t pending; /* Loop while there are pending interrupts. Check the SDIO status * register. Mask out all bits that don't correspond to enabled * interrupts. (This depends on the fact that bits are ordered * the same in both the STA and MASK register). If there are non-zero * bits remaining, then we have work to do here. */ while ((enabled = getreg32(STM32_SDIO_STA) & getreg32(STM32_SDIO_MASK)) != 0) { /* Handle in progress, interrupt driven data transfers ****************/ pending = enabled & priv->xfrmask; if (pending != 0) { #ifdef CONFIG_STM32_SDIO_DMA if (!priv->dmamode) #endif { /* Is the RX FIFO half full or more? Is so then we must be * processing a receive transaction. */ if ((pending & SDIO_STA_RXFIFOHF) != 0) { /* Receive data from the RX FIFO */ stm32_recvfifo(priv); } /* Otherwise, Is the transmit FIFO half empty or less? If so * we must be processing a send transaction. NOTE: We can't * be processing both! */ else if ((pending & SDIO_STA_TXFIFOHE) != 0) { /* Send data via the TX FIFO */ stm32_sendfifo(priv); } } /* Handle data end events */ if ((pending & SDIO_STA_DATAEND) != 0) { /* Handle any data remaining the RX FIFO. 
If the RX FIFO is * less than half full at the end of the transfer, then no * half-full interrupt will be received. */ /* Was this transfer performed in DMA mode? */ #ifdef CONFIG_STM32_SDIO_DMA if (priv->dmamode) { /* Yes.. Terminate the transfers only if the DMA has also * finished. */ priv->xfrflags |= SDIO_XFRDONE_FLAG; if (priv->xfrflags == SDIO_ALLDONE) { stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE); } /* Otherwise, just disable further transfer interrupts and * wait for the DMA complete event. */ else { stm32_configxfrints(priv, 0); } } else #endif { /* Receive data from the RX FIFO */ stm32_recvfifo(priv); /* Then terminate the transfer */ stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE); } } /* Handle data block send/receive CRC failure */ else if ((pending & SDIO_STA_DCRCFAIL) != 0) { /* Terminate the transfer with an error */ mcerr("ERROR: Data block CRC failure, remaining: %d\n", priv->remaining); stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE | SDIOWAIT_ERROR); } /* Handle data timeout error */ else if ((pending & SDIO_STA_DTIMEOUT) != 0) { /* Terminate the transfer with an error */ mcerr("ERROR: Data timeout, remaining: %d\n", priv->remaining); stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE | SDIOWAIT_TIMEOUT); } /* Handle RX FIFO overrun error */ else if ((pending & SDIO_STA_RXOVERR) != 0) { /* Terminate the transfer with an error */ mcerr("ERROR: RX FIFO overrun, remaining: %d\n", priv->remaining); stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE | SDIOWAIT_ERROR); } /* Handle TX FIFO underrun error */ else if ((pending & SDIO_STA_TXUNDERR) != 0) { /* Terminate the transfer with an error */ mcerr("ERROR: TX FIFO underrun, remaining: %d\n", priv->remaining); stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE | SDIOWAIT_ERROR); } /* Handle start bit error */ else if ((pending & SDIO_STA_STBITERR) != 0) { /* Terminate the transfer with an error */ mcerr("ERROR: Start bit, remaining: %d\n", priv->remaining); stm32_endtransfer(priv, SDIOWAIT_TRANSFERDONE | SDIOWAIT_ERROR); } } /* Handle wait events *************************************************/ pending = enabled & priv->waitmask; if (pending != 0) { /* Is this a response completion event? */ if ((pending & SDIO_RESPDONE_STA) != 0) { /* Yes.. Is their a thread waiting for response done? */ if ((priv->waitevents & SDIOWAIT_RESPONSEDONE) != 0) { /* Yes.. wake the thread up */ putreg32(SDIO_RESPDONE_ICR | SDIO_CMDDONE_ICR, STM32_SDIO_ICR); stm32_endwait(priv, SDIOWAIT_RESPONSEDONE); } } /* Is this a command completion event? */ if ((pending & SDIO_CMDDONE_STA) != 0) { /* Yes.. Is their a thread waiting for command done? */ if ((priv->waitevents & SDIOWAIT_RESPONSEDONE) != 0) { /* Yes.. wake the thread up */ putreg32(SDIO_CMDDONE_ICR, STM32_SDIO_ICR); stm32_endwait(priv, SDIOWAIT_CMDDONE); } } } #ifdef CONFIG_STM32_SDIO_CARD /* Handle SDIO card interrupt */ pending = enabled & priv->sdiointmask; if (pending != 0) { putreg32(SDIO_STA_SDIOIT, STM32_SDIO_ICR); /* Perform callback */ if (priv->do_sdio_card) { priv->do_sdio_card(priv->do_sdio_arg); } } #endif } return OK; } /**************************************************************************** * Name: stm32_lock * * Description: * Locks the bus. Function calls low-level multiplexed bus routines to * resolve bus requests and acknowledgment issues. * * Input Parameters: * dev - An instance of the SDIO device interface * lock - TRUE to lock, FALSE to unlock. 
* * Returned Value: * OK on success; a negated errno on failure * ****************************************************************************/ #ifdef CONFIG_SDIO_MUXBUS static int stm32_lock(FAR struct sdio_dev_s *dev, bool lock) { /* Single SDIO instance so there is only one possibility. The multiplex * bus is part of board support package. */ stm32_muxbus_sdio_lock(lock); return OK; } #endif /**************************************************************************** * Name: stm32_reset * * Description: * Reset the SDIO controller. Undo all setup and initialization. * * Input Parameters: * dev - An instance of the SDIO device interface * * Returned Value: * None * ****************************************************************************/ static void stm32_reset(FAR struct sdio_dev_s *dev) { FAR struct stm32_dev_s *priv = (FAR struct stm32_dev_s *)dev; irqstate_t flags; /* Disable clocking */ flags = enter_critical_section(); putreg32(0, SDIO_CLKCR_CLKEN_BB); stm32_setpwrctrl(SDIO_POWER_PWRCTRL_OFF); /* Put SDIO registers in their default, reset state */ stm32_default(); /* Reset data */ priv->waitevents = 0; /* Set of events to be waited for */ priv->waitmask = 0; /* Interrupt enables for event waiting */ priv->wkupevent = 0; /* The event that caused the wakeup */ #ifdef CONFIG_STM32_SDIO_DMA priv->xfrflags = 0; /* Used to synchronize SDIO and DMA completion events */ #endif wd_cancel(&priv->waitwdog); /* Cancel any timeouts */ /* Interrupt mode data transfer support */ priv->buffer = 0; /* Address of current R/W buffer */ priv->remaining = 0; /* Number of bytes remaining in the transfer */ priv->xfrmask = 0; /* Interrupt enables for data transfer */ #ifdef CONFIG_STM32_SDIO_CARD priv->sdiointmask = 0; /* SDIO card in-band interrupt mask */ #endif /* DMA data transfer support */ priv->widebus = false; /* Required for DMA support */ #ifdef CONFIG_STM32_SDIO_DMA priv->dmamode = false; /* true: DMA mode transfer */ #endif /* Configure the SDIO peripheral */ stm32_setclkcr(STM32_CLCKCR_INIT | SDIO_CLKCR_CLKEN); stm32_setpwrctrl(SDIO_POWER_PWRCTRL_ON); leave_critical_section(flags); mcinfo("CLCKR: %08x POWER: %08x\n", getreg32(STM32_SDIO_CLKCR), getreg32(STM32_SDIO_POWER)); } /**************************************************************************** * Name: stm32_capabilities * * Description: * Get capabilities (and limitations) of the SDIO driver (optional) * * Input Parameters: * dev - Device-specific state data * * Returned Value: * Returns a bitset of status values (see SDIO_CAPS_* defines) * ****************************************************************************/ static sdio_capset_t stm32_capabilities(FAR struct sdio_dev_s *dev) { sdio_capset_t caps = 0; #ifdef CONFIG_STM32_SDIO_WIDTH_D1_ONLY caps |= SDIO_CAPS_1BIT_ONLY; #endif #ifdef CONFIG_STM32_SDIO_DMA caps |= SDIO_CAPS_DMASUPPORTED; #endif return caps; } /**************************************************************************** * Name: stm32_status * * Description: * Get SDIO status. 
* * Input Parameters: * dev - Device-specific state data * * Returned Value: * Returns a bitset of status values (see stm32_status_* defines) * ****************************************************************************/ static sdio_statset_t stm32_status(FAR struct sdio_dev_s *dev) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; return priv->cdstatus; } /**************************************************************************** * Name: stm32_widebus * * Description: * Called after change in Bus width has been selected (via ACMD6). Most * controllers will need to perform some special operations to work * correctly in the new bus mode. * * Input Parameters: * dev - An instance of the SDIO device interface * wide - true: wide bus (4-bit) bus mode enabled * * Returned Value: * None * ****************************************************************************/ static void stm32_widebus(FAR struct sdio_dev_s *dev, bool wide) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; priv->widebus = wide; } /**************************************************************************** * Name: stm32_clock * * Description: * Enable/disable SDIO clocking * * Input Parameters: * dev - An instance of the SDIO device interface * rate - Specifies the clocking to use (see enum sdio_clock_e) * * Returned Value: * None * ****************************************************************************/ static void stm32_clock(FAR struct sdio_dev_s *dev, enum sdio_clock_e rate) { uint32_t clckr; switch (rate) { /* Disable clocking (with default ID mode divisor) */ default: case CLOCK_SDIO_DISABLED: clckr = STM32_CLCKCR_INIT; return; /* Enable in initial ID mode clocking (<400KHz) */ case CLOCK_IDMODE: clckr = (STM32_CLCKCR_INIT | SDIO_CLKCR_CLKEN); break; /* Enable in MMC normal operation clocking */ case CLOCK_MMC_TRANSFER: clckr = (SDIO_CLKCR_MMCXFR | SDIO_CLKCR_CLKEN); break; /* SD normal operation clocking (wide 4-bit mode) */ case CLOCK_SD_TRANSFER_4BIT: #ifndef CONFIG_STM32_SDIO_WIDTH_D1_ONLY clckr = (SDIO_CLCKR_SDWIDEXFR | SDIO_CLKCR_CLKEN); break; #endif /* SD normal operation clocking (narrow 1-bit mode) */ case CLOCK_SD_TRANSFER_1BIT: clckr = (SDIO_CLCKR_SDXFR | SDIO_CLKCR_CLKEN); break; } /* Set the new clock frequency along with the clock enable/disable bit */ stm32_setclkcr(clckr); } /**************************************************************************** * Name: stm32_attach * * Description: * Attach and prepare interrupts * * Input Parameters: * dev - An instance of the SDIO device interface * * Returned Value: * OK on success; A negated errno on failure. * ****************************************************************************/ static int stm32_attach(FAR struct sdio_dev_s *dev) { int ret; /* Attach the SDIO interrupt handler */ ret = irq_attach(STM32_IRQ_SDIO, stm32_interrupt, NULL); if (ret == OK) { /* Disable all interrupts at the SDIO controller and clear static * interrupt flags */ putreg32(SDIO_MASK_RESET, STM32_SDIO_MASK); putreg32(SDIO_ICR_STATICFLAGS, STM32_SDIO_ICR); /* Enable SDIO interrupts at the NVIC. They can now be enabled at * the SDIO controller as needed. 
*/ up_enable_irq(STM32_IRQ_SDIO); } return ret; } /**************************************************************************** * Name: stm32_sendcmd * * Description: * Send the SDIO command * * Input Parameters: * dev - An instance of the SDIO device interface * cmd - The command to send (32-bits, encoded) * arg - 32-bit argument required with some commands * * Returned Value: * None * ****************************************************************************/ static int stm32_sendcmd(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t arg) { uint32_t regval; uint32_t cmdidx; /* Set the SDIO Argument value */ putreg32(arg, STM32_SDIO_ARG); /* Clear CMDINDEX, WAITRESP, WAITINT, WAITPEND, and CPSMEN bits */ regval = getreg32(STM32_SDIO_CMD); regval &= ~(SDIO_CMD_CMDINDEX_MASK | SDIO_CMD_WAITRESP_MASK | SDIO_CMD_WAITINT | SDIO_CMD_WAITPEND | SDIO_CMD_CPSMEN); /* Set WAITRESP bits */ switch (cmd & MMCSD_RESPONSE_MASK) { case MMCSD_NO_RESPONSE: regval |= SDIO_CMD_NORESPONSE; break; case MMCSD_R1_RESPONSE: case MMCSD_R1B_RESPONSE: case MMCSD_R3_RESPONSE: case MMCSD_R4_RESPONSE: case MMCSD_R5_RESPONSE: case MMCSD_R6_RESPONSE: case MMCSD_R7_RESPONSE: regval |= SDIO_CMD_SHORTRESPONSE; break; case MMCSD_R2_RESPONSE: regval |= SDIO_CMD_LONGRESPONSE; break; } /* Set CPSMEN and the command index */ cmdidx = (cmd & MMCSD_CMDIDX_MASK) >> MMCSD_CMDIDX_SHIFT; regval |= cmdidx | SDIO_CMD_CPSMEN; mcinfo("cmd: %08x arg: %08x regval: %08x\n", cmd, arg, regval); /* Write the SDIO CMD */ putreg32(SDIO_RESPDONE_ICR | SDIO_CMDDONE_ICR, STM32_SDIO_ICR); putreg32(regval, STM32_SDIO_CMD); return OK; } /**************************************************************************** * Name: stm32_blocksetup * * Description: * Configure block size and the number of blocks for next transfer * * Input Parameters: * dev - An instance of the SDIO device interface * blocklen - The selected block size. * nblocklen - The number of blocks to transfer * * Returned Value: * None * ****************************************************************************/ #ifdef CONFIG_SDIO_BLOCKSETUP static void stm32_blocksetup(FAR struct sdio_dev_s *dev, unsigned int blocklen, unsigned int nblocks) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; /* Configure block size for next transfer */ priv->block_size = stm32_log2(blocklen); } #endif /**************************************************************************** * Name: stm32_recvsetup * * Description: * Setup hardware in preparation for data transfer from the card in non-DMA * (interrupt driven mode). This method will do whatever controller setup * is necessary. This would be called for SD memory just BEFORE sending * CMD13 (SEND_STATUS), CMD17 (READ_SINGLE_BLOCK), CMD18 * (READ_MULTIPLE_BLOCKS), ACMD51 (SEND_SCR), etc. Normally, * SDIO_WAITEVENT will be called to receive the indication that the * transfer is complete. 
* * Input Parameters: * dev - An instance of the SDIO device interface * buffer - Address of the buffer in which to receive the data * nbytes - The number of bytes in the transfer * * Returned Value: * Number of bytes sent on success; a negated errno on failure * ****************************************************************************/ static int stm32_recvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer, size_t nbytes) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; uint32_t dblocksize; DEBUGASSERT(priv != NULL && buffer != NULL && nbytes > 0); DEBUGASSERT(((uint32_t)buffer & 3) == 0); /* Reset the DPSM configuration */ stm32_datadisable(); stm32_sampleinit(); stm32_sample(priv, SAMPLENDX_BEFORE_SETUP); /* Save the destination buffer information for use by the interrupt * handler. */ priv->buffer = (uint32_t *)buffer; priv->remaining = nbytes; #ifdef CONFIG_STM32_SDIO_DMA priv->dmamode = false; #endif /* Then set up the SDIO data path */ #ifdef CONFIG_SDIO_BLOCKSETUP if (priv->block_size != STM32_SDIO_USE_DEFAULT_BLOCKSIZE) { dblocksize = priv->block_size << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } else #endif { dblocksize = stm32_log2(nbytes) << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } stm32_dataconfig(SDIO_DTIMER_DATATIMEOUT_MS, nbytes, dblocksize | SDIO_DCTRL_DTDIR); /* And enable interrupts */ stm32_configxfrints(priv, SDIO_RECV_MASK); stm32_sample(priv, SAMPLENDX_AFTER_SETUP); return OK; } /**************************************************************************** * Name: stm32_sendsetup * * Description: * Setup hardware in preparation for data transfer from the card. This * method will do whatever controller setup is necessary. This would be * called for SD memory just AFTER sending CMD24 (WRITE_BLOCK), CMD25 * (WRITE_MULTIPLE_BLOCK), ... and before SDIO_SENDDATA is called. * * Input Parameters: * dev - An instance of the SDIO device interface * buffer - Address of the buffer containing the data to send * nbytes - The number of bytes in the transfer * * Returned Value: * Number of bytes sent on success; a negated errno on failure * ****************************************************************************/ static int stm32_sendsetup(FAR struct sdio_dev_s *dev, FAR const uint8_t *buffer, size_t nbytes) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; uint32_t dblocksize; DEBUGASSERT(priv != NULL && buffer != NULL && nbytes > 0); DEBUGASSERT(((uint32_t)buffer & 3) == 0); /* Reset the DPSM configuration */ stm32_datadisable(); stm32_sampleinit(); stm32_sample(priv, SAMPLENDX_BEFORE_SETUP); /* Save the source buffer information for use by the interrupt handler */ priv->buffer = (uint32_t *)buffer; priv->remaining = nbytes; #ifdef CONFIG_STM32_SDIO_DMA priv->dmamode = false; #endif /* Then set up the SDIO data path */ #ifdef CONFIG_SDIO_BLOCKSETUP if (priv->block_size != STM32_SDIO_USE_DEFAULT_BLOCKSIZE) { dblocksize = priv->block_size << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } else #endif { dblocksize = stm32_log2(nbytes) << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } stm32_dataconfig(SDIO_DTIMER_DATATIMEOUT_MS, nbytes, dblocksize); /* Enable TX interrupts */ stm32_configxfrints(priv, SDIO_SEND_MASK); stm32_sample(priv, SAMPLENDX_AFTER_SETUP); return OK; } /**************************************************************************** * Name: stm32_cancel * * Description: * Cancel the data transfer setup of SDIO_RECVSETUP, SDIO_SENDSETUP, * SDIO_DMARECVSETUP or SDIO_DMASENDSETUP. This must be called to cancel * the data transfer setup if, for some reason, you cannot perform the * transfer. 
* * Input Parameters: * dev - An instance of the SDIO device interface * * Returned Value: * OK is success; a negated errno on failure * ****************************************************************************/ static int stm32_cancel(FAR struct sdio_dev_s *dev) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; /* Disable all transfer- and event- related interrupts */ stm32_configxfrints(priv, 0); stm32_configwaitints(priv, 0, 0, 0); /* Clearing pending interrupt status on all transfer- and event- related * interrupts */ putreg32(SDIO_WAITALL_ICR, STM32_SDIO_ICR); /* Cancel any watchdog timeout */ wd_cancel(&priv->waitwdog); /* If this was a DMA transfer, make sure that DMA is stopped */ #ifdef CONFIG_STM32_SDIO_DMA if (priv->dmamode) { /* Make sure that the DMA is stopped (it will be stopped automatically * on normal transfers, but not necessarily when the transfer * terminates on an error condition. */ stm32_dmastop(priv->dma); } #endif /* Mark no transfer in progress */ priv->remaining = 0; return OK; } /**************************************************************************** * Name: stm32_waitresponse * * Description: * Poll-wait for the response to the last command to be ready. * * Input Parameters: * dev - An instance of the SDIO device interface * cmd - The command that was sent. See 32-bit command definitions above. * * Returned Value: * OK is success; a negated errno on failure * ****************************************************************************/ static int stm32_waitresponse(FAR struct sdio_dev_s *dev, uint32_t cmd) { int32_t timeout; uint32_t events; switch (cmd & MMCSD_RESPONSE_MASK) { case MMCSD_NO_RESPONSE: events = SDIO_CMDDONE_STA; timeout = SDIO_CMDTIMEOUT; break; case MMCSD_R1_RESPONSE: case MMCSD_R1B_RESPONSE: case MMCSD_R2_RESPONSE: case MMCSD_R4_RESPONSE: case MMCSD_R5_RESPONSE: case MMCSD_R6_RESPONSE: events = SDIO_RESPDONE_STA; timeout = SDIO_LONGTIMEOUT; break; case MMCSD_R3_RESPONSE: case MMCSD_R7_RESPONSE: events = SDIO_RESPDONE_STA; timeout = SDIO_CMDTIMEOUT; break; default: return -EINVAL; } /* Then wait for the response (or timeout) */ while ((getreg32(STM32_SDIO_STA) & events) == 0) { if (--timeout <= 0) { mcerr("ERROR: Timeout cmd: %08x events: %08x STA: %08x\n", cmd, events, getreg32(STM32_SDIO_STA)); return -ETIMEDOUT; } } putreg32(SDIO_CMDDONE_ICR, STM32_SDIO_ICR); return OK; } /**************************************************************************** * Name: stm32_recv* * * Description: * Receive response to SDIO command. Only the critical payload is * returned -- that is 32 bits for 48 bit status and 128 bits for 136 bit * status. The driver implementation should verify the correctness of * the remaining, non-returned bits (CRCs, CMD index, etc.). * * Input Parameters: * dev - An instance of the SDIO device interface * Rx - Buffer in which to receive the response * * Returned Value: * Number of bytes sent on success; a negated errno on failure. Here a * failure means only a faiure to obtain the requested response (due to * transport problem -- timeout, CRC, etc.). The implementation only * assures that the response is returned intacta and does not check errors * within the response itself. 
* ****************************************************************************/ static int stm32_recvshortcrc(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t *rshort) { #ifdef CONFIG_DEBUG_MEMCARD_INFO uint32_t respcmd; #endif uint32_t regval; int ret = OK; /* R1 Command response (48-bit) * 47 0 Start bit * 46 0 Transmission bit (0=from card) * 45:40 bit5 - bit0 Command index (0-63) * 39:8 bit31 - bit0 32-bit card status * 7:1 bit6 - bit0 CRC7 * 0 1 End bit * * R1b Identical to R1 with the additional busy signaling via the data * line. * * R6 Published RCA Response (48-bit, SD card only) * 47 0 Start bit * 46 0 Transmission bit (0=from card) * 45:40 bit5 - bit0 Command index (0-63) * 39:8 bit31 - bit0 32-bit Argument Field, consisting of: * [31:16] New published RCA of card * [15:0] Card status bits {23,22,19,12:0} * 7:1 bit6 - bit0 CRC7 * 0 1 End bit */ #ifdef CONFIG_DEBUG_MEMCARD_INFO if (!rshort) { mcerr("ERROR: rshort=NULL\n"); ret = -EINVAL; } /* Check that this is the correct response to this command */ else if ((cmd & MMCSD_RESPONSE_MASK) != MMCSD_R1_RESPONSE && (cmd & MMCSD_RESPONSE_MASK) != MMCSD_R1B_RESPONSE && (cmd & MMCSD_RESPONSE_MASK) != MMCSD_R5_RESPONSE && (cmd & MMCSD_RESPONSE_MASK) != MMCSD_R6_RESPONSE) { mcerr("ERROR: Wrong response CMD=%08x\n", cmd); ret = -EINVAL; } else #endif { /* Check if a timeout or CRC error occurred */ regval = getreg32(STM32_SDIO_STA); if ((regval & SDIO_STA_CTIMEOUT) != 0) { mcerr("ERROR: Command timeout: %08x\n", regval); ret = -ETIMEDOUT; } else if ((regval & SDIO_STA_CCRCFAIL) != 0) { mcerr("ERROR: CRC failure: %08x\n", regval); ret = -EIO; } #ifdef CONFIG_DEBUG_MEMCARD_INFO else { /* Check response received is of desired command */ respcmd = getreg32(STM32_SDIO_RESPCMD); if ((uint8_t)(respcmd & SDIO_RESPCMD_MASK) != (cmd & MMCSD_CMDIDX_MASK)) { mcerr("ERROR: RESCMD=%02x CMD=%08x\n", respcmd, cmd); ret = -EINVAL; } } #endif } /* Clear all pending message completion events and return the R1/R6 * response. 
*/ putreg32(SDIO_RESPDONE_ICR | SDIO_CMDDONE_ICR, STM32_SDIO_ICR); *rshort = getreg32(STM32_SDIO_RESP1); return ret; } static int stm32_recvlong(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t rlong[4]) { uint32_t regval; int ret = OK; /* R2 CID, CSD register (136-bit) * 135 0 Start bit * 134 0 Transmission bit (0=from card) * 133:128 bit5 - bit0 Reserved * 127:1 bit127 - bit1 127-bit CID or CSD register * (including internal CRC) * 0 1 End bit */ #ifdef CONFIG_DEBUG_MEMCARD_INFO /* Check that R1 is the correct response to this command */ if ((cmd & MMCSD_RESPONSE_MASK) != MMCSD_R2_RESPONSE) { mcerr("ERROR: Wrong response CMD=%08x\n", cmd); ret = -EINVAL; } else #endif { /* Check if a timeout or CRC error occurred */ regval = getreg32(STM32_SDIO_STA); if (regval & SDIO_STA_CTIMEOUT) { mcerr("ERROR: Timeout STA: %08x\n", regval); ret = -ETIMEDOUT; } else if (regval & SDIO_STA_CCRCFAIL) { mcerr("ERROR: CRC fail STA: %08x\n", regval); ret = -EIO; } } /* Return the long response */ putreg32(SDIO_RESPDONE_ICR | SDIO_CMDDONE_ICR, STM32_SDIO_ICR); if (rlong) { rlong[0] = getreg32(STM32_SDIO_RESP1); rlong[1] = getreg32(STM32_SDIO_RESP2); rlong[2] = getreg32(STM32_SDIO_RESP3); rlong[3] = getreg32(STM32_SDIO_RESP4); } return ret; } static int stm32_recvshort(FAR struct sdio_dev_s *dev, uint32_t cmd, uint32_t *rshort) { uint32_t regval; int ret = OK; /* R3 OCR (48-bit) * 47 0 Start bit * 46 0 Transmission bit (0=from card) * 45:40 bit5 - bit0 Reserved * 39:8 bit31 - bit0 32-bit OCR register * 7:1 bit6 - bit0 Reserved * 0 1 End bit */ /* Check that this is the correct response to this command */ #ifdef CONFIG_DEBUG_MEMCARD_INFO if ((cmd & MMCSD_RESPONSE_MASK) != MMCSD_R3_RESPONSE && (cmd & MMCSD_RESPONSE_MASK) != MMCSD_R4_RESPONSE && (cmd & MMCSD_RESPONSE_MASK) != MMCSD_R7_RESPONSE) { mcerr("ERROR: Wrong response CMD=%08x\n", cmd); ret = -EINVAL; } else #endif { /* Check if a timeout occurred (Apparently a CRC error can terminate * a good response) */ regval = getreg32(STM32_SDIO_STA); if (regval & SDIO_STA_CTIMEOUT) { mcerr("ERROR: Timeout STA: %08x\n", regval); ret = -ETIMEDOUT; } } putreg32(SDIO_RESPDONE_ICR | SDIO_CMDDONE_ICR, STM32_SDIO_ICR); if (rshort) { *rshort = getreg32(STM32_SDIO_RESP1); } return ret; } /**************************************************************************** * Name: stm32_waitenable * * Description: * Enable/disable of a set of SDIO wait events. This is part of the * the SDIO_WAITEVENT sequence. The set of to-be-waited-for events is * configured before calling stm32_eventwait. This is done in this way * to help the driver to eliminate race conditions between the command * setup and the subsequent events. * * The enabled events persist until either (1) SDIO_WAITENABLE is called * again specifying a different set of wait events, or (2) SDIO_EVENTWAIT * returns. * * Input Parameters: * dev - An instance of the SDIO device interface * eventset - A bitset of events to enable or disable (see SDIOWAIT_* * definitions). 0=disable; 1=enable. * * Returned Value: * None * ****************************************************************************/ static void stm32_waitenable(FAR struct sdio_dev_s *dev, sdio_eventset_t eventset, uint32_t timeout) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; uint32_t waitmask; DEBUGASSERT(priv != NULL); /* Disable event-related interrupts */ stm32_configwaitints(priv, 0, 0, 0); /* Select the interrupt mask that will give us the appropriate wakeup * interrupts. 
*/ #if defined(CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE) if ((eventset & SDIOWAIT_WRCOMPLETE) != 0) { /* eventset carries this */ waitmask = 0; } else #endif { waitmask = 0; if ((eventset & SDIOWAIT_CMDDONE) != 0) { waitmask |= SDIO_CMDDONE_MASK; } if ((eventset & SDIOWAIT_RESPONSEDONE) != 0) { waitmask |= SDIO_RESPDONE_MASK; } if ((eventset & SDIOWAIT_TRANSFERDONE) != 0) { waitmask |= SDIO_XFRDONE_MASK; } /* Enable event-related interrupts */ putreg32(SDIO_WAITALL_ICR, STM32_SDIO_ICR); } stm32_configwaitints(priv, waitmask, eventset, 0); /* Check if the timeout event is specified in the event set */ if ((priv->waitevents & SDIOWAIT_TIMEOUT) != 0) { int delay; int ret; /* Yes.. Handle a cornercase: The user request a timeout event but * with timeout == 0? */ if (!timeout) { priv->wkupevent = SDIOWAIT_TIMEOUT; return; } /* Start the watchdog timer */ delay = MSEC2TICK(timeout); ret = wd_start(&priv->waitwdog, delay, stm32_eventtimeout, (wdparm_t)priv); if (ret < 0) { mcerr("ERROR: wd_start failed: %d\n", ret); } } } /**************************************************************************** * Name: stm32_eventwait * * Description: * Wait for one of the enabled events to occur (or a timeout). Note that * all events enabled by SDIO_WAITEVENTS are disabled when stm32_eventwait * returns. SDIO_WAITEVENTS must be called again before stm32_eventwait * can be used again. * * Input Parameters: * dev - An instance of the SDIO device interface * timeout - Maximum time in milliseconds to wait. Zero means immediate * timeout with no wait. The timeout value is ignored if * SDIOWAIT_TIMEOUT is not included in the waited-for eventset. * * Returned Value: * Event set containing the event(s) that ended the wait. Should always * be non-zero. All events are disabled after the wait concludes. * ****************************************************************************/ static sdio_eventset_t stm32_eventwait(FAR struct sdio_dev_s *dev) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; sdio_eventset_t wkupevent = 0; irqstate_t flags; int ret; /* There is a race condition here... the event may have completed before * we get here. In this case waitevents will be zero, but wkupevents will * be non-zero (and, hopefully, the semaphore count will also be non-zero. */ flags = enter_critical_section(); #if defined(CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE) /* A card ejected while in SDIOWAIT_WRCOMPLETE can lead to a * condition where there is no waitevents set and no wkupevent */ if (priv->waitevents == 0 && priv->wkupevent == 0) { wkupevent = SDIOWAIT_ERROR; goto errout_with_waitints; } #else DEBUGASSERT(priv->waitevents != 0 || priv->wkupevent != 0); #endif #if defined(CONFIG_MMCSD_SDIOWAIT_WRCOMPLETE) if ((priv->waitevents & SDIOWAIT_WRCOMPLETE) != 0) { /* Atomically read pin to see if ready (true) and determine if ISR * fired. If Pin is ready and if ISR did NOT fire end the wait here. */ if (stm32_gpioread(GPIO_SDIO_D0) && (priv->wkupevent & SDIOWAIT_WRCOMPLETE) == 0) { stm32_endwait(priv, SDIOWAIT_WRCOMPLETE); } } #endif /* Loop until the event (or the timeout occurs). Race conditions are * avoided by calling stm32_waitenable prior to triggering the logic that * will cause the wait to terminate. Under certain race conditions, the * waited-for may have already occurred before this function was called! */ for (; ; ) { /* Wait for an event in event set to occur. If this the event has * already occurred, then the semaphore will already have been * incremented and there will be no wait. 
 */

      ret = stm32_takesem(priv);
      if (ret < 0)
        {
          /* Task canceled. Cancel the wdog (assuming it was started) and
           * return an SDIO error.
           */

          wd_cancel(&priv->waitwdog);
          wkupevent = SDIOWAIT_ERROR;
          goto errout_with_waitints;
        }

      wkupevent = priv->wkupevent;

      /* Check if the event has occurred. When the event has occurred, then
       * waitevents will be set to 0 and wkupevent will be set to a nonzero
       * value.
       */

      if (wkupevent != 0)
        {
          /* Yes... break out of the loop with wkupevent non-zero */

          break;
        }
    }

  /* Disable event-related interrupts */

errout_with_waitints:
  stm32_configwaitints(priv, 0, 0, 0);
#ifdef CONFIG_STM32_SDIO_DMA
  priv->xfrflags = 0;
#endif

  leave_critical_section(flags);
  stm32_dumpsamples(priv);
  return wkupevent;
}

/****************************************************************************
 * Name: stm32_callbackenable
 *
 * Description:
 *   Enable/disable of a set of SDIO callback events.  This is part of the
 *   SDIO callback sequence.  The set of events is configured to enable
 *   callbacks to the function provided in stm32_registercallback.
 *
 *   Events are automatically disabled once the callback is performed and no
 *   further callback events will occur until they are again enabled by
 *   calling this method.
 *
 * Input Parameters:
 *   dev      - An instance of the SDIO device interface
 *   eventset - A bitset of events to enable or disable (see SDIOMEDIA_*
 *              definitions). 0=disable; 1=enable.
 *
 * Returned Value:
 *   None
 *
 ****************************************************************************/

static void stm32_callbackenable(FAR struct sdio_dev_s *dev,
                                 sdio_eventset_t eventset)
{
  struct stm32_dev_s *priv = (struct stm32_dev_s *)dev;

  mcinfo("eventset: %02x\n", eventset);
  DEBUGASSERT(priv != NULL);

  priv->cbevents = eventset;
  stm32_callback(priv);
}

/****************************************************************************
 * Name: stm32_registercallback
 *
 * Description:
 *   Register a callback that will be invoked on any media status change.
 *   Callbacks should not be made from interrupt handlers; rather, interrupt
 *   level events should be handled by calling back on the work thread.
 *
 *   When this method is called, all callbacks should be disabled until they
 *   are enabled via a call to SDIO_CALLBACKENABLE
 *
 * Input Parameters:
 *   dev      - Device-specific state data
 *   callback - The function to call on the media change
 *   arg      - A caller provided value to return with the callback
 *
 * Returned Value:
 *   0 on success; negated errno on failure.
 *
 ****************************************************************************/

static int stm32_registercallback(FAR struct sdio_dev_s *dev,
                                  worker_t callback, void *arg)
{
  struct stm32_dev_s *priv = (struct stm32_dev_s *)dev;

  /* Disable callbacks and register this callback and its argument */

  mcinfo("Register %p(%p)\n", callback, arg);
  DEBUGASSERT(priv != NULL);

  priv->cbevents = 0;
  priv->cbarg    = arg;
  priv->callback = callback;
  return OK;
}

/****************************************************************************
 * Name: stm32_dmapreflight
 *
 * Description:
 *   Preflight an SDIO DMA operation.  If the buffer is not well-formed for
 *   SDIO DMA transfer (alignment, size, etc.) returns an error.
* * Input Parameters: * dev - An instance of the SDIO device interface * buffer - The memory to DMA to/from * buflen - The size of the DMA transfer in bytes * * Returned Value: * OK on success; a negated errno on failure ****************************************************************************/ #if defined(CONFIG_STM32_SDIO_DMA) && defined(CONFIG_ARCH_HAVE_SDIO_PREFLIGHT) static int stm32_dmapreflight(FAR struct sdio_dev_s *dev, FAR const uint8_t *buffer, size_t buflen) { #if !defined(CONFIG_STM32_STM32F4XXX) struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; DEBUGASSERT(priv != NULL && buffer != NULL && buflen > 0); /* Wide bus operation is required for DMA */ if (!priv->widebus) { return -EINVAL; } #endif /* DMA must be possible to the buffer */ if (!stm32_dmacapable((uintptr_t)buffer, (buflen + 3) >> 2, SDIO_RXDMA32_CONFIG)) { return -EFAULT; } return 0; } #endif /**************************************************************************** * Name: stm32_dmarecvsetup * * Description: * Setup to perform a read DMA. If the processor supports a data cache, * then this method will also make sure that the contents of the DMA memory * and the data cache are coherent. For read transfers this may mean * invalidating the data cache. * * Input Parameters: * dev - An instance of the SDIO device interface * buffer - The memory to DMA from * buflen - The size of the DMA transfer in bytes * * Returned Value: * OK on success; a negated errno on failure * ****************************************************************************/ #ifdef CONFIG_STM32_SDIO_DMA static int stm32_dmarecvsetup(FAR struct sdio_dev_s *dev, FAR uint8_t *buffer, size_t buflen) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; uint32_t dblocksize; DEBUGASSERT(priv != NULL && buffer != NULL && buflen > 0); #ifdef CONFIG_ARCH_HAVE_SDIO_PREFLIGHT DEBUGASSERT(stm32_dmapreflight(dev, buffer, buflen) == 0); #endif /* Reset the DPSM configuration */ stm32_datadisable(); /* Initialize register sampling */ stm32_sampleinit(); stm32_sample(priv, SAMPLENDX_BEFORE_SETUP); /* Save the destination buffer information for use by the interrupt * handler. */ priv->buffer = (uint32_t *)buffer; priv->remaining = buflen; priv->dmamode = true; /* Then set up the SDIO data path */ #ifdef CONFIG_SDIO_BLOCKSETUP if (priv->block_size != STM32_SDIO_USE_DEFAULT_BLOCKSIZE) { dblocksize = priv->block_size << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } else #endif { dblocksize = stm32_log2(buflen) << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } stm32_dataconfig(SDIO_DTIMER_DATATIMEOUT_MS, buflen, dblocksize | SDIO_DCTRL_DTDIR); /* Configure the RX DMA */ stm32_configxfrints(priv, SDIO_DMARECV_MASK); putreg32(1, SDIO_DCTRL_DMAEN_BB); stm32_dmasetup(priv->dma, STM32_SDIO_FIFO, (uint32_t)buffer, (buflen + 3) >> 2, SDIO_RXDMA32_CONFIG); /* Start the DMA */ stm32_sample(priv, SAMPLENDX_BEFORE_ENABLE); stm32_dmastart(priv->dma, stm32_dmacallback, priv, false); stm32_sample(priv, SAMPLENDX_AFTER_SETUP); return OK; } #endif /**************************************************************************** * Name: stm32_dmasendsetup * * Description: * Setup to perform a write DMA. If the processor supports a data cache, * then this method will also make sure that the contents of the DMA memory * and the data cache are coherent. For write transfers, this may mean * flushing the data cache. 
* * Input Parameters: * dev - An instance of the SDIO device interface * buffer - The memory to DMA into * buflen - The size of the DMA transfer in bytes * * Returned Value: * OK on success; a negated errno on failure * ****************************************************************************/ #ifdef CONFIG_STM32_SDIO_DMA static int stm32_dmasendsetup(FAR struct sdio_dev_s *dev, FAR const uint8_t *buffer, size_t buflen) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; uint32_t dblocksize; DEBUGASSERT(priv != NULL && buffer != NULL && buflen > 0); #ifdef CONFIG_ARCH_HAVE_SDIO_PREFLIGHT DEBUGASSERT(stm32_dmapreflight(dev, buffer, buflen) == 0); #endif /* Reset the DPSM configuration */ stm32_datadisable(); /* Initialize register sampling */ stm32_sampleinit(); stm32_sample(priv, SAMPLENDX_BEFORE_SETUP); /* Save the source buffer information for use by the interrupt handler */ priv->buffer = (uint32_t *)buffer; priv->remaining = buflen; priv->dmamode = true; /* Then set up the SDIO data path */ #ifdef CONFIG_SDIO_BLOCKSETUP if (priv->block_size != STM32_SDIO_USE_DEFAULT_BLOCKSIZE) { dblocksize = priv->block_size << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } else #endif { dblocksize = stm32_log2(buflen) << SDIO_DCTRL_DBLOCKSIZE_SHIFT; } stm32_dataconfig(SDIO_DTIMER_DATATIMEOUT_MS, buflen, dblocksize); /* Configure the TX DMA */ stm32_dmasetup(priv->dma, STM32_SDIO_FIFO, (uint32_t)buffer, (buflen + 3) >> 2, SDIO_TXDMA32_CONFIG); stm32_sample(priv, SAMPLENDX_BEFORE_ENABLE); putreg32(1, SDIO_DCTRL_DMAEN_BB); /* Start the DMA */ stm32_dmastart(priv->dma, stm32_dmacallback, priv, false); stm32_sample(priv, SAMPLENDX_AFTER_SETUP); /* Enable TX interrupts */ stm32_configxfrints(priv, SDIO_DMASEND_MASK); return OK; } #endif /**************************************************************************** * Name: stm32_callback * * Description: * Perform callback. * * Assumptions: * This function does not execute in the context of an interrupt handler. * It may be invoked on any user thread or scheduled on the work thread * from an interrupt handler. * ****************************************************************************/ static void stm32_callback(void *arg) { struct stm32_dev_s *priv = (struct stm32_dev_s *)arg; /* Is a callback registered? */ DEBUGASSERT(priv != NULL); mcinfo("Callback %p(%p) cbevents: %02x cdstatus: %02x\n", priv->callback, priv->cbarg, priv->cbevents, priv->cdstatus); if (priv->callback) { /* Yes.. Check for enabled callback events */ if ((priv->cdstatus & SDIO_STATUS_PRESENT) != 0) { /* Media is present. Is the media inserted event enabled? */ if ((priv->cbevents & SDIOMEDIA_INSERTED) == 0) { /* No... return without performing the callback */ return; } } else { /* Media is not present. Is the media eject event enabled? */ if ((priv->cbevents & SDIOMEDIA_EJECTED) == 0) { /* No... return without performing the callback */ return; } } /* Perform the callback, disabling further callbacks. Of course, the * the callback can (and probably should) re-enable callbacks. */ priv->cbevents = 0; /* Callbacks cannot be performed in the context of an interrupt * handler. If we are in an interrupt handler, then queue the * callback to be performed later on the work thread. */ if (up_interrupt_context()) { /* Yes.. queue it */ mcinfo("Queuing callback to %p(%p)\n", priv->callback, priv->cbarg); work_queue(HPWORK, &priv->cbwork, (worker_t)priv->callback, priv->cbarg, 0); } else { /* No.. 
then just call the callback here */ mcinfo("Callback to %p(%p)\n", priv->callback, priv->cbarg); priv->callback(priv->cbarg); } } } /**************************************************************************** * Name: stm32_default * * Description: * Restore SDIO registers to their default, reset values * ****************************************************************************/ static void stm32_default(void) { putreg32(SDIO_POWER_RESET, STM32_SDIO_POWER); putreg32(SDIO_CLKCR_RESET, STM32_SDIO_CLKCR); putreg32(SDIO_ARG_RESET, STM32_SDIO_ARG); putreg32(SDIO_CMD_RESET, STM32_SDIO_CMD); putreg32(SDIO_DTIMER_RESET, STM32_SDIO_DTIMER); putreg32(SDIO_DLEN_RESET, STM32_SDIO_DLEN); putreg32(SDIO_DCTRL_RESET, STM32_SDIO_DCTRL); putreg32(SDIO_ICR_RESET, STM32_SDIO_ICR); putreg32(SDIO_MASK_RESET, STM32_SDIO_MASK); } /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: sdio_initialize * * Description: * Initialize SDIO for operation. * * Input Parameters: * slotno - Not used. * * Returned Value: * A reference to an SDIO interface structure. NULL is returned on * failures. * ****************************************************************************/ FAR struct sdio_dev_s *sdio_initialize(int slotno) { /* There is only one slot */ struct stm32_dev_s *priv = &g_sdiodev; /* Initialize the SDIO slot structure */ /* Initialize semaphores */ nxsem_init(&priv->waitsem, 0, 0); /* The waitsem semaphore is used for signaling and, hence, should not have * priority inheritance enabled. */ nxsem_set_protocol(&priv->waitsem, SEM_PRIO_NONE); /* Allocate a DMA channel */ #ifdef CONFIG_STM32_SDIO_DMA priv->dma = stm32_dmachannel(SDIO_DMACHAN); DEBUGASSERT(priv->dma); #endif /* Configure GPIOs for 4-bit, wide-bus operation (the chip is capable of * 8-bit wide bus operation but D4-D7 are not configured). * * If bus is multiplexed then there is a custom bus configuration utility * in the scope of the board support package. */ #ifndef CONFIG_SDIO_MUXBUS stm32_configgpio(GPIO_SDIO_D0 | SDIO_PULLUP_ENABLE); #ifndef CONFIG_STM32_SDIO_WIDTH_D1_ONLY stm32_configgpio(GPIO_SDIO_D1 | SDIO_PULLUP_ENABLE); stm32_configgpio(GPIO_SDIO_D2 | SDIO_PULLUP_ENABLE); stm32_configgpio(GPIO_SDIO_D3 | SDIO_PULLUP_ENABLE); #endif stm32_configgpio(GPIO_SDIO_CK | SDIO_PULLUP_ENABLE); stm32_configgpio(GPIO_SDIO_CMD | SDIO_PULLUP_ENABLE); #endif /* Reset the card and assure that it is in the initial, unconfigured * state. */ stm32_reset(&priv->dev); return &g_sdiodev.dev; } /**************************************************************************** * Name: sdio_mediachange * * Description: * Called by board-specific logic -- possibly from an interrupt handler -- * in order to signal to the driver that a card has been inserted or * removed from the slot * * Input Parameters: * dev - An instance of the SDIO driver device state structure. * cardinslot - true is a card has been detected in the slot; false if a * card has been removed from the slot. 
Only transitions * (inserted->removed or removed->inserted should be reported) * * Returned Value: * None * ****************************************************************************/ void sdio_mediachange(FAR struct sdio_dev_s *dev, bool cardinslot) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; sdio_statset_t cdstatus; irqstate_t flags; /* Update card status */ flags = enter_critical_section(); cdstatus = priv->cdstatus; if (cardinslot) { priv->cdstatus |= SDIO_STATUS_PRESENT; } else { priv->cdstatus &= ~SDIO_STATUS_PRESENT; } leave_critical_section(flags); mcinfo("cdstatus OLD: %02x NEW: %02x\n", cdstatus, priv->cdstatus); /* Perform any requested callback if the status has changed */ if (cdstatus != priv->cdstatus) { stm32_callback(priv); } } /**************************************************************************** * Name: sdio_wrprotect * * Description: * Called by board-specific logic to report if the card in the slot is * mechanically write protected. * * Input Parameters: * dev - An instance of the SDIO driver device state structure. * wrprotect - true is a card is writeprotected. * * Returned Value: * None * ****************************************************************************/ void sdio_wrprotect(FAR struct sdio_dev_s *dev, bool wrprotect) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; irqstate_t flags; /* Update card status */ flags = enter_critical_section(); if (wrprotect) { priv->cdstatus |= SDIO_STATUS_WRPROTECTED; } else { priv->cdstatus &= ~SDIO_STATUS_WRPROTECTED; } mcinfo("cdstatus: %02x\n", priv->cdstatus); leave_critical_section(flags); } /**************************************************************************** * Name: sdio_set_sdio_card_isr * * Description: * SDIO card generates interrupt via SDIO_DATA_1 pin. * Called by board-specific logic to register an ISR for SDIO card. * * Input Parameters: * func - callback function. * arg - arg to be passed to the function. * * Returned Value: * None * ****************************************************************************/ #ifdef CONFIG_STM32_SDIO_CARD void sdio_set_sdio_card_isr(FAR struct sdio_dev_s *dev, int (*func)(void *), void *arg) { struct stm32_dev_s *priv = (struct stm32_dev_s *)dev; priv->do_sdio_card = func; if (func != NULL) { priv->sdiointmask = SDIO_STA_SDIOIT; priv->do_sdio_arg = arg; } else { priv->sdiointmask = 0; } putreg32(priv->xfrmask | priv->waitmask | priv->sdiointmask, STM32_SDIO_MASK); } #endif #endif /* CONFIG_STM32_SDIO */
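/****************************************************************************
 * Usage sketch (editorial addition, not part of the driver source)
 *
 * A minimal, hypothetical example of how board-level logic typically wires
 * this SDIO driver to the MMC/SD block driver: it calls sdio_initialize()
 * and sdio_mediachange() (defined above) and assumes the usual
 * mmcsd_slotinitialize() prototype from <nuttx/mmcsd.h>.  The names
 * board_sdio_bringup, GPIO_SDIO_NCD, SDIO_SLOTNO and SDIO_MINOR are
 * board-specific assumptions, not definitions provided by this file.
 ****************************************************************************/

#if 0 /* Illustrative only */
#include <nuttx/config.h>
#include <errno.h>
#include <nuttx/sdio.h>
#include <nuttx/mmcsd.h>

#define SDIO_SLOTNO 0  /* Assumed: only one SDIO slot */
#define SDIO_MINOR  0  /* Assumed: block device /dev/mmcsd0 */

static FAR struct sdio_dev_s *g_sdio_dev;

int board_sdio_bringup(void)
{
  int ret;

  /* Get an instance of the SDIO interface (slot 0) */

  g_sdio_dev = sdio_initialize(SDIO_SLOTNO);
  if (g_sdio_dev == NULL)
    {
      return -ENODEV;
    }

  /* Bind the SDIO interface to the MMC/SD block driver */

  ret = mmcsd_slotinitialize(SDIO_MINOR, g_sdio_dev);
  if (ret < 0)
    {
      return ret;
    }

  /* Report the initial card-detect state.  Later transitions would normally
   * be reported from a card-detect interrupt handler.  GPIO_SDIO_NCD is a
   * hypothetical, board-specific active-low card-detect pin read with the
   * STM32 GPIO helper stm32_gpioread().
   */

  sdio_mediachange(g_sdio_dev, !stm32_gpioread(GPIO_SDIO_NCD));
  return OK;
}
#endif /* Illustrative only */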
203735.c
/* * Copyright (c) 2015, Freescale Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * o Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * * o Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or * other materials provided with the distribution. * * o Neither the name of Freescale Semiconductor, Inc. nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "usb_device_config.h" #include "usb.h" #include "usb_device.h" #include "usb_device_class.h" #if ((defined(USB_DEVICE_CONFIG_MSC)) && (USB_DEVICE_CONFIG_MSC > 0U)) #include "usb_device_msc.h" /******************************************************************************* * Definitions ******************************************************************************/ /******************************************************************************* * Prototypes ******************************************************************************/ usb_status_t USB_DeviceMscRecv(usb_device_msc_struct_t *mscHandle); usb_status_t USB_DeviceMscSend(usb_device_msc_struct_t *mscHandle); /******************************************************************************* * Variables ******************************************************************************/ USB_GLOBAL usb_device_msc_struct_t g_msc_handle[USB_DEVICE_CONFIG_MSC_MAX_INSTANCE]; /******************************************************************************* * Code ******************************************************************************/ /*! * @brief Allocate a device msc class handle. * * This function allocates a device msc class handle. * * @param handle It is out parameter, is used to return pointer of the device msc class handle to the caller. * * @retval kStatus_USB_Success Get a device msc class handle successfully. * @retval kStatus_USB_Busy Cannot allocate a device msc class handle. */ static usb_status_t USB_DeviceMscAllocateHandle(usb_device_msc_struct_t **handle) { uint32_t count; for (count = 0; count < USB_DEVICE_CONFIG_MSC_MAX_INSTANCE; count++) { if (NULL == g_msc_handle[count].handle) { *handle = &g_msc_handle[count]; return kStatus_USB_Success; } } return kStatus_USB_Busy; } /*! * @brief Free a device msc class hanlde. * * This function frees a device msc class hanlde. * * @param handle The device msc class hanlde. 
 *
 * @retval kStatus_USB_Success Free device msc class handle successfully.
 */
static usb_status_t USB_DeviceMscFreeHandle(usb_device_msc_struct_t *handle)
{
    handle->handle = NULL;
    handle->configurationStruct = (usb_device_class_config_struct_t *)NULL;
    handle->configuration = 0;
    handle->alternate = 0;
    return kStatus_USB_Success;
}

/*!
 * @brief Process a USB MSC UFI command.
 *
 * This function analyses the CBW and gets the command code.
 *
 * @param mscHandle The device msc class handle.
 *
 * @retval kStatus_USB_Success The UFI command was processed successfully.
 */
usb_status_t USB_DeviceMscProcessUfiCommand(usb_device_msc_struct_t *mscHandle)
{
    usb_status_t error = kStatus_USB_Error;
    usb_device_msc_ufi_struct_t *ufi = NULL;

    ufi = &mscHandle->mscUfi;
    ufi->requestSense.senseKey = USB_DEVICE_MSC_UFI_NO_SENSE;
    ufi->requestSense.additionalSenseCode = USB_DEVICE_MSC_UFI_NO_SENSE;
    ufi->requestSense.additionalSenseQualifer = USB_DEVICE_MSC_UFI_NO_SENSE;
    ufi->thirteenCase.hostExpectedDataLength = mscHandle->mscCbw.dataTransferLength;
    ufi->thirteenCase.hostExpectedDirection = (uint8_t)(mscHandle->mscCbw.flags >> USB_DEVICE_MSC_CBW_DIRECTION_SHIFT);
    /* The first byte of all UFI command blocks shall contain an Operation Code, refer to the UFI spec */
    switch (mscHandle->mscCbw.cbwcb[0])
    { /* UFI command operation code */
        case USB_DEVICE_MSC_INQUIRY_COMMAND: /* operation code: 0x12 */
            error = USB_DeviceMscUfiInquiryCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_READ_10_COMMAND: /* operation code: 0x28 */
        case USB_DEVICE_MSC_READ_12_COMMAND: /* operation code: 0xA8 */
            error = USB_DeviceMscUfiReadCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_REQUEST_SENSE_COMMAND: /* operation code: 0x03 */
            error = USB_DeviceMscUfiRequestSenseCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_TEST_UNIT_READY_COMMAND: /* operation code: 0x00 */
            error = USB_DeviceMscUfiTestUnitReadyCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_WRITE_10_COMMAND: /* operation code: 0x2A */
        case USB_DEVICE_MSC_WRITE_12_COMMAND: /* operation code: 0xAA */
            error = USB_DeviceMscUfiWriteCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_PREVENT_ALLOW_MEDIUM_REM_COMMAND: /* operation code: 0x1E */
            error = USB_DeviceMscUfiPreventAllowMediumCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_FORMAT_UNIT_COMMAND: /* operation code: 0x04 */
            error = USB_DeviceMscUfiFormatUnitCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_READ_CAPACITY_10_COMMAND: /* operation code: 0x25 */
        case USB_DEVICE_MSC_READ_CAPACITY_16_COMMAND: /* operation code: 0x9E */
            error = USB_DeviceMscUfiReadCapacityCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_MODE_SENSE_10_COMMAND: /* operation code: 0x5A */
        case USB_DEVICE_MSC_MODE_SENSE_6_COMMAND: /* operation code: 0x1A */
            error = USB_DeviceMscUfiModeSenseCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_MODE_SELECT_10_COMMAND: /* operation code: 0x55 */
        case USB_DEVICE_MSC_MODE_SELECT_6_COMMAND: /* operation code: 0x15 */
            error = USB_DeviceMscUfiModeSelectCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_READ_FORMAT_CAPACITIES_COMMAND: /* operation code: 0x23 */
            error = USB_DeviceMscUfiReadFormatCapacityCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_SEND_DIAGNOSTIC_COMMAND: /* operation code: 0x1D */
            error = USB_DeviceMscUfiSendDiagnosticCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_VERIFY_COMMAND: /* operation code: 0x2F */
            error = USB_DeviceMscUfiVerifyCommand(mscHandle);
            break;
        case USB_DEVICE_MSC_START_STOP_UNIT_COMMAND: /* operation code: 0x1B */
            error = USB_DeviceMscUfiStartStopUnitCommand(mscHandle);
            break;
        default:
            error = USB_DeviceMscUfiUnsupportCommand(mscHandle);
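            /* Unsupported operation code: there is no data phase to run for
             * this command, so clear the data-phase and stall bookkeeping
             * flags below and let only the CSW report the result.
             */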
mscHandle->dataOutFlag = 0; mscHandle->dataInFlag = 0; mscHandle->outEndpointStallFlag = 0; mscHandle->inEndpointStallFlag = 0; mscHandle->needOutStallFlag = 0; mscHandle->needInStallFlag = 0; break; } return error; } /*! * @brief Bulk IN endpoint callback function. * * This callback function is used to notify uplayer the tranfser result of a transfer. * This callback pointer is passed when the Bulk IN pipe initialized. * * @param handle The device handle. It equals the value returned from USB_DeviceInit. * @param message The result of the Bulk IN pipe transfer. * @param callbackParam The paramter for this callback. It is same with * usb_device_endpoint_callback_struct_t::callbackParam. In the class, the value is the MSC class handle. * * @return A USB error code or kStatus_USB_Success. */ static usb_status_t USB_DeviceMscBulkIn(usb_device_handle handle, usb_device_endpoint_callback_message_struct_t *message, void *callbackParam) { usb_device_msc_struct_t *mscHandle = (usb_device_msc_struct_t *)callbackParam; usb_device_msc_csw_t *csw; usb_status_t error = kStatus_USB_Error; if (message->length == USB_UNINITIALIZED_VAL_32) { if ((mscHandle->dataInFlag) && (mscHandle->configurationStruct->classCallback != NULL) && ((USB_DEVICE_MSC_READ_10_COMMAND == mscHandle->mscCbw.cbwcb[0]) || (USB_DEVICE_MSC_READ_12_COMMAND == mscHandle->mscCbw.cbwcb[0]))) { usb_device_lba_app_struct_t lbaData; lbaData.size = 0; lbaData.buffer = message->buffer; lbaData.offset = 0; mscHandle->configurationStruct->classCallback((class_handle_t)mscHandle, kUSB_DeviceMscEventReadResponse, (void *)&lbaData); } return error; } if (mscHandle->transferRemaining >= message->length) { mscHandle->transferRemaining -= message->length; } if (mscHandle->needInStallFlag == 1) { mscHandle->needInStallFlag = 0; mscHandle->inEndpointStallFlag = 1; mscHandle->dataInFlag = 0; USB_DeviceStallEndpoint(mscHandle->handle, mscHandle->bulkInEndpoint); return error; } if ((!mscHandle->dataInFlag) && (message->length == USB_DEVICE_MSC_CSW_LENGTH)) { csw = (usb_device_msc_csw_t *)(message->buffer); } if (mscHandle->dataInFlag) { if ((mscHandle->configurationStruct->classCallback != NULL)) { usb_device_lba_app_struct_t lbaData; lbaData.size = message->length; lbaData.buffer = message->buffer; lbaData.offset = mscHandle->currentOffset; if ((USB_DEVICE_MSC_READ_10_COMMAND == mscHandle->mscCbw.cbwcb[0]) || (USB_DEVICE_MSC_READ_12_COMMAND == mscHandle->mscCbw.cbwcb[0])) { mscHandle->configurationStruct->classCallback((class_handle_t)mscHandle, kUSB_DeviceMscEventReadResponse, (void *)&lbaData); } if (mscHandle->transferRemaining) { mscHandle->currentOffset += message->length; error = USB_DeviceMscSend(mscHandle); } if (!mscHandle->transferRemaining) { mscHandle->dataInFlag = 0; /*data transfer has been done, send the csw to host */ USB_DeviceSendRequest(mscHandle->handle, mscHandle->bulkInEndpoint, (uint8_t *)&mscHandle->mscCsw, USB_DEVICE_MSC_CSW_LENGTH); } } } else if ((message->length == USB_DEVICE_MSC_CSW_LENGTH) && (csw->signature == USB_DEVICE_MSC_DCSWSIGNATURE)) { mscHandle->cbwValidFlag = 1; (void)USB_DeviceRecvRequest(mscHandle->handle, mscHandle->bulkOutEndpoint, (uint8_t *)&mscHandle->mscCbw, USB_DEVICE_MSC_CBW_LENGTH); mscHandle->cbwPrimeFlag = 1; } else { } return error; } /*! * @brief Bulk OUT endpoint callback function. * * This callback function is used to notify uplayer the tranfser result of a transfer. * This callback pointer is passed when the Bulk OUT pipe initialized. * * @param handle The device handle. 
It equals the value returned from USB_DeviceInit. * @param message The result of the Bulk OUT pipe transfer. * @param callbackParam The paramter for this callback. It is same with * usb_device_endpoint_callback_struct_t::callbackParam. In the class, the value is the MSC class handle. * * @return A USB error code or kStatus_USB_Success. */ usb_status_t USB_DeviceMscBulkOut(usb_device_handle handle, usb_device_endpoint_callback_message_struct_t *message, void *callbackParam) { usb_device_msc_struct_t *mscHandle = (usb_device_msc_struct_t *)callbackParam; usb_status_t error = kStatus_USB_Success; if (message->length == USB_UNINITIALIZED_VAL_32) { if ((mscHandle->dataInFlag) && (mscHandle->configurationStruct->classCallback != NULL) && ((USB_DEVICE_MSC_WRITE_10_COMMAND == mscHandle->mscCbw.cbwcb[0]) || (USB_DEVICE_MSC_WRITE_12_COMMAND == mscHandle->mscCbw.cbwcb[0]))) { usb_device_lba_app_struct_t lbaData; lbaData.size = 0; lbaData.buffer = message->buffer; lbaData.offset = 0; mscHandle->configurationStruct->classCallback((class_handle_t)mscHandle, kUSB_DeviceMscEventWriteResponse, (void *)&lbaData); } return error; } if (mscHandle->transferRemaining >= message->length) { mscHandle->transferRemaining -= message->length; } if (mscHandle->needOutStallFlag == 1) { mscHandle->needOutStallFlag = 0; mscHandle->outEndpointStallFlag = 1; mscHandle->dataOutFlag = 0; mscHandle->cbwPrimeFlag = 0; USB_DeviceStallEndpoint(mscHandle->handle, mscHandle->bulkOutEndpoint); return error; } if (mscHandle->dataOutFlag) { usb_device_lba_app_struct_t lbaData; lbaData.size = message->length; lbaData.buffer = message->buffer; lbaData.offset = mscHandle->currentOffset; if ((mscHandle->configurationStruct->classCallback != NULL)) { if ((USB_DEVICE_MSC_WRITE_10_COMMAND == mscHandle->mscCbw.cbwcb[0]) || (USB_DEVICE_MSC_WRITE_12_COMMAND == mscHandle->mscCbw.cbwcb[0])) { mscHandle->configurationStruct->classCallback((class_handle_t)mscHandle, kUSB_DeviceMscEventWriteResponse, (void *)&lbaData); } if (mscHandle->transferRemaining) { mscHandle->currentOffset += message->length; error = USB_DeviceMscRecv(mscHandle); } } if (!mscHandle->transferRemaining) { mscHandle->dataOutFlag = 0; { USB_DeviceSendRequest(mscHandle->handle, mscHandle->bulkInEndpoint, (uint8_t *)&mscHandle->mscCsw, USB_DEVICE_MSC_CSW_LENGTH); mscHandle->cswPrimeFlag = 1; } } } else if ((mscHandle->cbwValidFlag) && (message->length == USB_DEVICE_MSC_CBW_LENGTH) && (mscHandle->mscCbw.signature == USB_DEVICE_MSC_DCBWSIGNATURE) && (!((mscHandle->mscCbw.logicalUnitNumber & 0xF0) || (mscHandle->mscCbw.cbLength & 0xE0))) && (mscHandle->mscCbw.logicalUnitNumber < (mscHandle->logicalUnitNumber + 1)) && ((mscHandle->mscCbw.cbLength >= 0x01) && (mscHandle->mscCbw.cbLength <= 0x10))) { mscHandle->cbwPrimeFlag = 0; mscHandle->transferRemaining = 0; mscHandle->mscCsw.signature = USB_DEVICE_MSC_DCSWSIGNATURE; mscHandle->mscCsw.dataResidue = 0; mscHandle->mscCsw.tag = mscHandle->mscCbw.tag; mscHandle->cbwValidFlag = 0; mscHandle->mscCbw.dataTransferLength = USB_LONG_TO_LITTLE_ENDIAN(mscHandle->mscCbw.dataTransferLength); mscHandle->dataOutFlag = (uint8_t)(((!(mscHandle->mscCbw.flags & USB_DEVICE_MSC_CBW_DIRECTION_BIT)) && (mscHandle->mscCbw.dataTransferLength)) ? 1 : 0); mscHandle->dataInFlag = (uint8_t)( ((mscHandle->mscCbw.flags & USB_DEVICE_MSC_CBW_DIRECTION_BIT) && (mscHandle->mscCbw.dataTransferLength)) ? 
1 : 0); if ((0 != mscHandle->dataInFlag) && (0 != mscHandle->inEndpointStallFlag)) { error = kStatus_USB_Error; return error; } error = USB_DeviceMscProcessUfiCommand(mscHandle); if (error == kStatus_USB_InvalidRequest) { if (mscHandle->dataOutFlag == 1) { if (mscHandle->outEndpointStallFlag == 0) { mscHandle->needOutStallFlag = 1; } mscHandle->dataOutFlag = 0; } else if (mscHandle->dataInFlag == 1) { if (mscHandle->inEndpointStallFlag == 0) { mscHandle->needInStallFlag = 1; } mscHandle->dataInFlag = 0; } else { } mscHandle->stallStatus = (uint8_t)USB_DEVICE_MSC_STALL_IN_DATA; } if (!((mscHandle->dataOutFlag) || ((mscHandle->dataInFlag) || (mscHandle->needInStallFlag)))) { USB_DeviceSendRequest(mscHandle->handle, mscHandle->bulkInEndpoint, (uint8_t *)&mscHandle->mscCsw, USB_DEVICE_MSC_CSW_LENGTH); mscHandle->cswPrimeFlag = 1; } } else { USB_DeviceStallEndpoint(mscHandle->handle, mscHandle->bulkOutEndpoint); USB_DeviceStallEndpoint(mscHandle->handle, mscHandle->bulkInEndpoint); mscHandle->cbwValidFlag = 0; mscHandle->outEndpointStallFlag = 1; mscHandle->inEndpointStallFlag = 1; mscHandle->stallStatus = (uint8_t)USB_DEVICE_MSC_STALL_IN_CBW; mscHandle->performResetRecover = 1; } return error; } /*! * @brief Initialize the endpoints of the msc class. * * This callback function is used to initialize the endpoints of the msc class. * * @param mscHandle The device msc class handle. It equals the value returned from * usb_device_class_config_struct_t::classHandle. * * @return A USB error code or kStatus_USB_Success. */ usb_status_t USB_DeviceMscEndpointsInit(usb_device_msc_struct_t *mscHandle) { usb_device_interface_list_t *interfaceList; usb_device_interface_struct_t *interface = (usb_device_interface_struct_t *)NULL; usb_status_t error = kStatus_USB_Error; /* Check the configuration is valid or not. */ if (mscHandle->configuration > mscHandle->configurationStruct->classInfomation->configurations) { return error; } /* Get the interface list of the new configuration. */ /* Check the interface list is valid or not. */ if (NULL == mscHandle->configurationStruct->classInfomation->interfaceList) { return error; } interfaceList = &mscHandle->configurationStruct->classInfomation->interfaceList[mscHandle->configuration - 1]; /* Find interface by using the alternate setting of the interface. */ for (int count = 0; count < interfaceList->count; count++) { if (USB_DEVICE_CONFIG_MSC_CLASS_CODE == interfaceList->interfaces[count].classCode) { for (int index = 0; index < interfaceList->interfaces[count].count; index++) { if (interfaceList->interfaces[count].interface[index].alternateSetting == mscHandle->alternate) { interface = &interfaceList->interfaces[count].interface[index]; break; } } mscHandle->interfaceNumber = interfaceList->interfaces[count].interfaceNumber; break; } } if (!interface) { /* Return error if the interface is not found. */ return error; } /* Keep new interface handle. */ mscHandle->interfaceHandle = interface; /* Initialize the endpoints of the new interface. 
*/ for (int count = 0; count < interface->endpointList.count; count++) { usb_device_endpoint_init_struct_t epInitStruct; usb_device_endpoint_callback_struct_t ep_callback; epInitStruct.zlt = 0; epInitStruct.endpointAddress = interface->endpointList.endpoint[count].endpointAddress; epInitStruct.maxPacketSize = interface->endpointList.endpoint[count].maxPacketSize; epInitStruct.transferType = interface->endpointList.endpoint[count].transferType; if (USB_IN == ((epInitStruct.endpointAddress & USB_DESCRIPTOR_ENDPOINT_ADDRESS_DIRECTION_MASK) >> USB_DESCRIPTOR_ENDPOINT_ADDRESS_DIRECTION_SHIFT)) { mscHandle->bulkInEndpoint = epInitStruct.endpointAddress; ep_callback.callbackFn = USB_DeviceMscBulkIn; } else { mscHandle->bulkOutEndpoint = epInitStruct.endpointAddress; ep_callback.callbackFn = USB_DeviceMscBulkOut; } ep_callback.callbackParam = mscHandle; error = USB_DeviceInitEndpoint(mscHandle->handle, &epInitStruct, &ep_callback); } mscHandle->dataOutFlag = 0; mscHandle->dataInFlag = 0; mscHandle->outEndpointStallFlag = 0; mscHandle->inEndpointStallFlag = 0; mscHandle->needOutStallFlag = 0; mscHandle->needInStallFlag = 0; mscHandle->cbwValidFlag = 1; mscHandle->transferRemaining = 0; mscHandle->performResetRecover = 0; mscHandle->performResetDoneFlag = 0; mscHandle->stallStatus = 0; if (mscHandle->cbwPrimeFlag == 1) { USB_DeviceCancel(mscHandle->handle, mscHandle->bulkOutEndpoint); } USB_DeviceRecvRequest(mscHandle->handle, mscHandle->bulkOutEndpoint, (uint8_t *)&mscHandle->mscCbw, USB_DEVICE_MSC_CBW_LENGTH); mscHandle->cbwPrimeFlag = 1; return error; } /*! * @brief De-initialize the endpoints of the msc class. * * This callback function is used to de-initialize the endpoints of the msc class. * * @param mscHandle The device msc class handle. It equals the value returned from * usb_device_class_config_struct_t::classHandle. * * @return A USB error code or kStatus_USB_Success. */ usb_status_t USB_DeviceMscEndpointsDeinit(usb_device_msc_struct_t *mscHandle) { usb_status_t error = kStatus_USB_Error; if (!mscHandle->interfaceHandle) { return error; } /* De-initialize all endpoints of the interface */ for (int count = 0; count < mscHandle->interfaceHandle->endpointList.count; count++) { error = USB_DeviceDeinitEndpoint(mscHandle->handle, mscHandle->interfaceHandle->endpointList.endpoint[count].endpointAddress); } return error; } /*! * @brief Initialize the msc class. * * This function is used to initialize the msc class. * * @param controllerId The controller id of the USB IP. Please refer to the enumeration usb_controller_index_t. * @param config The class configuration information. * @param handle It is out parameter, is used to return pointer of the msc class handle to the caller. * * @return A USB error code or kStatus_USB_Success. */ usb_status_t USB_DeviceMscInit(uint8_t controllerId, usb_device_class_config_struct_t *config, class_handle_t *handle) { usb_device_msc_struct_t *mscHandle; usb_status_t error = kStatus_USB_Error; uint32_t implementingDiskDrive = USB_DEVICE_CONFIG_MSC_IMPLEMENTING_DISK_DRIVE; usb_device_lba_information_struct_t diskInformation; usb_device_msc_ufi_struct_t *ufi = NULL; /* Allocate a msc class handle. */ error = USB_DeviceMscAllocateHandle(&mscHandle); if (kStatus_USB_Success != error) { return error; } /* Get the device handle according to the controller id. 
*/ error = USB_DeviceClassGetDeviceHandle(controllerId, &mscHandle->handle); if (kStatus_USB_Success != error) { USB_DeviceMscFreeHandle(mscHandle); return error; } if (!mscHandle->handle) { USB_DeviceMscFreeHandle(mscHandle); return kStatus_USB_InvalidHandle; } /* Save the configuration of the class. */ mscHandle->configurationStruct = config; /* Clear the configuration value. */ mscHandle->configuration = 0; mscHandle->alternate = 0xff; /* Get device information. */ error = mscHandle->configurationStruct->classCallback( (class_handle_t)mscHandle, kUSB_DeviceMscEventGetLbaInformation, (void *)&diskInformation); if (((diskInformation.lengthOfEachLba) && (diskInformation.totalLbaNumberSupports)) == 0) { error = kStatus_USB_Error; USB_DeviceMscFreeHandle(mscHandle); return error; } mscHandle->logicalUnitNumber = diskInformation.logicalUnitNumberSupported; /*initialize the basic device information*/ ufi = &mscHandle->mscUfi; mscHandle->totalLogicalBlockNumber = diskInformation.totalLbaNumberSupports; mscHandle->lengthOfEachLba = diskInformation.lengthOfEachLba; mscHandle->logicalUnitNumber = diskInformation.logicalUnitNumberSupported - 1; mscHandle->bulkInBufferSize = diskInformation.bulkInBufferSize; mscHandle->bulkOutBufferSize = diskInformation.bulkOutBufferSize; mscHandle->implementingDiskDrive = implementingDiskDrive; ufi->requestSense.validErrorCode = USB_DEVICE_MSC_UFI_REQ_SENSE_VALID_ERROR_CODE; ufi->requestSense.additionalSenseLength = USB_DEVICE_MSC_UFI_REQ_SENSE_ADDITIONAL_SENSE_LEN; ufi->requestSense.senseKey = USB_DEVICE_MSC_UFI_NO_SENSE; ufi->requestSense.additionalSenseCode = USB_DEVICE_MSC_UFI_NO_SENSE; ufi->requestSense.additionalSenseQualifer = USB_DEVICE_MSC_UFI_NO_SENSE; ufi->readCapacity.lastLogicalBlockAddress = USB_LONG_TO_BIG_ENDIAN(mscHandle->totalLogicalBlockNumber - 1); ufi->readCapacity.blockSize = USB_LONG_TO_BIG_ENDIAN((uint32_t)mscHandle->lengthOfEachLba); ufi->readCapacity16.lastLogicalBlockAddress1 = USB_LONG_TO_BIG_ENDIAN(mscHandle->totalLogicalBlockNumber - 1); ufi->readCapacity16.blockSize = USB_LONG_TO_BIG_ENDIAN((uint32_t)mscHandle->lengthOfEachLba); mscHandle->cbwPrimeFlag = 0; mscHandle->cswPrimeFlag = 0; *handle = (class_handle_t)mscHandle; return error; } /*! * @brief De-initialize the device msc class. * * The function de-initializes the device msc class. * * @param handle The msc class handle got from usb_device_class_config_struct_t::classHandle. * * @return A USB error code or kStatus_USB_Success. */ usb_status_t USB_DeviceMscDeinit(class_handle_t handle) { usb_device_msc_struct_t *mscHandle; usb_status_t error = kStatus_USB_Error; mscHandle = (usb_device_msc_struct_t *)handle; if (!mscHandle) { return kStatus_USB_InvalidHandle; } error = USB_DeviceMscEndpointsDeinit(mscHandle); USB_DeviceMscFreeHandle(mscHandle); return error; } /*! * @brief Handle the event passed to the msc class. * * This function handles the event passed to the msc class. * * @param handle The msc class handle, got from the usb_device_class_config_struct_t::classHandle. * @param event The event codes. Please refer to the enumeration usb_device_class_event_t. * @param param The param type is determined by the event code. * * @return A USB error code or kStatus_USB_Success. * @retval kStatus_USB_Success Free device handle successfully. * @retval kStatus_USB_InvalidParameter The device handle not be found. * @retval kStatus_USB_InvalidRequest The request is invalid, and the control pipe will be stalled by the caller. 
*/ usb_status_t USB_DeviceMscEvent(void *handle, uint32_t event, void *param) { usb_status_t error = kStatus_USB_Error; usb_device_msc_struct_t *mscHandle; uint16_t interfaceAlternate; uint8_t *temp8; uint8_t alternate; if ((!param) || (!handle)) { return kStatus_USB_InvalidHandle; } /* Get the msc class handle. */ mscHandle = (usb_device_msc_struct_t *)handle; switch (event) { case kUSB_DeviceClassEventDeviceReset: /* Bus reset, clear the configuration. */ mscHandle->configuration = 0; break; case kUSB_DeviceClassEventSetConfiguration: /* Get the new configuration. */ temp8 = ((uint8_t *)param); if (!mscHandle->configurationStruct) { break; } if (*temp8 == mscHandle->configuration) { break; } if (mscHandle->configuration) { /* De-initialize the endpoints when current configuration is none zero. */ error = USB_DeviceMscEndpointsDeinit(mscHandle); } /* Save new configuration. */ mscHandle->configuration = *temp8; /* Clear the alternate setting value. */ mscHandle->alternate = 0; /* Initialize the endpoints of the new current configuration by using the alternate setting 0. */ error = USB_DeviceMscEndpointsInit(mscHandle); break; case kUSB_DeviceClassEventSetInterface: if (!mscHandle->configurationStruct) { break; } /* Get the new alternate setting of the interface */ interfaceAlternate = *((uint16_t *)param); /* Get the alternate setting value */ alternate = (uint8_t)(interfaceAlternate & 0xFF); /* Whether the interface belongs to the class. */ if (mscHandle->interfaceNumber != ((uint8_t)(interfaceAlternate >> 8))) { break; } /* Only handle new alternate setting. */ if (alternate == mscHandle->alternate) { break; } /* De-initialize old endpoints */ error = USB_DeviceMscEndpointsDeinit(mscHandle); mscHandle->alternate = alternate; /* Initialize new endpoints */ error = USB_DeviceMscEndpointsInit(mscHandle); break; case kUSB_DeviceClassEventSetEndpointHalt: if ((!mscHandle->configurationStruct) || (!mscHandle->interfaceHandle)) { break; } /* Get the endpoint address */ temp8 = ((uint8_t *)param); for (int count = 0; count < mscHandle->interfaceHandle->endpointList.count; count++) { if (*temp8 == mscHandle->interfaceHandle->endpointList.endpoint[count].endpointAddress) { if (mscHandle->inEndpointStallFlag == 0) { /* Only stall the endpoint belongs to the class */ error = USB_DeviceStallEndpoint(mscHandle->handle, *temp8); mscHandle->inEndpointStallFlag = 1; } if (mscHandle->outEndpointStallFlag == 0) { error = USB_DeviceStallEndpoint(mscHandle->handle, *temp8); mscHandle->inEndpointStallFlag = 0; } } } break; case kUSB_DeviceClassEventClearEndpointHalt: if ((!mscHandle->configurationStruct) || (!mscHandle->interfaceHandle) || (mscHandle->performResetRecover == 1)) { break; } /* Get the endpoint address */ temp8 = ((uint8_t *)param); for (int count = 0; count < mscHandle->interfaceHandle->endpointList.count; count++) { if (*temp8 == mscHandle->interfaceHandle->endpointList.endpoint[count].endpointAddress) { /* Only un-stall the endpoint belongs to the class , If the dedpoint is in stall status ,then * un-stall it*/ if (mscHandle->inEndpointStallFlag == 1) { error = USB_DeviceUnstallEndpoint(mscHandle->handle, *temp8); mscHandle->inEndpointStallFlag = 0; } if (mscHandle->outEndpointStallFlag == 1) { error = USB_DeviceUnstallEndpoint(mscHandle->handle, *temp8); mscHandle->inEndpointStallFlag = 0; } } } if (((mscHandle->stallStatus == USB_DEVICE_MSC_STALL_IN_CSW) || (mscHandle->stallStatus == USB_DEVICE_MSC_STALL_IN_DATA)) && (mscHandle->performResetDoneFlag != 1)) { if (mscHandle->cswPrimeFlag == 
1) { USB_DeviceCancel(mscHandle->handle, mscHandle->bulkInEndpoint); } /*send csw*/ USB_DeviceSendRequest(mscHandle->handle, mscHandle->bulkInEndpoint, (uint8_t *)&mscHandle->mscCsw, USB_DEVICE_MSC_CSW_LENGTH); mscHandle->cswPrimeFlag = 0; mscHandle->stallStatus = 0; } if ((mscHandle->performResetDoneFlag == 1) && (mscHandle->inEndpointStallFlag == 0) && (mscHandle->outEndpointStallFlag == 0)) { mscHandle->performResetDoneFlag = 0; if (mscHandle->cswPrimeFlag == 1) { USB_DeviceCancel(mscHandle->handle, mscHandle->bulkInEndpoint); } /*prime cbw for new transfer*/ USB_DeviceRecvRequest(mscHandle->handle, mscHandle->bulkOutEndpoint, (uint8_t *)&mscHandle->mscCbw, USB_DEVICE_MSC_CBW_LENGTH); mscHandle->cswPrimeFlag = 0; mscHandle->stallStatus = 0; } break; case kUSB_DeviceClassEventClassRequest: if (param) { /* Handle the msc class specific request. */ usb_device_control_request_struct_t *control_request = (usb_device_control_request_struct_t *)param; if ((control_request->setup->bmRequestType & USB_REQUEST_TYPE_RECIPIENT_MASK) != USB_REQUEST_TYPE_RECIPIENT_INTERFACE) { break; } if ((control_request->setup->wIndex & 0xFF) != mscHandle->interfaceNumber) { break; } switch (control_request->setup->bRequest) { case USB_DEVICE_MSC_GET_MAX_LUN: /*Get Max LUN */ if ((control_request->setup->wIndex == mscHandle->interfaceNumber) && (!control_request->setup->wValue) && (control_request->setup->wLength <= 0x0001) && ((control_request->setup->bmRequestType & USB_REQUSET_TYPE_DIR_MASK) == USB_REQUEST_TYPE_DIR_IN)) { control_request->buffer = &mscHandle->logicalUnitNumber; control_request->length = (uint32_t)control_request->setup->wLength; } else { error = kStatus_USB_InvalidRequest; } break; case USB_DEVICE_MSC_BULK_ONLY_MASS_STORAGE_RESET: /*Bulk-Only Mass Storage Reset (class-specific request)*/ if ((control_request->setup->wIndex == mscHandle->interfaceNumber) && (!control_request->setup->wValue) && (!control_request->setup->wLength) && ((control_request->setup->bmRequestType & USB_REQUSET_TYPE_DIR_MASK) == USB_REQUEST_TYPE_DIR_OUT)) { error = USB_DeviceMscEndpointsDeinit(mscHandle); error = USB_DeviceMscEndpointsInit(mscHandle); mscHandle->performResetRecover = 0; mscHandle->performResetDoneFlag = 1; } else { error = kStatus_USB_InvalidRequest; } break; default: break; } } break; default: break; } return error; } /*! * @brief Send data through a specified endpoint. * * The function is used to send data through a specified endpoint. * The function calls USB_DeviceSendRequest internally. * * @param handle The msc class handle got from usb_device_class_config_struct_t::classHandle. * * @return A USB error code or kStatus_USB_Success. * * @note The return value just means if the sending request is successful or not; the transfer done is notified by * USB_DeviceMscBulkIn. * Currently, only one transfer request can be supported for one specific endpoint. * If there is a specific requirement to support multiple transfer requests for one specific endpoint, the application * should implement a queue in the application level. * The subsequent transfer could begin only when the previous transfer is done (get notification through the endpoint * callback). 
*/ usb_status_t USB_DeviceMscSend(usb_device_msc_struct_t *mscHandle) { usb_status_t error = kStatus_USB_Success; usb_device_lba_app_struct_t lba; lba.offset = mscHandle->currentOffset; /*bulkInBufferSize is the application buffer size, USB_DEVICE_MSC_MAX_SEND_TRANSFER_LENGTH is the max transfer length by the hardware, lba.size is the data pending for transfer ,select the minimum size to transfer ,the remaining will be transfer next time*/ lba.size = (mscHandle->bulkInBufferSize > USB_DEVICE_MSC_MAX_SEND_TRANSFER_LENGTH) ? USB_DEVICE_MSC_MAX_SEND_TRANSFER_LENGTH : mscHandle->bulkInBufferSize; lba.size = (mscHandle->transferRemaining > lba.size) ? lba.size : mscHandle->transferRemaining; /* which one is smaller */ lba.buffer = NULL; mscHandle->configurationStruct->classCallback((class_handle_t)mscHandle, kUSB_DeviceMscEventReadRequest, &lba); if (mscHandle->currentOffset < (mscHandle->totalLogicalBlockNumber * mscHandle->lengthOfEachLba)) { error = USB_DeviceSendRequest(mscHandle->handle, mscHandle->bulkInEndpoint, lba.buffer, lba.size); } else { mscHandle->needInStallFlag = 0; mscHandle->inEndpointStallFlag = 1; mscHandle->dataInFlag = 0; mscHandle->stallStatus = (uint8_t)USB_DEVICE_MSC_STALL_IN_DATA; USB_DeviceStallEndpoint(mscHandle->handle, mscHandle->bulkInEndpoint); } return error; } /*! * @brief Receive data through a specified endpoint. * * The function is used to receive data through a specified endpoint. * The function calls USB_DeviceRecvRequest internally. * * @param handle The msc class handle got from usb_device_class_config_struct_t::classHandle. * * @return A USB error code or kStatus_USB_Success. * * @note The return value just means if the receiving request is successful or not; the transfer done is notified by * USB_DeviceMscBulkOut. * Currently, only one transfer request can be supported for one specific endpoint. * If there is a specific requirement to support multiple transfer requests for one specific endpoint, the application * should implement a queue in the application level. * The subsequent transfer could begin only when the previous transfer is done (get notification through the endpoint * callback). */ usb_status_t USB_DeviceMscRecv(usb_device_msc_struct_t *mscHandle) { usb_status_t error = kStatus_USB_Success; usb_device_lba_app_struct_t lba; lba.offset = mscHandle->currentOffset; /*bulkOutBufferSize is the application buffer size, USB_DEVICE_MSC_MAX_RECV_TRANSFER_LENGTH is the max transfer length by the hardware, lba.size is the data pending for transfer ,select the minimum size to transfer ,the remaining will be transfer next time*/ lba.size = (mscHandle->bulkOutBufferSize > USB_DEVICE_MSC_MAX_RECV_TRANSFER_LENGTH) ? USB_DEVICE_MSC_MAX_RECV_TRANSFER_LENGTH : mscHandle->bulkOutBufferSize; lba.size = (mscHandle->transferRemaining > lba.size) ? lba.size : mscHandle->transferRemaining; /* whichever is smaller */ lba.buffer = NULL; mscHandle->configurationStruct->classCallback((class_handle_t)mscHandle, kUSB_DeviceMscEventWriteRequest, &lba); if (mscHandle->currentOffset < (mscHandle->totalLogicalBlockNumber * mscHandle->lengthOfEachLba)) { error = USB_DeviceRecvRequest(mscHandle->handle, mscHandle->bulkOutEndpoint, lba.buffer, lba.size); } else { mscHandle->needOutStallFlag = 0; mscHandle->outEndpointStallFlag = 1; mscHandle->dataOutFlag = 0; mscHandle->stallStatus = (uint8_t)USB_DEVICE_MSC_STALL_IN_DATA; USB_DeviceStallEndpoint(mscHandle->handle, mscHandle->bulkOutEndpoint); } return error; } /*! * @brief Recv Send data through a specified endpoint. 
* * The function is used when ufi process read/write command . * The function calls USB_DeviceMscRecv or usb_device_send_recv as the direction internally. * * @param handle The msc class handle got from usb_device_class_config_struct_t::classHandle. * @param direction Data direction: 0 = Data-Out from host to the device, 1 = Data-In from the device to the host. * @param buffer The memory address to hold the data need to be sent. * @return A USB error code or kStatus_USB_Success. * * @note The return value just means if the sending or reciving request is successful or not. */ usb_status_t USB_DeviceMscLbaTransfer(usb_device_msc_struct_t *mscHandle, uint8_t direction, usb_lba_transfer_information_struct_t *lba_info_ptr) { usb_status_t error = kStatus_USB_Success; mscHandle->transferRemaining = lba_info_ptr->transferNumber * mscHandle->lengthOfEachLba; mscHandle->currentOffset = lba_info_ptr->startingLogicalBlockAddress * mscHandle->lengthOfEachLba; if (direction == USB_IN) { error = USB_DeviceMscSend(mscHandle); } else { error = USB_DeviceMscRecv(mscHandle); } return error; } #endif
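/*
 * A minimal sketch of the application-side class callback that the driver code
 * above invokes for kUSB_DeviceMscEventReadRequest / kUSB_DeviceMscEventWriteRequest.
 * It assumes a plain RAM-disk backing store; APP_DeviceMscCallback, s_ramDisk and
 * RAM_DISK_SIZE are illustrative names, not part of this driver, and the callback
 * signature is inferred from the classCallback invocation in USB_DeviceMscSend/Recv.
 * lba->offset and lba->size are treated as byte counts, matching the currentOffset
 * arithmetic in USB_DeviceMscLbaTransfer.
 */
#define RAM_DISK_SIZE (16U * 1024U)
static uint8_t s_ramDisk[RAM_DISK_SIZE];

static usb_status_t APP_DeviceMscCallback(class_handle_t handle, uint32_t event, void *param)
{
    usb_device_lba_app_struct_t *lba = (usb_device_lba_app_struct_t *)param;

    switch (event)
    {
        case kUSB_DeviceMscEventReadRequest:  /* device-to-host: expose the backing memory */
        case kUSB_DeviceMscEventWriteRequest: /* host-to-device: receive directly into it */
            if ((lba->offset + lba->size) > RAM_DISK_SIZE)
            {
                return kStatus_USB_Error;
            }
            lba->buffer = &s_ramDisk[lba->offset];
            return kStatus_USB_Success;
        default:
            /* Other MSC events (CBW/CSW notifications, etc.) are omitted in this sketch. */
            return kStatus_USB_Error;
    }
}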
224073.c
#include <stdlib.h>
#include <stdio.h>
#include <string.h>   /* memset, memcpy, strlen */
#include <assert.h>
#include "kvspool.h"
#include "kvspool_internal.h"

void sp_oom(void) {
  fprintf(stderr, "out of memory\n");
  exit(-1);
}

void* kv_set_new(void) {
  kvset_t *set;
  if ( (set = malloc(sizeof(kvset_t))) == NULL) sp_oom();
  memset(set, 0, sizeof(*set));
  return set;
}

void kv_set_clear(void *_set) {
  kvset_t *set = (kvset_t*)_set;
  kv_t *kv, *tmp;
  HASH_ITER(hh, set->kvs, kv, tmp) {
    HASH_DEL(set->kvs, kv);
    free(kv->key);
    free(kv->val);
    free(kv);
  }
}

void kv_set_dump(void *_set, FILE *out) {
  kvset_t *set = (kvset_t*)_set;
  kv_t *kv;

  kv = NULL;
  while ( (kv = kv_next(set, kv))) {
    fprintf(out, " %.*s: ", kv->klen, kv->key);
    fprintf(out, "%.*s\n", kv->vlen, kv->val);
  }
}

void kv_set_free(void *_set) {
  kvset_t *set = (kvset_t*)_set;
  kv_t *kv, *tmp;
  HASH_ITER(hh, set->kvs, kv, tmp) {
    HASH_DEL(set->kvs, kv);
    free(kv->key);
    free(kv->val);
    free(kv);
  }
  assert(set->kvs == NULL);
  free(set);
}

kv_t *kv_get(void *_set, char *key) {
  kv_t *kv;
  kvset_t *set = (kvset_t*)_set;
  HASH_FIND(hh, set->kvs, key, strlen(key), kv);
  return kv;
}

void kv_add(void *_set, const char *key, int klen, const char *val, int vlen) {
  kvset_t *set = (kvset_t*)_set;
  assert(klen);
  //assert(vlen);
  kv_t *kv;

  /* check if we're replacing an existing key */
  HASH_FIND(hh, set->kvs, key, klen, kv);
  if (kv) { /* yes, free the old value and replace it */
    free(kv->val);
    if ( (kv->val = malloc(vlen+1)) == NULL) sp_oom();
    kv->vlen = vlen;
    memcpy(kv->val, val, vlen);
    kv->val[vlen] = '\0';
    return;
  }

  /* new key. deep copy the key/val and add it, null term for convenience */
  if ( (kv = malloc(sizeof(*kv))) == NULL) sp_oom();
  if ( (kv->key = malloc(klen+1)) == NULL) sp_oom();
  kv->klen = klen;
  if ( (kv->val = malloc(vlen+1)) == NULL) sp_oom();
  kv->vlen = vlen;
  memcpy(kv->key, key, klen); kv->key[klen] = '\0';
  memcpy(kv->val, val, vlen); kv->val[vlen] = '\0';
  HASH_ADD_KEYPTR(hh, set->kvs, kv->key, kv->klen, kv);
}

int kv_len(void *_set) {
  kvset_t *set = (kvset_t*)_set;
  return set->kvs ? (HASH_COUNT(set->kvs)) : 0;
}

kv_t *kv_next(void *_set, kv_t *kv) {
  kvset_t *set = (kvset_t*)_set;
  if (!kv) return set->kvs;  /* get first element */
  return kv->hh.next;
}
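/*
 * A minimal usage sketch for the set API above: build a set, replace a value,
 * look one up, iterate, dump and free. It relies only on the functions defined
 * in this file and on the kv_t fields (key/klen/val/vlen) already used by
 * kv_set_dump; the keys and values are illustrative.
 */
static void kv_set_example(void) {
  void *set = kv_set_new();

  kv_add(set, "host", 4, "localhost", 9);
  kv_add(set, "port", 4, "8080", 4);
  kv_add(set, "port", 4, "9090", 4);   /* same key: old value is freed and replaced */

  kv_t *kv = kv_get(set, "port");
  if (kv) printf("port is %.*s\n", kv->vlen, kv->val);

  /* iterate every pair in hash order */
  kv = NULL;
  while ( (kv = kv_next(set, kv))) {
    printf("%.*s -> %.*s\n", kv->klen, kv->key, kv->vlen, kv->val);
  }

  kv_set_dump(set, stdout);
  kv_set_free(set);
}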
970175.c
#include <pebble.h>

static void (*bearing_changed)(int new_direction);

void compass_register_ui(void (*callback)(int new_direction)) {
  bearing_changed = callback;
}

void compass_handler(CompassHeadingData data) {
  /* Ignore compass events until a UI callback has been registered. */
  if (!bearing_changed) {
    return;
  }

  switch (data.compass_status) {
    case CompassStatusCalibrating:  /* heading already usable while calibration refines */
    case CompassStatusCalibrated:
      bearing_changed(TRIGANGLE_TO_DEG((int)data.true_heading));
      break;
    case CompassStatusDataInvalid:
    default:
      break;
  }
}
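/*
 * A minimal wiring sketch, assuming the standard Pebble compass service API
 * (compass_service_subscribe) and logging macro (APP_LOG); prv_update_needle
 * and prv_init_compass are illustrative names, not part of this file.
 */
static void prv_update_needle(int new_direction) {
  // A real app would rotate a needle layer here; log the heading instead.
  APP_LOG(APP_LOG_LEVEL_DEBUG, "heading: %d degrees", new_direction);
}

static void prv_init_compass(void) {
  compass_register_ui(prv_update_needle);
  compass_service_subscribe(compass_handler);
}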
644191.c
/* -*- linux-c -*- * * (C) Copyright IBM Corp. 2004 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. This * file and program are licensed under a BSD style license. See * the Copying file included with the OpenHPI distribution for * full licensing terms. * * Author(s): * Chris Chia <[email protected]> * Steve Sherman <[email protected]> */ #include <string.h> #include <stdio.h> #include <SaHpi.h> #include <oh_utils.h> /* oh_set_ep_location: 4 element entity path, victim element at head. * Only head element's instance number changed. */ int main(int argc, char **argv) { SaErrorT err; SaHpiEntityPathT ep = {{{SAHPI_ENT_ADD_IN_CARD, 101}, {SAHPI_ENT_POWER_MODULE, 2020}, {SAHPI_ENT_POWER_MGMNT, 30303}, {SAHPI_ENT_SUB_CHASSIS, 404040}, {0}}}; SaHpiEntityLocationT x = 555555; err = oh_set_ep_location(&ep, SAHPI_ENT_ADD_IN_CARD, x); if (err) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); printf(" Received error=%s\n", oh_lookup_error(err)); return -1; } if (ep.Entry[0].EntityLocation != x) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[0].EntityType != SAHPI_ENT_ADD_IN_CARD) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[1].EntityLocation != 2020) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[1].EntityType != SAHPI_ENT_POWER_MODULE) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[2].EntityLocation != 30303) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[2].EntityType != SAHPI_ENT_POWER_MGMNT) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[3].EntityLocation != 404040) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } if (ep.Entry[3].EntityType != SAHPI_ENT_SUB_CHASSIS) { printf(" Error! Testcase failed. Line=%d\n", __LINE__); return -1; } return 0; }
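/*
 * A small companion sketch (values illustrative): the same call aimed at an
 * element that is not at the head of the path, to show that only the entry
 * whose EntityType matches the requested type has its location rewritten.
 */
static int example_set_mid_element(void)
{
        SaHpiEntityPathT ep = {{{SAHPI_ENT_ADD_IN_CARD, 1},
                                {SAHPI_ENT_POWER_MODULE, 2},
                                {SAHPI_ENT_SUB_CHASSIS, 3},
                                {0}}};
        SaErrorT err;

        err = oh_set_ep_location(&ep, SAHPI_ENT_POWER_MODULE, 99);
        if (err) return -1;

        /* Head and tail entries keep their locations; only the match changes. */
        if (ep.Entry[0].EntityLocation != 1) return -1;
        if (ep.Entry[1].EntityLocation != 99) return -1;
        if (ep.Entry[2].EntityLocation != 3) return -1;
        return 0;
}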
957633.c
/* Copyright (c) 2015, Google Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "internal.h" #if !defined(OPENSSL_THREADS) void CRYPTO_MUTEX_init(CRYPTO_MUTEX *lock) {} void CRYPTO_MUTEX_lock_read(CRYPTO_MUTEX *lock) {} void CRYPTO_MUTEX_lock_write(CRYPTO_MUTEX *lock) {} void CRYPTO_MUTEX_unlock_read(CRYPTO_MUTEX *lock) {} void CRYPTO_MUTEX_unlock_write(CRYPTO_MUTEX *lock) {} void CRYPTO_MUTEX_cleanup(CRYPTO_MUTEX *lock) {} void CRYPTO_STATIC_MUTEX_lock_read(struct CRYPTO_STATIC_MUTEX *lock) {} void CRYPTO_STATIC_MUTEX_lock_write(struct CRYPTO_STATIC_MUTEX *lock) {} void CRYPTO_STATIC_MUTEX_unlock_read(struct CRYPTO_STATIC_MUTEX *lock) {} void CRYPTO_STATIC_MUTEX_unlock_write(struct CRYPTO_STATIC_MUTEX *lock) {} void CRYPTO_once(CRYPTO_once_t *once, void (*init)(void)) { if (*once) { return; } *once = 1; init(); } void CRYPTO_add_cleanup(void (*cleanup)(void *), void *context) { } static void *g_thread_locals[NUM_OPENSSL_THREAD_LOCALS]; void *CRYPTO_get_thread_local(thread_local_data_t index) { return g_thread_locals[index]; } int CRYPTO_set_thread_local(thread_local_data_t index, void *value, thread_local_destructor_t destructor) { g_thread_locals[index] = value; return 1; } #endif // !OPENSSL_THREADS
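/*
 * A minimal usage sketch for the CRYPTO_once fallback above. It assumes the
 * CRYPTO_ONCE_INIT zero initializer declared alongside CRYPTO_once_t in
 * internal.h; the example_* names are illustrative. In this no-threads build
 * the guard is a plain flag test, so |example_do_init| runs at most once per
 * process but without any cross-thread synchronization.
 */
static CRYPTO_once_t g_example_once = CRYPTO_ONCE_INIT;
static int g_example_ready = 0;

static void example_do_init(void) { g_example_ready = 1; }

static int example_is_ready(void) {
  CRYPTO_once(&g_example_once, example_do_init);
  return g_example_ready;
}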
423485.c
/* { dg-do run } */
/* { dg-require-effective-target arm_thumb2_ok } */
/* { dg-options "-mthumb -O -mrestrict-it" } */

int a;

__attribute__((noinline, noclone)) int
fn1 (int c, int d)
{
  a -= c == d;
  return a;
}

int
main (void)
{
  a = 10;
  if (fn1 (4, 4) != 9)
    __builtin_abort ();

  a = 5;
  if (fn1 (3, 4) != 5)
    __builtin_abort ();

  return 0;
}
682019.c
// vim: syntax=c tabstop=4 softtabstop=0 noexpandtab laststatus=1 ruler

/**
 * wrappers/include/afsocket.c
 *
 * Socket functions for wrappers.
 *
 * @author Andrea Dainese <[email protected]>
 * @copyright 2014-2016 Andrea Dainese
 * @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
 * @link http://www.unetlab.com/
 * @version 20160719
 */

#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>      // memset, strncpy, strerror
#include <sys/socket.h>  // socket, connect, bind
#include <sys/un.h>
#include <unistd.h>

#include "log.h"
#include "params.h"

extern int device_id;
extern int tenant_id;

// AF_UNIX socket: listen
int afsocket_listen(char *server_socketfile, char *remote_socketfile, int *server_socket, int *remote_socket) {
    int rc = -1;
    struct sockaddr_un remote_addr;
    memset(&remote_addr, 0, sizeof(remote_addr));
    struct sockaddr_un server_addr;
    memset(&server_addr, 0, sizeof(server_addr));

    // Setting AF_UNIX remote (sending) socket
    *remote_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
    if (*remote_socket < 0) {
        rc = 1;
        UNLLog(LLERROR, "Error while setting remote AF_UNIX: %s (%i)\n", strerror(errno), rc);
        return rc;
    }
    remote_addr.sun_family = AF_UNIX;
    strncpy(remote_addr.sun_path, remote_socketfile, sizeof(remote_addr.sun_path) - 1);
    if (connect(*remote_socket, (struct sockaddr *)&remote_addr, sizeof(struct sockaddr_un)) < 0) {
        rc = 2;
        UNLLog(LLERROR, "Error while connecting remote AF_UNIX: %s (%i)\n", strerror(errno), rc);
        return rc;
    }

    // Setting AF_UNIX local (receiving) socket
    *server_socket = socket(AF_UNIX, SOCK_DGRAM, 0);
    if (*server_socket < 0) {
        rc = 3;
        UNLLog(LLERROR, "Error while setting local AF_UNIX: %s (%i)\n", strerror(errno), rc);
        return rc;
    }
    server_addr.sun_family = AF_UNIX;
    strncpy(server_addr.sun_path, server_socketfile, sizeof(server_addr.sun_path) - 1);
    if (bind(*server_socket, (struct sockaddr *)&server_addr, sizeof(struct sockaddr_un))) {
        rc = 4;
        UNLLog(LLERROR, "Error while binding local AF_UNIX: %s (%i)\n", strerror(errno), rc);
        return rc;
    }

    UNLLog(LLINFO, "Local (%i) and remote (%i) AF_UNIX are configured.\n", *server_socket, *remote_socket);
    return 0;
}

// AF_UNIX socket: receive
int afsocket_receive(void *c, int server_socket, int bytesToRead) {
    int length = 0;

    if ((length = read(server_socket, c, bytesToRead)) <= 0) {
        // Read error
        UNLLog(LLERROR, "Failed to receive data from local AF_UNIX (s=%i, l=%i): %s (%i)\n", server_socket, length, strerror(errno), length);
        return length;
    }
    UNLLog(LLVERBOSE, "Received data from local AF_UNIX (s=%i, l=%i).\n", server_socket, length);
    return length;
}
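/*
 * A minimal caller sketch: set up both AF_UNIX sockets, then pump frames from
 * the receiving socket. The socket paths, buffer size and the plain write()
 * used to forward data on remote_socket are illustrative assumptions; this
 * file itself only provides the listen and receive helpers.
 */
int afsocket_example_loop(void) {
    int server_socket = -1;
    int remote_socket = -1;
    unsigned char frame[9000];
    int length;

    if (afsocket_listen("/tmp/wrapper_rx.sock", "/tmp/wrapper_tx.sock",
                        &server_socket, &remote_socket) != 0) {
        return -1;
    }

    while ((length = afsocket_receive(frame, server_socket, (int)sizeof(frame))) > 0) {
        // Hand the frame to the wrapped device here; echo it back as a demo.
        if (write(remote_socket, frame, length) < 0) {
            UNLLog(LLERROR, "Failed to forward frame: %s\n", strerror(errno));
            break;
        }
    }

    close(remote_socket);
    close(server_socket);
    return 0;
}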
105816.c
/* * OLE 2 default object handler * * Copyright 1999 Francis Beaudet * Copyright 2000 Abey George * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA * * NOTES: * The OLE2 default object handler supports a whole whack of * interfaces including: * IOleObject, IDataObject, IPersistStorage, IViewObject2, * IRunnableObject, IOleCache2, IOleCacheControl and much more. * * All the implementation details are taken from: Inside OLE * second edition by Kraig Brockschmidt, * * TODO * - This implementation of the default handler does not launch the * server in the DoVerb, Update, GetData, GetDataHere and Run * methods. When it is fixed to do so, all the methods will have * to be revisited to allow delegating to the running object * * - All methods in the class that use the class ID should be * aware that it is possible for a class to be treated as * another one and go into emulation mode. Nothing has been * done in this area. * * - Some functions still return E_NOTIMPL they have to be * implemented. Most of those are related to the running of the * actual server. * * - All the methods related to notification and advise sinks are * in place but no notifications are sent to the sinks yet. */ #include <assert.h> #include <stdarg.h> #include <string.h> #define COBJMACROS #include "windef.h" #include "winbase.h" #include "winuser.h" #include "winerror.h" #include "ole2.h" #include "compobj_private.h" #include "storage32.h" #include "wine/unicode.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(ole); enum storage_state { storage_state_uninitialised, storage_state_initialised, storage_state_loaded }; enum object_state { object_state_not_running, object_state_running, object_state_deferred_close }; /**************************************************************************** * DefaultHandler * */ struct DefaultHandler { IOleObject IOleObject_iface; IUnknown IUnknown_iface; IDataObject IDataObject_iface; IRunnableObject IRunnableObject_iface; IAdviseSink IAdviseSink_iface; IPersistStorage IPersistStorage_iface; /* Reference count of this object */ LONG ref; /* IUnknown implementation of the outer object. */ IUnknown* outerUnknown; /* Class Id that this handler object represents. */ CLSID clsid; /* IUnknown implementation of the datacache. */ IUnknown* dataCache; /* IPersistStorage implementation of the datacache. */ IPersistStorage* dataCache_PersistStg; /* Client site for the embedded object. */ IOleClientSite* clientSite; /* * The IOleAdviseHolder maintains the connections * on behalf of the default handler. */ IOleAdviseHolder* oleAdviseHolder; /* * The IDataAdviseHolder maintains the data * connections on behalf of the default handler. 
*/ IDataAdviseHolder* dataAdviseHolder; /* Name of the container and object contained */ LPWSTR containerApp; LPWSTR containerObj; /* IOleObject delegate */ IOleObject *pOleDelegate; /* IPersistStorage delegate */ IPersistStorage *pPSDelegate; /* IDataObject delegate */ IDataObject *pDataDelegate; enum object_state object_state; ULONG in_call; /* connection cookie for the advise on the delegate OLE object */ DWORD dwAdvConn; /* storage passed to Load or InitNew */ IStorage *storage; enum storage_state storage_state; /* optional class factory for object */ IClassFactory *pCFObject; /* TRUE if acting as an inproc server instead of an inproc handler */ BOOL inproc_server; }; typedef struct DefaultHandler DefaultHandler; static inline DefaultHandler *impl_from_IOleObject( IOleObject *iface ) { return CONTAINING_RECORD(iface, DefaultHandler, IOleObject_iface); } static inline DefaultHandler *impl_from_IUnknown( IUnknown *iface ) { return CONTAINING_RECORD(iface, DefaultHandler, IUnknown_iface); } static inline DefaultHandler *impl_from_IDataObject( IDataObject *iface ) { return CONTAINING_RECORD(iface, DefaultHandler, IDataObject_iface); } static inline DefaultHandler *impl_from_IRunnableObject( IRunnableObject *iface ) { return CONTAINING_RECORD(iface, DefaultHandler, IRunnableObject_iface); } static inline DefaultHandler *impl_from_IAdviseSink( IAdviseSink *iface ) { return CONTAINING_RECORD(iface, DefaultHandler, IAdviseSink_iface); } static inline DefaultHandler *impl_from_IPersistStorage( IPersistStorage *iface ) { return CONTAINING_RECORD(iface, DefaultHandler, IPersistStorage_iface); } static void DefaultHandler_Destroy(DefaultHandler* This); static inline BOOL object_is_running(DefaultHandler *This) { return IRunnableObject_IsRunning(&This->IRunnableObject_iface); } static void DefaultHandler_Stop(DefaultHandler *This); static inline void start_object_call(DefaultHandler *This) { This->in_call++; } static inline void end_object_call(DefaultHandler *This) { This->in_call--; if (This->in_call == 0 && This->object_state == object_state_deferred_close) DefaultHandler_Stop( This ); } /********************************************************* * Method implementation for the non delegating IUnknown * part of the DefaultHandler class. */ /************************************************************************ * DefaultHandler_NDIUnknown_QueryInterface (IUnknown) * * See Windows documentation for more details on IUnknown methods. * * This version of QueryInterface will not delegate its implementation * to the outer unknown. 
*/ static HRESULT WINAPI DefaultHandler_NDIUnknown_QueryInterface( IUnknown* iface, REFIID riid, void** ppvObject) { DefaultHandler *This = impl_from_IUnknown(iface); if (!ppvObject) return E_INVALIDARG; *ppvObject = NULL; if (IsEqualIID(&IID_IUnknown, riid)) *ppvObject = iface; else if (IsEqualIID(&IID_IOleObject, riid)) *ppvObject = &This->IOleObject_iface; else if (IsEqualIID(&IID_IDataObject, riid)) *ppvObject = &This->IDataObject_iface; else if (IsEqualIID(&IID_IRunnableObject, riid)) *ppvObject = &This->IRunnableObject_iface; else if (IsEqualIID(&IID_IPersist, riid) || IsEqualIID(&IID_IPersistStorage, riid)) *ppvObject = &This->IPersistStorage_iface; else if (IsEqualIID(&IID_IViewObject, riid) || IsEqualIID(&IID_IViewObject2, riid) || IsEqualIID(&IID_IOleCache, riid) || IsEqualIID(&IID_IOleCache2, riid)) { HRESULT hr = IUnknown_QueryInterface(This->dataCache, riid, ppvObject); if (FAILED(hr)) FIXME("interface %s not implemented by data cache\n", debugstr_guid(riid)); return hr; } else if (This->inproc_server && This->pOleDelegate) { return IOleObject_QueryInterface(This->pOleDelegate, riid, ppvObject); } /* Check that we obtained an interface. */ if (*ppvObject == NULL) { WARN( "() : asking for unsupported interface %s\n", debugstr_guid(riid)); return E_NOINTERFACE; } /* * Query Interface always increases the reference count by one when it is * successful. */ IUnknown_AddRef((IUnknown*)*ppvObject); return S_OK; } /************************************************************************ * DefaultHandler_NDIUnknown_AddRef (IUnknown) * * See Windows documentation for more details on IUnknown methods. * * This version of QueryInterface will not delegate its implementation * to the outer unknown. */ static ULONG WINAPI DefaultHandler_NDIUnknown_AddRef( IUnknown* iface) { DefaultHandler *This = impl_from_IUnknown(iface); return InterlockedIncrement(&This->ref); } /************************************************************************ * DefaultHandler_NDIUnknown_Release (IUnknown) * * See Windows documentation for more details on IUnknown methods. * * This version of QueryInterface will not delegate its implementation * to the outer unknown. */ static ULONG WINAPI DefaultHandler_NDIUnknown_Release( IUnknown* iface) { DefaultHandler *This = impl_from_IUnknown(iface); ULONG ref; ref = InterlockedDecrement(&This->ref); if (!ref) DefaultHandler_Destroy(This); return ref; } /********************************************************* * Methods implementation for the IOleObject part of * the DefaultHandler class. */ /************************************************************************ * DefaultHandler_QueryInterface (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static HRESULT WINAPI DefaultHandler_QueryInterface( IOleObject* iface, REFIID riid, void** ppvObject) { DefaultHandler *This = impl_from_IOleObject(iface); return IUnknown_QueryInterface(This->outerUnknown, riid, ppvObject); } /************************************************************************ * DefaultHandler_AddRef (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static ULONG WINAPI DefaultHandler_AddRef( IOleObject* iface) { DefaultHandler *This = impl_from_IOleObject(iface); return IUnknown_AddRef(This->outerUnknown); } /************************************************************************ * DefaultHandler_Release (IUnknown) * * See Windows documentation for more details on IUnknown methods. 
*/ static ULONG WINAPI DefaultHandler_Release( IOleObject* iface) { DefaultHandler *This = impl_from_IOleObject(iface); return IUnknown_Release(This->outerUnknown); } /************************************************************************ * DefaultHandler_SetClientSite (IOleObject) * * The default handler's implementation of this method only keeps the * client site pointer for future reference. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_SetClientSite( IOleObject* iface, IOleClientSite* pClientSite) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = S_OK; TRACE("(%p, %p)\n", iface, pClientSite); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_SetClientSite(This->pOleDelegate, pClientSite); end_object_call( This ); } /* * Make sure we release the previous client site if there * was one. */ if (This->clientSite) IOleClientSite_Release(This->clientSite); This->clientSite = pClientSite; if (This->clientSite) IOleClientSite_AddRef(This->clientSite); return hr; } /************************************************************************ * DefaultHandler_GetClientSite (IOleObject) * * The default handler's implementation of this method returns the * last pointer set in IOleObject_SetClientSite. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_GetClientSite( IOleObject* iface, IOleClientSite** ppClientSite) { DefaultHandler *This = impl_from_IOleObject(iface); if (!ppClientSite) return E_POINTER; *ppClientSite = This->clientSite; if (This->clientSite) IOleClientSite_AddRef(This->clientSite); return S_OK; } /************************************************************************ * DefaultHandler_SetHostNames (IOleObject) * * The default handler's implementation of this method just stores * the strings and returns S_OK. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_SetHostNames( IOleObject* iface, LPCOLESTR szContainerApp, LPCOLESTR szContainerObj) { DefaultHandler *This = impl_from_IOleObject(iface); TRACE("(%p, %s, %s)\n", iface, debugstr_w(szContainerApp), debugstr_w(szContainerObj)); if (object_is_running(This)) { start_object_call( This ); IOleObject_SetHostNames(This->pOleDelegate, szContainerApp, szContainerObj); end_object_call( This ); } /* Be sure to cleanup before re-assigning the strings. 
*/ HeapFree( GetProcessHeap(), 0, This->containerApp ); This->containerApp = NULL; HeapFree( GetProcessHeap(), 0, This->containerObj ); This->containerObj = NULL; if (szContainerApp) { if ((This->containerApp = HeapAlloc( GetProcessHeap(), 0, (lstrlenW(szContainerApp) + 1) * sizeof(WCHAR) ))) strcpyW( This->containerApp, szContainerApp ); } if (szContainerObj) { if ((This->containerObj = HeapAlloc( GetProcessHeap(), 0, (lstrlenW(szContainerObj) + 1) * sizeof(WCHAR) ))) strcpyW( This->containerObj, szContainerObj ); } return S_OK; } static void release_delegates(DefaultHandler *This) { if (This->pDataDelegate) { IDataObject_Release(This->pDataDelegate); This->pDataDelegate = NULL; } if (This->pPSDelegate) { IPersistStorage_Release(This->pPSDelegate); This->pPSDelegate = NULL; } if (This->pOleDelegate) { IOleObject_Release(This->pOleDelegate); This->pOleDelegate = NULL; } } /* undoes the work done by DefaultHandler_Run */ static void DefaultHandler_Stop(DefaultHandler *This) { IOleCacheControl *cache_ctrl; HRESULT hr; if (This->object_state == object_state_not_running) return; hr = IUnknown_QueryInterface( This->dataCache, &IID_IOleCacheControl, (void **)&cache_ctrl ); if (SUCCEEDED(hr)) { hr = IOleCacheControl_OnStop( cache_ctrl ); IOleCacheControl_Release( cache_ctrl ); } IOleObject_Unadvise(This->pOleDelegate, This->dwAdvConn); if (This->dataAdviseHolder) DataAdviseHolder_OnDisconnect(This->dataAdviseHolder); This->object_state = object_state_not_running; release_delegates( This ); } /************************************************************************ * DefaultHandler_Close (IOleObject) * * The default handler's implementation of this method is meaningless * without a running server so it does nothing. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_Close( IOleObject* iface, DWORD dwSaveOption) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr; TRACE("(%d)\n", dwSaveOption); if (!object_is_running(This)) return S_OK; start_object_call( This ); hr = IOleObject_Close(This->pOleDelegate, dwSaveOption); end_object_call( This ); DefaultHandler_Stop(This); return hr; } /************************************************************************ * DefaultHandler_SetMoniker (IOleObject) * * The default handler's implementation of this method does nothing. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_SetMoniker( IOleObject* iface, DWORD dwWhichMoniker, IMoniker* pmk) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = S_OK; TRACE("(%p, %d, %p)\n", iface, dwWhichMoniker, pmk); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_SetMoniker(This->pOleDelegate, dwWhichMoniker, pmk); end_object_call( This ); } return hr; } /************************************************************************ * DefaultHandler_GetMoniker (IOleObject) * * Delegate this request to the client site if we have one. * * See Windows documentation for more details on IOleObject methods. 
*/ static HRESULT WINAPI DefaultHandler_GetMoniker( IOleObject* iface, DWORD dwAssign, DWORD dwWhichMoniker, IMoniker** ppmk) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr; TRACE("(%p, %d, %d, %p)\n", iface, dwAssign, dwWhichMoniker, ppmk); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_GetMoniker(This->pOleDelegate, dwAssign, dwWhichMoniker, ppmk); end_object_call( This ); return hr; } /* FIXME: dwWhichMoniker == OLEWHICHMK_CONTAINER only? */ if (This->clientSite) { return IOleClientSite_GetMoniker(This->clientSite, dwAssign, dwWhichMoniker, ppmk); } return E_FAIL; } /************************************************************************ * DefaultHandler_InitFromData (IOleObject) * * This method is meaningless if the server is not running * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_InitFromData( IOleObject* iface, IDataObject* pDataObject, BOOL fCreation, DWORD dwReserved) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = OLE_E_NOTRUNNING; TRACE("(%p, %p, %d, %d)\n", iface, pDataObject, fCreation, dwReserved); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_InitFromData(This->pOleDelegate, pDataObject, fCreation, dwReserved); end_object_call( This ); } return hr; } /************************************************************************ * DefaultHandler_GetClipboardData (IOleObject) * * This method is meaningless if the server is not running * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_GetClipboardData( IOleObject* iface, DWORD dwReserved, IDataObject** ppDataObject) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = OLE_E_NOTRUNNING; TRACE("(%p, %d, %p)\n", iface, dwReserved, ppDataObject); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_GetClipboardData(This->pOleDelegate, dwReserved, ppDataObject); end_object_call( This ); } return hr; } static HRESULT WINAPI DefaultHandler_DoVerb( IOleObject* iface, LONG iVerb, struct tagMSG* lpmsg, IOleClientSite* pActiveSite, LONG lindex, HWND hwndParent, LPCRECT lprcPosRect) { DefaultHandler *This = impl_from_IOleObject(iface); IRunnableObject *pRunnableObj = &This->IRunnableObject_iface; HRESULT hr; TRACE("(%d, %p, %p, %d, %p, %s)\n", iVerb, lpmsg, pActiveSite, lindex, hwndParent, wine_dbgstr_rect(lprcPosRect)); hr = IRunnableObject_Run(pRunnableObj, NULL); if (FAILED(hr)) return hr; start_object_call( This ); hr = IOleObject_DoVerb(This->pOleDelegate, iVerb, lpmsg, pActiveSite, lindex, hwndParent, lprcPosRect); end_object_call( This ); return hr; } /************************************************************************ * DefaultHandler_EnumVerbs (IOleObject) * * The default handler implementation of this method simply delegates * to OleRegEnumVerbs * * See Windows documentation for more details on IOleObject methods. 
*/ static HRESULT WINAPI DefaultHandler_EnumVerbs( IOleObject* iface, IEnumOLEVERB** ppEnumOleVerb) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = OLE_S_USEREG; TRACE("(%p, %p)\n", iface, ppEnumOleVerb); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_EnumVerbs(This->pOleDelegate, ppEnumOleVerb); end_object_call( This ); } if (hr == OLE_S_USEREG) return OleRegEnumVerbs(&This->clsid, ppEnumOleVerb); else return hr; } static HRESULT WINAPI DefaultHandler_Update( IOleObject* iface) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr; TRACE("(%p)\n", iface); if (!object_is_running(This)) { FIXME("Should run object\n"); return E_NOTIMPL; } start_object_call( This ); hr = IOleObject_Update(This->pOleDelegate); end_object_call( This ); return hr; } /************************************************************************ * DefaultHandler_IsUpToDate (IOleObject) * * This method is meaningless if the server is not running * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_IsUpToDate( IOleObject* iface) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = OLE_E_NOTRUNNING; TRACE("(%p)\n", iface); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_IsUpToDate(This->pOleDelegate); end_object_call( This ); } return hr; } /************************************************************************ * DefaultHandler_GetUserClassID (IOleObject) * * TODO: Map to a new class ID if emulation is active. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_GetUserClassID( IOleObject* iface, CLSID* pClsid) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr; TRACE("(%p, %p)\n", iface, pClsid); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_GetUserClassID(This->pOleDelegate, pClsid); end_object_call( This ); return hr; } if (!pClsid) return E_POINTER; *pClsid = This->clsid; return S_OK; } /************************************************************************ * DefaultHandler_GetUserType (IOleObject) * * The default handler implementation of this method simply delegates * to OleRegGetUserType * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_GetUserType( IOleObject* iface, DWORD dwFormOfType, LPOLESTR* pszUserType) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr; TRACE("(%p, %d, %p)\n", iface, dwFormOfType, pszUserType); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_GetUserType(This->pOleDelegate, dwFormOfType, pszUserType); end_object_call( This ); return hr; } return OleRegGetUserType(&This->clsid, dwFormOfType, pszUserType); } /************************************************************************ * DefaultHandler_SetExtent (IOleObject) * * This method is meaningless if the server is not running * * See Windows documentation for more details on IOleObject methods. 
*/ static HRESULT WINAPI DefaultHandler_SetExtent( IOleObject* iface, DWORD dwDrawAspect, SIZEL* psizel) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = OLE_E_NOTRUNNING; TRACE("(%p, %x, (%d x %d))\n", iface, dwDrawAspect, psizel->cx, psizel->cy); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_SetExtent(This->pOleDelegate, dwDrawAspect, psizel); end_object_call( This ); } return hr; } /************************************************************************ * DefaultHandler_GetExtent (IOleObject) * * The default handler's implementation of this method returns uses * the cache to locate the aspect and extract the extent from it. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_GetExtent( IOleObject* iface, DWORD dwDrawAspect, SIZEL* psizel) { DVTARGETDEVICE* targetDevice; IViewObject2* cacheView = NULL; HRESULT hres; DefaultHandler *This = impl_from_IOleObject(iface); TRACE("(%p, %x, %p)\n", iface, dwDrawAspect, psizel); if (object_is_running(This)) { start_object_call( This ); hres = IOleObject_GetExtent(This->pOleDelegate, dwDrawAspect, psizel); end_object_call( This ); return hres; } hres = IUnknown_QueryInterface(This->dataCache, &IID_IViewObject2, (void**)&cacheView); if (FAILED(hres)) return E_UNEXPECTED; /* * Prepare the call to the cache's GetExtent method. * * Here we would build a valid DVTARGETDEVICE structure * but, since we are calling into the data cache, we * know its implementation and we'll skip this * extra work until later. */ targetDevice = NULL; hres = IViewObject2_GetExtent(cacheView, dwDrawAspect, -1, targetDevice, psizel); IViewObject2_Release(cacheView); return hres; } /************************************************************************ * DefaultHandler_Advise (IOleObject) * * The default handler's implementation of this method simply * delegates to the OleAdviseHolder. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_Advise( IOleObject* iface, IAdviseSink* pAdvSink, DWORD* pdwConnection) { HRESULT hres = S_OK; DefaultHandler *This = impl_from_IOleObject(iface); TRACE("(%p, %p, %p)\n", iface, pAdvSink, pdwConnection); /* Make sure we have an advise holder before we start. */ if (!This->oleAdviseHolder) hres = CreateOleAdviseHolder(&This->oleAdviseHolder); if (SUCCEEDED(hres)) hres = IOleAdviseHolder_Advise(This->oleAdviseHolder, pAdvSink, pdwConnection); return hres; } /************************************************************************ * DefaultHandler_Unadvise (IOleObject) * * The default handler's implementation of this method simply * delegates to the OleAdviseHolder. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_Unadvise( IOleObject* iface, DWORD dwConnection) { DefaultHandler *This = impl_from_IOleObject(iface); TRACE("(%p, %d)\n", iface, dwConnection); /* * If we don't have an advise holder yet, it means we don't have * a connection. */ if (!This->oleAdviseHolder) return OLE_E_NOCONNECTION; return IOleAdviseHolder_Unadvise(This->oleAdviseHolder, dwConnection); } /************************************************************************ * DefaultHandler_EnumAdvise (IOleObject) * * The default handler's implementation of this method simply * delegates to the OleAdviseHolder. * * See Windows documentation for more details on IOleObject methods. 
*/ static HRESULT WINAPI DefaultHandler_EnumAdvise( IOleObject* iface, IEnumSTATDATA** ppenumAdvise) { DefaultHandler *This = impl_from_IOleObject(iface); TRACE("(%p, %p)\n", iface, ppenumAdvise); if (!ppenumAdvise) return E_POINTER; *ppenumAdvise = NULL; if (!This->oleAdviseHolder) return S_OK; return IOleAdviseHolder_EnumAdvise(This->oleAdviseHolder, ppenumAdvise); } /************************************************************************ * DefaultHandler_GetMiscStatus (IOleObject) * * The default handler's implementation of this method simply delegates * to OleRegGetMiscStatus. * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_GetMiscStatus( IOleObject* iface, DWORD dwAspect, DWORD* pdwStatus) { HRESULT hres; DefaultHandler *This = impl_from_IOleObject(iface); TRACE("(%p, %x, %p)\n", iface, dwAspect, pdwStatus); if (object_is_running(This)) { start_object_call( This ); hres = IOleObject_GetMiscStatus(This->pOleDelegate, dwAspect, pdwStatus); end_object_call( This ); return hres; } hres = OleRegGetMiscStatus(&This->clsid, dwAspect, pdwStatus); if (FAILED(hres)) *pdwStatus = 0; return hres; } /************************************************************************ * DefaultHandler_SetColorScheme (IOleObject) * * This method is meaningless if the server is not running * * See Windows documentation for more details on IOleObject methods. */ static HRESULT WINAPI DefaultHandler_SetColorScheme( IOleObject* iface, struct tagLOGPALETTE* pLogpal) { DefaultHandler *This = impl_from_IOleObject(iface); HRESULT hr = OLE_E_NOTRUNNING; TRACE("(%p, %p))\n", iface, pLogpal); if (object_is_running(This)) { start_object_call( This ); hr = IOleObject_SetColorScheme(This->pOleDelegate, pLogpal); end_object_call( This ); } return hr; } /********************************************************* * Methods implementation for the IDataObject part of * the DefaultHandler class. */ /************************************************************************ * DefaultHandler_IDataObject_QueryInterface (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static HRESULT WINAPI DefaultHandler_IDataObject_QueryInterface( IDataObject* iface, REFIID riid, void** ppvObject) { DefaultHandler *This = impl_from_IDataObject(iface); return IUnknown_QueryInterface(This->outerUnknown, riid, ppvObject); } /************************************************************************ * DefaultHandler_IDataObject_AddRef (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static ULONG WINAPI DefaultHandler_IDataObject_AddRef( IDataObject* iface) { DefaultHandler *This = impl_from_IDataObject(iface); return IUnknown_AddRef(This->outerUnknown); } /************************************************************************ * DefaultHandler_IDataObject_Release (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static ULONG WINAPI DefaultHandler_IDataObject_Release( IDataObject* iface) { DefaultHandler *This = impl_from_IDataObject(iface); return IUnknown_Release(This->outerUnknown); } /************************************************************************ * DefaultHandler_GetData * * Get Data from a source dataobject using format pformatetcIn->cfFormat * See Windows documentation for more details on GetData. * Default handler's implementation of this method delegates to the cache. 
*/ static HRESULT WINAPI DefaultHandler_GetData( IDataObject* iface, LPFORMATETC pformatetcIn, STGMEDIUM* pmedium) { IDataObject* cacheDataObject = NULL; HRESULT hres; DefaultHandler *This = impl_from_IDataObject(iface); TRACE("(%p, %p, %p)\n", iface, pformatetcIn, pmedium); hres = IUnknown_QueryInterface(This->dataCache, &IID_IDataObject, (void**)&cacheDataObject); if (FAILED(hres)) return E_UNEXPECTED; hres = IDataObject_GetData(cacheDataObject, pformatetcIn, pmedium); IDataObject_Release(cacheDataObject); if (hres == S_OK) return hres; if (object_is_running( This )) { start_object_call(This); hres = IDataObject_GetData(This->pDataDelegate, pformatetcIn, pmedium); end_object_call(This); if (hres == S_OK) return hres; } /* Query running state again, as the object may have closed during _GetData call */ if (!object_is_running( This )) hres = OLE_E_NOTRUNNING; return hres; } static HRESULT WINAPI DefaultHandler_GetDataHere( IDataObject* iface, LPFORMATETC pformatetc, STGMEDIUM* pmedium) { FIXME(": Stub\n"); return E_NOTIMPL; } /************************************************************************ * DefaultHandler_QueryGetData (IDataObject) * * The default handler's implementation of this method delegates to * the cache. * * See Windows documentation for more details on IDataObject methods. */ static HRESULT WINAPI DefaultHandler_QueryGetData( IDataObject* iface, LPFORMATETC pformatetc) { IDataObject* cacheDataObject = NULL; HRESULT hres; DefaultHandler *This = impl_from_IDataObject(iface); TRACE("(%p, %p)\n", iface, pformatetc); hres = IUnknown_QueryInterface(This->dataCache, &IID_IDataObject, (void**)&cacheDataObject); if (FAILED(hres)) return E_UNEXPECTED; hres = IDataObject_QueryGetData(cacheDataObject, pformatetc); IDataObject_Release(cacheDataObject); if (hres == S_OK) return hres; if (object_is_running( This )) { start_object_call( This ); hres = IDataObject_QueryGetData(This->pDataDelegate, pformatetc); end_object_call( This ); if (hres == S_OK) return hres; } /* Query running state again, as the object may have closed during _QueryGetData call */ if (!object_is_running( This )) hres = OLE_E_NOTRUNNING; return hres; } /************************************************************************ * DefaultHandler_GetCanonicalFormatEtc (IDataObject) * * This method is meaningless if the server is not running * * See Windows documentation for more details on IDataObject methods. */ static HRESULT WINAPI DefaultHandler_GetCanonicalFormatEtc( IDataObject* iface, LPFORMATETC pformatetcIn, LPFORMATETC pformatetcOut) { DefaultHandler *This = impl_from_IDataObject(iface); HRESULT hr; TRACE("(%p, %p, %p)\n", iface, pformatetcIn, pformatetcOut); if (!object_is_running( This )) return OLE_E_NOTRUNNING; start_object_call( This ); hr = IDataObject_GetCanonicalFormatEtc(This->pDataDelegate, pformatetcIn, pformatetcOut); end_object_call( This ); return hr; } /************************************************************************ * DefaultHandler_SetData (IDataObject) * * The default handler's implementation of this method delegates to * the cache. * * See Windows documentation for more details on IDataObject methods. 
*/ static HRESULT WINAPI DefaultHandler_SetData( IDataObject* iface, LPFORMATETC pformatetc, STGMEDIUM* pmedium, BOOL fRelease) { DefaultHandler *This = impl_from_IDataObject(iface); IDataObject* cacheDataObject = NULL; HRESULT hres; TRACE("(%p, %p, %p, %d)\n", iface, pformatetc, pmedium, fRelease); hres = IUnknown_QueryInterface(This->dataCache, &IID_IDataObject, (void**)&cacheDataObject); if (FAILED(hres)) return E_UNEXPECTED; hres = IDataObject_SetData(cacheDataObject, pformatetc, pmedium, fRelease); IDataObject_Release(cacheDataObject); return hres; } /************************************************************************ * DefaultHandler_EnumFormatEtc (IDataObject) * * The default handler's implementation of This method simply delegates * to OleRegEnumFormatEtc. * * See Windows documentation for more details on IDataObject methods. */ static HRESULT WINAPI DefaultHandler_EnumFormatEtc( IDataObject* iface, DWORD dwDirection, IEnumFORMATETC** ppenumFormatEtc) { DefaultHandler *This = impl_from_IDataObject(iface); TRACE("(%p, %x, %p)\n", iface, dwDirection, ppenumFormatEtc); return OleRegEnumFormatEtc(&This->clsid, dwDirection, ppenumFormatEtc); } /************************************************************************ * DefaultHandler_DAdvise (IDataObject) * * The default handler's implementation of this method simply * delegates to the DataAdviseHolder. * * See Windows documentation for more details on IDataObject methods. */ static HRESULT WINAPI DefaultHandler_DAdvise( IDataObject* iface, FORMATETC* pformatetc, DWORD advf, IAdviseSink* pAdvSink, DWORD* pdwConnection) { HRESULT hres = S_OK; DefaultHandler *This = impl_from_IDataObject(iface); TRACE("(%p, %p, %d, %p, %p)\n", iface, pformatetc, advf, pAdvSink, pdwConnection); /* Make sure we have a data advise holder before we start. */ if (!This->dataAdviseHolder) { hres = CreateDataAdviseHolder(&This->dataAdviseHolder); if (SUCCEEDED(hres) && object_is_running( This )) { start_object_call( This ); DataAdviseHolder_OnConnect(This->dataAdviseHolder, This->pDataDelegate); end_object_call( This ); } } if (SUCCEEDED(hres)) hres = IDataAdviseHolder_Advise(This->dataAdviseHolder, iface, pformatetc, advf, pAdvSink, pdwConnection); return hres; } /************************************************************************ * DefaultHandler_DUnadvise (IDataObject) * * The default handler's implementation of this method simply * delegates to the DataAdviseHolder. * * See Windows documentation for more details on IDataObject methods. */ static HRESULT WINAPI DefaultHandler_DUnadvise( IDataObject* iface, DWORD dwConnection) { DefaultHandler *This = impl_from_IDataObject(iface); TRACE("(%p, %d)\n", iface, dwConnection); /* * If we don't have a data advise holder yet, it means that * we don't have any connections.. */ if (!This->dataAdviseHolder) return OLE_E_NOCONNECTION; return IDataAdviseHolder_Unadvise(This->dataAdviseHolder, dwConnection); } /************************************************************************ * DefaultHandler_EnumDAdvise (IDataObject) * * The default handler's implementation of this method simply * delegates to the DataAdviseHolder. * * See Windows documentation for more details on IDataObject methods. */ static HRESULT WINAPI DefaultHandler_EnumDAdvise( IDataObject* iface, IEnumSTATDATA** ppenumAdvise) { DefaultHandler *This = impl_from_IDataObject(iface); TRACE("(%p, %p)\n", iface, ppenumAdvise); if (!ppenumAdvise) return E_POINTER; *ppenumAdvise = NULL; /* If we have a data advise holder object, delegate. 
*/ if (This->dataAdviseHolder) return IDataAdviseHolder_EnumAdvise(This->dataAdviseHolder, ppenumAdvise); return S_OK; } /********************************************************* * Methods implementation for the IRunnableObject part * of the DefaultHandler class. */ /************************************************************************ * DefaultHandler_IRunnableObject_QueryInterface (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static HRESULT WINAPI DefaultHandler_IRunnableObject_QueryInterface( IRunnableObject* iface, REFIID riid, void** ppvObject) { DefaultHandler *This = impl_from_IRunnableObject(iface); return IUnknown_QueryInterface(This->outerUnknown, riid, ppvObject); } /************************************************************************ * DefaultHandler_IRunnableObject_AddRef (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static ULONG WINAPI DefaultHandler_IRunnableObject_AddRef( IRunnableObject* iface) { DefaultHandler *This = impl_from_IRunnableObject(iface); return IUnknown_AddRef(This->outerUnknown); } /************************************************************************ * DefaultHandler_IRunnableObject_Release (IUnknown) * * See Windows documentation for more details on IUnknown methods. */ static ULONG WINAPI DefaultHandler_IRunnableObject_Release( IRunnableObject* iface) { DefaultHandler *This = impl_from_IRunnableObject(iface); return IUnknown_Release(This->outerUnknown); } /************************************************************************ * DefaultHandler_GetRunningClass (IRunnableObject) * * See Windows documentation for more details on IRunnableObject methods. */ static HRESULT WINAPI DefaultHandler_GetRunningClass( IRunnableObject* iface, LPCLSID lpClsid) { FIXME("()\n"); return S_OK; } static HRESULT WINAPI DefaultHandler_Run( IRunnableObject* iface, IBindCtx* pbc) { DefaultHandler *This = impl_from_IRunnableObject(iface); HRESULT hr; IOleCacheControl *cache_ctrl; FIXME("(%p): semi-stub\n", pbc); /* already running? 
if so nothing to do */ if (object_is_running(This)) return S_OK; release_delegates(This); hr = CoCreateInstance(&This->clsid, NULL, CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER, &IID_IOleObject, (void **)&This->pOleDelegate); if (FAILED(hr)) return hr; hr = IOleObject_Advise(This->pOleDelegate, &This->IAdviseSink_iface, &This->dwAdvConn); if (FAILED(hr)) goto fail; if (This->clientSite) { hr = IOleObject_SetClientSite(This->pOleDelegate, This->clientSite); if (FAILED(hr)) goto fail; } hr = IOleObject_QueryInterface(This->pOleDelegate, &IID_IPersistStorage, (void **)&This->pPSDelegate); if (FAILED(hr)) goto fail; if (This->storage_state == storage_state_initialised) hr = IPersistStorage_InitNew(This->pPSDelegate, This->storage); else if (This->storage_state == storage_state_loaded) hr = IPersistStorage_Load(This->pPSDelegate, This->storage); if (FAILED(hr)) goto fail; if (This->containerApp) { hr = IOleObject_SetHostNames(This->pOleDelegate, This->containerApp, This->containerObj); if (FAILED(hr)) goto fail; } /* FIXME: do more stuff here: * - IOleObject_GetMiscStatus * - IOleObject_GetMoniker */ hr = IOleObject_QueryInterface(This->pOleDelegate, &IID_IDataObject, (void **)&This->pDataDelegate); if (FAILED(hr)) goto fail; This->object_state = object_state_running; if (This->dataAdviseHolder) { hr = DataAdviseHolder_OnConnect(This->dataAdviseHolder, This->pDataDelegate); if (FAILED(hr)) goto fail; } hr = IUnknown_QueryInterface( This->dataCache, &IID_IOleCacheControl, (void **)&cache_ctrl ); if (FAILED(hr)) goto fail; hr = IOleCacheControl_OnRun( cache_ctrl, This->pDataDelegate ); IOleCacheControl_Release( cache_ctrl ); if (FAILED(hr)) goto fail; return hr; fail: DefaultHandler_Stop(This); return hr; } /************************************************************************ * DefaultHandler_IsRunning (IRunnableObject) * * See Windows documentation for more details on IRunnableObject methods. */ static BOOL WINAPI DefaultHandler_IsRunning( IRunnableObject* iface) { DefaultHandler *This = impl_from_IRunnableObject(iface); TRACE("()\n"); if (This->object_state == object_state_running) return TRUE; else return FALSE; } /************************************************************************ * DefaultHandler_LockRunning (IRunnableObject) * * See Windows documentation for more details on IRunnableObject methods. */ static HRESULT WINAPI DefaultHandler_LockRunning( IRunnableObject* iface, BOOL fLock, BOOL fLastUnlockCloses) { FIXME("()\n"); return S_OK; } /************************************************************************ * DefaultHandler_SetContainedObject (IRunnableObject) * * See Windows documentation for more details on IRunnableObject methods. 
*/ static HRESULT WINAPI DefaultHandler_SetContainedObject( IRunnableObject* iface, BOOL fContained) { FIXME("()\n"); return S_OK; } static HRESULT WINAPI DefaultHandler_IAdviseSink_QueryInterface( IAdviseSink *iface, REFIID riid, void **ppvObject) { if (IsEqualIID(riid, &IID_IUnknown) || IsEqualIID(riid, &IID_IAdviseSink)) { *ppvObject = iface; IAdviseSink_AddRef(iface); return S_OK; } return E_NOINTERFACE; } static ULONG WINAPI DefaultHandler_IAdviseSink_AddRef( IAdviseSink *iface) { DefaultHandler *This = impl_from_IAdviseSink(iface); return IUnknown_AddRef(&This->IUnknown_iface); } static ULONG WINAPI DefaultHandler_IAdviseSink_Release( IAdviseSink *iface) { DefaultHandler *This = impl_from_IAdviseSink(iface); return IUnknown_Release(&This->IUnknown_iface); } static void WINAPI DefaultHandler_IAdviseSink_OnDataChange( IAdviseSink *iface, FORMATETC *pFormatetc, STGMEDIUM *pStgmed) { FIXME(": stub\n"); } static void WINAPI DefaultHandler_IAdviseSink_OnViewChange( IAdviseSink *iface, DWORD dwAspect, LONG lindex) { FIXME(": stub\n"); } static void WINAPI DefaultHandler_IAdviseSink_OnRename( IAdviseSink *iface, IMoniker *pmk) { DefaultHandler *This = impl_from_IAdviseSink(iface); TRACE("(%p)\n", pmk); if (This->oleAdviseHolder) IOleAdviseHolder_SendOnRename(This->oleAdviseHolder, pmk); } static void WINAPI DefaultHandler_IAdviseSink_OnSave( IAdviseSink *iface) { DefaultHandler *This = impl_from_IAdviseSink(iface); TRACE("()\n"); if (This->oleAdviseHolder) IOleAdviseHolder_SendOnSave(This->oleAdviseHolder); } static void WINAPI DefaultHandler_IAdviseSink_OnClose( IAdviseSink *iface) { DefaultHandler *This = impl_from_IAdviseSink(iface); TRACE("()\n"); if (This->oleAdviseHolder) IOleAdviseHolder_SendOnClose(This->oleAdviseHolder); if(!This->in_call) DefaultHandler_Stop(This); else { TRACE("OnClose during call. 
Deferring shutdown\n"); This->object_state = object_state_deferred_close; } } /************************************************************************ * DefaultHandler_IPersistStorage_QueryInterface * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_QueryInterface( IPersistStorage* iface, REFIID riid, void** ppvObject) { DefaultHandler *This = impl_from_IPersistStorage(iface); return IUnknown_QueryInterface(This->outerUnknown, riid, ppvObject); } /************************************************************************ * DefaultHandler_IPersistStorage_AddRef * */ static ULONG WINAPI DefaultHandler_IPersistStorage_AddRef( IPersistStorage* iface) { DefaultHandler *This = impl_from_IPersistStorage(iface); return IUnknown_AddRef(This->outerUnknown); } /************************************************************************ * DefaultHandler_IPersistStorage_Release * */ static ULONG WINAPI DefaultHandler_IPersistStorage_Release( IPersistStorage* iface) { DefaultHandler *This = impl_from_IPersistStorage(iface); return IUnknown_Release(This->outerUnknown); } /************************************************************************ * DefaultHandler_IPersistStorage_GetClassID * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_GetClassID( IPersistStorage* iface, CLSID* clsid) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)->(%p)\n", iface, clsid); if(object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_GetClassID(This->pPSDelegate, clsid); end_object_call( This ); } else hr = IPersistStorage_GetClassID(This->dataCache_PersistStg, clsid); return hr; } /************************************************************************ * DefaultHandler_IPersistStorage_IsDirty * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_IsDirty( IPersistStorage* iface) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)\n", iface); hr = IPersistStorage_IsDirty(This->dataCache_PersistStg); if(hr != S_FALSE) return hr; if(object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_IsDirty(This->pPSDelegate); end_object_call( This ); } return hr; } /*********************************************************************** * * The format of '\1Ole' stream is as follows: * * DWORD Version == 0x02000001 * DWORD Flags - low bit set indicates the object is a link otherwise it's embedded. * DWORD LinkupdateOption - [MS-OLEDS describes this as an implementation specific hint * supplied by the app that creates the data structure. May be * ignored on processing]. * * DWORD Reserved == 0 * DWORD MonikerStreamSize - size of the rest of the data (ie CLSID + moniker stream data). 
* CLSID clsid - class id of object capable of processing the moniker * BYTE data[] - moniker data for a link */ static const WCHAR OleStream[] = {1,'O','l','e',0}; typedef struct { DWORD version; DWORD flags; DWORD link_update_opt; DWORD res; DWORD moniker_size; } ole_stream_header_t; static const DWORD ole_stream_version = 0x02000001; static HRESULT load_ole_stream(DefaultHandler *This, IStorage *storage) { IStream *stream; HRESULT hr; hr = IStorage_OpenStream(storage, OleStream, NULL, STGM_READ | STGM_SHARE_EXCLUSIVE, 0, &stream); if(SUCCEEDED(hr)) { DWORD read; ole_stream_header_t header; hr = IStream_Read(stream, &header, sizeof(header), &read); if(hr == S_OK && read == sizeof(header) && header.version == ole_stream_version) { if(header.flags & 1) { /* FIXME: Read the moniker and deal with the link */ FIXME("Linked objects are not supported yet\n"); } } else { WARN("Incorrect OleStream header\n"); hr = DV_E_CLIPFORMAT; } IStream_Release(stream); } else hr = STORAGE_CreateOleStream(storage, 0); return hr; } /************************************************************************ * DefaultHandler_IPersistStorage_InitNew * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_InitNew( IPersistStorage* iface, IStorage* pStg) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)->(%p)\n", iface, pStg); hr = STORAGE_CreateOleStream(pStg, 0); if (hr != S_OK) return hr; hr = IPersistStorage_InitNew(This->dataCache_PersistStg, pStg); if(SUCCEEDED(hr) && object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_InitNew(This->pPSDelegate, pStg); end_object_call( This ); } if(SUCCEEDED(hr)) { IStorage_AddRef(pStg); This->storage = pStg; This->storage_state = storage_state_initialised; } return hr; } /************************************************************************ * DefaultHandler_IPersistStorage_Load * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_Load( IPersistStorage* iface, IStorage* pStg) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)->(%p)\n", iface, pStg); hr = load_ole_stream(This, pStg); if(SUCCEEDED(hr)) hr = IPersistStorage_Load(This->dataCache_PersistStg, pStg); if(SUCCEEDED(hr) && object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_Load(This->pPSDelegate, pStg); end_object_call( This ); } if(SUCCEEDED(hr)) { IStorage_AddRef(pStg); This->storage = pStg; This->storage_state = storage_state_loaded; } return hr; } /************************************************************************ * DefaultHandler_IPersistStorage_Save * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_Save( IPersistStorage* iface, IStorage* pStgSave, BOOL fSameAsLoad) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)->(%p, %d)\n", iface, pStgSave, fSameAsLoad); hr = IPersistStorage_Save(This->dataCache_PersistStg, pStgSave, fSameAsLoad); if(SUCCEEDED(hr) && object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_Save(This->pPSDelegate, pStgSave, fSameAsLoad); end_object_call( This ); } return hr; } /************************************************************************ * DefaultHandler_IPersistStorage_SaveCompleted * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_SaveCompleted( IPersistStorage* iface, IStorage* pStgNew) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)->(%p)\n", iface, pStgNew); hr = IPersistStorage_SaveCompleted(This->dataCache_PersistStg, pStgNew); if(SUCCEEDED(hr) && 
object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_SaveCompleted(This->pPSDelegate, pStgNew); end_object_call( This ); } if(pStgNew) { IStorage_AddRef(pStgNew); if(This->storage) IStorage_Release(This->storage); This->storage = pStgNew; This->storage_state = storage_state_loaded; } return hr; } /************************************************************************ * DefaultHandler_IPersistStorage_HandsOffStorage * */ static HRESULT WINAPI DefaultHandler_IPersistStorage_HandsOffStorage( IPersistStorage* iface) { DefaultHandler *This = impl_from_IPersistStorage(iface); HRESULT hr; TRACE("(%p)\n", iface); hr = IPersistStorage_HandsOffStorage(This->dataCache_PersistStg); if(SUCCEEDED(hr) && object_is_running(This)) { start_object_call( This ); hr = IPersistStorage_HandsOffStorage(This->pPSDelegate); end_object_call( This ); } if(This->storage) IStorage_Release(This->storage); This->storage = NULL; This->storage_state = storage_state_uninitialised; return hr; } /* * Virtual function tables for the DefaultHandler class. */ static const IOleObjectVtbl DefaultHandler_IOleObject_VTable = { DefaultHandler_QueryInterface, DefaultHandler_AddRef, DefaultHandler_Release, DefaultHandler_SetClientSite, DefaultHandler_GetClientSite, DefaultHandler_SetHostNames, DefaultHandler_Close, DefaultHandler_SetMoniker, DefaultHandler_GetMoniker, DefaultHandler_InitFromData, DefaultHandler_GetClipboardData, DefaultHandler_DoVerb, DefaultHandler_EnumVerbs, DefaultHandler_Update, DefaultHandler_IsUpToDate, DefaultHandler_GetUserClassID, DefaultHandler_GetUserType, DefaultHandler_SetExtent, DefaultHandler_GetExtent, DefaultHandler_Advise, DefaultHandler_Unadvise, DefaultHandler_EnumAdvise, DefaultHandler_GetMiscStatus, DefaultHandler_SetColorScheme }; static const IUnknownVtbl DefaultHandler_NDIUnknown_VTable = { DefaultHandler_NDIUnknown_QueryInterface, DefaultHandler_NDIUnknown_AddRef, DefaultHandler_NDIUnknown_Release, }; static const IDataObjectVtbl DefaultHandler_IDataObject_VTable = { DefaultHandler_IDataObject_QueryInterface, DefaultHandler_IDataObject_AddRef, DefaultHandler_IDataObject_Release, DefaultHandler_GetData, DefaultHandler_GetDataHere, DefaultHandler_QueryGetData, DefaultHandler_GetCanonicalFormatEtc, DefaultHandler_SetData, DefaultHandler_EnumFormatEtc, DefaultHandler_DAdvise, DefaultHandler_DUnadvise, DefaultHandler_EnumDAdvise }; static const IRunnableObjectVtbl DefaultHandler_IRunnableObject_VTable = { DefaultHandler_IRunnableObject_QueryInterface, DefaultHandler_IRunnableObject_AddRef, DefaultHandler_IRunnableObject_Release, DefaultHandler_GetRunningClass, DefaultHandler_Run, DefaultHandler_IsRunning, DefaultHandler_LockRunning, DefaultHandler_SetContainedObject }; static const IAdviseSinkVtbl DefaultHandler_IAdviseSink_VTable = { DefaultHandler_IAdviseSink_QueryInterface, DefaultHandler_IAdviseSink_AddRef, DefaultHandler_IAdviseSink_Release, DefaultHandler_IAdviseSink_OnDataChange, DefaultHandler_IAdviseSink_OnViewChange, DefaultHandler_IAdviseSink_OnRename, DefaultHandler_IAdviseSink_OnSave, DefaultHandler_IAdviseSink_OnClose }; static const IPersistStorageVtbl DefaultHandler_IPersistStorage_VTable = { DefaultHandler_IPersistStorage_QueryInterface, DefaultHandler_IPersistStorage_AddRef, DefaultHandler_IPersistStorage_Release, DefaultHandler_IPersistStorage_GetClassID, DefaultHandler_IPersistStorage_IsDirty, DefaultHandler_IPersistStorage_InitNew, DefaultHandler_IPersistStorage_Load, DefaultHandler_IPersistStorage_Save, DefaultHandler_IPersistStorage_SaveCompleted, 
DefaultHandler_IPersistStorage_HandsOffStorage }; /********************************************************* * Methods implementation for the DefaultHandler class. */ static DefaultHandler* DefaultHandler_Construct( REFCLSID clsid, LPUNKNOWN pUnkOuter, DWORD flags, IClassFactory *pCF) { DefaultHandler* This = NULL; HRESULT hr; This = HeapAlloc(GetProcessHeap(), 0, sizeof(DefaultHandler)); if (!This) return This; This->IOleObject_iface.lpVtbl = &DefaultHandler_IOleObject_VTable; This->IUnknown_iface.lpVtbl = &DefaultHandler_NDIUnknown_VTable; This->IDataObject_iface.lpVtbl = &DefaultHandler_IDataObject_VTable; This->IRunnableObject_iface.lpVtbl = &DefaultHandler_IRunnableObject_VTable; This->IAdviseSink_iface.lpVtbl = &DefaultHandler_IAdviseSink_VTable; This->IPersistStorage_iface.lpVtbl = &DefaultHandler_IPersistStorage_VTable; This->inproc_server = (flags & EMBDHLP_INPROC_SERVER) != 0; /* * Start with one reference count. The caller of this function * must release the interface pointer when it is done. */ This->ref = 1; /* * Initialize the outer unknown * We don't keep a reference on the outer unknown since, the way * aggregation works, our lifetime is at least as large as its * lifetime. */ if (!pUnkOuter) pUnkOuter = &This->IUnknown_iface; This->outerUnknown = pUnkOuter; /* * Create a datacache object. * We aggregate with the datacache. Make sure we pass our outer * unknown as the datacache's outer unknown. */ hr = CreateDataCache(This->outerUnknown, clsid, &IID_IUnknown, (void**)&This->dataCache); if(SUCCEEDED(hr)) { hr = IUnknown_QueryInterface(This->dataCache, &IID_IPersistStorage, (void**)&This->dataCache_PersistStg); /* keeping a reference to This->dataCache_PersistStg causes us to keep a * reference on the outer object */ if (SUCCEEDED(hr)) IUnknown_Release(This->outerUnknown); else IUnknown_Release(This->dataCache); } if(FAILED(hr)) { ERR("Unexpected error creating data cache\n"); HeapFree(GetProcessHeap(), 0, This); return NULL; } This->clsid = *clsid; This->clientSite = NULL; This->oleAdviseHolder = NULL; This->dataAdviseHolder = NULL; This->containerApp = NULL; This->containerObj = NULL; This->pOleDelegate = NULL; This->pPSDelegate = NULL; This->pDataDelegate = NULL; This->object_state = object_state_not_running; This->in_call = 0; This->dwAdvConn = 0; This->storage = NULL; This->storage_state = storage_state_uninitialised; if (This->inproc_server && !(flags & EMBDHLP_DELAYCREATE)) { HRESULT hr; This->pCFObject = NULL; if (pCF) hr = IClassFactory_CreateInstance(pCF, NULL, &IID_IOleObject, (void **)&This->pOleDelegate); else hr = CoCreateInstance(&This->clsid, NULL, CLSCTX_INPROC_SERVER, &IID_IOleObject, (void **)&This->pOleDelegate); if (SUCCEEDED(hr)) hr = IOleObject_QueryInterface(This->pOleDelegate, &IID_IPersistStorage, (void **)&This->pPSDelegate); if (SUCCEEDED(hr)) hr = IOleObject_QueryInterface(This->pOleDelegate, &IID_IDataObject, (void **)&This->pDataDelegate); if (SUCCEEDED(hr)) This->object_state = object_state_running; if (FAILED(hr)) WARN("object creation failed with error %08x\n", hr); } else { This->pCFObject = pCF; if (pCF) IClassFactory_AddRef(pCF); } return This; } static void DefaultHandler_Destroy( DefaultHandler* This) { TRACE("(%p)\n", This); /* AddRef/Release may be called on this object during destruction. * Prevent the object being destroyed recursively by artificially raising * the reference count. 
*/ This->ref = 10000; /* release delegates */ DefaultHandler_Stop(This); HeapFree( GetProcessHeap(), 0, This->containerApp ); This->containerApp = NULL; HeapFree( GetProcessHeap(), 0, This->containerObj ); This->containerObj = NULL; if (This->dataCache) { /* to balance out the release of dataCache_PersistStg which will result * in a reference being released from the outer unknown */ IUnknown_AddRef(This->outerUnknown); IPersistStorage_Release(This->dataCache_PersistStg); IUnknown_Release(This->dataCache); This->dataCache_PersistStg = NULL; This->dataCache = NULL; } if (This->clientSite) { IOleClientSite_Release(This->clientSite); This->clientSite = NULL; } if (This->oleAdviseHolder) { IOleAdviseHolder_Release(This->oleAdviseHolder); This->oleAdviseHolder = NULL; } if (This->dataAdviseHolder) { IDataAdviseHolder_Release(This->dataAdviseHolder); This->dataAdviseHolder = NULL; } if (This->storage) { IStorage_Release(This->storage); This->storage = NULL; } if (This->pCFObject) { IClassFactory_Release(This->pCFObject); This->pCFObject = NULL; } HeapFree(GetProcessHeap(), 0, This); } /****************************************************************************** * OleCreateEmbeddingHelper [OLE32.@] */ HRESULT WINAPI OleCreateEmbeddingHelper( REFCLSID clsid, LPUNKNOWN pUnkOuter, DWORD flags, IClassFactory *pCF, REFIID riid, LPVOID* ppvObj) { DefaultHandler* newHandler = NULL; HRESULT hr = S_OK; TRACE("(%s, %p, %08x, %p, %s, %p)\n", debugstr_guid(clsid), pUnkOuter, flags, pCF, debugstr_guid(riid), ppvObj); if (!ppvObj) return E_POINTER; *ppvObj = NULL; /* * If This handler is constructed for aggregation, make sure * the caller is requesting the IUnknown interface. * This is necessary because it's the only time the non-delegating * IUnknown pointer can be returned to the outside. */ if (pUnkOuter && !IsEqualIID(&IID_IUnknown, riid)) return CLASS_E_NOAGGREGATION; /* * Try to construct a new instance of the class. */ newHandler = DefaultHandler_Construct(clsid, pUnkOuter, flags, pCF); if (!newHandler) return E_OUTOFMEMORY; /* * Make sure it supports the interface required by the caller. */ hr = IUnknown_QueryInterface(&newHandler->IUnknown_iface, riid, ppvObj); /* * Release the reference obtained in the constructor. If * the QueryInterface was unsuccessful, it will free the class. 
*/ IUnknown_Release(&newHandler->IUnknown_iface); return hr; } /****************************************************************************** * OleCreateDefaultHandler [OLE32.@] */ HRESULT WINAPI OleCreateDefaultHandler(REFCLSID clsid, LPUNKNOWN pUnkOuter, REFIID riid, LPVOID* ppvObj) { TRACE("(%s, %p, %s, %p)\n", debugstr_guid(clsid), pUnkOuter,debugstr_guid(riid), ppvObj); return OleCreateEmbeddingHelper(clsid, pUnkOuter, EMBDHLP_INPROC_HANDLER | EMBDHLP_CREATENOW, NULL, riid, ppvObj); } typedef struct HandlerCF { IClassFactory IClassFactory_iface; LONG refs; CLSID clsid; } HandlerCF; static inline HandlerCF *impl_from_IClassFactory(IClassFactory *iface) { return CONTAINING_RECORD(iface, HandlerCF, IClassFactory_iface); } static HRESULT WINAPI HandlerCF_QueryInterface(LPCLASSFACTORY iface,REFIID riid, LPVOID *ppv) { *ppv = NULL; if (IsEqualIID(riid,&IID_IUnknown) || IsEqualIID(riid,&IID_IClassFactory)) { *ppv = iface; IClassFactory_AddRef(iface); return S_OK; } return E_NOINTERFACE; } static ULONG WINAPI HandlerCF_AddRef(LPCLASSFACTORY iface) { HandlerCF *This = impl_from_IClassFactory(iface); return InterlockedIncrement(&This->refs); } static ULONG WINAPI HandlerCF_Release(LPCLASSFACTORY iface) { HandlerCF *This = impl_from_IClassFactory(iface); ULONG refs = InterlockedDecrement(&This->refs); if (!refs) HeapFree(GetProcessHeap(), 0, This); return refs; } static HRESULT WINAPI HandlerCF_CreateInstance(LPCLASSFACTORY iface, LPUNKNOWN pUnk, REFIID riid, LPVOID *ppv) { HandlerCF *This = impl_from_IClassFactory(iface); return OleCreateDefaultHandler(&This->clsid, pUnk, riid, ppv); } static HRESULT WINAPI HandlerCF_LockServer(LPCLASSFACTORY iface, BOOL fLock) { FIXME("(%d), stub!\n",fLock); return S_OK; } static const IClassFactoryVtbl HandlerClassFactoryVtbl = { HandlerCF_QueryInterface, HandlerCF_AddRef, HandlerCF_Release, HandlerCF_CreateInstance, HandlerCF_LockServer }; HRESULT HandlerCF_Create(REFCLSID rclsid, REFIID riid, LPVOID *ppv) { HRESULT hr; HandlerCF *This = HeapAlloc(GetProcessHeap(), 0, sizeof(*This)); if (!This) return E_OUTOFMEMORY; This->IClassFactory_iface.lpVtbl = &HandlerClassFactoryVtbl; This->refs = 0; This->clsid = *rclsid; hr = IClassFactory_QueryInterface(&This->IClassFactory_iface, riid, ppv); if (FAILED(hr)) HeapFree(GetProcessHeap(), 0, This); return hr; }
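/*
 * Illustrative usage sketch (not part of the Wine sources): how a container
 * might obtain the default handler created above and query it through
 * IOleObject without starting the local server. The helper name and the idea
 * of probing GetMiscStatus are this sketch's own; <ole2.h> is assumed to be
 * available, as it is for the rest of this file.
 */
static void example_use_default_handler(const CLSID *clsid)
{
    IOleObject *ole = NULL;
    HRESULT hr;

    /* OleCreateDefaultHandler forwards to OleCreateEmbeddingHelper with
     * EMBDHLP_INPROC_HANDLER | EMBDHLP_CREATENOW, as shown above. */
    hr = OleCreateDefaultHandler(clsid, NULL, &IID_IOleObject, (void **)&ole);
    if (SUCCEEDED(hr))
    {
        DWORD status = 0;

        /* Answered by the handler without running the embedded object. */
        IOleObject_GetMiscStatus(ole, DVASPECT_CONTENT, &status);
        IOleObject_Release(ole);
    }
}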
879100.c
/*

    Copyright (C) 2014, The University of Texas at Austin

    This file is part of libflame and is available under the 3-Clause
    BSD license, which can be found in the LICENSE file at the top-level
    directory, or at http://opensource.org/licenses/BSD-3-Clause

*/

#include "FLAME.h"

#ifdef FLA_ENABLE_NON_CRITICAL_CODE

FLA_Error FLA_Gemm_tt_unb_var6( FLA_Obj alpha, FLA_Obj A, FLA_Obj B, FLA_Obj beta, FLA_Obj C )
{
  FLA_Obj AT,              A0,
          AB,              a1t,
                           A2;

  FLA_Obj BL,    BR,       B0,  b1,  B2;

  FLA_Scal_external( beta, C );

  FLA_Part_2x1( A,    &AT,
                      &AB,            0, FLA_BOTTOM );

  FLA_Part_1x2( B,    &BL,  &BR,      0, FLA_RIGHT );

  while ( FLA_Obj_length( AB ) < FLA_Obj_length( A ) ){

    FLA_Repart_2x1_to_3x1( AT,                &A0,
                                              &a1t,
                        /* ** */            /* *** */
                           AB,                &A2,        1, FLA_TOP );

    FLA_Repart_1x2_to_1x3( BL,  /**/ BR,        &B0, &b1, /**/ &B2,
                           1, FLA_LEFT );

    /*------------------------------------------------------------*/

    /* C = a1t' * b1' + C */
    FLA_Ger_external( alpha, a1t, b1, C );

    /*------------------------------------------------------------*/

    FLA_Cont_with_3x1_to_2x1( &AT,                A0,
                            /* ** */           /* *** */
                                                  a1t,
                              &AB,                A2,     FLA_BOTTOM );

    FLA_Cont_with_1x3_to_1x2( &BL,  /**/ &BR,        B0, /**/ b1, B2,
                              FLA_RIGHT );

  }

  return FLA_SUCCESS;
}

#endif
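/*
 * Illustrative companion to the variant above (not part of the libflame
 * sources): a plain-C scalar sketch of the same computation,
 * C := alpha * A^T * B^T + beta * C, accumulated one rank-1 update at a
 * time exactly as the FLA_Ger_external call in the loop body does.
 * Row-major double arrays and simple int dimensions are assumed.
 */
static void gemm_tt_rank1_sketch( int m, int n, int k, double alpha,
                                  const double *A,         /* k x m, row-major */
                                  const double *B,         /* n x k, row-major */
                                  double beta, double *C ) /* m x n, row-major */
{
  int i, j, p;

  /* C := beta * C  (FLA_Scal_external) */
  for ( i = 0; i < m * n; ++i )
    C[ i ] *= beta;

  /* One rank-1 update per iteration: C += alpha * A(p,:)^T * B(:,p)^T. */
  for ( p = 0; p < k; ++p )
    for ( i = 0; i < m; ++i )
      for ( j = 0; j < n; ++j )
        C[ i * n + j ] += alpha * A[ p * m + i ] * B[ j * k + p ];
}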
172318.c
/** * VUEngine Plugins Library * * (c) Christian Radke and Jorge Eremiev * * For the full copyright and license information, please view the LICENSE file * that was distributed with this source code. */ //--------------------------------------------------------------------------------------------------------- // INCLUDES //--------------------------------------------------------------------------------------------------------- #include <SoundManager.h> //--------------------------------------------------------------------------------------------------------- // DECLARATIONS //--------------------------------------------------------------------------------------------------------- //--------------------------------------------------------------------------------------------------------- // DEFINITIONS //--------------------------------------------------------------------------------------------------------- #include <MIDI.h> const uint16 AutomaticPauseSelectTrack[] = { CS4, PAU, ENDSOUND, 100, 50, 1, 15, 15, 15, }; SoundChannelConfigurationROM AUTOMATIC_PAUSE_SELECT_SND_CHANNEL_1_CONFIGURATION = { /// kMIDI, kPCM kMIDI, /// SxINT 0x9F, /// Volume SxLRV 0xFF, /// SxRAM (this is overrode by the SoundManager) 0x00, /// SxEV0 0xF0, /// SxEV1 0x01, /// SxFQH 0x00, /// SxFQL 0x00, /// Ch. 5 only 0x00, /// Waveform data pointer sawtoothWaveForm, /// kChannelNormal, kChannelModulation, kChannelNoise kChannelNormal, /// Volume __SOUND_LR }; SoundChannelROM AUTOMATIC_PAUSE_SELECT_SND_CHANNEL_1 = { /// Configuration (SoundChannelConfiguration*)&AUTOMATIC_PAUSE_SELECT_SND_CHANNEL_1_CONFIGURATION, /// Length (PCM) 0, /// Sound track { (const uint8*)AutomaticPauseSelectTrack } }; SoundChannelROM* AUTOMATIC_PAUSE_SELECT_SND_CHANNELS[] = { &AUTOMATIC_PAUSE_SELECT_SND_CHANNEL_1, NULL }; SoundROM AUTOMATIC_PAUSE_SELECT_SND = { /// Name "Automatic Pause select", /// Play in loop false, /// Target timer resolution in us 1000, /// Tracks (SoundChannel**)AUTOMATIC_PAUSE_SELECT_SND_CHANNELS };
840482.c
/*
 * Copyright (c) 2007-2011 Intel Corporation. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define VA_COMPAT_DISABLED 1

#include "sysdeps.h"
#include "va.h"
#include "va_compat.h"

VAStatus
vaCreateSurfaces_0_32_0(
    VADisplay dpy,
    int width,
    int height,
    int format,
    int num_surfaces,
    VASurfaceID *surfaces
)
{
    return vaCreateSurfaces(dpy, format, width, height,
                            surfaces, num_surfaces, NULL, 0);
}

VA_CPP_HELPER_ALIAS(vaCreateSurfaces, 0, 32, 0);
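/*
 * Usage sketch (illustrative only): callers built against the current libva
 * API pass the RT format first and may append surface attributes; the
 * 0.32.0 compatibility wrapper above merely reorders the legacy arguments
 * and forwards NULL/0 for the attribute list. The display is assumed to
 * have been obtained and initialised elsewhere (vaGetDisplay/vaInitialize).
 */
static VAStatus example_create_yuv420_surfaces(VADisplay dpy, VASurfaceID out[4])
{
    /* Modern equivalent of the legacy call
     * vaCreateSurfaces_0_32_0(dpy, 640, 480, VA_RT_FORMAT_YUV420, 4, out). */
    return vaCreateSurfaces(dpy, VA_RT_FORMAT_YUV420, 640, 480,
                            out, 4, NULL, 0);
}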
774466.c
void function(int a, int b, int c) {
    char buffer1[5];
    char buffer2[10];
}

void main() {
    function(1,2,3);
}
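/*
 * Illustrative companion (not part of the original file): the two functions
 * above form the classic stack-layout demo -- buffer2, buffer1, the saved
 * frame pointer and the return address typically sit near each other in
 * function()'s frame. The hedged sketch below shows the unsafe pattern such
 * demos build on: writing more than a buffer can hold corrupts whatever the
 * compiler placed after it (the exact layout is compiler- and ABI-dependent).
 */
#include <string.h>
#include <stdio.h>

static void overflow_sketch(const char *input)
{
    char small[5];

    /* Undefined behaviour whenever strlen(input) >= 5: the extra bytes,
     * including the terminating NUL, spill past `small`. */
    strcpy(small, input);
    printf("copied: %s\n", small);
}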
753320.c
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1987, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <ctype.h>

int
strcasecmp(const char *s1, const char *s2)
{
	const u_char *us1 = (const u_char *)s1, *us2 = (const u_char *)s2;

	while (tolower(*us1) == tolower(*us2)) {
		if (*us1++ == '\0')
			return (0);
		us2++;
	}
	return (tolower(*us1) - tolower(*us2));
}

int
strncasecmp(const char *s1, const char *s2, size_t n)
{
	if (n != 0) {
		const u_char *us1 = (const u_char *)s1;
		const u_char *us2 = (const u_char *)s2;

		do {
			if (tolower(*us1) != tolower(*us2))
				return (tolower(*us1) - tolower(*us2));
			if (*us1++ == '\0')
				break;
			us2++;
		} while (--n != 0);
	}
	return (0);
}
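/*
 * Usage sketch (illustrative only): both routines above compare byte by
 * byte through tolower(), so they are single-byte/ASCII comparisons rather
 * than Unicode-aware collation. A typical case-insensitive option match:
 */
static int example_is_verbose_flag(const char *arg)
{
	/* "--verbose", "--Verbose" and "--VERBOSE" all match; strncasecmp()
	 * is handy for prefix checks such as "--log=<file>". */
	return (strcasecmp(arg, "--verbose") == 0 ||
	    strncasecmp(arg, "--log=", 6) == 0);
}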
684.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/libxsmm/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Evangelos Georganas (Intel Corp.) ******************************************************************************/ for (j = t-1; j >= 0; --j) { /* let's run the cell in blocks for good locality */ #ifdef PROFILE if (ltid == 0) _start = _rdtsc(); #endif for (inik = thr_begin_nk; inik < thr_end_nk; ++inik ) { inb = inik % (N/bn); ikb = inik / (N/bn); in = (inik % (N/bn))*bn; ik = (inik / (N/bn))*bk; #if defined(LIBXSMM_RNN_CELL_AVX512) /* Compute dcp, dci, di, df, dp */ cps_ptr = (j == 0) ? &LIBXSMM_VLA_ACCESS(2, cp, in, ik, K) : &LIBXSMM_VLA_ACCESS(3, cs, j-1, in, ik, N, K); if (bcbk_multiples_of_16) { if (K % 2048 != 0 || LIBXSMM_DNN_COMPUTE_KIND_BWD == kind) { #include "libxsmm_internal_lstm_bwdupd_fused_eltwise.tpl.c" } else { /* Also reformat di, dci, df and dp to be used in the UPD pass in blocked format ... */ #include "libxsmm_internal_lstm_bwdupd_fused_eltwise_reformat.tpl.c" } } else { /* compute dhp */ if (j == t-1) { libxsmm_internal_matrix_copy_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, dh, t-1, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K) ); } else { libxsmm_internal_matrix_add_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K), &LIBXSMM_VLA_ACCESS(3, dh, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K) ); } /* compute dcp */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K), &LIBXSMM_VLA_ACCESS(3, o, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_square_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, co, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); if (j == t-1) { libxsmm_internal_matrix_add_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcs, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K) ); } else { libxsmm_internal_matrix_add_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K) ); } /* compute dci */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(3, i, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_square_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, ci, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dci, in, ik, K) ); /* compute di */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(3, ci, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, i, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, i, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, 
di, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, di, in, ik, K), &LIBXSMM_VLA_ACCESS(2, di, in, ik, K) ); /* compute df */ if (j == 0) { libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, cp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); } else { libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(3, cs, j-1, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); } libxsmm_internal_matrix_complement_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, f, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, f, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, df, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, df, in, ik, K), &LIBXSMM_VLA_ACCESS(2, df, in, ik, K) ); /* compute dp */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K), &LIBXSMM_VLA_ACCESS(3, co, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, o, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, o, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dp, in, ik, K) ); /* update dcp */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, f, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K) ); } #else /* compute dhp */ if (j == t-1) { libxsmm_internal_matrix_copy_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, dh, t-1, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K) ); } else { libxsmm_internal_matrix_add_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K), &LIBXSMM_VLA_ACCESS(3, dh, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K) ); } /* compute dcp */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K), &LIBXSMM_VLA_ACCESS(3, o, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_square_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, co, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); if (j == t-1) { libxsmm_internal_matrix_add_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcs, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K) ); } else { libxsmm_internal_matrix_add_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K) ); } /* compute dci */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(3, i, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_square_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, ci, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, 
&LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dci, in, ik, K) ); /* compute di */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(3, ci, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, i, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, i, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, di, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, di, in, ik, K), &LIBXSMM_VLA_ACCESS(2, di, in, ik, K) ); /* compute df */ if (j == 0) { libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, cp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); } else { libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(3, cs, j-1, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); } libxsmm_internal_matrix_complement_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, f, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, f, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, df, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, df, in, ik, K), &LIBXSMM_VLA_ACCESS(2, df, in, ik, K) ); /* compute dp */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K), &LIBXSMM_VLA_ACCESS(3, co, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K) ); libxsmm_internal_matrix_complement_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, o, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, o, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K) ); libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(2, t1, in, ik, K), &LIBXSMM_VLA_ACCESS(2, t2, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dp, in, ik, K) ); /* update dcp */ libxsmm_internal_matrix_eltwise_mult_ld( bk, bn, K, &LIBXSMM_VLA_ACCESS(3, f, j, in, ik, N, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K), &LIBXSMM_VLA_ACCESS(2, dcp, in, ik, K) ); #endif } #ifdef PROFILE if (ltid == 0) { _end = _rdtsc(); eltwise_cycles += _end - _start; } #endif if ( (LIBXSMM_DNN_COMPUTE_KIND_UPD == kind) || (LIBXSMM_DNN_COMPUTE_KIND_BWDUPD == kind) ) { #ifdef PROFILE if (ltid == 0) _start = _rdtsc(); #endif /* transpose xt for current timestep */ for (icin = thr_begin_nc; icin < thr_end_nc; ++icin ) { in = (icin / (C/bc))*bn; ic = (icin % (C/bc))*bc; for (jc = 0; jc < bc; ++jc) { for (jb = 0; jb < bn; ++jb) { en = in + jb; ec = ic + jc; LIBXSMM_VLA_ACCESS(2, xT, ec, en, N) = LIBXSMM_VLA_ACCESS(3, x, j, en, ec, N, C); } } } /* transpose ht for current timestep */ if (j == 0) { for (ikin = thr_begin_nk; ikin < thr_end_nk; ++ikin ) { in = (ikin / (K/bk))*bn; ik = (ikin % (K/bk))*bk; for (jk = 0; jk < bk; ++jk) { for (jb = 0; jb < bn; ++jb) { en = in + jb; ek = ik + jk; LIBXSMM_VLA_ACCESS(2, hT, ek, en, N) = LIBXSMM_VLA_ACCESS(2, hp, en, ek, K); } } } } else { for (ikin = thr_begin_nk; ikin < thr_end_nk; ++ikin ) { in = (ikin / (K/bk))*bn; ik = (ikin % 
(K/bk))*bk; for (jk = 0; jk < bk; ++jk) { for (jb = 0; jb < bn; ++jb) { en = in + jb; ek = ik + jk; LIBXSMM_VLA_ACCESS(2, hT, ek, en, N) = LIBXSMM_VLA_ACCESS(3, h, j-1, en, ek, N, K); } } } } #ifdef PROFILE if (ltid == 0) { _end = _rdtsc(); act_trans_cycles += _end - _start; } #endif } libxsmm_barrier_wait(handle->barrier, (int)ltid); if ( (LIBXSMM_DNN_COMPUTE_KIND_BWD == kind) || (LIBXSMM_DNN_COMPUTE_KIND_BWDUPD == kind) ) { #ifdef PROFILE if (ltid == 0) _start = _rdtsc(); #endif /* dx = W^T * difoc */ for (KB = 0; KB < BF; KB++) { for (inic = thr_begin_nc; inic < thr_end_nc; ++inic ) { in = (inic % (N/bn))*bn; icb = inic / (N/bn); ic = icb*bc; for (ik = 0, ikb = 0; ikb < KB_BLOCKS; ik += bk, ikb++) { A_array[ikb] = &LIBXSMM_VLA_ACCESS(4, wiT, icb, ikb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bc); B_array[ikb] = &LIBXSMM_VLA_ACCESS(2, di, in, ik + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ blocks = KB_BLOCKS; batchreduce_kernela(A_array, B_array, &LIBXSMM_VLA_ACCESS(3, dx, j, in, ic, N, C) , &blocks); for (ik = 0, ikb = 0; ikb < KB_BLOCKS; ik += bk, ikb++) { A_array[ikb] = &LIBXSMM_VLA_ACCESS(4, wcT, icb, ikb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bc); B_array[ikb] = &LIBXSMM_VLA_ACCESS(2, dci, in, ik + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ batchreduce_kernela(A_array, B_array, &LIBXSMM_VLA_ACCESS(3, dx, j, in, ic, N, C) , &blocks); for (ik = 0, ikb = 0; ikb < KB_BLOCKS; ik += bk, ikb++) { A_array[ikb] = &LIBXSMM_VLA_ACCESS(4, wfT, icb, ikb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bc); B_array[ikb] = &LIBXSMM_VLA_ACCESS(2, df, in, ik + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ batchreduce_kernela(A_array, B_array, &LIBXSMM_VLA_ACCESS(3, dx, j, in, ic, N, C) , &blocks); for (ik = 0, ikb = 0; ikb < KB_BLOCKS; ik += bk, ikb++) { A_array[ikb] = &LIBXSMM_VLA_ACCESS(4, woT, icb, ikb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bc); B_array[ikb] = &LIBXSMM_VLA_ACCESS(2, dp, in, ik + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ batchreduce_kernela(A_array, B_array, &LIBXSMM_VLA_ACCESS(3, dx, j, in, ic, N, C) , &blocks); } } #ifdef PROFILE if (ltid == 0) { _end = _rdtsc(); dx_cycles += _end - _start; } #endif } #ifdef PROFILE if (ltid == 0) _start = _rdtsc(); #endif for (KB = 0; KB < BF; KB++) { for (inik = thr_begin_nk; inik < thr_end_nk; ++inik ) { in = (inik % (N/bn))*bn; ikb = inik / (N/bn); ik = ikb*bk; dout_ptr = (j > 0) ? 
(element_output_type*) &LIBXSMM_VLA_ACCESS(2, dout, in, ik, K) : (element_output_type*) &LIBXSMM_VLA_ACCESS(2, dhp, in, ik, K); if (KB == 0) libxsmm_internal_matrix_zero_ld( bk, bn, K, dout_ptr); /* dout += R^T * difoc */ for (ic = 0, icb = 0; icb < KB_BLOCKS; ic += bk, icb++) { A_array[icb] = &LIBXSMM_VLA_ACCESS(4, riT, ikb, icb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bk); B_array[icb] = &LIBXSMM_VLA_ACCESS(2, di, in, ic + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ blocks = KB_BLOCKS; batchreduce_kerneld(A_array, B_array, dout_ptr, &blocks); for (ic = 0, icb = 0; icb < KB_BLOCKS; ic += bk, icb++) { A_array[icb] = &LIBXSMM_VLA_ACCESS(4, rcT, ikb, icb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bk); B_array[icb] = &LIBXSMM_VLA_ACCESS(2, dci, in, ic + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ batchreduce_kerneld(A_array, B_array, dout_ptr, &blocks); for (ic = 0, icb = 0; icb < KB_BLOCKS; ic += bk, icb++) { A_array[icb] = &LIBXSMM_VLA_ACCESS(4, rfT, ikb, icb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bk); B_array[icb] = &LIBXSMM_VLA_ACCESS(2, df, in, ic + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ batchreduce_kerneld(A_array, B_array, dout_ptr, &blocks); for (ic = 0, icb = 0; icb < KB_BLOCKS; ic += bk, icb++) { A_array[icb] = &LIBXSMM_VLA_ACCESS(4, roT, ikb, icb + KB*KB_BLOCKS, 0, 0, kBlocks, bk, bk); B_array[icb] = &LIBXSMM_VLA_ACCESS(2, dp, in, ic + KB*KB_BLOCKS*bk, K); } /* Reduce batch gemm call */ batchreduce_kerneld(A_array, B_array, dout_ptr, &blocks); } } #ifdef PROFILE if (ltid == 0) { _end = _rdtsc(); dout_cycles += _end - _start; } #endif if ( (LIBXSMM_DNN_COMPUTE_KIND_UPD == kind) || (LIBXSMM_DNN_COMPUTE_KIND_BWDUPD == kind) ) { #ifdef PROFILE if (ltid == 0) _start = _rdtsc(); #endif if ((C == K) && (bc == bk) && (bcbk_multiples_of_16 == 1)) { #if 0 if (K % 2048 != 0) { #endif /* Interleave computation of dr = difoc * h^T and dw = difoc * x^T to take advantage of temporal locality */ for (ikic = thr_begin_kk; ikic < thr_end_kk; ++ikic ) { icb = ikic / (K/bk); ic = icb*bk; ikb = ikic % (K/bk); ik = ikb*bk; blocks = nBlocks; for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, di, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dri, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, di, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwi, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dci, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, drc, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dci, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwc, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, df, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, drf, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, df, in, ik, K); B_array[inb] = 
&LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwf, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dp, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dro, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dp, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwo, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); } LIBXSMM_UNUSED(diB_); LIBXSMM_UNUSED(dciB_); LIBXSMM_UNUSED(dfB_); LIBXSMM_UNUSED(dpB_); LIBXSMM_UNUSED(batchreduce_kernelc); LIBXSMM_UNUSED(batchreduce_kernelb); #if 0 } else { /* Interleave computation of dr = difoc * h^T and dw = difoc * x^T to take advantage of temporal locality */ /* Use blocked format for di, dci, df and dp */ for (ikic = thr_begin_kk; ikic < thr_end_kk; ++ikic ) { icb = ikic / (K/bk); ic = icb*bk; ikb = ikic % (K/bk); ik = ikb*bk; blocks = nBlocks; for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, diB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dri, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, diB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwi, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, dciB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, drc, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, dciB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwc, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, dfB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, drf, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, dfB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwf, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, dpB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dro, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(4, dpB, inb, ikb, 0, 0, kBlocks, bn, bk); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwo, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); } } #endif } else { /* dr = difoc * h^T */ 
for (ikic = thr_begin_kk; ikic < thr_end_kk; ++ikic ) { icb = ikic / (K/bk); ic = icb*bk; ikb = ikic % (K/bk); ik = ikb*bk; for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, di, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } blocks = nBlocks; batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dri, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dci, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, drc, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, df, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, drf, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dp, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, hT, ic, in, N); } batchreduce_kernelb1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dro, ikb, icb, 0, 0, kBlocks, bk, bk), &blocks); } /* dw = difoc * x^T */ for (ikic = thr_begin_ck; ikic < thr_end_ck; ++ikic ) { icb = ikic / (K/bk); ic = icb*bc; ikb = ikic % (K/bk); ik = ikb*bk; for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, di, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } blocks = nBlocks; batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwi, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dci, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwc, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, df, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwf, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); for (in = 0, inb = 0; in < N; in += bn, inb++) { A_array[inb] = &LIBXSMM_VLA_ACCESS(2, dp, in, ik, K); B_array[inb] = &LIBXSMM_VLA_ACCESS(2, xT, ic, in, N); } batchreduce_kernelc1(A_array, B_array, &LIBXSMM_VLA_ACCESS(4, dwo, ikb, icb, 0, 0, cBlocks, bc, bk), &blocks); } } #ifdef PROFILE if (ltid == 0) { _end = _rdtsc(); dwdr_cycles += _end - _start; } #endif #ifdef PROFILE if (ltid == 0) _start = _rdtsc(); #endif /* gradient bias */ #if defined(LIBXSMM_RNN_CELL_AVX512) if (bcbk_multiples_of_16) { for (ik = k_thr_begin; ik < k_thr_end; ik += 16) { dbi_sum = LIBXSMM_INTRINSICS_MM512_LOAD_PS(&dbi[ik]); dbf_sum = LIBXSMM_INTRINSICS_MM512_LOAD_PS(&dbf[ik]); dbo_sum = LIBXSMM_INTRINSICS_MM512_LOAD_PS(&dbo[ik]); dbc_sum = LIBXSMM_INTRINSICS_MM512_LOAD_PS(&dbc[ik]); for (in = 0; in < N; in++) { dbi_sum = _mm512_add_ps(dbi_sum, LIBXSMM_INTRINSICS_MM512_LOAD_PS(&LIBXSMM_VLA_ACCESS(2, di, in, ik, K))); dbf_sum = _mm512_add_ps(dbf_sum, LIBXSMM_INTRINSICS_MM512_LOAD_PS(&LIBXSMM_VLA_ACCESS(2, df, in, ik, K))); dbo_sum = _mm512_add_ps(dbo_sum, LIBXSMM_INTRINSICS_MM512_LOAD_PS(&LIBXSMM_VLA_ACCESS(2, dp, in, ik, K))); dbc_sum = _mm512_add_ps(dbc_sum, LIBXSMM_INTRINSICS_MM512_LOAD_PS(&LIBXSMM_VLA_ACCESS(2, dci, in, ik, K))); } _mm512_storeu_ps(&dbi[ik], dbi_sum); _mm512_storeu_ps(&dbf[ik], dbf_sum); _mm512_storeu_ps(&dbo[ik], dbo_sum); 
_mm512_storeu_ps(&dbc[ik], dbc_sum); } } else { for (ik = thr_begin_k; ik < thr_end_k; ik++) { for (in = 0; in < N; in++) { dbi[ik] += LIBXSMM_VLA_ACCESS(2, di, in, ik, K); dbf[ik] += LIBXSMM_VLA_ACCESS(2, df, in, ik, K); dbo[ik] += LIBXSMM_VLA_ACCESS(2, dp, in, ik, K); dbc[ik] += LIBXSMM_VLA_ACCESS(2, dci, in, ik, K); } } } #else for (ik = thr_begin_k; ik < thr_end_k; ik++) { for (in = 0; in < N; in++) { dbi[ik] += LIBXSMM_VLA_ACCESS(2, di, in, ik, K); dbf[ik] += LIBXSMM_VLA_ACCESS(2, df, in, ik, K); dbo[ik] += LIBXSMM_VLA_ACCESS(2, dp, in, ik, K); dbc[ik] += LIBXSMM_VLA_ACCESS(2, dci, in, ik, K); } } #endif #ifdef PROFILE if (ltid == 0) { _end = _rdtsc(); gradient_cycles += _end - _start; } #endif } libxsmm_barrier_wait(handle->barrier, (int)ltid); }
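/*
 * Scalar reference for the element-wise part of the backward pass above
 * (illustrative only, and therefore guarded out of compilation: this
 * template is #included inside a function body, so a helper function
 * cannot be defined here). Naming follows the buffers in the template:
 * i, f, o are the post-sigmoid gates, ci = tanh of the cell input,
 * co = tanh of the cell state, cs_prev is the previous cell state, dout is
 * the accumulated output gradient and dcp the incoming cell-state gradient.
 */
#if 0
typedef struct {
  float di, df, dp, dci;  /* gate gradients (dp is the output-gate gradient) */
  float dcp_next;         /* cell-state gradient carried to timestep j-1     */
} lstm_cell_grads;

static lstm_cell_grads lstm_bwd_elementwise(float dout, float dcp,
                                            float i, float f, float o,
                                            float ci, float co, float cs_prev)
{
  lstm_cell_grads g;
  dcp += dout * o * (1.0f - co * co);      /* back through tanh(cs)         */
  g.dci = dcp * i  * (1.0f - ci * ci);     /* cell-input candidate          */
  g.di  = dcp * ci * i * (1.0f - i);       /* input gate  (sigmoid')        */
  g.df  = dcp * cs_prev * f * (1.0f - f);  /* forget gate (sigmoid')        */
  g.dp  = dout * co * o * (1.0f - o);      /* output gate (sigmoid')        */
  g.dcp_next = dcp * f;                    /* dcp flowing to timestep j-1   */
  return g;
}
#endif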
64102.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65a.c Label Definition File: CWE197_Numeric_Truncation_Error__int.label.xml Template File: sources-sink-65a.tmpl.c */ /* * @description * CWE: 197 Numeric Truncation Error * BadSource: connect_socket Read data using a connect socket (client side) * GoodSource: Less than CHAR_MAX * Sinks: to_char * BadSink : Convert data to a char * Flow Variant: 65 Data/control flow: data passed as an argument from one function to a function in a different source file called via a function pointer * * */ #include "std_testcase.h" #ifdef _WIN32 # include <winsock2.h> # include <windows.h> # include <direct.h> # pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */ # define CLOSE_SOCKET closesocket #else /* NOT _WIN32 */ # define INVALID_SOCKET -1 # define SOCKET_ERROR -1 # define CLOSE_SOCKET close # define SOCKET int #endif #define TCP_PORT 27015 #define CHAR_ARRAY_SIZE sizeof(data)*sizeof(data) #ifndef OMITBAD /* bad function declaration */ void CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65b_bad_sink(int data); void CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65_bad() { int data; /* define a function pointer */ void (*func_ptr) (int) = CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65b_bad_sink; /* Initialize data */ data = -1; { #ifdef _WIN32 WSADATA wsa_data; int wsa_data_init = 0; #endif int recv_rv; struct sockaddr_in s_in; SOCKET connect_socket = INVALID_SOCKET; char input_buf[CHAR_ARRAY_SIZE]; do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break; wsa_data_init = 1; #endif connect_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (connect_socket == INVALID_SOCKET) break; memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = AF_INET; s_in.sin_addr.s_addr = inet_addr("127.0.0.1"); s_in.sin_port = htons(TCP_PORT); if (connect(connect_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break; /* Abort on error or the connection was closed, make sure to recv one * less char than is in the recv_buf in order to append a terminator */ recv_rv = recv(connect_socket, input_buf, CHAR_ARRAY_SIZE, 0); if (recv_rv == SOCKET_ERROR || recv_rv == 0) break; /* Convert to int */ data = atoi(input_buf); } while (0); if (connect_socket != INVALID_SOCKET) CLOSE_SOCKET(connect_socket); #ifdef _WIN32 if (wsa_data_init) WSACleanup(); #endif } /* use the function pointer */ func_ptr(data); } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B uses the GoodSource with the BadSink */ void CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65b_goodG2B_sink(int data); static void goodG2B() { int data; void (*func_ptr) (int) = CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65b_goodG2B_sink; /* Initialize data */ data = -1; /* FIX: Use a positive integer less than CHAR_MAX*/ data = CHAR_MAX-5; func_ptr(data); } void CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65_good() { goodG2B(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. 
*/ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE197_Numeric_Truncation_Error__int_connect_socket_to_char_65_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
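/*
 * Illustrative note (not part of the Juliet testcase): the flaw this case
 * exercises is the int -> char conversion performed by the bad sink in the
 * companion 65b file. A minimal, self-contained demonstration of the
 * truncation, assuming an 8-bit char:
 */
#include <stdio.h>

static void truncation_sketch(void)
{
    int wide = 300;                       /* cannot be represented in 8 bits */
    char narrow = (char)wide;             /* out-of-range conversion: value is truncated */
    printf("%d -> %d\n", wide, narrow);   /* commonly prints "300 -> 44"     */
}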
513172.c
/*- * Copyright (c) 2005-2007, Kohsuke Ohtani * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * sem.c - semaphore support */ #include <kernel.h> #include <event.h> #include <sched.h> #include <kmem.h> #include <task.h> #include <sync.h> /* forward declarations */ static int sem_valid(sem_t); static void sem_release(sem_t); static void sem_reference(sem_t); static int sem_copyin(sem_t*, sem_t*); static struct sem* sem_list = NULL; /* list head of semaphore list */ /* * sem_init - initialize a semaphore; required before use. * * sem_init() creates a new semaphore if the specified * semaphore does not exist yet. If the semaphore already * exists, it is re-initialized only if nobody is waiting for * it. The initial semaphore value is set to the requested * value. A semaphore can be shared among different tasks. */ int sem_init(sem_t* sp, u_int value) { task_t self = curtask; sem_t s; /* * A couple of quick sanity checks. */ if (self->nsyncs >= MAXSYNCS) return EAGAIN; if (value > MAXSEMVAL) return EINVAL; if (copyin(sp, &s, sizeof(sp))) return EFAULT; /* * An application can call sem_init() to reset the * value of existing semaphore. So, we have to check * whether the semaphore is already allocated. */ sched_lock(); if (s && sem_valid(s)) { /* * Semaphore already exists. */ if (s->owner != self) { sched_unlock(); return EINVAL; } if (event_waiting(&s->event)) { sched_unlock(); return EBUSY; } s->value = value; } else { /* * Create new semaphore. */ if ((s = kmem_alloc(sizeof(struct sem))) == NULL) { sched_unlock(); return ENOSPC; } if (copyout(&s, sp, sizeof(s))) { kmem_free(s); sched_unlock(); return EFAULT; } event_init(&s->event, "semaphore"); s->owner = self; s->refcnt = 1; s->value = value; list_insert(&self->sems, &s->task_link); self->nsyncs++; s->next = sem_list; sem_list = s; } sched_unlock(); return 0; } /* * Destroy a semaphore. * If some thread is waiting for the specified semaphore, * this routine fails with EBUSY. 
*/ int sem_destroy(sem_t* sp) { sem_t s; sched_lock(); if (sem_copyin(sp, &s) || s->owner != curtask) { sched_unlock(); return EINVAL; } if (event_waiting(&s->event) || s->value == 0) { sched_unlock(); return EBUSY; } sem_release(s); sched_unlock(); return 0; } /* * sem_wait - lock a semaphore. * * The value of timeout is msec unit. 0 for no timeout. * * sem_wait() locks the semaphore referred by sem only if the * semaphore value is currently positive. The thread will * sleep while the semaphore value is zero. It decrements the * semaphore value in return. * * If waiting thread receives any exception, this routine * returns with EINTR in order to invoke exception * handler. But, an application assumes this call does NOT * return with an error. So, the system call stub routine will * automatically call sem_wait again if it gets EINTR. */ int sem_wait(sem_t* sp, u_long timeout) { sem_t s; int rc, error = 0; sched_lock(); if (sem_copyin(sp, &s)) { sched_unlock(); return EINVAL; } sem_reference(s); while (s->value == 0) { rc = sched_tsleep(&s->event, timeout); if (rc == SLP_TIMEOUT) { error = ETIMEDOUT; break; } if (rc == SLP_INTR) { error = EINTR; break; } /* * We have to check the semaphore value again * because another thread may run and acquire * the semaphore before us. */ } if (!error) s->value--; sem_release(s); sched_unlock(); return error; } /* * Try to lock a semaphore. * If the semaphore is already locked, it just returns EAGAIN. */ int sem_trywait(sem_t* sp) { sem_t s; sched_lock(); if (sem_copyin(sp, &s)) { sched_unlock(); return EINVAL; } if (s->value == 0) { sched_unlock(); return EAGAIN; } s->value--; sched_unlock(); return 0; } /* * Unlock a semaphore. * * If the semaphore value becomes non zero, then one of * the threads blocked waiting for the semaphore will be * unblocked. This is non-blocking operation. */ int sem_post(sem_t* sp) { sem_t s; sched_lock(); if (sem_copyin(sp, &s)) { sched_unlock(); return EINVAL; } if (s->value >= MAXSEMVAL) { sched_unlock(); return ERANGE; } s->value++; if (s->value > 0) sched_wakeone(&s->event); sched_unlock(); return 0; } /* * Get the semaphore value. */ int sem_getvalue(sem_t* sp, u_int* value) { sem_t s; sched_lock(); if (sem_copyin(sp, &s)) { sched_unlock(); return EINVAL; } if (copyout(&s->value, value, sizeof(s->value))) { sched_unlock(); return EFAULT; } sched_unlock(); return 0; } /* * Take out a reference on a semaphore. */ static void sem_reference(sem_t s) { s->refcnt++; } /* * Release a reference on a semaphore. If this is the last * reference, the semaphore data structure is deallocated. */ static void sem_release(sem_t s) { sem_t* sp; if (--s->refcnt > 0) return; list_remove(&s->task_link); s->owner->nsyncs--; for (sp = &sem_list; *sp; sp = &(*sp)->next) { if (*sp == s) { *sp = s->next; break; } } kmem_free(s); } void sem_cleanup(task_t task) { list_t head, n; sem_t s; head = &task->sems; for (n = list_first(head); n != head; n = list_next(n)) { s = list_entry(n, struct sem, task_link); sem_release(s); } } static int sem_valid(sem_t s) { sem_t tmp; for (tmp = sem_list; tmp; tmp = tmp->next) { if (tmp == s) return 1; } return 0; } /* * sem_copyin - copy a semaphore from user space. * * It also checks whether the passed semaphore is valid. */ static int sem_copyin(sem_t* usp, sem_t* ksp) { sem_t s; if (copyin(usp, &s, sizeof(usp)) || !sem_valid(s)) return EINVAL; *ksp = s; return 0; }
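/*
 * Usage sketch (illustrative only, not part of the Prex sources): the
 * routines above are the kernel half of the semaphore system calls, so real
 * programs reach them through user-space stubs and pass user addresses that
 * copyin()/copyout() can handle. The intended call sequence, using the
 * signatures in this file (a timeout of 0 means wait forever), looks like:
 */
static int example_guarded_section(sem_t *sem)
{
	int error;

	error = sem_init(sem, 1);	/* binary semaphore, initially open  */
	if (error)
		return error;

	error = sem_wait(sem, 0);	/* block until the count is positive */
	if (!error) {
		/* ... critical work ... */
		sem_post(sem);		/* release, waking one waiter        */
	}

	sem_destroy(sem);
	return error;
}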
534634.c
/* Copyright (C) 1994-2016 Lawrence Livermore National Security, LLC. LLNL-CODE-425250. All rights reserved. This file is part of Silo. For details, see silo.llnl.gov. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. This work was produced at Lawrence Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE. Neither the United States Government nor Lawrence Livermore National Security, LLC nor any of their employees, makes any warranty, express or implied, or assumes any liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately-owned rights. Any reference herein to any specific commercial products, process, or services by trade name, trademark, manufacturer or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes. */ /* * PDBMM.C - memory management for the PDB library system * * Source Version: 9.0 * Software Release #92-0043 * */ #include <limits.h> #include "pdb.h" /*------------------------------------------------------------------------- * Function: _lite_PD_mk_pdb * * Purpose: Construct and return a pointer to a PDBFile * * Return: Success: Ptr to a new PDB file * * Failure: NULL * * Programmer: Adapted from PACT PDB * Mar 5, 1996 11:45 AM EST * * Modifications: * Eric Brugger, Mon Dec 7 09:50:45 PST 1998 * Remove the caching of pointer references. * * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * * Mark C. Miller, Fri Apr 13 22:35:57 PDT 2012 * Added options arg and S,M,L,XL hash table size options. Added * ignore_apersand_ia_ptr_syms option. 
*------------------------------------------------------------------------- */ PDBfile * _lite_PD_mk_pdb (char *name, const char *options) { PDBfile *file; int symtsz = HSZMEDIUM; file = FMAKE(PDBfile, "_PD_MK_PDB:file"); if (file == NULL) return(NULL); file->stream = NULL; file->name = lite_SC_strsavef(name, "char*:_PD_MK_PDB:name"); file->type = NULL; if (strchr(options, 's')) symtsz = HSZSMALL; else if (strchr(options, 'm')) symtsz = HSZMEDIUM; else if (strchr(options, 'l')) symtsz = HSZLARGE; else if (strchr(options, 'x')) symtsz = HSZXLARGE; else symtsz = HSZMEDIUM; file->symtab = lite_SC_make_hash_table(symtsz, NODOC); file->chart = lite_SC_make_hash_table(1, NODOC); file->host_chart = lite_SC_make_hash_table(1, NODOC); file->attrtab = NULL; file->mode = 0; /* read only, write only, read-write ? */ file->maximum_size = LONG_MAX; /* family info */ file->previous_file = NULL; file->flushed = FALSE; /* born unflushed */ file->virtual_internal = FALSE; /* disk file by default */ file->current_prefix = NULL; /* read/write variable name prefix */ file->system_version = 0; file->default_offset = 0; /* default offset for array indexes */ file->major_order = ROW_MAJOR_ORDER; file->std = NULL; file->align = NULL; file->host_std = _lite_PD_copy_standard(lite_INT_STANDARD); file->host_align = _lite_PD_copy_alignment(lite_INT_ALIGNMENT); file->symtaddr = 0L; file->chrtaddr = 0L; file->headaddr = 0L; file->ignore_apersand_ptr_ia_syms = 0; if (strchr(options, 'i')) file->ignore_apersand_ptr_ia_syms = 1; return(file); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_pdb * * Purpose: Release the storage associated with the PDBfile * * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 12:03 PM EST * * Modifications: * Eric Brugger, Mon Dec 7 10:51:58 PST 1998 * Removed call to lite_PD_reset_ptr_list since it was removed. I * added a call to free current_prefix to close a memory leak. * *------------------------------------------------------------------------- */ void _lite_PD_rl_pdb (PDBfile *file) { SFREE(file->date); _lite_PD_rl_standard(file->std); _lite_PD_rl_standard(file->host_std); _lite_PD_rl_alignment(file->align); _lite_PD_rl_alignment(file->host_align); if (file->attrtab != NULL) _lite_PD_clr_table(file->attrtab, NULL); _lite_PD_clr_table(file->host_chart,(FreeFuncType)_lite_PD_rl_defstr); _lite_PD_clr_table(file->chart,(FreeFuncType)_lite_PD_rl_defstr); _lite_PD_clr_table(file->symtab,(FreeFuncType)_lite_PD_rl_syment_d); if (file->previous_file != NULL) SFREE(file->previous_file); if (file->current_prefix != NULL) SFREE(file->current_prefix); if (file->type != NULL) SFREE(file->type); if (lite_LAST != NULL) SFREE(lite_LAST); if (lite_PD_DEFSTR_S != NULL) SFREE(lite_PD_DEFSTR_S); lite_PD_DEFSTR_S = NULL; if (lite_PD_SYMENT_S != NULL) SFREE(lite_PD_SYMENT_S); lite_PD_SYMENT_S = NULL; if (lite_io_close_hook == (PFfclose) _lite_PD_pio_close) lite_io_close_hook = (PFfclose) fclose; if (lite_io_seek_hook == (PFfseek) _lite_PD_pio_seek) lite_io_seek_hook = (PFfseek) fseek; if (lite_io_printf_hook == (PFfprintf) _lite_PD_pio_printf) lite_io_printf_hook = (PFfprintf) fprintf; SFREE(file->name); SFREE(file); } /*------------------------------------------------------------------------- * Function: _lite_PD_clr_table * * Purpose: Release the storage associated with a homogeneous hash * table. 
* * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 1:36 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_clr_table (HASHTAB *tab, FreeFuncType rel) { int i, n; hashel **tb, *hp, *nxt; n = tab->size; tb = tab->table; for (i = 0; i < n; i++) { for (hp = tb[i]; hp != NULL; hp = nxt) { nxt = hp->next; SFREE(hp->name); if (rel != NULL) (*rel)(hp->def); SFREE(hp); } tb[i] = NULL; } lite_SC_rl_hash_table(tab); } /*------------------------------------------------------------------------- * Function: _lite_PD_mk_standard * * Purpose: Allocate, initialize, and return a pointer to a * data standard. * * Return: Success: Ptr to the new data standard struct. * * Failure: NULL * * Programmer: Adapted from PACT PDB * Mar 5, 1996 2:15 PM EST * * Modifications: * * Mark C. Miller, Fri Nov 13 15:33:42 PST 2009 * Added support for long long datatype. * * Mark C. Miller, Tue Nov 17 22:23:42 PST 2009 * Changed support for long long to match more closely what PDB * proper does. *------------------------------------------------------------------------- */ data_standard * _lite_PD_mk_standard (void) { data_standard *std; std = FMAKE(data_standard, "_PD_MK_STANDARD:std"); std->ptr_bytes = 0; std->short_bytes = 0; std->short_order = 0; std->int_bytes = 0; std->int_order = 0; std->long_bytes = 0; std->long_order = 0; std->longlong_bytes = 0; std->longlong_order = 0; std->float_bytes = 0; std->float_format = NULL; std->float_order = NULL; std->double_bytes = 0; std->double_format = NULL; std->double_order = NULL; return(std); } /*------------------------------------------------------------------------- * Function: _lite_PD_copy_standard * * Purpose: Copy the given data standard. * * Return: Success: Ptr to the new data standard struct. * * Failure: NULL * * Programmer: Adapted from PACT PDB * Mar 4, 1996 12:59 PM EST * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * * Mark C. Miller, Fri Nov 13 15:33:42 PST 2009 * Added support for long long datatype. * * Mark C. Miller, Tue Nov 17 22:23:42 PST 2009 * Changed support for long long to match more closely what PDB * proper does. 
*------------------------------------------------------------------------- */ data_standard * _lite_PD_copy_standard (data_standard *src) { data_standard *std; int j, n; int *ostd, *osrc; long *fstd, *fsrc; std = FMAKE(data_standard, "_PD_COPY_STANDARD:std"); std->ptr_bytes = src->ptr_bytes; std->short_bytes = src->short_bytes; std->short_order = src->short_order; std->int_bytes = src->int_bytes; std->int_order = src->int_order; std->long_bytes = src->long_bytes; std->long_order = src->long_order; std->longlong_bytes = src->longlong_bytes; std->longlong_order = src->longlong_order; std->float_bytes = src->float_bytes; std->double_bytes = src->double_bytes; n = lite_FORMAT_FIELDS; std->float_format = FMAKE_N(long, n, "_PD_COPY_STANDARD:float_format"); fstd = std->float_format; fsrc = src->float_format; for (j = 0; j < n; j++, *(fstd++) = *(fsrc++)) /*void*/ ; n = std->float_bytes; std->float_order = FMAKE_N(int, n, "_PD_COPY_STANDARD:float_order"); ostd = std->float_order; osrc = src->float_order; for (j = 0; j < n; j++, *(ostd++) = *(osrc++)) /*void*/ ; n = lite_FORMAT_FIELDS; std->double_format = FMAKE_N(long, n, "_PD_COPY_STANDARD:double_format"); fstd = std->double_format; fsrc = src->double_format; for (j = 0; j < n; j++, *(fstd++) = *(fsrc++)) /*void*/ ; n = std->double_bytes; std->double_order = FMAKE_N(int, n, "_PD_COPY_STANDARD:double_order"); ostd = std->double_order; osrc = src->double_order; for (j = 0; j < n; j++, *(ostd++) = *(osrc++)) /*void*/ ; return(std); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_standard * * Purpose: Release a data standard structure. * * Return: void * * Programmer: Adapted from PACT PDB * Mar 4, 1996 1:01 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_standard (data_standard *std) { if (lite_SC_arrlen(std) > 0) { SFREE(std->float_format); SFREE(std->float_order); SFREE(std->double_format); SFREE(std->double_order); SFREE(std); } } /*------------------------------------------------------------------------- * Function: _lite_PD_mk_alignment * * Purpose: Allocate, initialize and return a pointer to a * data_alignment. * * Return: Success: Ptr to the new data_alignment struct. * * Failure: NULL * * Programmer: Adapted from PACT PDB * Mar 5, 1996 2:04 PM EST * * Modifications: * * Mark C. Miller, Fri Nov 13 15:33:42 PST 2009 * Added support for long long datatype. * * Mark C. Miller, Tue Nov 17 22:23:42 PST 2009 * Changed support for long long to match more closely what PDB * proper does. *------------------------------------------------------------------------- */ data_alignment * _lite_PD_mk_alignment (char *vals) { data_alignment *align; align = FMAKE(data_alignment, "_PD_MK_ALIGNMENT:align"); align->char_alignment = vals[0]; align->ptr_alignment = vals[1]; align->short_alignment = vals[2]; align->int_alignment = vals[3]; align->long_alignment = vals[4]; align->longlong_alignment = vals[4]; /* default same as long */ align->float_alignment = vals[5]; align->double_alignment = vals[6]; if (strlen(vals) > 7) align->struct_alignment = vals[7]; else align->struct_alignment = 0; return(align); } /*------------------------------------------------------------------------- * Function: _lite_PD_copy_alignment * * Purpose: Copies a data_alignment structure. * * Return: Success: Ptr to new data alignment. 
* * Failure: NULL * * Programmer: Adapted from PACT PDB * Mar 4, 1996 12:56 PM EST * * Modifications: * *------------------------------------------------------------------------- */ data_alignment * _lite_PD_copy_alignment (data_alignment *src) { data_alignment *align; align = FMAKE(data_alignment, "_PD_COPY_ALIGNMENT:align"); *align = *src; return(align); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_alignment * * Purpose: Release a data alignment structure. * * Return: void * * Programmer: Adapted from PACT PDB * Mar 4, 1996 12:57 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_alignment (data_alignment *align) { if (lite_SC_arrlen(align) > 0) { SFREE(align); } } /*------------------------------------------------------------------------- * Function: lite_PD_copy_dims * * Purpose: Make and return a copy of the given dimension list. * * Return: Success: copy of dimension list. * * Failure: NULL * * Programmer: Adapted from PACT PDB * Mar 8, 1996 * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * *------------------------------------------------------------------------- */ dimdes * lite_PD_copy_dims (dimdes *odims) { dimdes *od, *ndims, *prev, *next; prev = NULL; ndims = NULL; for (od = odims; od != NULL; od = od->next) { next = FMAKE(dimdes, "PD_COPY_DIMS:next"); *next = *od; next->next = NULL; if (ndims == NULL) { ndims = next; } else { prev->next = next; } prev = next; } return(ndims); } /*------------------------------------------------------------------------- * Function: lite_PD_copy_syment * * Purpose: Make and return a copy of the given syment. * * Return: Success: a new syment * * Failure: * * Programmer: Adapted from PACT PDB * Mar 6, 1996 11:39 AM EST * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * *------------------------------------------------------------------------- */ syment * lite_PD_copy_syment (syment *osym) { int i, n; char *ntype; syment *nsym; symblock *nsp, *osp; dimdes *ndims; if (osym == NULL) return(NULL); nsym = FMAKE(syment, "PD_COPY_SYMENT:nsym"); n = PD_n_blocks(osym); osp = PD_entry_blocks(osym); nsp = FMAKE_N(symblock, n, "PD_COPY_SYMENT:blocks"); for (i = 0; i < n; i++) nsp[i] = osp[i]; ntype = lite_SC_strsavef(PD_entry_type(osym), "char*:PD_COPY_SYMENT:type"); ndims = lite_PD_copy_dims(PD_entry_dimensions(osym)); PD_entry_blocks(nsym) = nsp; PD_entry_type(nsym) = ntype; PD_entry_dimensions(nsym) = ndims; PD_entry_number(nsym) = PD_entry_number(osym); PD_entry_indirects(nsym) = PD_entry_indirects(osym); return(nsym); } /*------------------------------------------------------------------------- * Function: _lite_PD_mk_syment * * Purpose: Make and return a pointer to an entry for the symbol table. * * Return: Success: * * Failure: * * Programmer: Adapted from PACT PDB * Mar 5, 1996 2:16 PM EST * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. 
* *------------------------------------------------------------------------- */ syment * _lite_PD_mk_syment (char *type, long numb, long addr, symindir *indr, dimdes *dims) { syment *ep; symblock *sp; char *t; ep = FMAKE(syment, "_PD_MK_SYMENT:ep"); sp = FMAKE(symblock, "_PD_MK_SYMENT:sp"); PD_entry_blocks(ep) = sp; sp->number = numb; sp->diskaddr = addr; if (type == NULL) { t = NULL; } else { t = lite_SC_strsavef(type, "char*:_PD_MK_SYMENT:type"); } PD_entry_type(ep) = t; PD_entry_number(ep) = numb; PD_entry_dimensions(ep) = dims; if (indr == NULL) { symindir iloc; iloc.addr = 0L; iloc.n_ind_type = 0L; iloc.arr_offs = 0L; PD_entry_indirects(ep) = iloc; } else { PD_entry_indirects(ep) = *indr; } return(ep); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_syment * * Purpose: Reclaim the space of the given syment. * * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 12:05 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_syment (syment *ep) { SFREE(PD_entry_type(ep)); SFREE(PD_entry_blocks(ep)); SFREE(ep); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_syment_d * * Purpose: Reclaim the space of the given syment including its * dimensions. * * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 12:06 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_syment_d (syment *ep) { if (ep == NULL) return; _lite_PD_rl_dimensions(PD_entry_dimensions(ep)); _lite_PD_rl_syment(ep); } /*------------------------------------------------------------------------- * Function: _lite_PD_mk_defstr * * Purpose: Make a defstr entry for the structure chart. * * Return: Success: * * Failure: * * Programmer: Adapted from PACT PDB * Mar 5, 1996 4:45 PM EST * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * *------------------------------------------------------------------------- */ defstr * _lite_PD_mk_defstr (char *type, memdes *lst, long sz, int align, int flg, int conv, int *ordr, long *formt) { defstr *dp; memdes *desc; int n; dp = FMAKE(defstr, "_PD_MK_DEFSTR:dp"); dp->type = lite_SC_strsavef(type, "char*:_PD_MK_DEFSTR:type"); dp->alignment = align; dp->convert = conv; dp->onescmp = 0; dp->unsgned = 0; dp->order_flag = flg; dp->order = ordr; dp->format = formt; dp->members = lst; if (sz >= 0) { dp->size_bits = 0L; dp->size = sz; } else { dp->size_bits = -sz; dp->size = (-sz + 7) >> 3L; dp->unsgned = TRUE; } /* * Find the number of indirects. */ for (n = 0, desc = lst; desc != NULL; desc = desc->next) { if (_lite_PD_indirection(desc->type)) n++; } dp->n_indirects = n; return(dp); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_defstr * * Purpose: Free up the storage associated with a defstr. 
* * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 3:18 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_defstr (defstr *dp) { memdes *desc, *next; int *ord; long *frm; for (desc = dp->members; desc != NULL; desc = next) { next = desc->next; _lite_PD_rl_descriptor(desc); } ord = dp->order; if ((ord != NULL) && (lite_SC_arrlen(ord) > -1)) SFREE(ord); frm = dp->format; if ((frm != NULL) && (lite_SC_arrlen(frm) > -1)) SFREE(dp->format); SFREE(dp->type); SFREE(dp); } /*------------------------------------------------------------------------- * Function: lite_PD_copy_members * * Purpose: Copy a linked list of members. * * Return: Success: ptr to the new list * * Failure: * * Programmer: Adapted from PACT PDB * Mar 6, 1996 11:38 AM EST * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * *------------------------------------------------------------------------- */ memdes * lite_PD_copy_members (memdes *desc) { memdes *newm, *nnxt, *thism, *prevm; char *ms, *ts, *bs, *ns, *cs; dimdes *nd; newm = NULL; prevm = NULL; for (thism = desc; thism != NULL; thism = thism->next) { nnxt = FMAKE(memdes, "PD_COPY_MEMBERS:nnxt"); ms = lite_SC_strsavef(thism->member, "char*:PD_COPY_MEMBERS:member"); ts = lite_SC_strsavef(thism->type, "char*:PD_COPY_MEMBERS:type"); bs = lite_SC_strsavef(thism->base_type, "char*:PD_COPY_MEMBERS:base_type"); ns = lite_SC_strsavef(thism->name, "char*:PD_COPY_MEMBERS:name"); nd = lite_PD_copy_dims(thism->dimensions); nnxt->member = ms; nnxt->type = ts; nnxt->base_type = bs; nnxt->name = ns; nnxt->dimensions = nd; nnxt->next = NULL; nnxt->member_offs = thism->member_offs; nnxt->cast_offs = thism->cast_offs; nnxt->number = thism->number; if (thism->cast_memb != NULL) { cs = lite_SC_strsavef(thism->cast_memb, "char*:PD_COPY_MEMBERS:cast_memb"); nnxt->cast_memb = cs; } else { nnxt->cast_memb = NULL; } if (newm == NULL) { newm = nnxt; } else { prevm->next = nnxt; } prevm = nnxt; } return(newm); } /*------------------------------------------------------------------------- * Function: _lite_PD_mk_descriptor * * Purpose: Build a member descriptor out of the given string. * * Return: Success: * * Failure: * * Programmer: Adapted from PACT PDB * Mar 5, 1996 2:06 PM EST * * Modifications: * Eric Brugger, Tue Dec 8 15:16:07 PST 1998 * Remove unnecessary calls to lite_SC_mark, since reference count now * set when allocated. * *------------------------------------------------------------------------- */ memdes * _lite_PD_mk_descriptor (char *member, int defoff) { memdes *desc; char *ms, *ts, *bs, *ns, *p; dimdes *nd; desc = FMAKE(memdes, "_PD_MK_DESCRIPTOR:desc"); /* * Get rid of any leading white space. 
*/ for (p = member; strchr(" \t\n\r\f", *p) != NULL; p++) /*void*/ ; ms = lite_SC_strsavef(p, "char*:_PD_MK_DESCRIPTOR:member"); ts = _lite_PD_member_type(p); bs = _lite_PD_member_base_type(p); ns = _lite_PD_member_name(p); nd = _lite_PD_ex_dims(p, defoff, FALSE); desc->member = ms; desc->type = ts; desc->base_type = bs; desc->name = ns; desc->dimensions = nd; desc->number = _lite_PD_comp_num(desc->dimensions); desc->member_offs = -1L; desc->cast_offs = -1L; desc->cast_memb = NULL; desc->next = NULL; return(desc); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_descriptor * * Purpose: Release a member descriptor * * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 4:55 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_descriptor (memdes *desc) { SFREE(desc->member); SFREE(desc->name); SFREE(desc->type); SFREE(desc->base_type); SFREE(desc->cast_memb); _lite_PD_rl_dimensions(desc->dimensions); SFREE(desc); } /*------------------------------------------------------------------------- * Function: _lite_PD_mk_dimensions * * Purpose: Build a dimension descriptor out of the given members * * struct s_dimdes * {long index_min; * long index_max; * long number; * struct s_dimdes *next;}; * * Return: Success: * * Failure: * * Programmer: Adapted from PACT PDB * Mar 5, 1996 2:08 PM EST * * Modifications: * *------------------------------------------------------------------------- */ dimdes * _lite_PD_mk_dimensions (long mini, long leng) { dimdes *dims; dims = FMAKE(dimdes, "_PD_MK_DIMENSIONS:dims"); dims->index_min = mini; dims->index_max = mini + leng - 1L; dims->number = leng; dims->next = NULL; return(dims); } /*------------------------------------------------------------------------- * Function: _lite_PD_rl_dimensions * * Purpose: Release a dimension descriptor. * * Return: void * * Programmer: Adapted from PACT PDB * Mar 5, 1996 3:19 PM EST * * Modifications: * *------------------------------------------------------------------------- */ void _lite_PD_rl_dimensions (dimdes *dims) { dimdes *pp, *nxt; int nc; for (pp = dims; pp != NULL; pp = nxt) { nxt = pp->next; nc = lite_SC_ref_count(pp); SFREE(pp); if (nc > 1) break; } }
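/*
 * Usage sketch, not part of the original PDB-lite source, showing how the
 * dimension helpers above fit together: build a 10 x 20 dimension list
 * with _lite_PD_mk_dimensions(), duplicate it with lite_PD_copy_dims(),
 * and release both lists with _lite_PD_rl_dimensions().  These are
 * internal helpers; hand-linking the second node via ->next is done here
 * purely for illustration.
 */
#include "pdb.h"

static void
example_dimension_list(void)
{
    dimdes *dims, *copy;

    /* First dimension indexed 0..9, second indexed 0..19. */
    dims = _lite_PD_mk_dimensions(0L, 10L);
    dims->next = _lite_PD_mk_dimensions(0L, 20L);

    /* Deep copy of the whole list. */
    copy = lite_PD_copy_dims(dims);

    /* Each list owns its nodes and must be released separately. */
    _lite_PD_rl_dimensions(copy);
    _lite_PD_rl_dimensions(dims);
}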
43644.c
/* mbed Microcontroller Library * Copyright (c) 2018 GigaDevice Semiconductor Inc. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "pwmout_api.h" #include "cmsis.h" #include "pinmap.h" #include "mbed_error.h" #include "PeripheralPins.h" #define DEV_PWMOUT_APB_MASK 0x00010000U #define DEV_PWMOUT_APB1 0U #define DEV_PWMOUT_APB2 1U static uint32_t timer_get_clock(uint32_t timer_periph); static void dev_pwmout_init(pwmout_t *obj); /** Initialize the pwm out peripheral and configure the pin * * @param obj The pwmout object to initialize * @param pin The pwmout pin to initialize */ void pwmout_init(pwmout_t *obj, PinName pin) { MBED_ASSERT(obj); obj->pwm = (PWMName)pinmap_peripheral(pin, PinMap_PWM); MBED_ASSERT(obj->pwm != (PWMName)NC); uint32_t function = pinmap_function(pin, PinMap_PWM); MBED_ASSERT(function != (uint32_t)NC); obj->ch = GD_PIN_CHANNEL_GET(function); /* Peripheral initialization */ dev_pwmout_init(obj); /* pin function initialization */ pinmap_pinout(pin, PinMap_PWM); } /** Deinitialize the pwmout object * * @param obj The pwmout object */ void pwmout_free(pwmout_t *obj) { timer_channel_output_state_config(obj->pwm, obj->ch, TIMER_CCX_DISABLE); } /** Set the output duty-cycle in range <0.0f, 1.0f> * * Value 0.0f represents 0 percentage, 1.0f represents 100 percent. * @param obj The pwmout object * @param percent The floating-point percentage number */ void pwmout_write(pwmout_t *obj, float value) { uint16_t period; uint16_t pulse; timer_disable(obj->pwm); /* overflow protection */ if (value < (float)0.0) { value = 0.0; } else if (value > (float)1.0) { value = 1.0; } period = TIMER_CAR(obj->pwm); pulse = (uint16_t)(period * value); timer_channel_output_pulse_value_config(obj->pwm, obj->ch, pulse); timer_enable(obj->pwm); } /** Read the current float-point output duty-cycle * * @param obj The pwmout object * @return A floating-point output duty-cycle */ float pwmout_read(pwmout_t *obj) { float value = 0; uint16_t period; uint16_t pulse; period = TIMER_CAR(obj->pwm); switch (obj->ch) { case TIMER_CH_0: pulse = TIMER_CH0CV(obj->pwm); break; case TIMER_CH_1: pulse = TIMER_CH1CV(obj->pwm); break; case TIMER_CH_2: pulse = TIMER_CH2CV(obj->pwm); break; case TIMER_CH_3: pulse = TIMER_CH3CV(obj->pwm); break; default: error("Error: pwm channel error! \r\n"); } /* calculated waveform duty ratio */ value = (float)(pulse) / (float)(period); if (value > (float)1.0) { value = (float)1.0; } return value; } /** Set the PWM period specified in seconds, keeping the duty cycle the same * * Periods smaller than microseconds (the lowest resolution) are set to zero. 
* @param obj The pwmout object * @param seconds The floating-point seconds period */ void pwmout_period(pwmout_t *obj, float seconds) { pwmout_period_us(obj, seconds * 1000000.0f); } /** Set the PWM period specified in miliseconds, keeping the duty cycle the same * * @param obj The pwmout object * @param ms The milisecond period */ void pwmout_period_ms(pwmout_t *obj, int ms) { pwmout_period_us(obj, ms * 1000); } /** Set the PWM period specified in microseconds, keeping the duty cycle the same * * @param obj The pwmout object * @param us The microsecond period */ void pwmout_period_us(pwmout_t *obj, int us) { uint32_t ultemp = 0; uint32_t timer_clk = 0; uint32_t period = us - 1; uint32_t prescaler; float duty_ratio; duty_ratio = pwmout_read(obj); timer_disable(obj->pwm); timer_clk = timer_get_clock(obj->pwm); ultemp = (timer_clk / 1000000); prescaler = ultemp; obj->cnt_unit = 1; while (period > 0xFFFF) { obj->cnt_unit = obj->cnt_unit << 1; period = period >> 1; prescaler = ultemp * obj->cnt_unit; } if (prescaler > 0xFFFF) { error("Error: TIMER prescaler value is overflow \r\n"); } timer_autoreload_value_config(obj->pwm, period); timer_prescaler_config(obj->pwm, prescaler - 1, TIMER_PSC_RELOAD_NOW); ultemp = duty_ratio * us; pwmout_pulsewidth_us(obj, ultemp); timer_enable(obj->pwm); } /** Set the PWM pulsewidth specified in seconds, keeping the period the same. * * @param obj The pwmout object * @param seconds The floating-point pulsewidth in seconds */ void pwmout_pulsewidth(pwmout_t *obj, float seconds) { pwmout_pulsewidth_us(obj, seconds * 1000000.0f); } /** Set the PWM pulsewidth specified in miliseconds, keeping the period the same. * * @param obj The pwmout object * @param ms The floating-point pulsewidth in miliseconds */ void pwmout_pulsewidth_ms(pwmout_t *obj, int ms) { pwmout_pulsewidth_us(obj, ms * 1000); } /** Set the PWM pulsewidth specified in microseconds, keeping the period the same. 
* * @param obj The pwmout object * @param us The floating-point pulsewidth in microseconds */ void pwmout_pulsewidth_us(pwmout_t *obj, int us) { uint32_t pulse; uint32_t period; period = TIMER_CAR(obj->pwm); pulse = us / obj->cnt_unit; if (pulse > period) { pulse = period; } timer_channel_output_pulse_value_config(obj->pwm, obj->ch, pulse); } static uint32_t timer_get_clock(uint32_t timer_periph) { uint32_t timerclk; if ((TIMER0 == timer_periph) || (TIMER7 == timer_periph) || (TIMER8 == timer_periph) || (TIMER9 == timer_periph) || (TIMER10 == timer_periph)) { /* get the current APB2 TIMER clock source */ if (RCU_APB2_CKAHB_DIV1 == (RCU_CFG0 & RCU_CFG0_APB2PSC)) { timerclk = rcu_clock_freq_get(CK_APB2); } else { timerclk = rcu_clock_freq_get(CK_APB2) * 2; } } else { /* get the current APB1 TIMER clock source */ if (RCU_APB1_CKAHB_DIV1 == (RCU_CFG0 & RCU_CFG0_APB1PSC)) { timerclk = rcu_clock_freq_get(CK_APB1); } else { timerclk = rcu_clock_freq_get(CK_APB1) * 2; } } return timerclk; } static void dev_pwmout_init(pwmout_t *obj) { timer_oc_parameter_struct timer_ocintpara; timer_parameter_struct timer_initpara; MBED_ASSERT(obj); uint32_t periph = obj->pwm; switch (periph) { case TIMER0: rcu_periph_clock_enable(RCU_TIMER0); break; case TIMER1: rcu_periph_clock_enable(RCU_TIMER1); break; case TIMER2: rcu_periph_clock_enable(RCU_TIMER2); break; case TIMER3: rcu_periph_clock_enable(RCU_TIMER3); break; case TIMER4: rcu_periph_clock_enable(RCU_TIMER4); break; case TIMER5: rcu_periph_clock_enable(RCU_TIMER5); break; case TIMER6: rcu_periph_clock_enable(RCU_TIMER6); break; case TIMER7: rcu_periph_clock_enable(RCU_TIMER7); break; case TIMER8: rcu_periph_clock_enable(RCU_TIMER8); break; case TIMER9: rcu_periph_clock_enable(RCU_TIMER9); break; case TIMER10: rcu_periph_clock_enable(RCU_TIMER10); break; case TIMER11: rcu_periph_clock_enable(RCU_TIMER11); break; case TIMER12: rcu_periph_clock_enable(RCU_TIMER12); break; case TIMER13: rcu_periph_clock_enable(RCU_TIMER13); break; } /* configure TIMER base function */ timer_initpara.prescaler = 119; timer_initpara.period = 9999; timer_initpara.clockdivision = 0; timer_initpara.counterdirection = TIMER_COUNTER_UP; timer_initpara.alignedmode = TIMER_COUNTER_EDGE; timer_init(obj->pwm, &timer_initpara); /* configure TIMER channel output function */ timer_ocintpara.ocpolarity = TIMER_OC_POLARITY_HIGH; timer_ocintpara.ocnpolarity = TIMER_OCN_POLARITY_LOW; timer_ocintpara.outputstate = TIMER_CCX_ENABLE; timer_ocintpara.outputnstate = TIMER_CCXN_ENABLE; timer_ocintpara.ocidlestate = TIMER_OC_IDLE_STATE_LOW; timer_ocintpara.ocnidlestate = TIMER_OCN_IDLE_STATE_LOW; timer_channel_output_config(obj->pwm, obj->ch, &timer_ocintpara); timer_channel_output_mode_config(obj->pwm, obj->ch, TIMER_OC_MODE_PWM0); timer_channel_output_fast_config(obj->pwm, obj->ch, TIMER_OC_FAST_DISABLE); timer_primary_output_config(obj->pwm, ENABLE); } const PinMap *pwmout_pinmap() { return PinMap_PWM; }
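/*
 * Usage sketch, not part of the original HAL file: configure one PWM
 * output at 1 kHz with a 25 % duty cycle using the calls defined above.
 * EXAMPLE_PWM_PIN is a placeholder; substitute any PinName that appears
 * in the target's PinMap_PWM table.
 */
#include "pwmout_api.h"

#define EXAMPLE_PWM_PIN ((PinName)0)    /* placeholder, see note above */

void pwmout_example(void)
{
    pwmout_t pwm;

    pwmout_init(&pwm, EXAMPLE_PWM_PIN); /* claim the pin and its TIMER channel */
    pwmout_period_ms(&pwm, 1);          /* 1 ms period -> 1 kHz                */
    pwmout_write(&pwm, 0.25f);          /* 25 % duty cycle                     */

    /* ... */

    pwmout_free(&pwm);                  /* disable the channel output          */
}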
173090.c
/* ************************************************************************** */ /* */ /* ::: :::::::: */ /* ft_lstclear.c :+: :+: :+: */ /* +:+ +:+ +:+ */ /* By: kangkim <[email protected]> +#+ +:+ +#+ */ /* +#+#+#+#+#+ +#+ */ /* Created: 2021/11/19 15:25:20 by kangkim #+# #+# */ /* Updated: 2021/11/19 15:26:42 by kangkim ### ########.fr */ /* */ /* ************************************************************************** */ #include "libft.h" void ft_lstclear(t_list **lst, void (*del)(void *)) { t_list *curr; t_list *next; curr = *lst; while (curr) { next = curr->next; ft_lstdelone(curr, del); curr = next; } *lst = NULL; }
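/*
 * Usage sketch, not part of the original file: build a two-node list and
 * release it with ft_lstclear().  It assumes the usual libft helpers
 * ft_lstnew() and ft_lstadd_front() from the same library, with free()
 * as the content destructor.
 */
#include <stdlib.h>
#include <string.h>
#include "libft.h"

void	example_lstclear(void)
{
	t_list	*lst;

	lst = NULL;
	ft_lstadd_front(&lst, ft_lstnew(strdup("world")));
	ft_lstadd_front(&lst, ft_lstnew(strdup("hello")));
	/* Frees every node and its content, then resets the head to NULL. */
	ft_lstclear(&lst, free);
}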
632917.c
/** \file Dimension functions These functions define and inquire about dimensions. Copyright 2010 University Corporation for Atmospheric Research/Unidata. See COPYRIGHT file for more info. */ #include "ncdispatch.h" /*! \defgroup dimensions Dimensions Dimensions are used to define the shape of data in netCDF. Dimensions for a netCDF dataset are defined when it is created, while the netCDF dataset is in define mode. Additional dimensions may be added later by reentering define mode. A netCDF dimension has a name and a length. In a netCDF classic or 64-bit offset file, at most one dimension can have the unlimited length, which means variables using this dimension can grow along this dimension. In a netCDF-4 file multiple unlimited dimensions are supported. There is a suggested limit (1024) to the number of dimensions that can be defined in a single netCDF dataset. The limit is the value of the predefined macro NC_MAX_DIMS. The purpose of the limit is to make writing generic applications simpler. They need only provide an array of NC_MAX_DIMS dimensions to handle any netCDF dataset. The implementation of the netCDF library does not enforce this advisory maximum, so it is possible to use more dimensions, if necessary, but netCDF utilities that assume the advisory maximums may not be able to handle the resulting netCDF datasets. NC_MAX_VAR_DIMS, which must not exceed NC_MAX_DIMS, is the maximum number of dimensions that can be used to specify the shape of a single variable. It is also intended to simplify writing generic applications. Ordinarily, the name and length of a dimension are fixed when the dimension is first defined. The name may be changed later, but the length of a dimension (other than the unlimited dimension) cannot be changed without copying all the data to a new netCDF dataset with a redefined dimension length. Dimension lengths in the C interface are type size_t rather than type int to make it possible to access all the data in a netCDF dataset on a platform that only supports a 16-bit int data type, for example MSDOS. If dimension lengths were type int instead, it would not be possible to access data from variables with a dimension length greater than a 16-bit int can accommodate. A netCDF dimension in an open netCDF dataset is referred to by a small integer called a dimension ID. In the C interface, dimension IDs are 0, 1, 2, ..., in the order in which the dimensions were defined. Operations supported on dimensions are: - Create a dimension, given its name and length. - Get a dimension ID from its name. - Get a dimension's name and length from its ID. - Rename a dimension. */ /*! \{*/ /* All these functions are part of the above defgroup... */ /** \name Deleting and Renaming Dimensions Functions to delete or rename an dimension. */ /*! \{ */ /* All these functions are part of this named group... */ /*! Define a new dimension. The function nc_def_dim adds a new dimension to an open netCDF dataset in define mode. It returns (as an argument) a dimension ID, given the netCDF ID, the dimension name, and the dimension length. At most one unlimited length dimension, called the record dimension, may be defined for each classic or 64-bit offset netCDF dataset. NetCDF-4 datasets may have multiple unlimited dimensions. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param name Name of the dimension to be created. \param len Length of the dimension to be created. 
Use NC_UNLIMITED for unlimited dimensions. \param idp Pointer where dimension ID will be stored. \retval ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. \returns ::NC_ENOTINDEFINE Not in define mode. \returns ::NC_EDIMSIZE Invalid dimension size. \returns ::NC_EUNLIMIT NC_UNLIMITED size already in use \returns ::NC_EMAXDIMS NC_MAX_DIMS exceeded [not enforced after 4.5.0] \returns ::NC_ENAMEINUSE String match to name in use \returns ::NC_ENOMEM Memory allocation (malloc) failure \returns ::NC_EPERM Write to read only \section nc_def_dim_example Example Here is an example using nc_def_dim() to create a dimension named lat of length 18 and a unlimited dimension named rec in a new netCDF dataset named foo.nc: \code #include <netcdf.h> ... int status, ncid, latid, recid; ... status = nc_create("foo.nc", NC_NOCLOBBER, &ncid); if (status != NC_NOERR) handle_error(status); ... status = nc_def_dim(ncid, "lat", 18L, &latid); if (status != NC_NOERR) handle_error(status); status = nc_def_dim(ncid, "rec", NC_UNLIMITED, &recid); if (status != NC_NOERR) handle_error(status); \endcode */ int nc_def_dim(int ncid, const char *name, size_t len, int *idp) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; TRACE(nc_def_dim); return ncp->dispatch->def_dim(ncid, name, len, idp); } /*! Find the ID of a dimension from the name. The function nc_inq_dimid returns (as an argument) the ID of a netCDF dimension, given the name of the dimension. If ndims is the number of dimensions defined for a netCDF dataset, each dimension has an ID between 0 and ndims-1. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param name Name of the dimension. \param idp Pointer where dimension ID will be stored. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. \returns ::NC_EBADDIM Invalid dimension ID or name. */ int nc_inq_dimid(int ncid, const char *name, int *idp) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; TRACE(nc_inq_dimid); return ncp->dispatch->inq_dimid(ncid,name,idp); } /*! Find the name and length of a dimension. The length for the unlimited dimension, if any, is the number of records written so far. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param dimid Dimension ID, from a previous call to nc_inq_dimid() or nc_def_dim(). \param name Returned dimension name. The caller must allocate space for the returned name. The maximum possible length, in characters, of a dimension name is given by the predefined constant NC_MAX_NAME. (This doesn't include the null terminator, so declare your array to be size NC_MAX_NAME+1). The returned character array will be null-terminated. \param lenp Pointer to location for returned length of dimension. For the unlimited dimension, this is the number of records written so far. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. \returns ::NC_EBADDIM Invalid dimension ID or name. \section nc_inq_dim_example Example Here is an example using nc_inq_dim() to determine the length of a dimension named lat, and the name and current maximum length of the unlimited dimension for an existing netCDF dataset named foo.nc: \code #include <netcdf.h> ... int status, ncid, latid, recid; size_t latlength, recs; char recname[NC_MAX_NAME+1]; ... 
status = nc_open("foo.nc", NC_NOWRITE, &ncid); if (status != NC_NOERR) handle_error(status); status = nc_inq_unlimdim(ncid, &recid); if (status != NC_NOERR) handle_error(status); ... status = nc_inq_dimid(ncid, "lat", &latid); if (status != NC_NOERR) handle_error(status); status = nc_inq_dimlen(ncid, latid, &latlength); if (status != NC_NOERR) handle_error(status); status = nc_inq_dim(ncid, recid, recname, &recs); if (status != NC_NOERR) handle_error(status); \endcode */ int nc_inq_dim(int ncid, int dimid, char *name, size_t *lenp) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; TRACE(nc_inq_dim); return ncp->dispatch->inq_dim(ncid,dimid,name,lenp); } /*! Rename a dimension. This function renames an existing dimension in a netCDF dataset open for writing. You cannot rename a dimension to have the same name as another dimension. For netCDF classic and 64-bit offset files, if the new name is longer than the old name, the netCDF dataset must be in define mode. For netCDF-4 files the dataset is switched to define more for the rename, regardless of the name length. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param dimid Dimension ID, from a previous call to nc_inq_dimid() or nc_def_dim(). \param name New name for dimension. Must be a null-terminated string with length less than NC_MAX_NAME. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. \returns ::NC_EBADDIM Invalid dimension ID or name. \returns ::NC_ENAMEINUSE String match to name in use \returns ::NC_ENOMEM Memory allocation (malloc) failure \returns ::NC_EPERM Write to read only \section nc_rename_dim_example Example Here is an example using nc_rename_dim to rename the dimension lat to latitude in an existing netCDF dataset named foo.nc: \code #include <netcdf.h> ... int status, ncid, latid; ... status = nc_open("foo.nc", NC_WRITE, &ncid); if (status != NC_NOERR) handle_error(status); ... status = nc_redef(ncid); if (status != NC_NOERR) handle_error(status); status = nc_inq_dimid(ncid, "lat", &latid); if (status != NC_NOERR) handle_error(status); status = nc_rename_dim(ncid, latid, "latitude"); if (status != NC_NOERR) handle_error(status); status = nc_enddef(ncid); if (status != NC_NOERR) handle_error(status); \endcode */ int nc_rename_dim(int ncid, int dimid, const char *name) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; TRACE(nc_rename_dim); return ncp->dispatch->rename_dim(ncid,dimid,name); } /*! Find the number of dimensions. In a classic model netCDF file, this function returns the number of defined dimensions. In a netCDF-4/HDF5 file, this function returns the number of dimensions available in the group specified by ncid, which may be less than the total number of dimensions in a file. In a netCDF-4/HDF5 file, dimensions are in all sub-groups, sub-sub-groups, etc. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param ndimsp Pointer where number of dimensions will be written. Ignored if NULL. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. */ int nc_inq_ndims(int ncid, int *ndimsp) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; if(ndimsp == NULL) return NC_NOERR; TRACE(nc_inq_ndims); return ncp->dispatch->inq(ncid,ndimsp,NULL,NULL,NULL); } /*! Find the ID of the unlimited dimension. 
This function finds the ID of the unlimited dimension. For netCDF-4/HDF5 files (which may have more than one unlimited dimension), the ID of the first unlimited dimesnion is returned. For these files, nc_inq_unlimdims() will return all the unlimited dimension IDs. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param unlimdimidp Pointer where unlimited dimension ID will be stored. If there is no unlimited dimension, -1 will be stored here. Ignored if NULL. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. */ int nc_inq_unlimdim(int ncid, int *unlimdimidp) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; TRACE(nc_inq_unlimdim); return ncp->dispatch->inq_unlimdim(ncid,unlimdimidp); } /*! Find out the name of a dimension. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param dimid Dimension ID, from a previous call to nc_inq_dimid() or nc_def_dim(). \param name Returned dimension name. The caller must allocate space for the returned name. The maximum possible length, in characters, of a dimension name is given by the predefined constant NC_MAX_NAME. (This doesn't include the null terminator, so declare your array to be size NC_MAX_NAME+1). The returned character array will be null-terminated. Ignored if NULL. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. \returns ::NC_EBADDIM Invalid dimension ID or name. \section nc_inq_dim_example2 Example Here is an example using nc_inq_dim() to determine the length of a dimension named lat, and the name and current maximum length of the unlimited dimension for an existing netCDF dataset named foo.nc: \code #include <netcdf.h> ... int status, ncid, latid, recid; size_t latlength, recs; char recname[NC_MAX_NAME+1]; ... status = nc_open("foo.nc", NC_NOWRITE, &ncid); if (status != NC_NOERR) handle_error(status); status = nc_inq_unlimdim(ncid, &recid); if (status != NC_NOERR) handle_error(status); ... status = nc_inq_dimid(ncid, "lat", &latid); if (status != NC_NOERR) handle_error(status); status = nc_inq_dimlen(ncid, latid, &latlength); if (status != NC_NOERR) handle_error(status); status = nc_inq_dim(ncid, recid, recname, &recs); if (status != NC_NOERR) handle_error(status); \endcode */ int nc_inq_dimname(int ncid, int dimid, char *name) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; if(name == NULL) return NC_NOERR; TRACE(nc_inq_dimname); return ncp->dispatch->inq_dim(ncid,dimid,name,NULL); } /*! Find the length of a dimension. The length for the unlimited dimension, if any, is the number of records written so far. \param ncid NetCDF or group ID, from a previous call to nc_open(), nc_create(), nc_def_grp(), or associated inquiry functions such as nc_inq_ncid(). \param dimid Dimension ID, from a previous call to nc_inq_dimid() or nc_def_dim(). \param lenp Pointer where the length will be stored. \returns ::NC_NOERR No error. \returns ::NC_EBADID Not a valid ID. \returns ::NC_EBADDIM Invalid dimension ID or name. \section nc_inq_dim_example3 Example Here is an example using nc_inq_dim() to determine the length of a dimension named lat, and the name and current maximum length of the unlimited dimension for an existing netCDF dataset named foo.nc: \code #include <netcdf.h> ... 
int status, ncid, latid, recid; size_t latlength, recs; char recname[NC_MAX_NAME+1]; ... status = nc_open("foo.nc", NC_NOWRITE, &ncid); if (status != NC_NOERR) handle_error(status); status = nc_inq_unlimdim(ncid, &recid); if (status != NC_NOERR) handle_error(status); ... status = nc_inq_dimid(ncid, "lat", &latid); if (status != NC_NOERR) handle_error(status); status = nc_inq_dimlen(ncid, latid, &latlength); if (status != NC_NOERR) handle_error(status); status = nc_inq_dim(ncid, recid, recname, &recs); if (status != NC_NOERR) handle_error(status); \endcode */ int nc_inq_dimlen(int ncid, int dimid, size_t *lenp) { NC* ncp; int stat = NC_check_id(ncid, &ncp); if(stat != NC_NOERR) return stat; if(lenp == NULL) return NC_NOERR; TRACE(nc_inq_dimlen); return ncp->dispatch->inq_dim(ncid,dimid,NULL,lenp); } /*! \} */ /* End of named group ...*/ /*! \} */ /* End of defgroup. */
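/*
 * Usage sketch, not part of the original libdispatch source, tying the
 * inquiry calls together: open a dataset, count its dimensions, and
 * report the unlimited dimension if one exists.  "foo.nc" follows the
 * examples in the comments above; nc_open() and nc_close() come from the
 * public netCDF API.
 */
#include <stdio.h>
#include <netcdf.h>

int
dim_summary(void)
{
    int ncid, ndims, unlimid, status;

    if ((status = nc_open("foo.nc", NC_NOWRITE, &ncid)) != NC_NOERR)
        return status;

    if ((status = nc_inq_ndims(ncid, &ndims)) != NC_NOERR)
        return status;

    if ((status = nc_inq_unlimdim(ncid, &unlimid)) != NC_NOERR)
        return status;

    /* unlimid is -1 when the file has no unlimited dimension. */
    printf("%d dimensions, unlimited dimension id: %d\n", ndims, unlimid);

    return nc_close(ncid);
}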
314584.c
/* Redis CLI (command line interface) * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "fmacros.h" #include "version.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <signal.h> #include <unistd.h> #include <time.h> #include <ctype.h> #include <errno.h> #include <sys/stat.h> #include <sys/time.h> #include <assert.h> #include <fcntl.h> #include <limits.h> #include <math.h> #include <hiredis.h> #ifdef USE_OPENSSL #include <openssl/ssl.h> #include <openssl/err.h> #include <hiredis_ssl.h> #endif #include <sdscompat.h> /* Use hiredis' sds compat header that maps sds calls to their hi_ variants */ #include <sds.h> /* use sds.h from hiredis, so that only one set of sds functions will be present in the binary */ #include "dict.h" #include "adlist.h" #include "zmalloc.h" #include "linenoise.h" #include "help.h" #include "anet.h" #include "ae.h" #include "cli_common.h" #include "mt19937-64.h" #define UNUSED(V) ((void) V) #define OUTPUT_STANDARD 0 #define OUTPUT_RAW 1 #define OUTPUT_CSV 2 #define REDIS_CLI_KEEPALIVE_INTERVAL 15 /* seconds */ #define REDIS_CLI_DEFAULT_PIPE_TIMEOUT 30 /* seconds */ #define REDIS_CLI_HISTFILE_ENV "REDISCLI_HISTFILE" #define REDIS_CLI_HISTFILE_DEFAULT ".rediscli_history" #define REDIS_CLI_RCFILE_ENV "REDISCLI_RCFILE" #define REDIS_CLI_RCFILE_DEFAULT ".redisclirc" #define REDIS_CLI_AUTH_ENV "REDISCLI_AUTH" #define REDIS_CLI_CLUSTER_YES_ENV "REDISCLI_CLUSTER_YES" #define CLUSTER_MANAGER_SLOTS 16384 #define CLUSTER_MANAGER_MIGRATE_TIMEOUT 60000 #define CLUSTER_MANAGER_MIGRATE_PIPELINE 10 #define CLUSTER_MANAGER_REBALANCE_THRESHOLD 2 #define CLUSTER_MANAGER_INVALID_HOST_ARG \ "[ERR] Invalid arguments: you need to pass either a valid " \ "address (ie. 120.0.0.1:7000) or space separated IP " \ "and port (ie. 120.0.0.1 7000)\n" #define CLUSTER_MANAGER_MODE() (config.cluster_manager_command.name != NULL) #define CLUSTER_MANAGER_MASTERS_COUNT(nodes, replicas) (nodes/(replicas + 1)) #define CLUSTER_MANAGER_COMMAND(n,...) 
\ (redisCommand(n->context, __VA_ARGS__)) #define CLUSTER_MANAGER_NODE_ARRAY_FREE(array) zfree(array->alloc) #define CLUSTER_MANAGER_PRINT_REPLY_ERROR(n, err) \ clusterManagerLogErr("Node %s:%d replied with error:\n%s\n", \ n->ip, n->port, err); #define clusterManagerLogInfo(...) \ clusterManagerLog(CLUSTER_MANAGER_LOG_LVL_INFO,__VA_ARGS__) #define clusterManagerLogErr(...) \ clusterManagerLog(CLUSTER_MANAGER_LOG_LVL_ERR,__VA_ARGS__) #define clusterManagerLogWarn(...) \ clusterManagerLog(CLUSTER_MANAGER_LOG_LVL_WARN,__VA_ARGS__) #define clusterManagerLogOk(...) \ clusterManagerLog(CLUSTER_MANAGER_LOG_LVL_SUCCESS,__VA_ARGS__) #define CLUSTER_MANAGER_FLAG_MYSELF 1 << 0 #define CLUSTER_MANAGER_FLAG_SLAVE 1 << 1 #define CLUSTER_MANAGER_FLAG_FRIEND 1 << 2 #define CLUSTER_MANAGER_FLAG_NOADDR 1 << 3 #define CLUSTER_MANAGER_FLAG_DISCONNECT 1 << 4 #define CLUSTER_MANAGER_FLAG_FAIL 1 << 5 #define CLUSTER_MANAGER_CMD_FLAG_FIX 1 << 0 #define CLUSTER_MANAGER_CMD_FLAG_SLAVE 1 << 1 #define CLUSTER_MANAGER_CMD_FLAG_YES 1 << 2 #define CLUSTER_MANAGER_CMD_FLAG_AUTOWEIGHTS 1 << 3 #define CLUSTER_MANAGER_CMD_FLAG_EMPTYMASTER 1 << 4 #define CLUSTER_MANAGER_CMD_FLAG_SIMULATE 1 << 5 #define CLUSTER_MANAGER_CMD_FLAG_REPLACE 1 << 6 #define CLUSTER_MANAGER_CMD_FLAG_COPY 1 << 7 #define CLUSTER_MANAGER_CMD_FLAG_COLOR 1 << 8 #define CLUSTER_MANAGER_CMD_FLAG_CHECK_OWNERS 1 << 9 #define CLUSTER_MANAGER_CMD_FLAG_FIX_WITH_UNREACHABLE_MASTERS 1 << 10 #define CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY 1 << 11 #define CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY 1 << 12 #define CLUSTER_MANAGER_OPT_GETFRIENDS 1 << 0 #define CLUSTER_MANAGER_OPT_COLD 1 << 1 #define CLUSTER_MANAGER_OPT_UPDATE 1 << 2 #define CLUSTER_MANAGER_OPT_QUIET 1 << 6 #define CLUSTER_MANAGER_OPT_VERBOSE 1 << 7 #define CLUSTER_MANAGER_LOG_LVL_INFO 1 #define CLUSTER_MANAGER_LOG_LVL_WARN 2 #define CLUSTER_MANAGER_LOG_LVL_ERR 3 #define CLUSTER_MANAGER_LOG_LVL_SUCCESS 4 #define CLUSTER_JOIN_CHECK_AFTER 20 #define LOG_COLOR_BOLD "29;1m" #define LOG_COLOR_RED "31;1m" #define LOG_COLOR_GREEN "32;1m" #define LOG_COLOR_YELLOW "33;1m" #define LOG_COLOR_RESET "0m" /* cliConnect() flags. */ #define CC_FORCE (1<<0) /* Re-connect if already connected. */ #define CC_QUIET (1<<1) /* Don't log connecting errors. */ /* --latency-dist palettes. */ int spectrum_palette_color_size = 19; int spectrum_palette_color[] = {0,233,234,235,237,239,241,243,245,247,144,143,142,184,226,214,208,202,196}; int spectrum_palette_mono_size = 13; int spectrum_palette_mono[] = {0,233,234,235,237,239,241,243,245,247,249,251,253}; /* The actual palette in use. 
*/ int *spectrum_palette; int spectrum_palette_size; /* Dict Helpers */ static uint64_t dictSdsHash(const void *key); static int dictSdsKeyCompare(dict *d, const void *key1, const void *key2); static void dictSdsDestructor(dict *d, void *val); static void dictListDestructor(dict *d, void *val); /* Cluster Manager Command Info */ typedef struct clusterManagerCommand { char *name; int argc; char **argv; int flags; int replicas; char *from; char *to; char **weight; int weight_argc; char *master_id; int slots; int timeout; int pipeline; float threshold; char *backup_dir; char *from_user; char *from_pass; int from_askpass; } clusterManagerCommand; static void createClusterManagerCommand(char *cmdname, int argc, char **argv); static redisContext *context; static struct config { char *hostip; int hostport; char *hostsocket; int tls; cliSSLconfig sslconfig; long repeat; long interval; int dbnum; /* db num currently selected */ int input_dbnum; /* db num user input */ int interactive; int shutdown; int monitor_mode; int pubsub_mode; int blocking_state_aborted; /* used to abort monitor_mode and pubsub_mode. */ int latency_mode; int latency_dist_mode; int latency_history; int lru_test_mode; long long lru_test_sample_size; int cluster_mode; int cluster_reissue_command; int cluster_send_asking; int slave_mode; int pipe_mode; int pipe_timeout; int getrdb_mode; int stat_mode; int scan_mode; int intrinsic_latency_mode; int intrinsic_latency_duration; sds pattern; char *rdb_filename; int bigkeys; int memkeys; unsigned memkeys_samples; int hotkeys; int stdinarg; /* get last arg from stdin. (-x option) */ char *auth; int askpass; char *user; int quoted_input; /* Force input args to be treated as quoted strings */ int output; /* output mode, see OUTPUT_* defines */ int push_output; /* Should we display spontaneous PUSH replies */ sds mb_delim; sds cmd_delim; char prompt[128]; char *eval; int eval_ldb; int eval_ldb_sync; /* Ask for synchronous mode of the Lua debugger. */ int eval_ldb_end; /* Lua debugging session ended. */ int enable_ldb_on_eval; /* Handle manual SCRIPT DEBUG + EVAL commands. */ int last_cmd_type; int verbose; int set_errcode; clusterManagerCommand cluster_manager_command; int no_auth_warning; int resp3; int in_multi; int pre_multi_dbnum; } config; /* User preferences. 
*/ static struct pref { int hints; } pref; static volatile sig_atomic_t force_cancel_loop = 0; static void usage(int err); static void slaveMode(void); char *redisGitSHA1(void); char *redisGitDirty(void); static int cliConnect(int force); static char *getInfoField(char *info, char *field); static long getLongInfoField(char *info, char *field); /*------------------------------------------------------------------------------ * Utility functions *--------------------------------------------------------------------------- */ static void cliPushHandler(void *, void *); uint16_t crc16(const char *buf, int len); static long long ustime(void) { struct timeval tv; long long ust; gettimeofday(&tv, NULL); ust = ((long long)tv.tv_sec)*1000000; ust += tv.tv_usec; return ust; } static long long mstime(void) { return ustime()/1000; } static void cliRefreshPrompt(void) { if (config.eval_ldb) return; sds prompt = sdsempty(); if (config.hostsocket != NULL) { prompt = sdscatfmt(prompt,"redis %s",config.hostsocket); } else { char addr[256]; anetFormatAddr(addr, sizeof(addr), config.hostip, config.hostport); prompt = sdscatlen(prompt,addr,strlen(addr)); } /* Add [dbnum] if needed */ if (config.dbnum != 0) prompt = sdscatfmt(prompt,"[%i]",config.dbnum); /* Add TX if in transaction state*/ if (config.in_multi) prompt = sdscatlen(prompt,"(TX)",4); /* Copy the prompt in the static buffer. */ prompt = sdscatlen(prompt,"> ",2); snprintf(config.prompt,sizeof(config.prompt),"%s",prompt); sdsfree(prompt); } /* Return the name of the dotfile for the specified 'dotfilename'. * Normally it just concatenates user $HOME to the file specified * in 'dotfilename'. However if the environment variable 'envoverride' * is set, its value is taken as the path. * * The function returns NULL (if the file is /dev/null or cannot be * obtained for some error), or an SDS string that must be freed by * the user. */ static sds getDotfilePath(char *envoverride, char *dotfilename) { char *path = NULL; sds dotPath = NULL; /* Check the env for a dotfile override. */ path = getenv(envoverride); if (path != NULL && *path != '\0') { if (!strcmp("/dev/null", path)) { return NULL; } /* If the env is set, return it. */ dotPath = sdsnew(path); } else { char *home = getenv("HOME"); if (home != NULL && *home != '\0') { /* If no override is set use $HOME/<dotfilename>. */ dotPath = sdscatprintf(sdsempty(), "%s/%s", home, dotfilename); } } return dotPath; } /* URL-style percent decoding. */ #define isHexChar(c) (isdigit(c) || (c >= 'a' && c <= 'f')) #define decodeHexChar(c) (isdigit(c) ? c - '0' : c - 'a' + 10) #define decodeHex(h, l) ((decodeHexChar(h) << 4) + decodeHexChar(l)) static sds percentDecode(const char *pe, size_t len) { const char *end = pe + len; sds ret = sdsempty(); const char *curr = pe; while (curr < end) { if (*curr == '%') { if ((end - curr) < 2) { fprintf(stderr, "Incomplete URI encoding\n"); exit(1); } char h = tolower(*(++curr)); char l = tolower(*(++curr)); if (!isHexChar(h) || !isHexChar(l)) { fprintf(stderr, "Illegal character in URI encoding\n"); exit(1); } char c = decodeHex(h, l); ret = sdscatlen(ret, &c, 1); curr++; } else { ret = sdscatlen(ret, curr++, 1); } } return ret; } /* Parse a URI and extract the server connection information. * URI scheme is based on the the provisional specification[1] excluding support * for query parameters. 
Valid URIs are: * scheme: "redis://" * authority: [[<username> ":"] <password> "@"] [<hostname> [":" <port>]] * path: ["/" [<db>]] * * [1]: https://www.iana.org/assignments/uri-schemes/prov/redis */ static void parseRedisUri(const char *uri) { const char *scheme = "redis://"; const char *tlsscheme = "rediss://"; const char *curr = uri; const char *end = uri + strlen(uri); const char *userinfo, *username, *port, *host, *path; /* URI must start with a valid scheme. */ if (!strncasecmp(tlsscheme, curr, strlen(tlsscheme))) { #ifdef USE_OPENSSL config.tls = 1; curr += strlen(tlsscheme); #else fprintf(stderr,"rediss:// is only supported when redis-cli is compiled with OpenSSL\n"); exit(1); #endif } else if (!strncasecmp(scheme, curr, strlen(scheme))) { curr += strlen(scheme); } else { fprintf(stderr,"Invalid URI scheme\n"); exit(1); } if (curr == end) return; /* Extract user info. */ if ((userinfo = strchr(curr,'@'))) { if ((username = strchr(curr, ':')) && username < userinfo) { config.user = percentDecode(curr, username - curr); curr = username + 1; } config.auth = percentDecode(curr, userinfo - curr); curr = userinfo + 1; } if (curr == end) return; /* Extract host and port. */ path = strchr(curr, '/'); if (*curr != '/') { host = path ? path - 1 : end; if ((port = strchr(curr, ':'))) { config.hostport = atoi(port + 1); host = port - 1; } config.hostip = sdsnewlen(curr, host - curr + 1); } curr = path ? path + 1 : end; if (curr == end) return; /* Extract database number. */ config.input_dbnum = atoi(curr); } static uint64_t dictSdsHash(const void *key) { return dictGenHashFunction((unsigned char*)key, sdslen((char*)key)); } static int dictSdsKeyCompare(dict *d, const void *key1, const void *key2) { int l1,l2; UNUSED(d); l1 = sdslen((sds)key1); l2 = sdslen((sds)key2); if (l1 != l2) return 0; return memcmp(key1, key2, l1) == 0; } static void dictSdsDestructor(dict *d, void *val) { UNUSED(d); sdsfree(val); } void dictListDestructor(dict *d, void *val) { UNUSED(d); listRelease((list*)val); } /*------------------------------------------------------------------------------ * Help functions *--------------------------------------------------------------------------- */ #define CLI_HELP_COMMAND 1 #define CLI_HELP_GROUP 2 typedef struct { int type; int argc; sds *argv; sds full; /* Only used for help on commands */ struct commandHelp *org; } helpEntry; static helpEntry *helpEntries; static int helpEntriesLen; static sds cliVersion(void) { sds version; version = sdscatprintf(sdsempty(), "%s", REDIS_VERSION); /* Add git commit and working tree status when available */ if (strtoll(redisGitSHA1(),NULL,16)) { version = sdscatprintf(version, " (git:%s", redisGitSHA1()); if (strtoll(redisGitDirty(),NULL,10)) version = sdscatprintf(version, "-dirty"); version = sdscat(version, ")"); } return version; } static void cliInitHelp(void) { int commandslen = sizeof(commandHelp)/sizeof(struct commandHelp); int groupslen = sizeof(commandGroups)/sizeof(char*); int i, len, pos = 0; helpEntry tmp; helpEntriesLen = len = commandslen+groupslen; helpEntries = zmalloc(sizeof(helpEntry)*len); for (i = 0; i < groupslen; i++) { tmp.argc = 1; tmp.argv = zmalloc(sizeof(sds)); tmp.argv[0] = sdscatprintf(sdsempty(),"@%s",commandGroups[i]); tmp.full = tmp.argv[0]; tmp.type = CLI_HELP_GROUP; tmp.org = NULL; helpEntries[pos++] = tmp; } for (i = 0; i < commandslen; i++) { tmp.argv = sdssplitargs(commandHelp[i].name,&tmp.argc); tmp.full = sdsnew(commandHelp[i].name); tmp.type = CLI_HELP_COMMAND; tmp.org = &commandHelp[i]; 
helpEntries[pos++] = tmp; } } /* cliInitHelp() setups the helpEntries array with the command and group * names from the help.h file. However the Redis instance we are connecting * to may support more commands, so this function integrates the previous * entries with additional entries obtained using the COMMAND command * available in recent versions of Redis. */ static void cliIntegrateHelp(void) { if (cliConnect(CC_QUIET) == REDIS_ERR) return; redisReply *reply = redisCommand(context, "COMMAND"); if(reply == NULL || reply->type != REDIS_REPLY_ARRAY) return; /* Scan the array reported by COMMAND and fill only the entries that * don't already match what we have. */ for (size_t j = 0; j < reply->elements; j++) { redisReply *entry = reply->element[j]; if (entry->type != REDIS_REPLY_ARRAY || entry->elements < 4 || entry->element[0]->type != REDIS_REPLY_STRING || entry->element[1]->type != REDIS_REPLY_INTEGER || entry->element[3]->type != REDIS_REPLY_INTEGER) return; char *cmdname = entry->element[0]->str; int i; for (i = 0; i < helpEntriesLen; i++) { helpEntry *he = helpEntries+i; if (!strcasecmp(he->argv[0],cmdname)) break; } if (i != helpEntriesLen) continue; helpEntriesLen++; helpEntries = zrealloc(helpEntries,sizeof(helpEntry)*helpEntriesLen); helpEntry *new = helpEntries+(helpEntriesLen-1); new->argc = 1; new->argv = zmalloc(sizeof(sds)); new->argv[0] = sdsnew(cmdname); new->full = new->argv[0]; new->type = CLI_HELP_COMMAND; sdstoupper(new->argv[0]); struct commandHelp *ch = zmalloc(sizeof(*ch)); ch->name = new->argv[0]; ch->params = sdsempty(); int args = llabs(entry->element[1]->integer); args--; /* Remove the command name itself. */ if (entry->element[3]->integer == 1) { ch->params = sdscat(ch->params,"key "); args--; } while(args-- > 0) ch->params = sdscat(ch->params,"arg "); if (entry->element[1]->integer < 0) ch->params = sdscat(ch->params,"...options..."); ch->summary = "Help not available"; ch->group = 0; ch->since = "not known"; new->org = ch; } freeReplyObject(reply); } /* Output command help to stdout. */ static void cliOutputCommandHelp(struct commandHelp *help, int group) { printf("\r\n \x1b[1m%s\x1b[0m \x1b[90m%s\x1b[0m\r\n", help->name, help->params); printf(" \x1b[33msummary:\x1b[0m %s\r\n", help->summary); printf(" \x1b[33msince:\x1b[0m %s\r\n", help->since); if (group) { printf(" \x1b[33mgroup:\x1b[0m %s\r\n", commandGroups[help->group]); } } /* Print generic help. */ static void cliOutputGenericHelp(void) { sds version = cliVersion(); printf( "redis-cli %s\n" "To get help about Redis commands type:\n" " \"help @<group>\" to get a list of commands in <group>\n" " \"help <command>\" for help on <command>\n" " \"help <tab>\" to get a list of possible help topics\n" " \"quit\" to exit\n" "\n" "To set redis-cli preferences:\n" " \":set hints\" enable online hints\n" " \":set nohints\" disable online hints\n" "Set your preferences in ~/.redisclirc\n", version ); sdsfree(version); } /* Output all command help, filtering by group or command name. 
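 *
 * For example, "help @list" prints every command in the list group, while
 * "help client kill" matches the entry whose leading words are CLIENT KILL;
 * with no arguments the generic help above is printed instead.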
*/ static void cliOutputHelp(int argc, char **argv) { int i, j, len; int group = -1; helpEntry *entry; struct commandHelp *help; if (argc == 0) { cliOutputGenericHelp(); return; } else if (argc > 0 && argv[0][0] == '@') { len = sizeof(commandGroups)/sizeof(char*); for (i = 0; i < len; i++) { if (strcasecmp(argv[0]+1,commandGroups[i]) == 0) { group = i; break; } } } assert(argc > 0); for (i = 0; i < helpEntriesLen; i++) { entry = &helpEntries[i]; if (entry->type != CLI_HELP_COMMAND) continue; help = entry->org; if (group == -1) { /* Compare all arguments */ if (argc <= entry->argc) { for (j = 0; j < argc; j++) { if (strcasecmp(argv[j],entry->argv[j]) != 0) break; } if (j == argc) { cliOutputCommandHelp(help,1); } } } else { if (group == help->group) { cliOutputCommandHelp(help,0); } } } printf("\r\n"); } /* Linenoise completion callback. */ static void completionCallback(const char *buf, linenoiseCompletions *lc) { size_t startpos = 0; int mask; int i; size_t matchlen; sds tmp; if (strncasecmp(buf,"help ",5) == 0) { startpos = 5; while (isspace(buf[startpos])) startpos++; mask = CLI_HELP_COMMAND | CLI_HELP_GROUP; } else { mask = CLI_HELP_COMMAND; } for (i = 0; i < helpEntriesLen; i++) { if (!(helpEntries[i].type & mask)) continue; matchlen = strlen(buf+startpos); if (strncasecmp(buf+startpos,helpEntries[i].full,matchlen) == 0) { tmp = sdsnewlen(buf,startpos); tmp = sdscat(tmp,helpEntries[i].full); linenoiseAddCompletion(lc,tmp); sdsfree(tmp); } } } /* Linenoise hints callback. */ static char *hintsCallback(const char *buf, int *color, int *bold) { if (!pref.hints) return NULL; int i, rawargc, argc, buflen = strlen(buf), matchlen = 0; sds *rawargv, *argv = sdssplitargs(buf,&argc); int endspace = buflen && isspace(buf[buflen-1]); helpEntry *entry = NULL; /* Check if the argument list is empty and return ASAP. */ if (argc == 0) { sdsfreesplitres(argv,argc); return NULL; } /* Search longest matching prefix command */ for (i = 0; i < helpEntriesLen; i++) { if (!(helpEntries[i].type & CLI_HELP_COMMAND)) continue; rawargv = sdssplitargs(helpEntries[i].full,&rawargc); if (rawargc <= argc) { int j; for (j = 0; j < rawargc; j++) { if (strcasecmp(rawargv[j],argv[j])) { break; } } if (j == rawargc && rawargc > matchlen) { matchlen = rawargc; entry = &helpEntries[i]; } } sdsfreesplitres(rawargv,rawargc); } sdsfreesplitres(argv,argc); if (entry) { *color = 90; *bold = 0; sds hint = sdsnew(entry->org->params); /* Remove arguments from the returned hint to show only the * ones the user did not yet type. */ int toremove = argc-matchlen; while(toremove > 0 && sdslen(hint)) { if (hint[0] == '[') break; if (hint[0] == ' ') toremove--; sdsrange(hint,1,-1); } /* Add an initial space if needed. 
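 *
 * The hint text is appended right after whatever the user has typed, so unless
 * the line already ends in a space we prepend one: after typing "set k" the
 * display becomes roughly "set k value [EX seconds ...]", with the suffix
 * drawn in the dim color 90 requested above.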
*/ if (!endspace) { sds newhint = sdsnewlen(" ",1); newhint = sdscatsds(newhint,hint); sdsfree(hint); hint = newhint; } return hint; } return NULL; } static void freeHintsCallback(void *ptr) { sdsfree(ptr); } /*------------------------------------------------------------------------------ * Networking / parsing *--------------------------------------------------------------------------- */ /* Send AUTH command to the server */ static int cliAuth(redisContext *ctx, char *user, char *auth) { redisReply *reply; if (auth == NULL) return REDIS_OK; if (user == NULL) reply = redisCommand(ctx,"AUTH %s",auth); else reply = redisCommand(ctx,"AUTH %s %s",user,auth); if (reply == NULL) { fprintf(stderr, "\nI/O error\n"); return REDIS_ERR; } int result = REDIS_OK; if (reply->type == REDIS_REPLY_ERROR) { result = REDIS_ERR; fprintf(stderr, "AUTH failed: %s\n", reply->str); } freeReplyObject(reply); return result; } /* Send SELECT input_dbnum to the server */ static int cliSelect(void) { redisReply *reply; if (config.input_dbnum == config.dbnum) return REDIS_OK; reply = redisCommand(context,"SELECT %d",config.input_dbnum); if (reply == NULL) { fprintf(stderr, "\nI/O error\n"); return REDIS_ERR; } int result = REDIS_OK; if (reply->type == REDIS_REPLY_ERROR) { result = REDIS_ERR; fprintf(stderr,"SELECT %d failed: %s\n",config.input_dbnum,reply->str); } else { config.dbnum = config.input_dbnum; cliRefreshPrompt(); } freeReplyObject(reply); return result; } /* Select RESP3 mode if redis-cli was started with the -3 option. */ static int cliSwitchProto(void) { redisReply *reply; if (config.resp3 == 0) return REDIS_OK; reply = redisCommand(context,"HELLO 3"); if (reply == NULL) { fprintf(stderr, "\nI/O error\n"); return REDIS_ERR; } int result = REDIS_OK; if (reply->type == REDIS_REPLY_ERROR) { result = REDIS_ERR; fprintf(stderr,"HELLO 3 failed: %s\n",reply->str); } freeReplyObject(reply); return result; } /* Connect to the server. It is possible to pass certain flags to the function: * CC_FORCE: The connection is performed even if there is already * a connected socket. * CC_QUIET: Don't print errors if connection fails. */ static int cliConnect(int flags) { if (context == NULL || flags & CC_FORCE) { if (context != NULL) { redisFree(context); config.dbnum = 0; config.in_multi = 0; cliRefreshPrompt(); } /* Do not use hostsocket when we got redirected in cluster mode */ if (config.hostsocket == NULL || (config.cluster_mode && config.cluster_reissue_command)) { context = redisConnect(config.hostip,config.hostport); } else { context = redisConnectUnix(config.hostsocket); } if (!context->err && config.tls) { const char *err = NULL; if (cliSecureConnection(context, config.sslconfig, &err) == REDIS_ERR && err) { fprintf(stderr, "Could not negotiate a TLS connection: %s\n", err); redisFree(context); context = NULL; return REDIS_ERR; } } if (context->err) { if (!(flags & CC_QUIET)) { fprintf(stderr,"Could not connect to Redis at "); if (config.hostsocket == NULL || (config.cluster_mode && config.cluster_reissue_command)) { fprintf(stderr, "%s:%d: %s\n", config.hostip,config.hostport,context->errstr); } else { fprintf(stderr,"%s: %s\n", config.hostsocket,context->errstr); } } redisFree(context); context = NULL; return REDIS_ERR; } /* Set aggressive KEEP_ALIVE socket option in the Redis context socket * in order to prevent timeouts caused by the execution of long * commands. At the same time this improves the detection of real * errors. 
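 *
 * This matters mostly for long-lived, largely idle links (MONITOR, SUBSCRIBE,
 * --stat): the keepalive probes stop stateful middleboxes from silently
 * dropping the connection and surface a dead peer without waiting for the
 * next write to fail.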
*/ anetKeepAlive(NULL, context->fd, REDIS_CLI_KEEPALIVE_INTERVAL); /* Do AUTH, select the right DB, switch to RESP3 if needed. */ if (cliAuth(context, config.user, config.auth) != REDIS_OK) return REDIS_ERR; if (cliSelect() != REDIS_OK) return REDIS_ERR; if (cliSwitchProto() != REDIS_OK) return REDIS_ERR; } /* Set a PUSH handler if configured to do so. */ if (config.push_output) { redisSetPushCallback(context, cliPushHandler); } return REDIS_OK; } /* In cluster, if server replies ASK, we will redirect to a different node. * Before sending the real command, we need to send ASKING command first. */ static int cliSendAsking() { redisReply *reply; config.cluster_send_asking = 0; if (context == NULL) { return REDIS_ERR; } reply = redisCommand(context,"ASKING"); if (reply == NULL) { fprintf(stderr, "\nI/O error\n"); return REDIS_ERR; } int result = REDIS_OK; if (reply->type == REDIS_REPLY_ERROR) { result = REDIS_ERR; fprintf(stderr,"ASKING failed: %s\n",reply->str); } freeReplyObject(reply); return result; } static void cliPrintContextError(void) { if (context == NULL) return; fprintf(stderr,"Error: %s\n",context->errstr); } static int isInvalidateReply(redisReply *reply) { return reply->type == REDIS_REPLY_PUSH && reply->elements == 2 && reply->element[0]->type == REDIS_REPLY_STRING && !strncmp(reply->element[0]->str, "invalidate", 10) && reply->element[1]->type == REDIS_REPLY_ARRAY; } /* Special display handler for RESP3 'invalidate' messages. * This function does not validate the reply, so it should * already be confirmed correct */ static sds cliFormatInvalidateTTY(redisReply *r) { sds out = sdsnew("-> invalidate: "); for (size_t i = 0; i < r->element[1]->elements; i++) { redisReply *key = r->element[1]->element[i]; assert(key->type == REDIS_REPLY_STRING); out = sdscatfmt(out, "'%s'", key->str, key->len); if (i < r->element[1]->elements - 1) out = sdscatlen(out, ", ", 2); } return sdscatlen(out, "\n", 1); } static sds cliFormatReplyTTY(redisReply *r, char *prefix) { sds out = sdsempty(); switch (r->type) { case REDIS_REPLY_ERROR: out = sdscatprintf(out,"(error) %s\n", r->str); break; case REDIS_REPLY_STATUS: out = sdscat(out,r->str); out = sdscat(out,"\n"); break; case REDIS_REPLY_INTEGER: out = sdscatprintf(out,"(integer) %lld\n",r->integer); break; case REDIS_REPLY_DOUBLE: out = sdscatprintf(out,"(double) %s\n",r->str); break; case REDIS_REPLY_STRING: case REDIS_REPLY_VERB: /* If you are producing output for the standard output we want * a more interesting output with quoted characters and so forth, * unless it's a verbatim string type. */ if (r->type == REDIS_REPLY_STRING) { out = sdscatrepr(out,r->str,r->len); out = sdscat(out,"\n"); } else { out = sdscatlen(out,r->str,r->len); out = sdscat(out,"\n"); } break; case REDIS_REPLY_NIL: out = sdscat(out,"(nil)\n"); break; case REDIS_REPLY_BOOL: out = sdscat(out,r->integer ? 
"(true)\n" : "(false)\n"); break; case REDIS_REPLY_ARRAY: case REDIS_REPLY_MAP: case REDIS_REPLY_SET: case REDIS_REPLY_PUSH: if (r->elements == 0) { if (r->type == REDIS_REPLY_ARRAY) out = sdscat(out,"(empty array)\n"); else if (r->type == REDIS_REPLY_MAP) out = sdscat(out,"(empty hash)\n"); else if (r->type == REDIS_REPLY_SET) out = sdscat(out,"(empty set)\n"); else if (r->type == REDIS_REPLY_PUSH) out = sdscat(out,"(empty push)\n"); else out = sdscat(out,"(empty aggregate type)\n"); } else { unsigned int i, idxlen = 0; char _prefixlen[16]; char _prefixfmt[16]; sds _prefix; sds tmp; /* Calculate chars needed to represent the largest index */ i = r->elements; if (r->type == REDIS_REPLY_MAP) i /= 2; do { idxlen++; i /= 10; } while(i); /* Prefix for nested multi bulks should grow with idxlen+2 spaces */ memset(_prefixlen,' ',idxlen+2); _prefixlen[idxlen+2] = '\0'; _prefix = sdscat(sdsnew(prefix),_prefixlen); /* Setup prefix format for every entry */ char numsep; if (r->type == REDIS_REPLY_SET) numsep = '~'; else if (r->type == REDIS_REPLY_MAP) numsep = '#'; else numsep = ')'; snprintf(_prefixfmt,sizeof(_prefixfmt),"%%s%%%ud%c ",idxlen,numsep); for (i = 0; i < r->elements; i++) { unsigned int human_idx = (r->type == REDIS_REPLY_MAP) ? i/2 : i; human_idx++; /* Make it 1-based. */ /* Don't use the prefix for the first element, as the parent * caller already prepended the index number. */ out = sdscatprintf(out,_prefixfmt,i == 0 ? "" : prefix,human_idx); /* Format the multi bulk entry */ tmp = cliFormatReplyTTY(r->element[i],_prefix); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); /* For maps, format the value as well. */ if (r->type == REDIS_REPLY_MAP) { i++; sdsrange(out,0,-2); out = sdscat(out," => "); tmp = cliFormatReplyTTY(r->element[i],_prefix); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); } } sdsfree(_prefix); } break; default: fprintf(stderr,"Unknown reply type: %d\n", r->type); exit(1); } return out; } int isColorTerm(void) { char *t = getenv("TERM"); return t != NULL && strstr(t,"xterm") != NULL; } /* Helper function for sdsCatColorizedLdbReply() appending colorize strings * to an SDS string. */ sds sdscatcolor(sds o, char *s, size_t len, char *color) { if (!isColorTerm()) return sdscatlen(o,s,len); int bold = strstr(color,"bold") != NULL; int ccode = 37; /* Defaults to white. */ if (strstr(color,"red")) ccode = 31; else if (strstr(color,"green")) ccode = 32; else if (strstr(color,"yellow")) ccode = 33; else if (strstr(color,"blue")) ccode = 34; else if (strstr(color,"magenta")) ccode = 35; else if (strstr(color,"cyan")) ccode = 36; else if (strstr(color,"white")) ccode = 37; o = sdscatfmt(o,"\033[%i;%i;49m",bold,ccode); o = sdscatlen(o,s,len); o = sdscat(o,"\033[0m"); return o; } /* Colorize Lua debugger status replies according to the prefix they * have. */ sds sdsCatColorizedLdbReply(sds o, char *s, size_t len) { char *color = "white"; if (strstr(s,"<debug>")) color = "bold"; if (strstr(s,"<redis>")) color = "green"; if (strstr(s,"<reply>")) color = "cyan"; if (strstr(s,"<error>")) color = "red"; if (strstr(s,"<hint>")) color = "bold"; if (strstr(s,"<value>") || strstr(s,"<retval>")) color = "magenta"; if (len > 4 && isdigit(s[3])) { if (s[1] == '>') color = "yellow"; /* Current line. */ else if (s[2] == '#') color = "bold"; /* Break point. */ } return sdscatcolor(o,s,len,color); } static sds cliFormatReplyRaw(redisReply *r) { sds out = sdsempty(), tmp; size_t i; switch (r->type) { case REDIS_REPLY_NIL: /* Nothing... 
*/ break; case REDIS_REPLY_ERROR: out = sdscatlen(out,r->str,r->len); out = sdscatlen(out,"\n",1); break; case REDIS_REPLY_STATUS: case REDIS_REPLY_STRING: case REDIS_REPLY_VERB: if (r->type == REDIS_REPLY_STATUS && config.eval_ldb) { /* The Lua debugger replies with arrays of simple (status) * strings. We colorize the output for more fun if this * is a debugging session. */ /* Detect the end of a debugging session. */ if (strstr(r->str,"<endsession>") == r->str) { config.enable_ldb_on_eval = 0; config.eval_ldb = 0; config.eval_ldb_end = 1; /* Signal the caller session ended. */ config.output = OUTPUT_STANDARD; cliRefreshPrompt(); } else { out = sdsCatColorizedLdbReply(out,r->str,r->len); } } else { out = sdscatlen(out,r->str,r->len); } break; case REDIS_REPLY_BOOL: out = sdscat(out,r->integer ? "(true)" : "(false)"); break; case REDIS_REPLY_INTEGER: out = sdscatprintf(out,"%lld",r->integer); break; case REDIS_REPLY_DOUBLE: out = sdscatprintf(out,"%s",r->str); break; case REDIS_REPLY_SET: case REDIS_REPLY_ARRAY: case REDIS_REPLY_PUSH: for (i = 0; i < r->elements; i++) { if (i > 0) out = sdscat(out,config.mb_delim); tmp = cliFormatReplyRaw(r->element[i]); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); } break; case REDIS_REPLY_MAP: for (i = 0; i < r->elements; i += 2) { if (i > 0) out = sdscat(out,config.mb_delim); tmp = cliFormatReplyRaw(r->element[i]); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); out = sdscatlen(out," ",1); tmp = cliFormatReplyRaw(r->element[i+1]); out = sdscatlen(out,tmp,sdslen(tmp)); sdsfree(tmp); } break; default: fprintf(stderr,"Unknown reply type: %d\n", r->type); exit(1); } return out; } static sds cliFormatReplyCSV(redisReply *r) { unsigned int i; sds out = sdsempty(); switch (r->type) { case REDIS_REPLY_ERROR: out = sdscat(out,"ERROR,"); out = sdscatrepr(out,r->str,strlen(r->str)); break; case REDIS_REPLY_STATUS: out = sdscatrepr(out,r->str,r->len); break; case REDIS_REPLY_INTEGER: out = sdscatprintf(out,"%lld",r->integer); break; case REDIS_REPLY_DOUBLE: out = sdscatprintf(out,"%s",r->str); break; case REDIS_REPLY_STRING: case REDIS_REPLY_VERB: out = sdscatrepr(out,r->str,r->len); break; case REDIS_REPLY_NIL: out = sdscat(out,"NULL"); break; case REDIS_REPLY_BOOL: out = sdscat(out,r->integer ? "true" : "false"); break; case REDIS_REPLY_ARRAY: case REDIS_REPLY_SET: case REDIS_REPLY_PUSH: case REDIS_REPLY_MAP: /* CSV has no map type, just output flat list. 
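 *
 * e.g. a two-field hash returned by HGETALL is emitted as
 * "field1","value1","field2","value2" on a single line, keys and values
 * simply alternating.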
*/ for (i = 0; i < r->elements; i++) { sds tmp = cliFormatReplyCSV(r->element[i]); out = sdscatlen(out,tmp,sdslen(tmp)); if (i != r->elements-1) out = sdscat(out,","); sdsfree(tmp); } break; default: fprintf(stderr,"Unknown reply type: %d\n", r->type); exit(1); } return out; } /* Generate reply strings in various output modes */ static sds cliFormatReply(redisReply *reply, int mode, int verbatim) { sds out; if (verbatim) { out = cliFormatReplyRaw(reply); } else if (mode == OUTPUT_STANDARD) { out = cliFormatReplyTTY(reply, ""); } else if (mode == OUTPUT_RAW) { out = cliFormatReplyRaw(reply); out = sdscatsds(out, config.cmd_delim); } else if (mode == OUTPUT_CSV) { out = cliFormatReplyCSV(reply); out = sdscatlen(out, "\n", 1); } else { fprintf(stderr, "Error: Unknown output encoding %d\n", mode); exit(1); } return out; } /* Output any spontaneous PUSH reply we receive */ static void cliPushHandler(void *privdata, void *reply) { UNUSED(privdata); sds out; if (config.output == OUTPUT_STANDARD && isInvalidateReply(reply)) { out = cliFormatInvalidateTTY(reply); } else { out = cliFormatReply(reply, config.output, 0); } fwrite(out, sdslen(out), 1, stdout); freeReplyObject(reply); sdsfree(out); } static int cliReadReply(int output_raw_strings) { void *_reply; redisReply *reply; sds out = NULL; int output = 1; if (redisGetReply(context,&_reply) != REDIS_OK) { if (config.blocking_state_aborted) { config.blocking_state_aborted = 0; config.monitor_mode = 0; config.pubsub_mode = 0; return cliConnect(CC_FORCE); } if (config.shutdown) { redisFree(context); context = NULL; return REDIS_OK; } if (config.interactive) { /* Filter cases where we should reconnect */ if (context->err == REDIS_ERR_IO && (errno == ECONNRESET || errno == EPIPE)) return REDIS_ERR; if (context->err == REDIS_ERR_EOF) return REDIS_ERR; } cliPrintContextError(); exit(1); return REDIS_ERR; /* avoid compiler warning */ } reply = (redisReply*)_reply; config.last_cmd_type = reply->type; /* Check if we need to connect to a different node and reissue the * request. */ if (config.cluster_mode && reply->type == REDIS_REPLY_ERROR && (!strncmp(reply->str,"MOVED ",6) || !strncmp(reply->str,"ASK ",4))) { char *p = reply->str, *s; int slot; output = 0; /* Comments show the position of the pointer as: * * [S] for pointer 's' * [P] for pointer 'p' */ s = strchr(p,' '); /* MOVED[S]3999 127.0.0.1:6381 */ p = strchr(s+1,' '); /* MOVED[S]3999[P]127.0.0.1:6381 */ *p = '\0'; slot = atoi(s+1); s = strrchr(p+1,':'); /* MOVED 3999[P]127.0.0.1[S]6381 */ *s = '\0'; sdsfree(config.hostip); config.hostip = sdsnew(p+1); config.hostport = atoi(s+1); if (config.interactive) printf("-> Redirected to slot [%d] located at %s:%d\n", slot, config.hostip, config.hostport); config.cluster_reissue_command = 1; if (!strncmp(reply->str,"ASK ",4)) { config.cluster_send_asking = 1; } cliRefreshPrompt(); } else if (!config.interactive && config.set_errcode && reply->type == REDIS_REPLY_ERROR) { fprintf(stderr,"%s\n",reply->str); exit(1); return REDIS_ERR; /* avoid compiler warning */ } if (output) { out = cliFormatReply(reply, config.output, output_raw_strings); fwrite(out,sdslen(out),1,stdout); fflush(stdout); sdsfree(out); } freeReplyObject(reply); return REDIS_OK; } static int cliSendCommand(int argc, char **argv, long repeat) { char *command = argv[0]; size_t *argvlen; int j, output_raw; if (!config.eval_ldb && /* In debugging mode, let's pass "help" to Redis. 
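 * (inside an --ldb session the server-side Lua debugger answers "help" itself,
 * listing its own commands such as step, continue and print, so redis-cli must
 * not intercept it here)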
*/ (!strcasecmp(command,"help") || !strcasecmp(command,"?"))) { cliOutputHelp(--argc, ++argv); return REDIS_OK; } if (context == NULL) return REDIS_ERR; output_raw = 0; if (!strcasecmp(command,"info") || !strcasecmp(command,"lolwut") || (argc >= 2 && !strcasecmp(command,"debug") && !strcasecmp(argv[1],"htstats")) || (argc >= 2 && !strcasecmp(command,"debug") && !strcasecmp(argv[1],"htstats-key")) || (argc >= 2 && !strcasecmp(command,"memory") && (!strcasecmp(argv[1],"malloc-stats") || !strcasecmp(argv[1],"doctor"))) || (argc == 2 && !strcasecmp(command,"cluster") && (!strcasecmp(argv[1],"nodes") || !strcasecmp(argv[1],"info"))) || (argc >= 2 && !strcasecmp(command,"client") && (!strcasecmp(argv[1],"list") || !strcasecmp(argv[1],"info"))) || (argc == 3 && !strcasecmp(command,"latency") && !strcasecmp(argv[1],"graph")) || (argc == 2 && !strcasecmp(command,"latency") && !strcasecmp(argv[1],"doctor")) || /* Format PROXY INFO command for Redis Cluster Proxy: * https://github.com/artix75/redis-cluster-proxy */ (argc >= 2 && !strcasecmp(command,"proxy") && !strcasecmp(argv[1],"info"))) { output_raw = 1; } if (!strcasecmp(command,"shutdown")) config.shutdown = 1; if (!strcasecmp(command,"monitor")) config.monitor_mode = 1; if (!strcasecmp(command,"subscribe") || !strcasecmp(command,"psubscribe")) config.pubsub_mode = 1; if (!strcasecmp(command,"sync") || !strcasecmp(command,"psync")) config.slave_mode = 1; /* When the user manually calls SCRIPT DEBUG, setup the activation of * debugging mode on the next eval if needed. */ if (argc == 3 && !strcasecmp(argv[0],"script") && !strcasecmp(argv[1],"debug")) { if (!strcasecmp(argv[2],"yes") || !strcasecmp(argv[2],"sync")) { config.enable_ldb_on_eval = 1; } else { config.enable_ldb_on_eval = 0; } } /* Actually activate LDB on EVAL if needed. */ if (!strcasecmp(command,"eval") && config.enable_ldb_on_eval) { config.eval_ldb = 1; config.output = OUTPUT_RAW; } /* Setup argument length */ argvlen = zmalloc(argc*sizeof(size_t)); for (j = 0; j < argc; j++) argvlen[j] = sdslen(argv[j]); /* Negative repeat is allowed and causes infinite loop, works well with the interval option. */ while(repeat < 0 || repeat-- > 0) { redisAppendCommandArgv(context,argc,(const char**)argv,argvlen); if (config.monitor_mode) { do { if (cliReadReply(output_raw) != REDIS_OK) exit(1); fflush(stdout); } while(config.monitor_mode); zfree(argvlen); return REDIS_OK; } if (config.pubsub_mode) { if (config.output != OUTPUT_RAW) printf("Reading messages... (press Ctrl-C to quit)\n"); /* Unset our default PUSH handler so this works in RESP2/RESP3 */ redisSetPushCallback(context, NULL); while (config.pubsub_mode) { if (cliReadReply(output_raw) != REDIS_OK) exit(1); fflush(stdout); /* Make it grep friendly */ if (!config.pubsub_mode || config.last_cmd_type == REDIS_REPLY_ERROR) { if (config.push_output) { redisSetPushCallback(context, cliPushHandler); } config.pubsub_mode = 0; } } continue; } if (config.slave_mode) { printf("Entering replica output mode... (press Ctrl-C to quit)\n"); slaveMode(); config.slave_mode = 0; zfree(argvlen); return REDIS_ERR; /* Error = slaveMode lost connection to master */ } if (cliReadReply(output_raw) != REDIS_OK) { zfree(argvlen); return REDIS_ERR; } else { /* Store database number when SELECT was successfully executed. 
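 *
 * e.g. a successful "SELECT 4" updates config.dbnum so the prompt refreshes to
 * something like 127.0.0.1:6379[4]>, and the MULTI/EXEC/DISCARD handling below
 * restores the pre-transaction database if the transaction fails or is
 * discarded.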
*/ if (!strcasecmp(command,"select") && argc == 2 && config.last_cmd_type != REDIS_REPLY_ERROR) { config.input_dbnum = config.dbnum = atoi(argv[1]); cliRefreshPrompt(); } else if (!strcasecmp(command,"auth") && (argc == 2 || argc == 3)) { cliSelect(); } else if (!strcasecmp(command,"multi") && argc == 1 && config.last_cmd_type != REDIS_REPLY_ERROR) { config.in_multi = 1; config.pre_multi_dbnum = config.dbnum; cliRefreshPrompt(); } else if (!strcasecmp(command,"exec") && argc == 1 && config.in_multi) { config.in_multi = 0; if (config.last_cmd_type == REDIS_REPLY_ERROR || config.last_cmd_type == REDIS_REPLY_NIL) { config.input_dbnum = config.dbnum = config.pre_multi_dbnum; } cliRefreshPrompt(); } else if (!strcasecmp(command,"discard") && argc == 1 && config.last_cmd_type != REDIS_REPLY_ERROR) { config.in_multi = 0; config.input_dbnum = config.dbnum = config.pre_multi_dbnum; cliRefreshPrompt(); } else if (!strcasecmp(command,"reset") && argc == 1 && config.last_cmd_type != REDIS_REPLY_ERROR) { config.in_multi = 0; config.dbnum = 0; config.input_dbnum = 0; config.resp3 = 0; cliRefreshPrompt(); } } if (config.cluster_reissue_command){ /* If we need to reissue the command, break to prevent a further 'repeat' number of dud interactions */ break; } if (config.interval) usleep(config.interval); fflush(stdout); /* Make it grep friendly */ } zfree(argvlen); return REDIS_OK; } /* Send a command reconnecting the link if needed. */ static redisReply *reconnectingRedisCommand(redisContext *c, const char *fmt, ...) { redisReply *reply = NULL; int tries = 0; va_list ap; assert(!c->err); while(reply == NULL) { while (c->err & (REDIS_ERR_IO | REDIS_ERR_EOF)) { printf("\r\x1b[0K"); /* Cursor to left edge + clear line. */ printf("Reconnecting... %d\r", ++tries); fflush(stdout); redisFree(c); c = redisConnect(config.hostip,config.hostport); if (!c->err && config.tls) { const char *err = NULL; if (cliSecureConnection(c, config.sslconfig, &err) == REDIS_ERR && err) { fprintf(stderr, "TLS Error: %s\n", err); exit(1); } } usleep(1000000); } va_start(ap,fmt); reply = redisvCommand(c,fmt,ap); va_end(ap); if (c->err && !(c->err & (REDIS_ERR_IO | REDIS_ERR_EOF))) { fprintf(stderr, "Error: %s\n", c->errstr); exit(1); } else if (tries > 0) { printf("\r\x1b[0K"); /* Cursor to left edge + clear line. 
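 *
 * ("\r" returns the cursor to column 0 and the ESC[0K sequence erases from
 * there to the end of the line, so the "Reconnecting... N" counter redraws in
 * place instead of scrolling)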
*/ } } context = c; return reply; } /*------------------------------------------------------------------------------ * User interface *--------------------------------------------------------------------------- */ static int parseOptions(int argc, char **argv) { int i; for (i = 1; i < argc; i++) { int lastarg = i==argc-1; if (!strcmp(argv[i],"-h") && !lastarg) { sdsfree(config.hostip); config.hostip = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"-h") && lastarg) { usage(0); } else if (!strcmp(argv[i],"--help")) { usage(0); } else if (!strcmp(argv[i],"-x")) { config.stdinarg = 1; } else if (!strcmp(argv[i],"-p") && !lastarg) { config.hostport = atoi(argv[++i]); } else if (!strcmp(argv[i],"-s") && !lastarg) { config.hostsocket = argv[++i]; } else if (!strcmp(argv[i],"-r") && !lastarg) { config.repeat = strtoll(argv[++i],NULL,10); } else if (!strcmp(argv[i],"-i") && !lastarg) { double seconds = atof(argv[++i]); config.interval = seconds*1000000; } else if (!strcmp(argv[i],"-n") && !lastarg) { config.input_dbnum = atoi(argv[++i]); } else if (!strcmp(argv[i], "--no-auth-warning")) { config.no_auth_warning = 1; } else if (!strcmp(argv[i], "--askpass")) { config.askpass = 1; } else if ((!strcmp(argv[i],"-a") || !strcmp(argv[i],"--pass")) && !lastarg) { config.auth = argv[++i]; } else if (!strcmp(argv[i],"--user") && !lastarg) { config.user = argv[++i]; } else if (!strcmp(argv[i],"-u") && !lastarg) { parseRedisUri(argv[++i]); } else if (!strcmp(argv[i],"--raw")) { config.output = OUTPUT_RAW; } else if (!strcmp(argv[i],"--no-raw")) { config.output = OUTPUT_STANDARD; } else if (!strcmp(argv[i],"--quoted-input")) { config.quoted_input = 1; } else if (!strcmp(argv[i],"--csv")) { config.output = OUTPUT_CSV; } else if (!strcmp(argv[i],"--latency")) { config.latency_mode = 1; } else if (!strcmp(argv[i],"--latency-dist")) { config.latency_dist_mode = 1; } else if (!strcmp(argv[i],"--mono")) { spectrum_palette = spectrum_palette_mono; spectrum_palette_size = spectrum_palette_mono_size; } else if (!strcmp(argv[i],"--latency-history")) { config.latency_mode = 1; config.latency_history = 1; } else if (!strcmp(argv[i],"--lru-test") && !lastarg) { config.lru_test_mode = 1; config.lru_test_sample_size = strtoll(argv[++i],NULL,10); } else if (!strcmp(argv[i],"--slave")) { config.slave_mode = 1; } else if (!strcmp(argv[i],"--replica")) { config.slave_mode = 1; } else if (!strcmp(argv[i],"--stat")) { config.stat_mode = 1; } else if (!strcmp(argv[i],"--scan")) { config.scan_mode = 1; } else if (!strcmp(argv[i],"--pattern") && !lastarg) { sdsfree(config.pattern); config.pattern = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"--quoted-pattern") && !lastarg) { sdsfree(config.pattern); config.pattern = unquoteCString(argv[++i]); if (!config.pattern) { fprintf(stderr,"Invalid quoted string specified for --quoted-pattern.\n"); exit(1); } } else if (!strcmp(argv[i],"--intrinsic-latency") && !lastarg) { config.intrinsic_latency_mode = 1; config.intrinsic_latency_duration = atoi(argv[++i]); } else if (!strcmp(argv[i],"--rdb") && !lastarg) { config.getrdb_mode = 1; config.rdb_filename = argv[++i]; } else if (!strcmp(argv[i],"--pipe")) { config.pipe_mode = 1; } else if (!strcmp(argv[i],"--pipe-timeout") && !lastarg) { config.pipe_timeout = atoi(argv[++i]); } else if (!strcmp(argv[i],"--bigkeys")) { config.bigkeys = 1; } else if (!strcmp(argv[i],"--memkeys")) { config.memkeys = 1; config.memkeys_samples = 0; /* use redis default */ } else if (!strcmp(argv[i],"--memkeys-samples")) { config.memkeys = 1; config.memkeys_samples 
= atoi(argv[++i]); } else if (!strcmp(argv[i],"--hotkeys")) { config.hotkeys = 1; } else if (!strcmp(argv[i],"--eval") && !lastarg) { config.eval = argv[++i]; } else if (!strcmp(argv[i],"--ldb")) { config.eval_ldb = 1; config.output = OUTPUT_RAW; } else if (!strcmp(argv[i],"--ldb-sync-mode")) { config.eval_ldb = 1; config.eval_ldb_sync = 1; config.output = OUTPUT_RAW; } else if (!strcmp(argv[i],"-c")) { config.cluster_mode = 1; } else if (!strcmp(argv[i],"-d") && !lastarg) { sdsfree(config.mb_delim); config.mb_delim = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"-D") && !lastarg) { sdsfree(config.cmd_delim); config.cmd_delim = sdsnew(argv[++i]); } else if (!strcmp(argv[i],"-e")) { config.set_errcode = 1; } else if (!strcmp(argv[i],"--verbose")) { config.verbose = 1; } else if (!strcmp(argv[i],"--cluster") && !lastarg) { if (CLUSTER_MANAGER_MODE()) usage(1); char *cmd = argv[++i]; int j = i; while (j < argc && argv[j][0] != '-') j++; if (j > i) j--; createClusterManagerCommand(cmd, j - i, argv + i + 1); i = j; } else if (!strcmp(argv[i],"--cluster") && lastarg) { usage(1); } else if ((!strcmp(argv[i],"--cluster-only-masters"))) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY; } else if ((!strcmp(argv[i],"--cluster-only-replicas"))) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY; } else if (!strcmp(argv[i],"--cluster-replicas") && !lastarg) { config.cluster_manager_command.replicas = atoi(argv[++i]); } else if (!strcmp(argv[i],"--cluster-master-id") && !lastarg) { config.cluster_manager_command.master_id = argv[++i]; } else if (!strcmp(argv[i],"--cluster-from") && !lastarg) { config.cluster_manager_command.from = argv[++i]; } else if (!strcmp(argv[i],"--cluster-to") && !lastarg) { config.cluster_manager_command.to = argv[++i]; } else if (!strcmp(argv[i],"--cluster-from-user") && !lastarg) { config.cluster_manager_command.from_user = argv[++i]; } else if (!strcmp(argv[i],"--cluster-from-pass") && !lastarg) { config.cluster_manager_command.from_pass = argv[++i]; } else if (!strcmp(argv[i], "--cluster-from-askpass")) { config.cluster_manager_command.from_askpass = 1; } else if (!strcmp(argv[i],"--cluster-weight") && !lastarg) { if (config.cluster_manager_command.weight != NULL) { fprintf(stderr, "WARNING: you cannot use --cluster-weight " "more than once.\n" "You can set more weights by adding them " "as a space-separated list, ie:\n" "--cluster-weight n1=w n2=w\n"); exit(1); } int widx = i + 1; char **weight = argv + widx; int wargc = 0; for (; widx < argc; widx++) { if (strstr(argv[widx], "--") == argv[widx]) break; if (strchr(argv[widx], '=') == NULL) break; wargc++; } if (wargc > 0) { config.cluster_manager_command.weight = weight; config.cluster_manager_command.weight_argc = wargc; i += wargc; } } else if (!strcmp(argv[i],"--cluster-slots") && !lastarg) { config.cluster_manager_command.slots = atoi(argv[++i]); } else if (!strcmp(argv[i],"--cluster-timeout") && !lastarg) { config.cluster_manager_command.timeout = atoi(argv[++i]); } else if (!strcmp(argv[i],"--cluster-pipeline") && !lastarg) { config.cluster_manager_command.pipeline = atoi(argv[++i]); } else if (!strcmp(argv[i],"--cluster-threshold") && !lastarg) { config.cluster_manager_command.threshold = atof(argv[++i]); } else if (!strcmp(argv[i],"--cluster-yes")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_YES; } else if (!strcmp(argv[i],"--cluster-simulate")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_SIMULATE; } else if 
(!strcmp(argv[i],"--cluster-replace")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_REPLACE; } else if (!strcmp(argv[i],"--cluster-copy")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_COPY; } else if (!strcmp(argv[i],"--cluster-slave")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_SLAVE; } else if (!strcmp(argv[i],"--cluster-use-empty-masters")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_EMPTYMASTER; } else if (!strcmp(argv[i],"--cluster-search-multiple-owners")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_CHECK_OWNERS; } else if (!strcmp(argv[i],"--cluster-fix-with-unreachable-masters")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_FIX_WITH_UNREACHABLE_MASTERS; #ifdef USE_OPENSSL } else if (!strcmp(argv[i],"--tls")) { config.tls = 1; } else if (!strcmp(argv[i],"--sni") && !lastarg) { config.sslconfig.sni = argv[++i]; } else if (!strcmp(argv[i],"--cacertdir") && !lastarg) { config.sslconfig.cacertdir = argv[++i]; } else if (!strcmp(argv[i],"--cacert") && !lastarg) { config.sslconfig.cacert = argv[++i]; } else if (!strcmp(argv[i],"--cert") && !lastarg) { config.sslconfig.cert = argv[++i]; } else if (!strcmp(argv[i],"--key") && !lastarg) { config.sslconfig.key = argv[++i]; } else if (!strcmp(argv[i],"--tls-ciphers") && !lastarg) { config.sslconfig.ciphers = argv[++i]; } else if (!strcmp(argv[i],"--insecure")) { config.sslconfig.skip_cert_verify = 1; #ifdef TLS1_3_VERSION } else if (!strcmp(argv[i],"--tls-ciphersuites") && !lastarg) { config.sslconfig.ciphersuites = argv[++i]; #endif #endif } else if (!strcmp(argv[i],"-v") || !strcmp(argv[i], "--version")) { sds version = cliVersion(); printf("redis-cli %s\n", version); sdsfree(version); exit(0); } else if (!strcmp(argv[i],"-3")) { config.resp3 = 1; } else if (!strcmp(argv[i],"--show-pushes") && !lastarg) { char *argval = argv[++i]; if (!strncasecmp(argval, "n", 1)) { config.push_output = 0; } else if (!strncasecmp(argval, "y", 1)) { config.push_output = 1; } else { fprintf(stderr, "Unknown --show-pushes value '%s' " "(valid: '[y]es', '[n]o')\n", argval); } } else if (CLUSTER_MANAGER_MODE() && argv[i][0] != '-') { if (config.cluster_manager_command.argc == 0) { int j = i + 1; while (j < argc && argv[j][0] != '-') j++; int cmd_argc = j - i; config.cluster_manager_command.argc = cmd_argc; config.cluster_manager_command.argv = argv + i; if (cmd_argc > 1) i = j - 1; } } else { if (argv[i][0] == '-') { fprintf(stderr, "Unrecognized option or bad number of args for: '%s'\n", argv[i]); exit(1); } else { /* Likely the command name, stop here. */ break; } } } if (config.hostsocket && config.cluster_mode) { fprintf(stderr,"Options -c and -s are mutually exclusive.\n"); exit(1); } /* --ldb requires --eval. 
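 *
 * A typical debugging invocation therefore looks like:
 *   redis-cli --ldb --eval myscript.lua key1 key2 , arg1 arg2
 * where, as with plain --eval, the comma separates KEYS[] from ARGV[].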
*/ if (config.eval_ldb && config.eval == NULL) { fprintf(stderr,"Options --ldb and --ldb-sync-mode require --eval.\n"); fprintf(stderr,"Try %s --help for more information.\n", argv[0]); exit(1); } if (!config.no_auth_warning && config.auth != NULL) { fputs("Warning: Using a password with '-a' or '-u' option on the command" " line interface may not be safe.\n", stderr); } return i; } static void parseEnv() { /* Set auth from env, but do not overwrite CLI arguments if passed */ char *auth = getenv(REDIS_CLI_AUTH_ENV); if (auth != NULL && config.auth == NULL) { config.auth = auth; } char *cluster_yes = getenv(REDIS_CLI_CLUSTER_YES_ENV); if (cluster_yes != NULL && !strcmp(cluster_yes, "1")) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_YES; } } static void usage(int err) { sds version = cliVersion(); FILE *target = err ? stderr: stdout; fprintf(target, "redis-cli %s\n" "\n" "Usage: redis-cli [OPTIONS] [cmd [arg [arg ...]]]\n" " -h <hostname> Server hostname (default: 127.0.0.1).\n" " -p <port> Server port (default: 6379).\n" " -s <socket> Server socket (overrides hostname and port).\n" " -a <password> Password to use when connecting to the server.\n" " You can also use the " REDIS_CLI_AUTH_ENV " environment\n" " variable to pass this password more safely\n" " (if both are used, this argument takes precedence).\n" " --user <username> Used to send ACL style 'AUTH username pass'. Needs -a.\n" " --pass <password> Alias of -a for consistency with the new --user option.\n" " --askpass Force user to input password with mask from STDIN.\n" " If this argument is used, '-a' and " REDIS_CLI_AUTH_ENV "\n" " environment variable will be ignored.\n" " -u <uri> Server URI.\n" " -r <repeat> Execute specified command N times.\n" " -i <interval> When -r is used, waits <interval> seconds per command.\n" " It is possible to specify sub-second times like -i 0.1.\n" " -n <db> Database number.\n" " -3 Start session in RESP3 protocol mode.\n" " -x Read last argument from STDIN.\n" " -d <delimiter> Delimiter between response bulks for raw formatting (default: \\n).\n" " -D <delimiter> Delimiter between responses for raw formatting (default: \\n).\n" " -c Enable cluster mode (follow -ASK and -MOVED redirections).\n" " -e Return exit error code when command execution fails.\n" #ifdef USE_OPENSSL " --tls Establish a secure TLS connection.\n" " --sni <host> Server name indication for TLS.\n" " --cacert <file> CA Certificate file to verify with.\n" " --cacertdir <dir> Directory where trusted CA certificates are stored.\n" " If neither cacert nor cacertdir are specified, the default\n" " system-wide trusted root certs configuration will apply.\n" " --insecure Allow insecure TLS connection by skipping cert validation.\n" " --cert <file> Client certificate to authenticate with.\n" " --key <file> Private key file to authenticate with.\n" " --tls-ciphers <list> Sets the list of preferred ciphers (TLSv1.2 and below)\n" " in order of preference from highest to lowest separated by colon (\":\").\n" " See the ciphers(1ssl) manpage for more information about the syntax of this string.\n" #ifdef TLS1_3_VERSION " --tls-ciphersuites <list> Sets the list of preferred ciphersuites (TLSv1.3)\n" " in order of preference from highest to lowest separated by colon (\":\").\n" " See the ciphers(1ssl) manpage for more information about the syntax of this string,\n" " and specifically for TLSv1.3 ciphersuites.\n" #endif #endif " --raw Use raw formatting for replies (default when STDOUT is\n" " not a tty).\n" " --no-raw Force 
formatted output even when STDOUT is not a tty.\n" " --quoted-input Force input to be handled as quoted strings.\n" " --csv Output in CSV format.\n" " --show-pushes <yn> Whether to print RESP3 PUSH messages. Enabled by default when\n" " STDOUT is a tty but can be overridden with --show-pushes no.\n" " --stat Print rolling stats about server: mem, clients, ...\n" " --latency Enter a special mode continuously sampling latency.\n" " If you use this mode in an interactive session it runs\n" " forever displaying real-time stats. Otherwise if --raw or\n" " --csv is specified, or if you redirect the output to a non\n" " TTY, it samples the latency for 1 second (you can use\n" " -i to change the interval), then produces a single output\n" " and exits.\n",version); fprintf(target, " --latency-history Like --latency but tracking latency changes over time.\n" " Default time interval is 15 sec. Change it using -i.\n" " --latency-dist Shows latency as a spectrum, requires xterm 256 colors.\n" " Default time interval is 1 sec. Change it using -i.\n" " --lru-test <keys> Simulate a cache workload with an 80-20 distribution.\n" " --replica Simulate a replica showing commands received from the master.\n" " --rdb <filename> Transfer an RDB dump from remote server to local file.\n" " Use filename of \"-\" to write to stdout.\n" " --pipe Transfer raw Redis protocol from stdin to server.\n" " --pipe-timeout <n> In --pipe mode, abort with error if after sending all data.\n" " no reply is received within <n> seconds.\n" " Default timeout: %d. Use 0 to wait forever.\n", REDIS_CLI_DEFAULT_PIPE_TIMEOUT); fprintf(target, " --bigkeys Sample Redis keys looking for keys with many elements (complexity).\n" " --memkeys Sample Redis keys looking for keys consuming a lot of memory.\n" " --memkeys-samples <n> Sample Redis keys looking for keys consuming a lot of memory.\n" " And define number of key elements to sample\n" " --hotkeys Sample Redis keys looking for hot keys.\n" " only works when maxmemory-policy is *lfu.\n" " --scan List all keys using the SCAN command.\n" " --pattern <pat> Keys pattern when using the --scan, --bigkeys or --hotkeys\n" " options (default: *).\n" " --quoted-pattern <pat> Same as --pattern, but the specified string can be\n" " quoted, in order to pass an otherwise non binary-safe string.\n" " --intrinsic-latency <sec> Run a test to measure intrinsic system latency.\n" " The test will run for the specified amount of seconds.\n" " --eval <file> Send an EVAL command using the Lua script at <file>.\n" " --ldb Used with --eval enable the Redis Lua debugger.\n" " --ldb-sync-mode Like --ldb but uses the synchronous Lua debugger, in\n" " this mode the server is blocked and script changes are\n" " not rolled back from the server memory.\n" " --cluster <command> [args...] 
[opts...]\n" " Cluster Manager command and arguments (see below).\n" " --verbose Verbose mode.\n" " --no-auth-warning Don't show warning message when using password on command\n" " line interface.\n" " --help Output this help and exit.\n" " --version Output version and exit.\n" "\n"); /* Using another fprintf call to avoid -Woverlength-strings compile warning */ fprintf(target, "Cluster Manager Commands:\n" " Use --cluster help to list all available cluster manager commands.\n" "\n" "Examples:\n" " cat /etc/passwd | redis-cli -x set mypasswd\n" " redis-cli get mypasswd\n" " redis-cli -r 100 lpush mylist x\n" " redis-cli -r 100 -i 1 info | grep used_memory_human:\n" " redis-cli --quoted-input set '\"null-\\x00-separated\"' value\n" " redis-cli --eval myscript.lua key1 key2 , arg1 arg2 arg3\n" " redis-cli --scan --pattern '*:12345*'\n" "\n" " (Note: when using --eval the comma separates KEYS[] from ARGV[] items)\n" "\n" "When no command is given, redis-cli starts in interactive mode.\n" "Type \"help\" in interactive mode for information on available commands\n" "and settings.\n" "\n"); sdsfree(version); exit(err); } static int confirmWithYes(char *msg, int ignore_force) { /* if --cluster-yes option is set and ignore_force is false, * do not prompt for an answer */ if (!ignore_force && (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_YES)) { return 1; } printf("%s (type 'yes' to accept): ", msg); fflush(stdout); char buf[4]; int nread = read(fileno(stdin),buf,4); buf[3] = '\0'; return (nread != 0 && !strcmp("yes", buf)); } static int issueCommandRepeat(int argc, char **argv, long repeat) { while (1) { if (config.cluster_reissue_command || context == NULL || context->err == REDIS_ERR_IO || context->err == REDIS_ERR_EOF) { if (cliConnect(CC_FORCE) != REDIS_OK) { cliPrintContextError(); config.cluster_reissue_command = 0; return REDIS_ERR; } } config.cluster_reissue_command = 0; if (config.cluster_send_asking) { if (cliSendAsking() != REDIS_OK) { cliPrintContextError(); return REDIS_ERR; } } if (cliSendCommand(argc,argv,repeat) != REDIS_OK) { cliPrintContextError(); return REDIS_ERR; } /* Issue the command again if we got redirected in cluster mode */ if (config.cluster_mode && config.cluster_reissue_command) { continue; } break; } return REDIS_OK; } static int issueCommand(int argc, char **argv) { return issueCommandRepeat(argc, argv, config.repeat); } /* Split the user provided command into multiple SDS arguments. * This function normally uses sdssplitargs() from sds.c which is able * to understand "quoted strings", escapes and so forth. However when * we are in Lua debugging mode and the "eval" command is used, we want * the remaining Lua script (after "e " or "eval ") to be passed verbatim * as a single big argument. */ static sds *cliSplitArgs(char *line, int *argc) { if (config.eval_ldb && (strstr(line,"eval ") == line || strstr(line,"e ") == line)) { sds *argv = sds_malloc(sizeof(sds)*2); *argc = 2; int len = strlen(line); int elen = line[1] == ' ' ? 2 : 5; /* "e " or "eval "? */ argv[0] = sdsnewlen(line,elen-1); argv[1] = sdsnewlen(line+elen,len-elen); return argv; } else { return sdssplitargs(line,argc); } } /* Set the CLI preferences. This function is invoked when an interactive * ":command" is called, or when reading ~/.redisclirc file, in order to * set user preferences. 
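 *
 * Currently the only recognized preferences are ":set hints" and ":set nohints";
 * placing either line in ~/.redisclirc applies it at startup, while typing it
 * at the prompt changes it for the current session only.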
*/ void cliSetPreferences(char **argv, int argc, int interactive) { if (!strcasecmp(argv[0],":set") && argc >= 2) { if (!strcasecmp(argv[1],"hints")) pref.hints = 1; else if (!strcasecmp(argv[1],"nohints")) pref.hints = 0; else { printf("%sunknown redis-cli preference '%s'\n", interactive ? "" : ".redisclirc: ", argv[1]); } } else { printf("%sunknown redis-cli internal command '%s'\n", interactive ? "" : ".redisclirc: ", argv[0]); } } /* Load the ~/.redisclirc file if any. */ void cliLoadPreferences(void) { sds rcfile = getDotfilePath(REDIS_CLI_RCFILE_ENV,REDIS_CLI_RCFILE_DEFAULT); if (rcfile == NULL) return; FILE *fp = fopen(rcfile,"r"); char buf[1024]; if (fp) { while(fgets(buf,sizeof(buf),fp) != NULL) { sds *argv; int argc; argv = sdssplitargs(buf,&argc); if (argc > 0) cliSetPreferences(argv,argc,0); sdsfreesplitres(argv,argc); } fclose(fp); } sdsfree(rcfile); } /* Some commands can include sensitive information and shouldn't be put in the * history file. Currently these commands are include: * - AUTH * - ACL SETUSER * - CONFIG SET masterauth/masteruser/requirepass * - HELLO with [AUTH username password] * - MIGRATE with [AUTH password] or [AUTH2 username password] */ static int isSensitiveCommand(int argc, char **argv) { if (!strcasecmp(argv[0],"auth")) { return 1; } else if (argc > 1 && !strcasecmp(argv[0],"acl") && !strcasecmp(argv[1],"setuser")) { return 1; } else if (argc > 2 && !strcasecmp(argv[0],"config") && !strcasecmp(argv[1],"set") && ( !strcasecmp(argv[2],"masterauth") || !strcasecmp(argv[2],"masteruser") || !strcasecmp(argv[2],"requirepass"))) { return 1; /* HELLO [protover [AUTH username password] [SETNAME clientname]] */ } else if (argc > 4 && !strcasecmp(argv[0],"hello")) { for (int j = 2; j < argc; j++) { int moreargs = argc - 1 - j; if (!strcasecmp(argv[j],"AUTH") && moreargs >= 2) { return 1; } else if (!strcasecmp(argv[j],"SETNAME") && moreargs) { j++; } else { return 0; } } /* MIGRATE host port key|"" destination-db timeout [COPY] [REPLACE] * [AUTH password] [AUTH2 username password] [KEYS key [key ...]] */ } else if (argc > 7 && !strcasecmp(argv[0], "migrate")) { for (int j = 6; j < argc; j++) { int moreargs = argc - 1 - j; if (!strcasecmp(argv[j],"auth") && moreargs) { return 1; } else if (!strcasecmp(argv[j],"auth2") && moreargs >= 2) { return 1; } else if (!strcasecmp(argv[j],"keys") && moreargs) { return 0; } } } return 0; } static void repl(void) { sds historyfile = NULL; int history = 0; char *line; int argc; sds *argv; /* Initialize the help and, if possible, use the COMMAND command in order * to retrieve missing entries. */ cliInitHelp(); cliIntegrateHelp(); config.interactive = 1; linenoiseSetMultiLine(1); linenoiseSetCompletionCallback(completionCallback); linenoiseSetHintsCallback(hintsCallback); linenoiseSetFreeHintsCallback(freeHintsCallback); /* Only use history and load the rc file when stdin is a tty. */ if (isatty(fileno(stdin))) { historyfile = getDotfilePath(REDIS_CLI_HISTFILE_ENV,REDIS_CLI_HISTFILE_DEFAULT); //keep in-memory history always regardless if history file can be determined history = 1; if (historyfile != NULL) { linenoiseHistoryLoad(historyfile); } cliLoadPreferences(); } cliRefreshPrompt(); while((line = linenoise(context ? 
config.prompt : "not connected> ")) != NULL) { if (line[0] != '\0') { long repeat = 1; int skipargs = 0; char *endptr = NULL; argv = cliSplitArgs(line,&argc); if (argv == NULL) { printf("Invalid argument(s)\n"); fflush(stdout); if (history) linenoiseHistoryAdd(line); if (historyfile) linenoiseHistorySave(historyfile); linenoiseFree(line); continue; } else if (argc == 0) { sdsfreesplitres(argv,argc); linenoiseFree(line); continue; } /* check if we have a repeat command option and * need to skip the first arg */ errno = 0; repeat = strtol(argv[0], &endptr, 10); if (argc > 1 && *endptr == '\0') { if (errno == ERANGE || errno == EINVAL || repeat <= 0) { fputs("Invalid redis-cli repeat command option value.\n", stdout); sdsfreesplitres(argv, argc); linenoiseFree(line); continue; } skipargs = 1; } else { repeat = 1; } if (!isSensitiveCommand(argc - skipargs, argv + skipargs)) { if (history) linenoiseHistoryAdd(line); if (historyfile) linenoiseHistorySave(historyfile); } if (strcasecmp(argv[0],"quit") == 0 || strcasecmp(argv[0],"exit") == 0) { exit(0); } else if (argv[0][0] == ':') { cliSetPreferences(argv,argc,1); sdsfreesplitres(argv,argc); linenoiseFree(line); continue; } else if (strcasecmp(argv[0],"restart") == 0) { if (config.eval) { config.eval_ldb = 1; config.output = OUTPUT_RAW; sdsfreesplitres(argv,argc); linenoiseFree(line); return; /* Return to evalMode to restart the session. */ } else { printf("Use 'restart' only in Lua debugging mode."); } } else if (argc == 3 && !strcasecmp(argv[0],"connect")) { sdsfree(config.hostip); config.hostip = sdsnew(argv[1]); config.hostport = atoi(argv[2]); cliRefreshPrompt(); cliConnect(CC_FORCE); } else if (argc == 1 && !strcasecmp(argv[0],"clear")) { linenoiseClearScreen(); } else { long long start_time = mstime(), elapsed; issueCommandRepeat(argc-skipargs, argv+skipargs, repeat); /* If our debugging session ended, show the EVAL final * reply. */ if (config.eval_ldb_end) { config.eval_ldb_end = 0; cliReadReply(0); printf("\n(Lua debugging session ended%s)\n\n", config.eval_ldb_sync ? "" : " -- dataset changes rolled back"); } elapsed = mstime()-start_time; if (elapsed >= 500 && config.output == OUTPUT_STANDARD) { printf("(%.2fs)\n",(double)elapsed/1000); } } /* Free the argument vector */ sdsfreesplitres(argv,argc); } /* linenoise() returns malloc-ed lines like readline() */ linenoiseFree(line); } exit(0); } static int noninteractive(int argc, char **argv) { int retval = 0; sds *sds_args = getSdsArrayFromArgv(argc, argv, config.quoted_input); if (!sds_args) { printf("Invalid quoted string\n"); return 1; } if (config.stdinarg) { sds_args = sds_realloc(sds_args, (argc + 1) * sizeof(sds)); sds_args[argc] = readArgFromStdin(); argc++; } retval = issueCommand(argc, sds_args); sdsfreesplitres(sds_args, argc); return retval; } /*------------------------------------------------------------------------------ * Eval mode *--------------------------------------------------------------------------- */ static int evalMode(int argc, char **argv) { sds script = NULL; FILE *fp; char buf[1024]; size_t nread; char **argv2; int j, got_comma, keys; int retval = REDIS_OK; while(1) { if (config.eval_ldb) { printf( "Lua debugging session started, please use:\n" "quit -- End the session.\n" "restart -- Restart the script in debug mode again.\n" "help -- Show Lua script debugging commands.\n\n" ); } sdsfree(script); script = sdsempty(); got_comma = 0; keys = 0; /* Load the script from the file, as an sds string. 
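 *
 * The whole file is read in 1024-byte chunks and appended to an sds string, so
 * script size is bounded only by available memory. As a purely illustrative
 * example, a file get.lua containing
 *   return redis.call('GET', KEYS[1])
 * could be run as: redis-cli --eval get.lua mykey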
*/ fp = fopen(config.eval,"r"); if (!fp) { fprintf(stderr, "Can't open file '%s': %s\n", config.eval, strerror(errno)); exit(1); } while((nread = fread(buf,1,sizeof(buf),fp)) != 0) { script = sdscatlen(script,buf,nread); } fclose(fp); /* If we are debugging a script, enable the Lua debugger. */ if (config.eval_ldb) { redisReply *reply = redisCommand(context, config.eval_ldb_sync ? "SCRIPT DEBUG sync": "SCRIPT DEBUG yes"); if (reply) freeReplyObject(reply); } /* Create our argument vector */ argv2 = zmalloc(sizeof(sds)*(argc+3)); argv2[0] = sdsnew("EVAL"); argv2[1] = script; for (j = 0; j < argc; j++) { if (!got_comma && argv[j][0] == ',' && argv[j][1] == 0) { got_comma = 1; continue; } argv2[j+3-got_comma] = sdsnew(argv[j]); if (!got_comma) keys++; } argv2[2] = sdscatprintf(sdsempty(),"%d",keys); /* Call it */ int eval_ldb = config.eval_ldb; /* Save it, may be reverted. */ retval = issueCommand(argc+3-got_comma, argv2); if (eval_ldb) { if (!config.eval_ldb) { /* If the debugging session ended immediately, there was an * error compiling the script. Show it and they don't enter * the REPL at all. */ printf("Eval debugging session can't start:\n"); cliReadReply(0); break; /* Return to the caller. */ } else { strncpy(config.prompt,"lua debugger> ",sizeof(config.prompt)); repl(); /* Restart the session if repl() returned. */ cliConnect(CC_FORCE); printf("\n"); } } else { break; /* Return to the caller. */ } } return retval; } /*------------------------------------------------------------------------------ * Cluster Manager *--------------------------------------------------------------------------- */ /* The Cluster Manager global structure */ static struct clusterManager { list *nodes; /* List of nodes in the configuration. */ list *errors; int unreachable_masters; /* Masters we are not able to reach. */ } cluster_manager; /* Used by clusterManagerFixSlotsCoverage */ dict *clusterManagerUncoveredSlots = NULL; typedef struct clusterManagerNode { redisContext *context; sds name; char *ip; int port; uint64_t current_epoch; time_t ping_sent; time_t ping_recv; int flags; list *flags_str; /* Flags string representations */ sds replicate; /* Master ID if node is a slave */ int dirty; /* Node has changes that can be flushed */ uint8_t slots[CLUSTER_MANAGER_SLOTS]; int slots_count; int replicas_count; list *friends; sds *migrating; /* An array of sds where even strings are slots and odd * strings are the destination node IDs. */ sds *importing; /* An array of sds where even strings are slots and odd * strings are the source node IDs. */ int migrating_count; /* Length of the migrating array (migrating slots*2) */ int importing_count; /* Length of the importing array (importing slots*2) */ float weight; /* Weight used by rebalance */ int balance; /* Used by rebalance */ } clusterManagerNode; /* Data structure used to represent a sequence of cluster nodes. */ typedef struct clusterManagerNodeArray { clusterManagerNode **nodes; /* Actual nodes array */ clusterManagerNode **alloc; /* Pointer to the allocated memory */ int len; /* Actual length of the array */ int count; /* Non-NULL nodes count */ } clusterManagerNodeArray; /* Used for the reshard table. */ typedef struct clusterManagerReshardTableItem { clusterManagerNode *source; int slot; } clusterManagerReshardTableItem; /* Info about a cluster internal link. 
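 * Each link records the peer node name and address as parsed from the
 * CLUSTER NODES output, plus whether the link is currently connected and
 * whether the peer is still in the handshake phase.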
*/ typedef struct clusterManagerLink { sds node_name; sds node_addr; int connected; int handshaking; } clusterManagerLink; static dictType clusterManagerDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ dictSdsDestructor, /* val destructor */ NULL /* allow to expand */ }; static dictType clusterManagerLinkDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictListDestructor, /* val destructor */ NULL /* allow to expand */ }; typedef int clusterManagerCommandProc(int argc, char **argv); typedef int (*clusterManagerOnReplyError)(redisReply *reply, clusterManagerNode *n, int bulk_idx); /* Cluster Manager helper functions */ static clusterManagerNode *clusterManagerNewNode(char *ip, int port); static clusterManagerNode *clusterManagerNodeByName(const char *name); static clusterManagerNode *clusterManagerNodeByAbbreviatedName(const char *n); static void clusterManagerNodeResetSlots(clusterManagerNode *node); static int clusterManagerNodeIsCluster(clusterManagerNode *node, char **err); static void clusterManagerPrintNotClusterNodeError(clusterManagerNode *node, char *err); static int clusterManagerNodeLoadInfo(clusterManagerNode *node, int opts, char **err); static int clusterManagerLoadInfoFromNode(clusterManagerNode *node); static int clusterManagerNodeIsEmpty(clusterManagerNode *node, char **err); static int clusterManagerGetAntiAffinityScore(clusterManagerNodeArray *ipnodes, int ip_count, clusterManagerNode ***offending, int *offending_len); static void clusterManagerOptimizeAntiAffinity(clusterManagerNodeArray *ipnodes, int ip_count); static sds clusterManagerNodeInfo(clusterManagerNode *node, int indent); static void clusterManagerShowNodes(void); static void clusterManagerShowClusterInfo(void); static int clusterManagerFlushNodeConfig(clusterManagerNode *node, char **err); static void clusterManagerWaitForClusterJoin(void); static int clusterManagerCheckCluster(int quiet); static void clusterManagerLog(int level, const char* fmt, ...); static int clusterManagerIsConfigConsistent(void); static dict *clusterManagerGetLinkStatus(void); static void clusterManagerOnError(sds err); static void clusterManagerNodeArrayInit(clusterManagerNodeArray *array, int len); static void clusterManagerNodeArrayReset(clusterManagerNodeArray *array); static void clusterManagerNodeArrayShift(clusterManagerNodeArray *array, clusterManagerNode **nodeptr); static void clusterManagerNodeArrayAdd(clusterManagerNodeArray *array, clusterManagerNode *node); /* Cluster Manager commands. 
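 * Every sub-command is implemented by one of the handlers declared below
 * and registered in the clusterManagerCommands[] table together with its
 * arity (a negative arity means "at least that many arguments") and the
 * --cluster options it accepts.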
*/ static int clusterManagerCommandCreate(int argc, char **argv); static int clusterManagerCommandAddNode(int argc, char **argv); static int clusterManagerCommandDeleteNode(int argc, char **argv); static int clusterManagerCommandInfo(int argc, char **argv); static int clusterManagerCommandCheck(int argc, char **argv); static int clusterManagerCommandFix(int argc, char **argv); static int clusterManagerCommandReshard(int argc, char **argv); static int clusterManagerCommandRebalance(int argc, char **argv); static int clusterManagerCommandSetTimeout(int argc, char **argv); static int clusterManagerCommandImport(int argc, char **argv); static int clusterManagerCommandCall(int argc, char **argv); static int clusterManagerCommandHelp(int argc, char **argv); static int clusterManagerCommandBackup(int argc, char **argv); typedef struct clusterManagerCommandDef { char *name; clusterManagerCommandProc *proc; int arity; char *args; char *options; } clusterManagerCommandDef; clusterManagerCommandDef clusterManagerCommands[] = { {"create", clusterManagerCommandCreate, -2, "host1:port1 ... hostN:portN", "replicas <arg>"}, {"check", clusterManagerCommandCheck, -1, "host:port", "search-multiple-owners"}, {"info", clusterManagerCommandInfo, -1, "host:port", NULL}, {"fix", clusterManagerCommandFix, -1, "host:port", "search-multiple-owners,fix-with-unreachable-masters"}, {"reshard", clusterManagerCommandReshard, -1, "host:port", "from <arg>,to <arg>,slots <arg>,yes,timeout <arg>,pipeline <arg>," "replace"}, {"rebalance", clusterManagerCommandRebalance, -1, "host:port", "weight <node1=w1...nodeN=wN>,use-empty-masters," "timeout <arg>,simulate,pipeline <arg>,threshold <arg>,replace"}, {"add-node", clusterManagerCommandAddNode, 2, "new_host:new_port existing_host:existing_port", "slave,master-id <arg>"}, {"del-node", clusterManagerCommandDeleteNode, 2, "host:port node_id",NULL}, {"call", clusterManagerCommandCall, -2, "host:port command arg arg .. arg", "only-masters,only-replicas"}, {"set-timeout", clusterManagerCommandSetTimeout, 2, "host:port milliseconds", NULL}, {"import", clusterManagerCommandImport, 1, "host:port", "from <arg>,from-user <arg>,from-pass <arg>,from-askpass,copy,replace"}, {"backup", clusterManagerCommandBackup, 2, "host:port backup_directory", NULL}, {"help", clusterManagerCommandHelp, 0, NULL, NULL} }; typedef struct clusterManagerOptionDef { char *name; char *desc; } clusterManagerOptionDef; clusterManagerOptionDef clusterManagerOptions[] = { {"--cluster-yes", "Automatic yes to cluster commands prompts"} }; static void getRDB(clusterManagerNode *node); static void createClusterManagerCommand(char *cmdname, int argc, char **argv) { clusterManagerCommand *cmd = &config.cluster_manager_command; cmd->name = cmdname; cmd->argc = argc; cmd->argv = argc ? 
argv : NULL; if (isColorTerm()) cmd->flags |= CLUSTER_MANAGER_CMD_FLAG_COLOR; } static clusterManagerCommandProc *validateClusterManagerCommand(void) { int i, commands_count = sizeof(clusterManagerCommands) / sizeof(clusterManagerCommandDef); clusterManagerCommandProc *proc = NULL; char *cmdname = config.cluster_manager_command.name; int argc = config.cluster_manager_command.argc; for (i = 0; i < commands_count; i++) { clusterManagerCommandDef cmddef = clusterManagerCommands[i]; if (!strcmp(cmddef.name, cmdname)) { if ((cmddef.arity > 0 && argc != cmddef.arity) || (cmddef.arity < 0 && argc < (cmddef.arity * -1))) { fprintf(stderr, "[ERR] Wrong number of arguments for " "specified --cluster sub command\n"); return NULL; } proc = cmddef.proc; } } if (!proc) fprintf(stderr, "Unknown --cluster subcommand\n"); return proc; } static int parseClusterNodeAddress(char *addr, char **ip_ptr, int *port_ptr, int *bus_port_ptr) { char *c = strrchr(addr, '@'); if (c != NULL) { *c = '\0'; if (bus_port_ptr != NULL) *bus_port_ptr = atoi(c + 1); } c = strrchr(addr, ':'); if (c != NULL) { *c = '\0'; *ip_ptr = addr; *port_ptr = atoi(++c); } else return 0; return 1; } /* Get host ip and port from command arguments. If only one argument has * been provided it must be in the form of 'ip:port', elsewhere * the first argument must be the ip and the second one the port. * If host and port can be detected, it returns 1 and it stores host and * port into variables referenced by 'ip_ptr' and 'port_ptr' pointers, * elsewhere it returns 0. */ static int getClusterHostFromCmdArgs(int argc, char **argv, char **ip_ptr, int *port_ptr) { int port = 0; char *ip = NULL; if (argc == 1) { char *addr = argv[0]; if (!parseClusterNodeAddress(addr, &ip, &port, NULL)) return 0; } else { ip = argv[0]; port = atoi(argv[1]); } if (!ip || !port) return 0; else { *ip_ptr = ip; *port_ptr = port; } return 1; } static void freeClusterManagerNodeFlags(list *flags) { listIter li; listNode *ln; listRewind(flags, &li); while ((ln = listNext(&li)) != NULL) { sds flag = ln->value; sdsfree(flag); } listRelease(flags); } static void freeClusterManagerNode(clusterManagerNode *node) { if (node->context != NULL) redisFree(node->context); if (node->friends != NULL) { listIter li; listNode *ln; listRewind(node->friends,&li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *fn = ln->value; freeClusterManagerNode(fn); } listRelease(node->friends); node->friends = NULL; } if (node->name != NULL) sdsfree(node->name); if (node->replicate != NULL) sdsfree(node->replicate); if ((node->flags & CLUSTER_MANAGER_FLAG_FRIEND) && node->ip) sdsfree(node->ip); int i; if (node->migrating != NULL) { for (i = 0; i < node->migrating_count; i++) sdsfree(node->migrating[i]); zfree(node->migrating); } if (node->importing != NULL) { for (i = 0; i < node->importing_count; i++) sdsfree(node->importing[i]); zfree(node->importing); } if (node->flags_str != NULL) { freeClusterManagerNodeFlags(node->flags_str); node->flags_str = NULL; } zfree(node); } static void freeClusterManager(void) { listIter li; listNode *ln; if (cluster_manager.nodes != NULL) { listRewind(cluster_manager.nodes,&li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; freeClusterManagerNode(n); } listRelease(cluster_manager.nodes); cluster_manager.nodes = NULL; } if (cluster_manager.errors != NULL) { listRewind(cluster_manager.errors,&li); while ((ln = listNext(&li)) != NULL) { sds err = ln->value; sdsfree(err); } listRelease(cluster_manager.errors); cluster_manager.errors = 
NULL; } if (clusterManagerUncoveredSlots != NULL) dictRelease(clusterManagerUncoveredSlots); } static clusterManagerNode *clusterManagerNewNode(char *ip, int port) { clusterManagerNode *node = zmalloc(sizeof(*node)); node->context = NULL; node->name = NULL; node->ip = ip; node->port = port; node->current_epoch = 0; node->ping_sent = 0; node->ping_recv = 0; node->flags = 0; node->flags_str = NULL; node->replicate = NULL; node->dirty = 0; node->friends = NULL; node->migrating = NULL; node->importing = NULL; node->migrating_count = 0; node->importing_count = 0; node->replicas_count = 0; node->weight = 1.0f; node->balance = 0; clusterManagerNodeResetSlots(node); return node; } static sds clusterManagerGetNodeRDBFilename(clusterManagerNode *node) { assert(config.cluster_manager_command.backup_dir); sds filename = sdsnew(config.cluster_manager_command.backup_dir); if (filename[sdslen(filename) - 1] != '/') filename = sdscat(filename, "/"); filename = sdscatprintf(filename, "redis-node-%s-%d-%s.rdb", node->ip, node->port, node->name); return filename; } /* Check whether reply is NULL or its type is REDIS_REPLY_ERROR. In the * latest case, if the 'err' arg is not NULL, it gets allocated with a copy * of reply error (it's up to the caller function to free it), elsewhere * the error is directly printed. */ static int clusterManagerCheckRedisReply(clusterManagerNode *n, redisReply *r, char **err) { int is_err = 0; if (!r || (is_err = (r->type == REDIS_REPLY_ERROR))) { if (is_err) { if (err != NULL) { *err = zmalloc((r->len + 1) * sizeof(char)); strcpy(*err, r->str); } else CLUSTER_MANAGER_PRINT_REPLY_ERROR(n, r->str); } return 0; } return 1; } /* Call MULTI command on a cluster node. */ static int clusterManagerStartTransaction(clusterManagerNode *node) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "MULTI"); int success = clusterManagerCheckRedisReply(node, reply, NULL); if (reply) freeReplyObject(reply); return success; } /* Call EXEC command on a cluster node. */ static int clusterManagerExecTransaction(clusterManagerNode *node, clusterManagerOnReplyError onerror) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "EXEC"); int success = clusterManagerCheckRedisReply(node, reply, NULL); if (success) { if (reply->type != REDIS_REPLY_ARRAY) { success = 0; goto cleanup; } size_t i; for (i = 0; i < reply->elements; i++) { redisReply *r = reply->element[i]; char *err = NULL; success = clusterManagerCheckRedisReply(node, r, &err); if (!success && onerror) success = onerror(r, node, i); if (err) { if (!success) CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, err); zfree(err); } if (!success) break; } } cleanup: if (reply) freeReplyObject(reply); return success; } static int clusterManagerNodeConnect(clusterManagerNode *node) { if (node->context) redisFree(node->context); node->context = redisConnect(node->ip, node->port); if (!node->context->err && config.tls) { const char *err = NULL; if (cliSecureConnection(node->context, config.sslconfig, &err) == REDIS_ERR && err) { fprintf(stderr,"TLS Error: %s\n", err); redisFree(node->context); node->context = NULL; return 0; } } if (node->context->err) { fprintf(stderr,"Could not connect to Redis at "); fprintf(stderr,"%s:%d: %s\n", node->ip, node->port, node->context->errstr); redisFree(node->context); node->context = NULL; return 0; } /* Set aggressive KEEP_ALIVE socket option in the Redis context socket * in order to prevent timeouts caused by the execution of long * commands. At the same time this improves the detection of real * errors. 
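 * The probe interval is controlled by REDIS_CLI_KEEPALIVE_INTERVAL.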
*/ anetKeepAlive(NULL, node->context->fd, REDIS_CLI_KEEPALIVE_INTERVAL); if (config.auth) { redisReply *reply; if (config.user == NULL) reply = redisCommand(node->context,"AUTH %s", config.auth); else reply = redisCommand(node->context,"AUTH %s %s", config.user,config.auth); int ok = clusterManagerCheckRedisReply(node, reply, NULL); if (reply != NULL) freeReplyObject(reply); if (!ok) return 0; } return 1; } static void clusterManagerRemoveNodeFromList(list *nodelist, clusterManagerNode *node) { listIter li; listNode *ln; listRewind(nodelist, &li); while ((ln = listNext(&li)) != NULL) { if (node == ln->value) { listDelNode(nodelist, ln); break; } } } /* Return the node with the specified name (ID) or NULL. */ static clusterManagerNode *clusterManagerNodeByName(const char *name) { if (cluster_manager.nodes == NULL) return NULL; clusterManagerNode *found = NULL; sds lcname = sdsempty(); lcname = sdscpy(lcname, name); sdstolower(lcname); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->name && !sdscmp(n->name, lcname)) { found = n; break; } } sdsfree(lcname); return found; } /* Like clusterManagerNodeByName but the specified name can be just the first * part of the node ID as long as the prefix in unique across the * cluster. */ static clusterManagerNode *clusterManagerNodeByAbbreviatedName(const char*name) { if (cluster_manager.nodes == NULL) return NULL; clusterManagerNode *found = NULL; sds lcname = sdsempty(); lcname = sdscpy(lcname, name); sdstolower(lcname); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->name && strstr(n->name, lcname) == n->name) { found = n; break; } } sdsfree(lcname); return found; } static void clusterManagerNodeResetSlots(clusterManagerNode *node) { memset(node->slots, 0, sizeof(node->slots)); node->slots_count = 0; } /* Call "INFO" redis command on the specified node and return the reply. */ static redisReply *clusterManagerGetNodeRedisInfo(clusterManagerNode *node, char **err) { redisReply *info = CLUSTER_MANAGER_COMMAND(node, "INFO"); if (err != NULL) *err = NULL; if (info == NULL) return NULL; if (info->type == REDIS_REPLY_ERROR) { if (err != NULL) { *err = zmalloc((info->len + 1) * sizeof(char)); strcpy(*err, info->str); } freeReplyObject(info); return NULL; } return info; } static int clusterManagerNodeIsCluster(clusterManagerNode *node, char **err) { redisReply *info = clusterManagerGetNodeRedisInfo(node, err); if (info == NULL) return 0; int is_cluster = (int) getLongInfoField(info->str, "cluster_enabled"); freeReplyObject(info); return is_cluster; } /* Checks whether the node is empty. 
Node is considered not-empty if it has * some key or if it already knows other nodes */ static int clusterManagerNodeIsEmpty(clusterManagerNode *node, char **err) { redisReply *info = clusterManagerGetNodeRedisInfo(node, err); int is_empty = 1; if (info == NULL) return 0; if (strstr(info->str, "db0:") != NULL) { is_empty = 0; goto result; } freeReplyObject(info); info = CLUSTER_MANAGER_COMMAND(node, "CLUSTER INFO"); if (err != NULL) *err = NULL; if (!clusterManagerCheckRedisReply(node, info, err)) { is_empty = 0; goto result; } long known_nodes = getLongInfoField(info->str, "cluster_known_nodes"); is_empty = (known_nodes == 1); result: freeReplyObject(info); return is_empty; } /* Return the anti-affinity score, which is a measure of the amount of * violations of anti-affinity in the current cluster layout, that is, how * badly the masters and slaves are distributed in the different IP * addresses so that slaves of the same master are not in the master * host and are also in different hosts. * * The score is calculated as follows: * * SAME_AS_MASTER = 10000 * each slave in the same IP of its master. * SAME_AS_SLAVE = 1 * each slave having the same IP as another slave of the same master. * FINAL_SCORE = SAME_AS_MASTER + SAME_AS_SLAVE * * So a greater score means a worse anti-affinity level, while zero * means perfect anti-affinity. * * The anti affinity optimization will try to get a score as low as * possible. Since we do not want to sacrifice the fact that slaves should * not be in the same host as the master, we assign 10000 times the score * to this violation, so that we'll optimize for the second factor only * if it does not impact the first one. * * The ipnodes argument is an array of clusterManagerNodeArray, one for * each IP, while ip_count is the total number of IPs in the configuration. * * The function returns the above score, and the list of * offending slaves can be stored into the 'offending' argument, * so that the optimizer can try changing the configuration of the * slaves violating the anti-affinity goals. */ static int clusterManagerGetAntiAffinityScore(clusterManagerNodeArray *ipnodes, int ip_count, clusterManagerNode ***offending, int *offending_len) { int score = 0, i, j; int node_len = cluster_manager.nodes->len; clusterManagerNode **offending_p = NULL; if (offending != NULL) { *offending = zcalloc(node_len * sizeof(clusterManagerNode*)); offending_p = *offending; } /* For each set of nodes in the same host, split by * related nodes (masters and slaves which are involved in * replication of each other) */ for (i = 0; i < ip_count; i++) { clusterManagerNodeArray *node_array = &(ipnodes[i]); dict *related = dictCreate(&clusterManagerDictType); char *ip = NULL; for (j = 0; j < node_array->len; j++) { clusterManagerNode *node = node_array->nodes[j]; if (node == NULL) continue; if (!ip) ip = node->ip; sds types; /* We always use the Master ID as key. */ sds key = (!node->replicate ? node->name : node->replicate); assert(key != NULL); dictEntry *entry = dictFind(related, key); if (entry) types = sdsdup((sds) dictGetVal(entry)); else types = sdsempty(); /* Master type 'm' is always set as the first character of the * types string. */ if (node->replicate) types = sdscat(types, "s"); else { sds s = sdscatsds(sdsnew("m"), types); sdsfree(types); types = s; } dictReplace(related, key, types); } /* Now it's trivial to check, for each related group having the * same host, what is their local score. 
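 * For example a types string of "mss" (a master plus two of its own
 * slaves on this host) adds 2 * 10000 to the score, while "ss" (two
 * slaves of the same master sharing the host, master elsewhere) adds
 * just 2.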
*/ dictIterator *iter = dictGetIterator(related); dictEntry *entry; while ((entry = dictNext(iter)) != NULL) { sds types = (sds) dictGetVal(entry); sds name = (sds) dictGetKey(entry); int typeslen = sdslen(types); if (typeslen < 2) continue; if (types[0] == 'm') score += (10000 * (typeslen - 1)); else score += (1 * typeslen); if (offending == NULL) continue; /* Populate the list of offending nodes. */ listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->replicate == NULL) continue; if (!strcmp(n->replicate, name) && !strcmp(n->ip, ip)) { *(offending_p++) = n; if (offending_len != NULL) (*offending_len)++; break; } } } //if (offending_len != NULL) *offending_len = offending_p - *offending; dictReleaseIterator(iter); dictRelease(related); } return score; } static void clusterManagerOptimizeAntiAffinity(clusterManagerNodeArray *ipnodes, int ip_count) { clusterManagerNode **offenders = NULL; int score = clusterManagerGetAntiAffinityScore(ipnodes, ip_count, NULL, NULL); if (score == 0) goto cleanup; clusterManagerLogInfo(">>> Trying to optimize slaves allocation " "for anti-affinity\n"); int node_len = cluster_manager.nodes->len; int maxiter = 500 * node_len; // Effort is proportional to cluster size... srand(time(NULL)); while (maxiter > 0) { int offending_len = 0; if (offenders != NULL) { zfree(offenders); offenders = NULL; } score = clusterManagerGetAntiAffinityScore(ipnodes, ip_count, &offenders, &offending_len); if (score == 0 || offending_len == 0) break; // Optimal anti affinity reached /* We'll try to randomly swap a slave's assigned master causing * an affinity problem with another random slave, to see if we * can improve the affinity. */ int rand_idx = rand() % offending_len; clusterManagerNode *first = offenders[rand_idx], *second = NULL; clusterManagerNode **other_replicas = zcalloc((node_len - 1) * sizeof(*other_replicas)); int other_replicas_count = 0; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n != first && n->replicate != NULL) other_replicas[other_replicas_count++] = n; } if (other_replicas_count == 0) { zfree(other_replicas); break; } rand_idx = rand() % other_replicas_count; second = other_replicas[rand_idx]; char *first_master = first->replicate, *second_master = second->replicate; first->replicate = second_master, first->dirty = 1; second->replicate = first_master, second->dirty = 1; int new_score = clusterManagerGetAntiAffinityScore(ipnodes, ip_count, NULL, NULL); /* If the change actually makes thing worse, revert. Otherwise * leave as it is because the best solution may need a few * combined swaps. */ if (new_score > score) { first->replicate = first_master; second->replicate = second_master; } zfree(other_replicas); maxiter--; } score = clusterManagerGetAntiAffinityScore(ipnodes, ip_count, NULL, NULL); char *msg; int perfect = (score == 0); int log_level = (perfect ? 
CLUSTER_MANAGER_LOG_LVL_SUCCESS : CLUSTER_MANAGER_LOG_LVL_WARN); if (perfect) msg = "[OK] Perfect anti-affinity obtained!"; else if (score >= 10000) msg = ("[WARNING] Some slaves are in the same host as their master"); else msg=("[WARNING] Some slaves of the same master are in the same host"); clusterManagerLog(log_level, "%s\n", msg); cleanup: zfree(offenders); } /* Return a representable string of the node's flags */ static sds clusterManagerNodeFlagString(clusterManagerNode *node) { sds flags = sdsempty(); if (!node->flags_str) return flags; int empty = 1; listIter li; listNode *ln; listRewind(node->flags_str, &li); while ((ln = listNext(&li)) != NULL) { sds flag = ln->value; if (strcmp(flag, "myself") == 0) continue; if (!empty) flags = sdscat(flags, ","); flags = sdscatfmt(flags, "%S", flag); empty = 0; } return flags; } /* Return a representable string of the node's slots */ static sds clusterManagerNodeSlotsString(clusterManagerNode *node) { sds slots = sdsempty(); int first_range_idx = -1, last_slot_idx = -1, i; for (i = 0; i < CLUSTER_MANAGER_SLOTS; i++) { int has_slot = node->slots[i]; if (has_slot) { if (first_range_idx == -1) { if (sdslen(slots)) slots = sdscat(slots, ","); first_range_idx = i; slots = sdscatfmt(slots, "[%u", i); } last_slot_idx = i; } else { if (last_slot_idx >= 0) { if (first_range_idx == last_slot_idx) slots = sdscat(slots, "]"); else slots = sdscatfmt(slots, "-%u]", last_slot_idx); } last_slot_idx = -1; first_range_idx = -1; } } if (last_slot_idx >= 0) { if (first_range_idx == last_slot_idx) slots = sdscat(slots, "]"); else slots = sdscatfmt(slots, "-%u]", last_slot_idx); } return slots; } static sds clusterManagerNodeGetJSON(clusterManagerNode *node, unsigned long error_count) { sds json = sdsempty(); sds replicate = sdsempty(); if (node->replicate) replicate = sdscatprintf(replicate, "\"%s\"", node->replicate); else replicate = sdscat(replicate, "null"); sds slots = clusterManagerNodeSlotsString(node); sds flags = clusterManagerNodeFlagString(node); char *p = slots; while ((p = strchr(p, '-')) != NULL) *(p++) = ','; json = sdscatprintf(json, " {\n" " \"name\": \"%s\",\n" " \"host\": \"%s\",\n" " \"port\": %d,\n" " \"replicate\": %s,\n" " \"slots\": [%s],\n" " \"slots_count\": %d,\n" " \"flags\": \"%s\",\n" " \"current_epoch\": %llu", node->name, node->ip, node->port, replicate, slots, node->slots_count, flags, (unsigned long long)node->current_epoch ); if (error_count > 0) { json = sdscatprintf(json, ",\n \"cluster_errors\": %lu", error_count); } if (node->migrating_count > 0 && node->migrating != NULL) { int i = 0; sds migrating = sdsempty(); for (; i < node->migrating_count; i += 2) { sds slot = node->migrating[i]; sds dest = node->migrating[i + 1]; if (slot && dest) { if (sdslen(migrating) > 0) migrating = sdscat(migrating, ","); migrating = sdscatfmt(migrating, "\"%S\": \"%S\"", slot, dest); } } if (sdslen(migrating) > 0) json = sdscatfmt(json, ",\n \"migrating\": {%S}", migrating); sdsfree(migrating); } if (node->importing_count > 0 && node->importing != NULL) { int i = 0; sds importing = sdsempty(); for (; i < node->importing_count; i += 2) { sds slot = node->importing[i]; sds from = node->importing[i + 1]; if (slot && from) { if (sdslen(importing) > 0) importing = sdscat(importing, ","); importing = sdscatfmt(importing, "\"%S\": \"%S\"", slot, from); } } if (sdslen(importing) > 0) json = sdscatfmt(json, ",\n \"importing\": {%S}", importing); sdsfree(importing); } json = sdscat(json, "\n }"); sdsfree(replicate); sdsfree(slots); sdsfree(flags); 
return json; } /* ----------------------------------------------------------------------------- * Key space handling * -------------------------------------------------------------------------- */ /* We have 16384 hash slots. The hash slot of a given key is obtained * as the least significant 14 bits of the crc16 of the key. * * However if the key contains the {...} pattern, only the part between * { and } is hashed. This may be useful in the future to force certain * keys to be in the same node (assuming no resharding is in progress). */ static unsigned int clusterManagerKeyHashSlot(char *key, int keylen) { int s, e; /* start-end indexes of { and } */ for (s = 0; s < keylen; s++) if (key[s] == '{') break; /* No '{' ? Hash the whole key. This is the base case. */ if (s == keylen) return crc16(key,keylen) & 0x3FFF; /* '{' found? Check if we have the corresponding '}'. */ for (e = s+1; e < keylen; e++) if (key[e] == '}') break; /* No '}' or nothing between {} ? Hash the whole key. */ if (e == keylen || e == s+1) return crc16(key,keylen) & 0x3FFF; /* If we are here there is both a { and a } on its right. Hash * what is in the middle between { and }. */ return crc16(key+s+1,e-s-1) & 0x3FFF; } /* Return a string representation of the cluster node. */ static sds clusterManagerNodeInfo(clusterManagerNode *node, int indent) { sds info = sdsempty(); sds spaces = sdsempty(); int i; for (i = 0; i < indent; i++) spaces = sdscat(spaces, " "); if (indent) info = sdscat(info, spaces); int is_master = !(node->flags & CLUSTER_MANAGER_FLAG_SLAVE); char *role = (is_master ? "M" : "S"); sds slots = NULL; if (node->dirty && node->replicate != NULL) info = sdscatfmt(info, "S: %S %s:%u", node->name, node->ip, node->port); else { slots = clusterManagerNodeSlotsString(node); sds flags = clusterManagerNodeFlagString(node); info = sdscatfmt(info, "%s: %S %s:%u\n" "%s slots:%S (%u slots) " "%S", role, node->name, node->ip, node->port, spaces, slots, node->slots_count, flags); sdsfree(slots); sdsfree(flags); } if (node->replicate != NULL) info = sdscatfmt(info, "\n%s replicates %S", spaces, node->replicate); else if (node->replicas_count) info = sdscatfmt(info, "\n%s %U additional replica(s)", spaces, node->replicas_count); sdsfree(spaces); return info; } static void clusterManagerShowNodes(void) { listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; sds info = clusterManagerNodeInfo(node, 0); printf("%s\n", (char *) info); sdsfree(info); } } static void clusterManagerShowClusterInfo(void) { int masters = 0; int keys = 0; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; if (!(node->flags & CLUSTER_MANAGER_FLAG_SLAVE)) { if (!node->name) continue; int replicas = 0; int dbsize = -1; char name[9]; memcpy(name, node->name, 8); name[8] = '\0'; listIter ri; listNode *rn; listRewind(cluster_manager.nodes, &ri); while ((rn = listNext(&ri)) != NULL) { clusterManagerNode *n = rn->value; if (n == node || !(n->flags & CLUSTER_MANAGER_FLAG_SLAVE)) continue; if (n->replicate && !strcmp(n->replicate, node->name)) replicas++; } redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "DBSIZE"); if (reply != NULL && reply->type == REDIS_REPLY_INTEGER) dbsize = reply->integer; if (dbsize < 0) { char *err = ""; if (reply != NULL && reply->type == REDIS_REPLY_ERROR) err = reply->str; CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, err); if (reply != NULL) 
freeReplyObject(reply); return; }; if (reply != NULL) freeReplyObject(reply); printf("%s:%d (%s...) -> %d keys | %d slots | %d slaves.\n", node->ip, node->port, name, dbsize, node->slots_count, replicas); masters++; keys += dbsize; } } clusterManagerLogOk("[OK] %d keys in %d masters.\n", keys, masters); float keys_per_slot = keys / (float) CLUSTER_MANAGER_SLOTS; printf("%.2f keys per slot on average.\n", keys_per_slot); } /* Flush dirty slots configuration of the node by calling CLUSTER ADDSLOTS */ static int clusterManagerAddSlots(clusterManagerNode *node, char**err) { redisReply *reply = NULL; void *_reply = NULL; int success = 1; /* First two args are used for the command itself. */ int argc = node->slots_count + 2; sds *argv = zmalloc(argc * sizeof(*argv)); size_t *argvlen = zmalloc(argc * sizeof(*argvlen)); argv[0] = "CLUSTER"; argv[1] = "ADDSLOTS"; argvlen[0] = 7; argvlen[1] = 8; *err = NULL; int i, argv_idx = 2; for (i = 0; i < CLUSTER_MANAGER_SLOTS; i++) { if (argv_idx >= argc) break; if (node->slots[i]) { argv[argv_idx] = sdsfromlonglong((long long) i); argvlen[argv_idx] = sdslen(argv[argv_idx]); argv_idx++; } } if (argv_idx == 2) { success = 0; goto cleanup; } redisAppendCommandArgv(node->context,argc,(const char**)argv,argvlen); if (redisGetReply(node->context, &_reply) != REDIS_OK) { success = 0; goto cleanup; } reply = (redisReply*) _reply; success = clusterManagerCheckRedisReply(node, reply, err); cleanup: zfree(argvlen); if (argv != NULL) { for (i = 2; i < argc; i++) sdsfree(argv[i]); zfree(argv); } if (reply != NULL) freeReplyObject(reply); return success; } /* Get the node the slot is assigned to from the point of view of node *n. * If the slot is unassigned or if the reply is an error, return NULL. * Use the **err argument in order to check whether the slot is unassigned * or the reply resulted in an error. 
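 * Typical usage: initialize a char *err = NULL before the call; if NULL
 * is returned while err is still NULL the slot is simply unassigned,
 * otherwise err points to a heap-allocated copy of the error string that
 * the caller must zfree().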
*/ static clusterManagerNode *clusterManagerGetSlotOwner(clusterManagerNode *n, int slot, char **err) { assert(slot >= 0 && slot < CLUSTER_MANAGER_SLOTS); clusterManagerNode *owner = NULL; redisReply *reply = CLUSTER_MANAGER_COMMAND(n, "CLUSTER SLOTS"); if (clusterManagerCheckRedisReply(n, reply, err)) { assert(reply->type == REDIS_REPLY_ARRAY); size_t i; for (i = 0; i < reply->elements; i++) { redisReply *r = reply->element[i]; assert(r->type == REDIS_REPLY_ARRAY && r->elements >= 3); int from, to; from = r->element[0]->integer; to = r->element[1]->integer; if (slot < from || slot > to) continue; redisReply *nr = r->element[2]; assert(nr->type == REDIS_REPLY_ARRAY && nr->elements >= 2); char *name = NULL; if (nr->elements >= 3) name = nr->element[2]->str; if (name != NULL) owner = clusterManagerNodeByName(name); else { char *ip = nr->element[0]->str; assert(ip != NULL); int port = (int) nr->element[1]->integer; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *nd = ln->value; if (strcmp(nd->ip, ip) == 0 && port == nd->port) { owner = nd; break; } } } if (owner) break; } } if (reply) freeReplyObject(reply); return owner; } /* Set slot status to "importing" or "migrating" */ static int clusterManagerSetSlot(clusterManagerNode *node1, clusterManagerNode *node2, int slot, const char *status, char **err) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node1, "CLUSTER " "SETSLOT %d %s %s", slot, status, (char *) node2->name); if (err != NULL) *err = NULL; if (!reply) return 0; int success = 1; if (reply->type == REDIS_REPLY_ERROR) { success = 0; if (err != NULL) { *err = zmalloc((reply->len + 1) * sizeof(char)); strcpy(*err, reply->str); } else CLUSTER_MANAGER_PRINT_REPLY_ERROR(node1, reply->str); goto cleanup; } cleanup: freeReplyObject(reply); return success; } static int clusterManagerClearSlotStatus(clusterManagerNode *node, int slot) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER SETSLOT %d %s", slot, "STABLE"); int success = clusterManagerCheckRedisReply(node, reply, NULL); if (reply) freeReplyObject(reply); return success; } static int clusterManagerDelSlot(clusterManagerNode *node, int slot, int ignore_unassigned_err) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER DELSLOTS %d", slot); char *err = NULL; int success = clusterManagerCheckRedisReply(node, reply, &err); if (!success && reply && reply->type == REDIS_REPLY_ERROR && ignore_unassigned_err) { char *get_owner_err = NULL; clusterManagerNode *assigned_to = clusterManagerGetSlotOwner(node, slot, &get_owner_err); if (!assigned_to) { if (get_owner_err == NULL) success = 1; else { CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, get_owner_err); zfree(get_owner_err); } } } if (!success && err != NULL) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, err); zfree(err); } if (reply) freeReplyObject(reply); return success; } static int clusterManagerAddSlot(clusterManagerNode *node, int slot) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER ADDSLOTS %d", slot); int success = clusterManagerCheckRedisReply(node, reply, NULL); if (reply) freeReplyObject(reply); return success; } static signed int clusterManagerCountKeysInSlot(clusterManagerNode *node, int slot) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER COUNTKEYSINSLOT %d", slot); int count = -1; int success = clusterManagerCheckRedisReply(node, reply, NULL); if (success && reply->type == REDIS_REPLY_INTEGER) count = reply->integer; if (reply) freeReplyObject(reply); return count; 
} static int clusterManagerBumpEpoch(clusterManagerNode *node) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER BUMPEPOCH"); int success = clusterManagerCheckRedisReply(node, reply, NULL); if (reply) freeReplyObject(reply); return success; } /* Callback used by clusterManagerSetSlotOwner transaction. It should ignore * errors except for ADDSLOTS errors. * Return 1 if the error should be ignored. */ static int clusterManagerOnSetOwnerErr(redisReply *reply, clusterManagerNode *n, int bulk_idx) { UNUSED(reply); UNUSED(n); /* Only raise error when ADDSLOTS fail (bulk_idx == 1). */ return (bulk_idx != 1); } static int clusterManagerSetSlotOwner(clusterManagerNode *owner, int slot, int do_clear) { int success = clusterManagerStartTransaction(owner); if (!success) return 0; /* Ensure the slot is not already assigned. */ clusterManagerDelSlot(owner, slot, 1); /* Add the slot and bump epoch. */ clusterManagerAddSlot(owner, slot); if (do_clear) clusterManagerClearSlotStatus(owner, slot); clusterManagerBumpEpoch(owner); success = clusterManagerExecTransaction(owner, clusterManagerOnSetOwnerErr); return success; } /* Get the hash for the values of the specified keys in *keys_reply for the * specified nodes *n1 and *n2, by calling DEBUG DIGEST-VALUE redis command * on both nodes. Every key with same name on both nodes but having different * values will be added to the *diffs list. Return 0 in case of reply * error. */ static int clusterManagerCompareKeysValues(clusterManagerNode *n1, clusterManagerNode *n2, redisReply *keys_reply, list *diffs) { size_t i, argc = keys_reply->elements + 2; static const char *hash_zero = "0000000000000000000000000000000000000000"; char **argv = zcalloc(argc * sizeof(char *)); size_t *argv_len = zcalloc(argc * sizeof(size_t)); argv[0] = "DEBUG"; argv_len[0] = 5; argv[1] = "DIGEST-VALUE"; argv_len[1] = 12; for (i = 0; i < keys_reply->elements; i++) { redisReply *entry = keys_reply->element[i]; int idx = i + 2; argv[idx] = entry->str; argv_len[idx] = entry->len; } int success = 0; void *_reply1 = NULL, *_reply2 = NULL; redisReply *r1 = NULL, *r2 = NULL; redisAppendCommandArgv(n1->context,argc, (const char**)argv,argv_len); success = (redisGetReply(n1->context, &_reply1) == REDIS_OK); if (!success) goto cleanup; r1 = (redisReply *) _reply1; redisAppendCommandArgv(n2->context,argc, (const char**)argv,argv_len); success = (redisGetReply(n2->context, &_reply2) == REDIS_OK); if (!success) goto cleanup; r2 = (redisReply *) _reply2; success = (r1->type != REDIS_REPLY_ERROR && r2->type != REDIS_REPLY_ERROR); if (r1->type == REDIS_REPLY_ERROR) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(n1, r1->str); success = 0; } if (r2->type == REDIS_REPLY_ERROR) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(n2, r2->str); success = 0; } if (!success) goto cleanup; assert(keys_reply->elements == r1->elements && keys_reply->elements == r2->elements); for (i = 0; i < keys_reply->elements; i++) { char *key = keys_reply->element[i]->str; char *hash1 = r1->element[i]->str; char *hash2 = r2->element[i]->str; /* Ignore keys that don't exist in both nodes. */ if (strcmp(hash1, hash_zero) == 0 || strcmp(hash2, hash_zero) == 0) continue; if (strcmp(hash1, hash2) != 0) listAddNodeTail(diffs, key); } cleanup: if (r1) freeReplyObject(r1); if (r2) freeReplyObject(r2); zfree(argv); zfree(argv_len); return success; } /* Migrate keys taken from reply->elements. It returns the reply from the * MIGRATE command, or NULL if something goes wrong. 
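 * The command is built in the multi-key form
 * MIGRATE <host> <port> "" 0 <timeout> [REPLACE] [AUTH ...|AUTH2 ...]
 * KEYS key1 key2 ..., that is, with an empty key and destination db 0.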
If the argument 'dots' * is not NULL, a dot will be printed for every migrated key. */ static redisReply *clusterManagerMigrateKeysInReply(clusterManagerNode *source, clusterManagerNode *target, redisReply *reply, int replace, int timeout, char *dots) { redisReply *migrate_reply = NULL; char **argv = NULL; size_t *argv_len = NULL; int c = (replace ? 8 : 7); if (config.auth) c += 2; if (config.user) c += 1; size_t argc = c + reply->elements; size_t i, offset = 6; // Keys Offset argv = zcalloc(argc * sizeof(char *)); argv_len = zcalloc(argc * sizeof(size_t)); char portstr[255]; char timeoutstr[255]; snprintf(portstr, 10, "%d", target->port); snprintf(timeoutstr, 10, "%d", timeout); argv[0] = "MIGRATE"; argv_len[0] = 7; argv[1] = target->ip; argv_len[1] = strlen(target->ip); argv[2] = portstr; argv_len[2] = strlen(portstr); argv[3] = ""; argv_len[3] = 0; argv[4] = "0"; argv_len[4] = 1; argv[5] = timeoutstr; argv_len[5] = strlen(timeoutstr); if (replace) { argv[offset] = "REPLACE"; argv_len[offset] = 7; offset++; } if (config.auth) { if (config.user) { argv[offset] = "AUTH2"; argv_len[offset] = 5; offset++; argv[offset] = config.user; argv_len[offset] = strlen(config.user); offset++; argv[offset] = config.auth; argv_len[offset] = strlen(config.auth); offset++; } else { argv[offset] = "AUTH"; argv_len[offset] = 4; offset++; argv[offset] = config.auth; argv_len[offset] = strlen(config.auth); offset++; } } argv[offset] = "KEYS"; argv_len[offset] = 4; offset++; for (i = 0; i < reply->elements; i++) { redisReply *entry = reply->element[i]; size_t idx = i + offset; assert(entry->type == REDIS_REPLY_STRING); argv[idx] = (char *) sdsnewlen(entry->str, entry->len); argv_len[idx] = entry->len; if (dots) dots[i] = '.'; } if (dots) dots[reply->elements] = '\0'; void *_reply = NULL; redisAppendCommandArgv(source->context,argc, (const char**)argv,argv_len); int success = (redisGetReply(source->context, &_reply) == REDIS_OK); for (i = 0; i < reply->elements; i++) sdsfree(argv[i + offset]); if (!success) goto cleanup; migrate_reply = (redisReply *) _reply; cleanup: zfree(argv); zfree(argv_len); return migrate_reply; } /* Migrate all keys in the given slot from source to target.*/ static int clusterManagerMigrateKeysInSlot(clusterManagerNode *source, clusterManagerNode *target, int slot, int timeout, int pipeline, int verbose, char **err) { int success = 1; int do_fix = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_FIX; int do_replace = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_REPLACE; while (1) { char *dots = NULL; redisReply *reply = NULL, *migrate_reply = NULL; reply = CLUSTER_MANAGER_COMMAND(source, "CLUSTER " "GETKEYSINSLOT %d %d", slot, pipeline); success = (reply != NULL); if (!success) return 0; if (reply->type == REDIS_REPLY_ERROR) { success = 0; if (err != NULL) { *err = zmalloc((reply->len + 1) * sizeof(char)); strcpy(*err, reply->str); CLUSTER_MANAGER_PRINT_REPLY_ERROR(source, *err); } goto next; } assert(reply->type == REDIS_REPLY_ARRAY); size_t count = reply->elements; if (count == 0) { freeReplyObject(reply); break; } if (verbose) dots = zmalloc((count+1) * sizeof(char)); /* Calling MIGRATE command. */ migrate_reply = clusterManagerMigrateKeysInReply(source, target, reply, 0, timeout, dots); if (migrate_reply == NULL) goto next; if (migrate_reply->type == REDIS_REPLY_ERROR) { int is_busy = strstr(migrate_reply->str, "BUSYKEY") != NULL; int not_served = 0; if (!is_busy) { /* Check if the slot is unassigned (not served) in the * source node's configuration. 
*/ char *get_owner_err = NULL; clusterManagerNode *served_by = clusterManagerGetSlotOwner(source, slot, &get_owner_err); if (!served_by) { if (get_owner_err == NULL) not_served = 1; else { CLUSTER_MANAGER_PRINT_REPLY_ERROR(source, get_owner_err); zfree(get_owner_err); } } } /* Try to handle errors. */ if (is_busy || not_served) { /* If the key's slot is not served, try to assign slot * to the target node. */ if (do_fix && not_served) { clusterManagerLogWarn("*** Slot was not served, setting " "owner to node %s:%d.\n", target->ip, target->port); clusterManagerSetSlot(source, target, slot, "node", NULL); } /* If the key already exists in the target node (BUSYKEY), * check whether its value is the same in both nodes. * In case of equal values, retry migration with the * REPLACE option. * In case of different values: * - If the migration is requested by the fix command, stop * and warn the user. * - In other cases (ie. reshard), proceed only if the user * launched the command with the --cluster-replace option.*/ if (is_busy) { clusterManagerLogWarn("\n*** Target key exists\n"); if (!do_replace) { clusterManagerLogWarn("*** Checking key values on " "both nodes...\n"); list *diffs = listCreate(); success = clusterManagerCompareKeysValues(source, target, reply, diffs); if (!success) { clusterManagerLogErr("*** Value check failed!\n"); listRelease(diffs); goto next; } if (listLength(diffs) > 0) { success = 0; clusterManagerLogErr( "*** Found %d key(s) in both source node and " "target node having different values.\n" " Source node: %s:%d\n" " Target node: %s:%d\n" " Keys(s):\n", listLength(diffs), source->ip, source->port, target->ip, target->port); listIter dli; listNode *dln; listRewind(diffs, &dli); while((dln = listNext(&dli)) != NULL) { char *k = dln->value; clusterManagerLogErr(" - %s\n", k); } clusterManagerLogErr("Please fix the above key(s) " "manually and try again " "or relaunch the command \n" "with --cluster-replace " "option to force key " "overriding.\n"); listRelease(diffs); goto next; } listRelease(diffs); } clusterManagerLogWarn("*** Replacing target keys...\n"); } freeReplyObject(migrate_reply); migrate_reply = clusterManagerMigrateKeysInReply(source, target, reply, is_busy, timeout, NULL); success = (migrate_reply != NULL && migrate_reply->type != REDIS_REPLY_ERROR); } else success = 0; if (!success) { if (migrate_reply != NULL) { if (err) { *err = zmalloc((migrate_reply->len + 1) * sizeof(char)); strcpy(*err, migrate_reply->str); } printf("\n"); CLUSTER_MANAGER_PRINT_REPLY_ERROR(source, migrate_reply->str); } goto next; } } if (verbose) { printf("%s", dots); fflush(stdout); } next: if (reply != NULL) freeReplyObject(reply); if (migrate_reply != NULL) freeReplyObject(migrate_reply); if (dots) zfree(dots); if (!success) break; } return success; } /* Move slots between source and target nodes using MIGRATE. * * Options: * CLUSTER_MANAGER_OPT_VERBOSE -- Print a dot for every moved key. * CLUSTER_MANAGER_OPT_COLD -- Move keys without opening slots / * reconfiguring the nodes. * CLUSTER_MANAGER_OPT_UPDATE -- Update node->slots for source/target nodes. * CLUSTER_MANAGER_OPT_QUIET -- Don't print info messages. 
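 * With CLUSTER_MANAGER_OPT_COLD only the keys are moved: the importing
 * and migrating slot states are never set, and slot ownership is not
 * reassigned on the other nodes afterwards.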
*/ static int clusterManagerMoveSlot(clusterManagerNode *source, clusterManagerNode *target, int slot, int opts, char**err) { if (!(opts & CLUSTER_MANAGER_OPT_QUIET)) { printf("Moving slot %d from %s:%d to %s:%d: ", slot, source->ip, source->port, target->ip, target->port); fflush(stdout); } if (err != NULL) *err = NULL; int pipeline = config.cluster_manager_command.pipeline, timeout = config.cluster_manager_command.timeout, print_dots = (opts & CLUSTER_MANAGER_OPT_VERBOSE), option_cold = (opts & CLUSTER_MANAGER_OPT_COLD), success = 1; if (!option_cold) { success = clusterManagerSetSlot(target, source, slot, "importing", err); if (!success) return 0; success = clusterManagerSetSlot(source, target, slot, "migrating", err); if (!success) return 0; } success = clusterManagerMigrateKeysInSlot(source, target, slot, timeout, pipeline, print_dots, err); if (!(opts & CLUSTER_MANAGER_OPT_QUIET)) printf("\n"); if (!success) return 0; /* Set the new node as the owner of the slot in all the known nodes. */ if (!option_cold) { listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER " "SETSLOT %d %s %s", slot, "node", target->name); success = (r != NULL); if (!success) return 0; if (r->type == REDIS_REPLY_ERROR) { success = 0; if (err != NULL) { *err = zmalloc((r->len + 1) * sizeof(char)); strcpy(*err, r->str); CLUSTER_MANAGER_PRINT_REPLY_ERROR(n, *err); } } freeReplyObject(r); if (!success) return 0; } } /* Update the node logical config */ if (opts & CLUSTER_MANAGER_OPT_UPDATE) { source->slots[slot] = 0; target->slots[slot] = 1; } return 1; } /* Flush the dirty node configuration by calling replicate for slaves or * adding the slots defined in the masters. */ static int clusterManagerFlushNodeConfig(clusterManagerNode *node, char **err) { if (!node->dirty) return 0; redisReply *reply = NULL; int is_err = 0, success = 1; if (err != NULL) *err = NULL; if (node->replicate != NULL) { reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER REPLICATE %s", node->replicate); if (reply == NULL || (is_err = (reply->type == REDIS_REPLY_ERROR))) { if (is_err && err != NULL) { *err = zmalloc((reply->len + 1) * sizeof(char)); strcpy(*err, reply->str); } success = 0; /* If the cluster did not already joined it is possible that * the slave does not know the master node yet. So on errors * we return ASAP leaving the dirty flag set, to flush the * config later. */ goto cleanup; } } else { int added = clusterManagerAddSlots(node, err); if (!added || *err != NULL) success = 0; } node->dirty = 0; cleanup: if (reply != NULL) freeReplyObject(reply); return success; } /* Wait until the cluster configuration is consistent. 
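 * That is, loop until every node reports the same slots configuration
 * (see clusterManagerIsConfigConsistent), printing a dot every second
 * and periodically listing the cluster bus links that look unreachable.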
*/ static void clusterManagerWaitForClusterJoin(void) { printf("Waiting for the cluster to join\n"); int counter = 0, check_after = CLUSTER_JOIN_CHECK_AFTER + (int)(listLength(cluster_manager.nodes) * 0.15f); while(!clusterManagerIsConfigConsistent()) { printf("."); fflush(stdout); sleep(1); if (++counter > check_after) { dict *status = clusterManagerGetLinkStatus(); dictIterator *iter = NULL; if (status != NULL && dictSize(status) > 0) { printf("\n"); clusterManagerLogErr("Warning: %d node(s) may " "be unreachable\n", dictSize(status)); iter = dictGetIterator(status); dictEntry *entry; while ((entry = dictNext(iter)) != NULL) { sds nodeaddr = (sds) dictGetKey(entry); char *node_ip = NULL; int node_port = 0, node_bus_port = 0; list *from = (list *) dictGetVal(entry); if (parseClusterNodeAddress(nodeaddr, &node_ip, &node_port, &node_bus_port) && node_bus_port) { clusterManagerLogErr(" - The port %d of node %s may " "be unreachable from:\n", node_bus_port, node_ip); } else { clusterManagerLogErr(" - Node %s may be unreachable " "from:\n", nodeaddr); } listIter li; listNode *ln; listRewind(from, &li); while ((ln = listNext(&li)) != NULL) { sds from_addr = ln->value; clusterManagerLogErr(" %s\n", from_addr); sdsfree(from_addr); } clusterManagerLogErr("Cluster bus ports must be reachable " "by every node.\nRemember that " "cluster bus ports are different " "from standard instance ports.\n"); listEmpty(from); } } if (iter != NULL) dictReleaseIterator(iter); if (status != NULL) dictRelease(status); counter = 0; } } printf("\n"); } /* Load node's cluster configuration by calling "CLUSTER NODES" command. * Node's configuration (name, replicate, slots, ...) is then updated. * If CLUSTER_MANAGER_OPT_GETFRIENDS flag is set into 'opts' argument, * and node already knows other nodes, the node's friends list is populated * with the other nodes info. 
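 * Each line of the CLUSTER NODES reply is expected to have the form:
 * <id> <ip:port[@bus-port]> <flags> <master-id> <ping-sent> <ping-recv>
 * <config-epoch> <link-state> [<slot> ...]
 * where a slot entry may also be a range (start-stop) or an open
 * importing/migrating spec like [slot-<-source] or [slot->-destination].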
*/ static int clusterManagerNodeLoadInfo(clusterManagerNode *node, int opts, char **err) { redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER NODES"); int success = 1; *err = NULL; if (!clusterManagerCheckRedisReply(node, reply, err)) { success = 0; goto cleanup; } int getfriends = (opts & CLUSTER_MANAGER_OPT_GETFRIENDS); char *lines = reply->str, *p, *line; while ((p = strstr(lines, "\n")) != NULL) { *p = '\0'; line = lines; lines = p + 1; char *name = NULL, *addr = NULL, *flags = NULL, *master_id = NULL, *ping_sent = NULL, *ping_recv = NULL, *config_epoch = NULL, *link_status = NULL; UNUSED(link_status); int i = 0; while ((p = strchr(line, ' ')) != NULL) { *p = '\0'; char *token = line; line = p + 1; switch(i++){ case 0: name = token; break; case 1: addr = token; break; case 2: flags = token; break; case 3: master_id = token; break; case 4: ping_sent = token; break; case 5: ping_recv = token; break; case 6: config_epoch = token; break; case 7: link_status = token; break; } if (i == 8) break; // Slots } if (!flags) { success = 0; goto cleanup; } int myself = (strstr(flags, "myself") != NULL); clusterManagerNode *currentNode = NULL; if (myself) { node->flags |= CLUSTER_MANAGER_FLAG_MYSELF; currentNode = node; clusterManagerNodeResetSlots(node); if (i == 8) { int remaining = strlen(line); while (remaining > 0) { p = strchr(line, ' '); if (p == NULL) p = line + remaining; remaining -= (p - line); char *slotsdef = line; *p = '\0'; if (remaining) { line = p + 1; remaining--; } else line = p; char *dash = NULL; if (slotsdef[0] == '[') { slotsdef++; if ((p = strstr(slotsdef, "->-"))) { // Migrating *p = '\0'; p += 3; char *closing_bracket = strchr(p, ']'); if (closing_bracket) *closing_bracket = '\0'; sds slot = sdsnew(slotsdef); sds dst = sdsnew(p); node->migrating_count += 2; node->migrating = zrealloc(node->migrating, (node->migrating_count * sizeof(sds))); node->migrating[node->migrating_count - 2] = slot; node->migrating[node->migrating_count - 1] = dst; } else if ((p = strstr(slotsdef, "-<-"))) {//Importing *p = '\0'; p += 3; char *closing_bracket = strchr(p, ']'); if (closing_bracket) *closing_bracket = '\0'; sds slot = sdsnew(slotsdef); sds src = sdsnew(p); node->importing_count += 2; node->importing = zrealloc(node->importing, (node->importing_count * sizeof(sds))); node->importing[node->importing_count - 2] = slot; node->importing[node->importing_count - 1] = src; } } else if ((dash = strchr(slotsdef, '-')) != NULL) { p = dash; int start, stop; *p = '\0'; start = atoi(slotsdef); stop = atoi(p + 1); node->slots_count += (stop - (start - 1)); while (start <= stop) node->slots[start++] = 1; } else if (p > slotsdef) { node->slots[atoi(slotsdef)] = 1; node->slots_count++; } } } node->dirty = 0; } else if (!getfriends) { if (!(node->flags & CLUSTER_MANAGER_FLAG_MYSELF)) continue; else break; } else { if (addr == NULL) { fprintf(stderr, "Error: invalid CLUSTER NODES reply\n"); success = 0; goto cleanup; } char *c = strrchr(addr, '@'); if (c != NULL) *c = '\0'; c = strrchr(addr, ':'); if (c == NULL) { fprintf(stderr, "Error: invalid CLUSTER NODES reply\n"); success = 0; goto cleanup; } *c = '\0'; int port = atoi(++c); currentNode = clusterManagerNewNode(sdsnew(addr), port); currentNode->flags |= CLUSTER_MANAGER_FLAG_FRIEND; if (node->friends == NULL) node->friends = listCreate(); listAddNodeTail(node->friends, currentNode); } if (name != NULL) { if (currentNode->name) sdsfree(currentNode->name); currentNode->name = sdsnew(name); } if (currentNode->flags_str != NULL) 
freeClusterManagerNodeFlags(currentNode->flags_str); currentNode->flags_str = listCreate(); int flag_len; while ((flag_len = strlen(flags)) > 0) { sds flag = NULL; char *fp = strchr(flags, ','); if (fp) { *fp = '\0'; flag = sdsnew(flags); flags = fp + 1; } else { flag = sdsnew(flags); flags += flag_len; } if (strcmp(flag, "noaddr") == 0) currentNode->flags |= CLUSTER_MANAGER_FLAG_NOADDR; else if (strcmp(flag, "disconnected") == 0) currentNode->flags |= CLUSTER_MANAGER_FLAG_DISCONNECT; else if (strcmp(flag, "fail") == 0) currentNode->flags |= CLUSTER_MANAGER_FLAG_FAIL; else if (strcmp(flag, "slave") == 0) { currentNode->flags |= CLUSTER_MANAGER_FLAG_SLAVE; if (master_id != NULL) { if (currentNode->replicate) sdsfree(currentNode->replicate); currentNode->replicate = sdsnew(master_id); } } listAddNodeTail(currentNode->flags_str, flag); } if (config_epoch != NULL) currentNode->current_epoch = atoll(config_epoch); if (ping_sent != NULL) currentNode->ping_sent = atoll(ping_sent); if (ping_recv != NULL) currentNode->ping_recv = atoll(ping_recv); if (!getfriends && myself) break; } cleanup: if (reply) freeReplyObject(reply); return success; } /* Retrieves info about the cluster using argument 'node' as the starting * point. All nodes will be loaded inside the cluster_manager.nodes list. * Warning: if something goes wrong, it will free the starting node before * returning 0. */ static int clusterManagerLoadInfoFromNode(clusterManagerNode *node) { if (node->context == NULL && !clusterManagerNodeConnect(node)) { freeClusterManagerNode(node); return 0; } char *e = NULL; if (!clusterManagerNodeIsCluster(node, &e)) { clusterManagerPrintNotClusterNodeError(node, e); if (e) zfree(e); freeClusterManagerNode(node); return 0; } e = NULL; if (!clusterManagerNodeLoadInfo(node, CLUSTER_MANAGER_OPT_GETFRIENDS, &e)) { if (e) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, e); zfree(e); } freeClusterManagerNode(node); return 0; } listIter li; listNode *ln; if (cluster_manager.nodes != NULL) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) freeClusterManagerNode((clusterManagerNode *) ln->value); listRelease(cluster_manager.nodes); } cluster_manager.nodes = listCreate(); listAddNodeTail(cluster_manager.nodes, node); if (node->friends != NULL) { listRewind(node->friends, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *friend = ln->value; if (!friend->ip || !friend->port) goto invalid_friend; if (!friend->context && !clusterManagerNodeConnect(friend)) goto invalid_friend; e = NULL; if (clusterManagerNodeLoadInfo(friend, 0, &e)) { if (friend->flags & (CLUSTER_MANAGER_FLAG_NOADDR | CLUSTER_MANAGER_FLAG_DISCONNECT | CLUSTER_MANAGER_FLAG_FAIL)) { goto invalid_friend; } listAddNodeTail(cluster_manager.nodes, friend); } else { clusterManagerLogErr("[ERR] Unable to load info for " "node %s:%d\n", friend->ip, friend->port); goto invalid_friend; } continue; invalid_friend: if (!(friend->flags & CLUSTER_MANAGER_FLAG_SLAVE)) cluster_manager.unreachable_masters++; freeClusterManagerNode(friend); } listRelease(node->friends); node->friends = NULL; } // Count replicas for each node listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->replicate != NULL) { clusterManagerNode *master = clusterManagerNodeByName(n->replicate); if (master == NULL) { clusterManagerLogWarn("*** WARNING: %s:%d claims to be " "slave of unknown node ID %s.\n", n->ip, n->port, n->replicate); } else master->replicas_count++; } } return 1; } /* Compare 
functions used by various sorting operations. */ int clusterManagerSlotCompare(const void *slot1, const void *slot2) { const char **i1 = (const char **)slot1; const char **i2 = (const char **)slot2; return strcmp(*i1, *i2); } int clusterManagerSlotCountCompareDesc(const void *n1, const void *n2) { clusterManagerNode *node1 = *((clusterManagerNode **) n1); clusterManagerNode *node2 = *((clusterManagerNode **) n2); return node2->slots_count - node1->slots_count; } int clusterManagerCompareNodeBalance(const void *n1, const void *n2) { clusterManagerNode *node1 = *((clusterManagerNode **) n1); clusterManagerNode *node2 = *((clusterManagerNode **) n2); return node1->balance - node2->balance; } static sds clusterManagerGetConfigSignature(clusterManagerNode *node) { sds signature = NULL; int node_count = 0, i = 0, name_len = 0; char **node_configs = NULL; redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER NODES"); if (reply == NULL || reply->type == REDIS_REPLY_ERROR) goto cleanup; char *lines = reply->str, *p, *line; while ((p = strstr(lines, "\n")) != NULL) { i = 0; *p = '\0'; line = lines; lines = p + 1; char *nodename = NULL; int tot_size = 0; while ((p = strchr(line, ' ')) != NULL) { *p = '\0'; char *token = line; line = p + 1; if (i == 0) { nodename = token; tot_size = (p - token); name_len = tot_size++; // Make room for ':' in tot_size } if (++i == 8) break; } if (i != 8) continue; if (nodename == NULL) continue; int remaining = strlen(line); if (remaining == 0) continue; char **slots = NULL; int c = 0; while (remaining > 0) { p = strchr(line, ' '); if (p == NULL) p = line + remaining; int size = (p - line); remaining -= size; tot_size += size; char *slotsdef = line; *p = '\0'; if (remaining) { line = p + 1; remaining--; } else line = p; if (slotsdef[0] != '[') { c++; slots = zrealloc(slots, (c * sizeof(char *))); slots[c - 1] = slotsdef; } } if (c > 0) { if (c > 1) qsort(slots, c, sizeof(char *), clusterManagerSlotCompare); node_count++; node_configs = zrealloc(node_configs, (node_count * sizeof(char *))); /* Make room for '|' separators. 
*/ tot_size += (sizeof(char) * (c - 1)); char *cfg = zmalloc((sizeof(char) * tot_size) + 1); memcpy(cfg, nodename, name_len); char *sp = cfg + name_len; *(sp++) = ':'; for (i = 0; i < c; i++) { if (i > 0) *(sp++) = ','; int slen = strlen(slots[i]); memcpy(sp, slots[i], slen); sp += slen; } *(sp++) = '\0'; node_configs[node_count - 1] = cfg; } zfree(slots); } if (node_count > 0) { if (node_count > 1) { qsort(node_configs, node_count, sizeof(char *), clusterManagerSlotCompare); } signature = sdsempty(); for (i = 0; i < node_count; i++) { if (i > 0) signature = sdscatprintf(signature, "%c", '|'); signature = sdscatfmt(signature, "%s", node_configs[i]); } } cleanup: if (reply != NULL) freeReplyObject(reply); if (node_configs != NULL) { for (i = 0; i < node_count; i++) zfree(node_configs[i]); zfree(node_configs); } return signature; } static int clusterManagerIsConfigConsistent(void) { if (cluster_manager.nodes == NULL) return 0; int consistent = (listLength(cluster_manager.nodes) <= 1); // If the Cluster has only one node, it's always consistent if (consistent) return 1; sds first_cfg = NULL; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; sds cfg = clusterManagerGetConfigSignature(node); if (cfg == NULL) { consistent = 0; break; } if (first_cfg == NULL) first_cfg = cfg; else { consistent = !sdscmp(first_cfg, cfg); sdsfree(cfg); if (!consistent) break; } } if (first_cfg != NULL) sdsfree(first_cfg); return consistent; } static list *clusterManagerGetDisconnectedLinks(clusterManagerNode *node) { list *links = NULL; redisReply *reply = CLUSTER_MANAGER_COMMAND(node, "CLUSTER NODES"); if (!clusterManagerCheckRedisReply(node, reply, NULL)) goto cleanup; links = listCreate(); char *lines = reply->str, *p, *line; while ((p = strstr(lines, "\n")) != NULL) { int i = 0; *p = '\0'; line = lines; lines = p + 1; char *nodename = NULL, *addr = NULL, *flags = NULL, *link_status = NULL; while ((p = strchr(line, ' ')) != NULL) { *p = '\0'; char *token = line; line = p + 1; if (i == 0) nodename = token; else if (i == 1) addr = token; else if (i == 2) flags = token; else if (i == 7) link_status = token; else if (i == 8) break; i++; } if (i == 7) link_status = line; if (nodename == NULL || addr == NULL || flags == NULL || link_status == NULL) continue; if (strstr(flags, "myself") != NULL) continue; int disconnected = ((strstr(flags, "disconnected") != NULL) || (strstr(link_status, "disconnected"))); int handshaking = (strstr(flags, "handshake") != NULL); if (disconnected || handshaking) { clusterManagerLink *link = zmalloc(sizeof(*link)); link->node_name = sdsnew(nodename); link->node_addr = sdsnew(addr); link->connected = 0; link->handshaking = handshaking; listAddNodeTail(links, link); } } cleanup: if (reply != NULL) freeReplyObject(reply); return links; } /* Check for disconnected cluster links. It returns a dict whose keys * are the unreachable node addresses and the values are lists of * node addresses that cannot reach the unreachable node. 
*/
static dict *clusterManagerGetLinkStatus(void) {
    if (cluster_manager.nodes == NULL) return NULL;
    dict *status = dictCreate(&clusterManagerLinkDictType);
    listIter li;
    listNode *ln;
    listRewind(cluster_manager.nodes, &li);
    while ((ln = listNext(&li)) != NULL) {
        clusterManagerNode *node = ln->value;
        list *links = clusterManagerGetDisconnectedLinks(node);
        if (links) {
            listIter lli;
            listNode *lln;
            listRewind(links, &lli);
            while ((lln = listNext(&lli)) != NULL) {
                clusterManagerLink *link = lln->value;
                list *from = NULL;
                dictEntry *entry = dictFind(status, link->node_addr);
                if (entry) from = dictGetVal(entry);
                else {
                    from = listCreate();
                    dictAdd(status, sdsdup(link->node_addr), from);
                }
                sds myaddr = sdsempty();
                myaddr = sdscatfmt(myaddr, "%s:%u", node->ip, node->port);
                listAddNodeTail(from, myaddr);
                sdsfree(link->node_name);
                sdsfree(link->node_addr);
                zfree(link);
            }
            listRelease(links);
        }
    }
    return status;
}

/* Add the error string to cluster_manager.errors and print it. */
static void clusterManagerOnError(sds err) {
    if (cluster_manager.errors == NULL)
        cluster_manager.errors = listCreate();
    listAddNodeTail(cluster_manager.errors, err);
    clusterManagerLogErr("%s\n", (char *) err);
}

/* Check the slots coverage of the cluster. The 'all_slots' argument must be
 * an array of 16384 bytes. Every covered slot will be set to 1 in the
 * 'all_slots' array. The function returns the total number of covered
 * slots. */
static int clusterManagerGetCoveredSlots(char *all_slots) {
    if (cluster_manager.nodes == NULL) return 0;
    listIter li;
    listNode *ln;
    listRewind(cluster_manager.nodes, &li);
    int totslots = 0, i;
    while ((ln = listNext(&li)) != NULL) {
        clusterManagerNode *node = ln->value;
        for (i = 0; i < CLUSTER_MANAGER_SLOTS; i++) {
            if (node->slots[i] && !all_slots[i]) {
                all_slots[i] = 1;
                totslots++;
            }
        }
    }
    return totslots;
}

static void clusterManagerPrintSlotsList(list *slots) {
    clusterManagerNode n = {0};
    listIter li;
    listNode *ln;
    listRewind(slots, &li);
    while ((ln = listNext(&li)) != NULL) {
        int slot = atoi(ln->value);
        if (slot >= 0 && slot < CLUSTER_MANAGER_SLOTS)
            n.slots[slot] = 1;
    }
    sds nodeslist = clusterManagerNodeSlotsString(&n);
    printf("%s\n", nodeslist);
    sdsfree(nodeslist);
}

/* Return the node, among 'nodes', with the greatest number of keys
 * in the specified slot. */
static clusterManagerNode *clusterManagerGetNodeWithMostKeysInSlot(list *nodes,
                                                                   int slot,
                                                                   char **err)
{
    clusterManagerNode *node = NULL;
    int numkeys = 0;
    listIter li;
    listNode *ln;
    listRewind(nodes, &li);
    if (err) *err = NULL;
    while ((ln = listNext(&li)) != NULL) {
        clusterManagerNode *n = ln->value;
        if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE || n->replicate)
            continue;
        redisReply *r =
            CLUSTER_MANAGER_COMMAND(n, "CLUSTER COUNTKEYSINSLOT %d", slot);
        int success = clusterManagerCheckRedisReply(n, r, err);
        if (success) {
            if (r->integer > numkeys || node == NULL) {
                numkeys = r->integer;
                node = n;
            }
        }
        if (r != NULL) freeReplyObject(r);
        /* If the reply contains errors. */
        if (!success) {
            if (err != NULL && *err != NULL)
                CLUSTER_MANAGER_PRINT_REPLY_ERROR(n, *err);
            node = NULL;
            break;
        }
    }
    return node;
}

/* This function returns the master that has the least number of replicas
 * in the cluster. If there are multiple masters with the same smallest
 * number of replicas, one of them is returned at random.
*/ static clusterManagerNode *clusterManagerNodeWithLeastReplicas() { clusterManagerNode *node = NULL; int lowest_count = 0; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; if (node == NULL || n->replicas_count < lowest_count) { node = n; lowest_count = n->replicas_count; } } return node; } /* This function returns a random master node, return NULL if none */ static clusterManagerNode *clusterManagerNodeMasterRandom() { int master_count = 0; int idx; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; master_count++; } assert(master_count > 0); srand(time(NULL)); idx = rand() % master_count; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; if (!idx--) { return n; } } /* Can not be reached */ assert(0); } static int clusterManagerFixSlotsCoverage(char *all_slots) { int force_fix = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_FIX_WITH_UNREACHABLE_MASTERS; if (cluster_manager.unreachable_masters > 0 && !force_fix) { clusterManagerLogWarn("*** Fixing slots coverage with %d unreachable masters is dangerous: redis-cli will assume that slots about masters that are not reachable are not covered, and will try to reassign them to the reachable nodes. This can cause data loss and is rarely what you want to do. If you really want to proceed use the --cluster-fix-with-unreachable-masters option.\n", cluster_manager.unreachable_masters); exit(1); } int i, fixed = 0; list *none = NULL, *single = NULL, *multi = NULL; clusterManagerLogInfo(">>> Fixing slots coverage...\n"); for (i = 0; i < CLUSTER_MANAGER_SLOTS; i++) { int covered = all_slots[i]; if (!covered) { sds slot = sdsfromlonglong((long long) i); list *slot_nodes = listCreate(); sds slot_nodes_str = sdsempty(); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE || n->replicate) continue; redisReply *reply = CLUSTER_MANAGER_COMMAND(n, "CLUSTER GETKEYSINSLOT %d %d", i, 1); if (!clusterManagerCheckRedisReply(n, reply, NULL)) { fixed = -1; if (reply) freeReplyObject(reply); goto cleanup; } assert(reply->type == REDIS_REPLY_ARRAY); if (reply->elements > 0) { listAddNodeTail(slot_nodes, n); if (listLength(slot_nodes) > 1) slot_nodes_str = sdscat(slot_nodes_str, ", "); slot_nodes_str = sdscatfmt(slot_nodes_str, "%s:%u", n->ip, n->port); } freeReplyObject(reply); } sdsfree(slot_nodes_str); dictAdd(clusterManagerUncoveredSlots, slot, slot_nodes); } } /* For every slot, take action depending on the actual condition: * 1) No node has keys for this slot. * 2) A single node has keys for this slot. * 3) Multiple nodes have keys for this slot. 
*/ none = listCreate(); single = listCreate(); multi = listCreate(); dictIterator *iter = dictGetIterator(clusterManagerUncoveredSlots); dictEntry *entry; while ((entry = dictNext(iter)) != NULL) { sds slot = (sds) dictGetKey(entry); list *nodes = (list *) dictGetVal(entry); switch (listLength(nodes)){ case 0: listAddNodeTail(none, slot); break; case 1: listAddNodeTail(single, slot); break; default: listAddNodeTail(multi, slot); break; } } dictReleaseIterator(iter); /* we want explicit manual confirmation from users for all the fix cases */ int ignore_force = 1; /* Handle case "1": keys in no node. */ if (listLength(none) > 0) { printf("The following uncovered slots have no keys " "across the cluster:\n"); clusterManagerPrintSlotsList(none); if (confirmWithYes("Fix these slots by covering with a random node?", ignore_force)) { listIter li; listNode *ln; listRewind(none, &li); while ((ln = listNext(&li)) != NULL) { sds slot = ln->value; int s = atoi(slot); clusterManagerNode *n = clusterManagerNodeMasterRandom(); clusterManagerLogInfo(">>> Covering slot %s with %s:%d\n", slot, n->ip, n->port); if (!clusterManagerSetSlotOwner(n, s, 0)) { fixed = -1; goto cleanup; } /* Since CLUSTER ADDSLOTS succeeded, we also update the slot * info into the node struct, in order to keep it synced */ n->slots[s] = 1; fixed++; } } } /* Handle case "2": keys only in one node. */ if (listLength(single) > 0) { printf("The following uncovered slots have keys in just one node:\n"); clusterManagerPrintSlotsList(single); if (confirmWithYes("Fix these slots by covering with those nodes?", ignore_force)) { listIter li; listNode *ln; listRewind(single, &li); while ((ln = listNext(&li)) != NULL) { sds slot = ln->value; int s = atoi(slot); dictEntry *entry = dictFind(clusterManagerUncoveredSlots, slot); assert(entry != NULL); list *nodes = (list *) dictGetVal(entry); listNode *fn = listFirst(nodes); assert(fn != NULL); clusterManagerNode *n = fn->value; clusterManagerLogInfo(">>> Covering slot %s with %s:%d\n", slot, n->ip, n->port); if (!clusterManagerSetSlotOwner(n, s, 0)) { fixed = -1; goto cleanup; } /* Since CLUSTER ADDSLOTS succeeded, we also update the slot * info into the node struct, in order to keep it synced */ n->slots[atoi(slot)] = 1; fixed++; } } } /* Handle case "3": keys in multiple nodes. */ if (listLength(multi) > 0) { printf("The following uncovered slots have keys in multiple nodes:\n"); clusterManagerPrintSlotsList(multi); if (confirmWithYes("Fix these slots by moving keys " "into a single node?", ignore_force)) { listIter li; listNode *ln; listRewind(multi, &li); while ((ln = listNext(&li)) != NULL) { sds slot = ln->value; dictEntry *entry = dictFind(clusterManagerUncoveredSlots, slot); assert(entry != NULL); list *nodes = (list *) dictGetVal(entry); int s = atoi(slot); clusterManagerNode *target = clusterManagerGetNodeWithMostKeysInSlot(nodes, s, NULL); if (target == NULL) { fixed = -1; goto cleanup; } clusterManagerLogInfo(">>> Covering slot %s moving keys " "to %s:%d\n", slot, target->ip, target->port); if (!clusterManagerSetSlotOwner(target, s, 1)) { fixed = -1; goto cleanup; } /* Since CLUSTER ADDSLOTS succeeded, we also update the slot * info into the node struct, in order to keep it synced */ target->slots[atoi(slot)] = 1; listIter nli; listNode *nln; listRewind(nodes, &nli); while ((nln = listNext(&nli)) != NULL) { clusterManagerNode *src = nln->value; if (src == target) continue; /* Assign the slot to target node in the source node. 
*/ if (!clusterManagerSetSlot(src, target, s, "NODE", NULL)) fixed = -1; if (fixed < 0) goto cleanup; /* Set the source node in 'importing' state * (even if we will actually migrate keys away) * in order to avoid receiving redirections * for MIGRATE. */ if (!clusterManagerSetSlot(src, target, s, "IMPORTING", NULL)) fixed = -1; if (fixed < 0) goto cleanup; int opts = CLUSTER_MANAGER_OPT_VERBOSE | CLUSTER_MANAGER_OPT_COLD; if (!clusterManagerMoveSlot(src, target, s, opts, NULL)) { fixed = -1; goto cleanup; } if (!clusterManagerClearSlotStatus(src, s)) fixed = -1; if (fixed < 0) goto cleanup; } fixed++; } } } cleanup: if (none) listRelease(none); if (single) listRelease(single); if (multi) listRelease(multi); return fixed; } /* Slot 'slot' was found to be in importing or migrating state in one or * more nodes. This function fixes this condition by migrating keys where * it seems more sensible. */ static int clusterManagerFixOpenSlot(int slot) { int force_fix = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_FIX_WITH_UNREACHABLE_MASTERS; if (cluster_manager.unreachable_masters > 0 && !force_fix) { clusterManagerLogWarn("*** Fixing open slots with %d unreachable masters is dangerous: redis-cli will assume that slots about masters that are not reachable are not covered, and will try to reassign them to the reachable nodes. This can cause data loss and is rarely what you want to do. If you really want to proceed use the --cluster-fix-with-unreachable-masters option.\n", cluster_manager.unreachable_masters); exit(1); } clusterManagerLogInfo(">>> Fixing open slot %d\n", slot); /* Try to obtain the current slot owner, according to the current * nodes configuration. */ int success = 1; list *owners = listCreate(); /* List of nodes claiming some ownership. it could be stating in the configuration to have the node ownership, or just holding keys for such slot. */ list *migrating = listCreate(); list *importing = listCreate(); sds migrating_str = sdsempty(); sds importing_str = sdsempty(); clusterManagerNode *owner = NULL; /* The obvious slot owner if any. */ /* Iterate all the nodes, looking for potential owners of this slot. */ listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; if (n->slots[slot]) { listAddNodeTail(owners, n); } else { redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER COUNTKEYSINSLOT %d", slot); success = clusterManagerCheckRedisReply(n, r, NULL); if (success && r->integer > 0) { clusterManagerLogWarn("*** Found keys about slot %d " "in non-owner node %s:%d!\n", slot, n->ip, n->port); listAddNodeTail(owners, n); } if (r) freeReplyObject(r); if (!success) goto cleanup; } } /* If we have only a single potential owner for this slot, * set it as "owner". */ if (listLength(owners) == 1) owner = listFirst(owners)->value; /* Scan the list of nodes again, in order to populate the * list of nodes in importing or migrating state for * this slot. */ listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; int is_migrating = 0, is_importing = 0; if (n->migrating) { for (int i = 0; i < n->migrating_count; i += 2) { sds migrating_slot = n->migrating[i]; if (atoi(migrating_slot) == slot) { char *sep = (listLength(migrating) == 0 ? 
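/* No separator before the first entry of the migrating list. */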
"" : ","); migrating_str = sdscatfmt(migrating_str, "%s%s:%u", sep, n->ip, n->port); listAddNodeTail(migrating, n); is_migrating = 1; break; } } } if (!is_migrating && n->importing) { for (int i = 0; i < n->importing_count; i += 2) { sds importing_slot = n->importing[i]; if (atoi(importing_slot) == slot) { char *sep = (listLength(importing) == 0 ? "" : ","); importing_str = sdscatfmt(importing_str, "%s%s:%u", sep, n->ip, n->port); listAddNodeTail(importing, n); is_importing = 1; break; } } } /* If the node is neither migrating nor importing and it's not * the owner, then is added to the importing list in case * it has keys in the slot. */ if (!is_migrating && !is_importing && n != owner) { redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER COUNTKEYSINSLOT %d", slot); success = clusterManagerCheckRedisReply(n, r, NULL); if (success && r->integer > 0) { clusterManagerLogWarn("*** Found keys about slot %d " "in node %s:%d!\n", slot, n->ip, n->port); char *sep = (listLength(importing) == 0 ? "" : ","); importing_str = sdscatfmt(importing_str, "%s%s:%u", sep, n->ip, n->port); listAddNodeTail(importing, n); } if (r) freeReplyObject(r); if (!success) goto cleanup; } } if (sdslen(migrating_str) > 0) printf("Set as migrating in: %s\n", migrating_str); if (sdslen(importing_str) > 0) printf("Set as importing in: %s\n", importing_str); /* If there is no slot owner, set as owner the node with the biggest * number of keys, among the set of migrating / importing nodes. */ if (owner == NULL) { clusterManagerLogInfo(">>> No single clear owner for the slot, " "selecting an owner by # of keys...\n"); owner = clusterManagerGetNodeWithMostKeysInSlot(cluster_manager.nodes, slot, NULL); // If we still don't have an owner, we can't fix it. if (owner == NULL) { clusterManagerLogErr("[ERR] Can't select a slot owner. " "Impossible to fix.\n"); success = 0; goto cleanup; } // Use ADDSLOTS to assign the slot. clusterManagerLogWarn("*** Configuring %s:%d as the slot owner\n", owner->ip, owner->port); success = clusterManagerClearSlotStatus(owner, slot); if (!success) goto cleanup; success = clusterManagerSetSlotOwner(owner, slot, 0); if (!success) goto cleanup; /* Since CLUSTER ADDSLOTS succeeded, we also update the slot * info into the node struct, in order to keep it synced */ owner->slots[slot] = 1; /* Remove the owner from the list of migrating/importing * nodes. */ clusterManagerRemoveNodeFromList(migrating, owner); clusterManagerRemoveNodeFromList(importing, owner); } /* If there are multiple owners of the slot, we need to fix it * so that a single node is the owner and all the other nodes * are in importing state. Later the fix can be handled by one * of the base cases above. * * Note that this case also covers multiple nodes having the slot * in migrating state, since migrating is a valid state only for * slot owners. */ if (listLength(owners) > 1) { /* Owner cannot be NULL at this point, since if there are more owners, * the owner has been set in the previous condition (owner == NULL). */ assert(owner != NULL); listRewind(owners, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n == owner) continue; success = clusterManagerDelSlot(n, slot, 1); if (!success) goto cleanup; n->slots[slot] = 0; /* Assign the slot to the owner in the node 'n' configuration.' */ success = clusterManagerSetSlot(n, owner, slot, "node", NULL); if (!success) goto cleanup; success = clusterManagerSetSlot(n, owner, slot, "importing", NULL); if (!success) goto cleanup; /* Avoid duplicates. 
*/ clusterManagerRemoveNodeFromList(importing, n); listAddNodeTail(importing, n); /* Ensure that the node is not in the migrating list. */ clusterManagerRemoveNodeFromList(migrating, n); } } int move_opts = CLUSTER_MANAGER_OPT_VERBOSE; /* Case 1: The slot is in migrating state in one node, and in * importing state in 1 node. That's trivial to address. */ if (listLength(migrating) == 1 && listLength(importing) == 1) { clusterManagerNode *src = listFirst(migrating)->value; clusterManagerNode *dst = listFirst(importing)->value; clusterManagerLogInfo(">>> Case 1: Moving slot %d from " "%s:%d to %s:%d\n", slot, src->ip, src->port, dst->ip, dst->port); move_opts |= CLUSTER_MANAGER_OPT_UPDATE; success = clusterManagerMoveSlot(src, dst, slot, move_opts, NULL); } /* Case 2: There are multiple nodes that claim the slot as importing, * they probably got keys about the slot after a restart so opened * the slot. In this case we just move all the keys to the owner * according to the configuration. */ else if (listLength(migrating) == 0 && listLength(importing) > 0) { clusterManagerLogInfo(">>> Case 2: Moving all the %d slot keys to its " "owner %s:%d\n", slot, owner->ip, owner->port); move_opts |= CLUSTER_MANAGER_OPT_COLD; listRewind(importing, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n == owner) continue; success = clusterManagerMoveSlot(n, owner, slot, move_opts, NULL); if (!success) goto cleanup; clusterManagerLogInfo(">>> Setting %d as STABLE in " "%s:%d\n", slot, n->ip, n->port); success = clusterManagerClearSlotStatus(n, slot); if (!success) goto cleanup; } /* Since the slot has been moved in "cold" mode, ensure that all the * other nodes update their own configuration about the slot itself. */ listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n == owner) continue; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; success = clusterManagerSetSlot(n, owner, slot, "NODE", NULL); if (!success) goto cleanup; } } /* Case 3: The slot is in migrating state in one node but multiple * other nodes claim to be in importing state and don't have any key in * the slot. We search for the importing node having the same ID as * the destination node of the migrating node. * In that case we move the slot from the migrating node to this node and * we close the importing states on all the other importing nodes. * If no importing node has the same ID as the destination node of the * migrating node, the slot's state is closed on both the migrating node * and the importing nodes. */ else if (listLength(migrating) == 1 && listLength(importing) > 1) { int try_to_fix = 1; clusterManagerNode *src = listFirst(migrating)->value; clusterManagerNode *dst = NULL; sds target_id = NULL; for (int i = 0; i < src->migrating_count; i += 2) { sds migrating_slot = src->migrating[i]; if (atoi(migrating_slot) == slot) { target_id = src->migrating[i + 1]; break; } } assert(target_id != NULL); listIter li; listNode *ln; listRewind(importing, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; int count = clusterManagerCountKeysInSlot(n, slot); if (count > 0) { try_to_fix = 0; break; } if (strcmp(n->name, target_id) == 0) dst = n; } if (!try_to_fix) goto unhandled_case; if (dst != NULL) { clusterManagerLogInfo(">>> Case 3: Moving slot %d from %s:%d to " "%s:%d and closing it on all the other " "importing nodes.\n", slot, src->ip, src->port, dst->ip, dst->port); /* Move the slot to the destination node. 
*/ success = clusterManagerMoveSlot(src, dst, slot, move_opts, NULL); if (!success) goto cleanup; /* Close slot on all the other importing nodes. */ listRewind(importing, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (dst == n) continue; success = clusterManagerClearSlotStatus(n, slot); if (!success) goto cleanup; } } else { clusterManagerLogInfo(">>> Case 3: Closing slot %d on both " "migrating and importing nodes.\n", slot); /* Close the slot on both the migrating node and the importing * nodes. */ success = clusterManagerClearSlotStatus(src, slot); if (!success) goto cleanup; listRewind(importing, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; success = clusterManagerClearSlotStatus(n, slot); if (!success) goto cleanup; } } } else { int try_to_close_slot = (listLength(importing) == 0 && listLength(migrating) == 1); if (try_to_close_slot) { clusterManagerNode *n = listFirst(migrating)->value; if (!owner || owner != n) { redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER GETKEYSINSLOT %d %d", slot, 10); success = clusterManagerCheckRedisReply(n, r, NULL); if (r) { if (success) try_to_close_slot = (r->elements == 0); freeReplyObject(r); } if (!success) goto cleanup; } } /* Case 4: There are no slots claiming to be in importing state, but * there is a migrating node that actually don't have any key or is the * slot owner. We can just close the slot, probably a reshard * interrupted in the middle. */ if (try_to_close_slot) { clusterManagerNode *n = listFirst(migrating)->value; clusterManagerLogInfo(">>> Case 4: Closing slot %d on %s:%d\n", slot, n->ip, n->port); redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER SETSLOT %d %s", slot, "STABLE"); success = clusterManagerCheckRedisReply(n, r, NULL); if (r) freeReplyObject(r); if (!success) goto cleanup; } else { unhandled_case: success = 0; clusterManagerLogErr("[ERR] Sorry, redis-cli can't fix this slot " "yet (work in progress). Slot is set as " "migrating in %s, as importing in %s, " "owner is %s:%d\n", migrating_str, importing_str, owner->ip, owner->port); } } cleanup: listRelease(owners); listRelease(migrating); listRelease(importing); sdsfree(migrating_str); sdsfree(importing_str); return success; } static int clusterManagerFixMultipleSlotOwners(int slot, list *owners) { clusterManagerLogInfo(">>> Fixing multiple owners for slot %d...\n", slot); int success = 0; assert(listLength(owners) > 1); clusterManagerNode *owner = clusterManagerGetNodeWithMostKeysInSlot(owners, slot, NULL); if (!owner) owner = listFirst(owners)->value; clusterManagerLogInfo(">>> Setting slot %d owner: %s:%d\n", slot, owner->ip, owner->port); /* Set the slot owner. */ if (!clusterManagerSetSlotOwner(owner, slot, 0)) return 0; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); /* Update configuration in all the other master nodes by assigning the slot * itself to the new owner, and by eventually migrating keys if the node * has keys for the slot. 
*/ while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n == owner) continue; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; int count = clusterManagerCountKeysInSlot(n, slot); success = (count >= 0); if (!success) break; clusterManagerDelSlot(n, slot, 1); if (!clusterManagerSetSlot(n, owner, slot, "node", NULL)) return 0; if (count > 0) { int opts = CLUSTER_MANAGER_OPT_VERBOSE | CLUSTER_MANAGER_OPT_COLD; success = clusterManagerMoveSlot(n, owner, slot, opts, NULL); if (!success) break; } } return success; } static int clusterManagerCheckCluster(int quiet) { listNode *ln = listFirst(cluster_manager.nodes); if (!ln) return 0; clusterManagerNode *node = ln->value; clusterManagerLogInfo(">>> Performing Cluster Check (using node %s:%d)\n", node->ip, node->port); int result = 1, consistent = 0; int do_fix = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_FIX; if (!quiet) clusterManagerShowNodes(); consistent = clusterManagerIsConfigConsistent(); if (!consistent) { sds err = sdsnew("[ERR] Nodes don't agree about configuration!"); clusterManagerOnError(err); result = 0; } else { clusterManagerLogOk("[OK] All nodes agree about slots " "configuration.\n"); } /* Check open slots */ clusterManagerLogInfo(">>> Check for open slots...\n"); listIter li; listRewind(cluster_manager.nodes, &li); int i; dict *open_slots = NULL; while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->migrating != NULL) { if (open_slots == NULL) open_slots = dictCreate(&clusterManagerDictType); sds errstr = sdsempty(); errstr = sdscatprintf(errstr, "[WARNING] Node %s:%d has slots in " "migrating state ", n->ip, n->port); for (i = 0; i < n->migrating_count; i += 2) { sds slot = n->migrating[i]; dictReplace(open_slots, slot, sdsdup(n->migrating[i + 1])); char *fmt = (i > 0 ? ",%S" : "%S"); errstr = sdscatfmt(errstr, fmt, slot); } errstr = sdscat(errstr, "."); clusterManagerOnError(errstr); } if (n->importing != NULL) { if (open_slots == NULL) open_slots = dictCreate(&clusterManagerDictType); sds errstr = sdsempty(); errstr = sdscatprintf(errstr, "[WARNING] Node %s:%d has slots in " "importing state ", n->ip, n->port); for (i = 0; i < n->importing_count; i += 2) { sds slot = n->importing[i]; dictReplace(open_slots, slot, sdsdup(n->importing[i + 1])); char *fmt = (i > 0 ? ",%S" : "%S"); errstr = sdscatfmt(errstr, fmt, slot); } errstr = sdscat(errstr, "."); clusterManagerOnError(errstr); } } if (open_slots != NULL) { result = 0; dictIterator *iter = dictGetIterator(open_slots); dictEntry *entry; sds errstr = sdsnew("[WARNING] The following slots are open: "); i = 0; while ((entry = dictNext(iter)) != NULL) { sds slot = (sds) dictGetKey(entry); char *fmt = (i++ > 0 ? ",%S" : "%S"); errstr = sdscatfmt(errstr, fmt, slot); } clusterManagerLogErr("%s.\n", (char *) errstr); sdsfree(errstr); if (do_fix) { /* Fix open slots. 
*/ dictReleaseIterator(iter); iter = dictGetIterator(open_slots); while ((entry = dictNext(iter)) != NULL) { sds slot = (sds) dictGetKey(entry); result = clusterManagerFixOpenSlot(atoi(slot)); if (!result) break; } } dictReleaseIterator(iter); dictRelease(open_slots); } clusterManagerLogInfo(">>> Check slots coverage...\n"); char slots[CLUSTER_MANAGER_SLOTS]; memset(slots, 0, CLUSTER_MANAGER_SLOTS); int coverage = clusterManagerGetCoveredSlots(slots); if (coverage == CLUSTER_MANAGER_SLOTS) { clusterManagerLogOk("[OK] All %d slots covered.\n", CLUSTER_MANAGER_SLOTS); } else { sds err = sdsempty(); err = sdscatprintf(err, "[ERR] Not all %d slots are " "covered by nodes.\n", CLUSTER_MANAGER_SLOTS); clusterManagerOnError(err); result = 0; if (do_fix/* && result*/) { dictType dtype = clusterManagerDictType; dtype.keyDestructor = dictSdsDestructor; dtype.valDestructor = dictListDestructor; clusterManagerUncoveredSlots = dictCreate(&dtype); int fixed = clusterManagerFixSlotsCoverage(slots); if (fixed > 0) result = 1; } } int search_multiple_owners = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_CHECK_OWNERS; if (search_multiple_owners) { /* Check whether there are multiple owners, even when slots are * fully covered and there are no open slots. */ clusterManagerLogInfo(">>> Check for multiple slot owners...\n"); int slot = 0, slots_with_multiple_owners = 0; for (; slot < CLUSTER_MANAGER_SLOTS; slot++) { listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); list *owners = listCreate(); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; if (n->slots[slot]) listAddNodeTail(owners, n); else { /* Nodes having keys for the slot will be considered * owners too. 
*/ int count = clusterManagerCountKeysInSlot(n, slot); if (count > 0) listAddNodeTail(owners, n); } } if (listLength(owners) > 1) { result = 0; clusterManagerLogErr("[WARNING] Slot %d has %d owners:\n", slot, listLength(owners)); listRewind(owners, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; clusterManagerLogErr(" %s:%d\n", n->ip, n->port); } slots_with_multiple_owners++; if (do_fix) { result = clusterManagerFixMultipleSlotOwners(slot, owners); if (!result) { clusterManagerLogErr("Failed to fix multiple owners " "for slot %d\n", slot); listRelease(owners); break; } else slots_with_multiple_owners--; } } listRelease(owners); } if (slots_with_multiple_owners == 0) clusterManagerLogOk("[OK] No multiple owners found.\n"); } return result; } static clusterManagerNode *clusterNodeForResharding(char *id, clusterManagerNode *target, int *raise_err) { clusterManagerNode *node = NULL; const char *invalid_node_msg = "*** The specified node (%s) is not known " "or not a master, please retry.\n"; node = clusterManagerNodeByName(id); *raise_err = 0; if (!node || node->flags & CLUSTER_MANAGER_FLAG_SLAVE) { clusterManagerLogErr(invalid_node_msg, id); *raise_err = 1; return NULL; } else if (target != NULL) { if (!strcmp(node->name, target->name)) { clusterManagerLogErr( "*** It is not possible to use " "the target node as " "source node.\n"); return NULL; } } return node; } static list *clusterManagerComputeReshardTable(list *sources, int numslots) { list *moved = listCreate(); int src_count = listLength(sources), i = 0, tot_slots = 0, j; clusterManagerNode **sorted = zmalloc(src_count * sizeof(*sorted)); listIter li; listNode *ln; listRewind(sources, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; tot_slots += node->slots_count; sorted[i++] = node; } qsort(sorted, src_count, sizeof(clusterManagerNode *), clusterManagerSlotCountCompareDesc); for (i = 0; i < src_count; i++) { clusterManagerNode *node = sorted[i]; float n = ((float) numslots / tot_slots * node->slots_count); if (i == 0) n = ceil(n); else n = floor(n); int max = (int) n, count = 0; for (j = 0; j < CLUSTER_MANAGER_SLOTS; j++) { int slot = node->slots[j]; if (!slot) continue; if (count >= max || (int)listLength(moved) >= numslots) break; clusterManagerReshardTableItem *item = zmalloc(sizeof(*item)); item->source = node; item->slot = j; listAddNodeTail(moved, item); count++; } } zfree(sorted); return moved; } static void clusterManagerShowReshardTable(list *table) { listIter li; listNode *ln; listRewind(table, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerReshardTableItem *item = ln->value; clusterManagerNode *n = item->source; printf(" Moving slot %d from %s\n", item->slot, (char *) n->name); } } static void clusterManagerReleaseReshardTable(list *table) { if (table != NULL) { listIter li; listNode *ln; listRewind(table, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerReshardTableItem *item = ln->value; zfree(item); } listRelease(table); } } static void clusterManagerLog(int level, const char* fmt, ...) 
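/* Print a log message to stdout, optionally wrapping it in an ANSI color
 * escape chosen by 'level' (info/warn/err/success) when colorized output
 * is enabled (CLUSTER_MANAGER_CMD_FLAG_COLOR). The color attributes are
 * reset right after the message is printed.
 *
 * Illustrative call, mirroring how the clusterManagerLog* helpers used
 * throughout this file presumably invoke it (the 'pending' counter below
 * is just a made-up example variable):
 *
 *   clusterManagerLog(CLUSTER_MANAGER_LOG_LVL_WARN,
 *                     "*** %d slots still need attention\n", pending);
 */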
{ int use_colors = (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_COLOR); if (use_colors) { printf("\033["); switch (level) { case CLUSTER_MANAGER_LOG_LVL_INFO: printf(LOG_COLOR_BOLD); break; case CLUSTER_MANAGER_LOG_LVL_WARN: printf(LOG_COLOR_YELLOW); break; case CLUSTER_MANAGER_LOG_LVL_ERR: printf(LOG_COLOR_RED); break; case CLUSTER_MANAGER_LOG_LVL_SUCCESS: printf(LOG_COLOR_GREEN); break; default: printf(LOG_COLOR_RESET); break; } } va_list ap; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); if (use_colors) printf("\033[" LOG_COLOR_RESET); } static void clusterManagerNodeArrayInit(clusterManagerNodeArray *array, int alloc_len) { array->nodes = zcalloc(alloc_len * sizeof(clusterManagerNode*)); array->alloc = array->nodes; array->len = alloc_len; array->count = 0; } /* Reset array->nodes to the original array allocation and re-count non-NULL * nodes. */ static void clusterManagerNodeArrayReset(clusterManagerNodeArray *array) { if (array->nodes > array->alloc) { array->len = array->nodes - array->alloc; array->nodes = array->alloc; array->count = 0; int i = 0; for(; i < array->len; i++) { if (array->nodes[i] != NULL) array->count++; } } } /* Shift array->nodes and store the shifted node into 'nodeptr'. */ static void clusterManagerNodeArrayShift(clusterManagerNodeArray *array, clusterManagerNode **nodeptr) { assert(array->len > 0); /* If the first node to be shifted is not NULL, decrement count. */ if (*array->nodes != NULL) array->count--; /* Store the first node to be shifted into 'nodeptr'. */ *nodeptr = *array->nodes; /* Shift the nodes array and decrement length. */ array->nodes++; array->len--; } static void clusterManagerNodeArrayAdd(clusterManagerNodeArray *array, clusterManagerNode *node) { assert(array->len > 0); assert(node != NULL); assert(array->count < array->len); array->nodes[array->count++] = node; } static void clusterManagerPrintNotEmptyNodeError(clusterManagerNode *node, char *err) { char *msg; if (err) msg = err; else { msg = "is not empty. Either the node already knows other " "nodes (check with CLUSTER NODES) or contains some " "key in database 0."; } clusterManagerLogErr("[ERR] Node %s:%d %s\n", node->ip, node->port, msg); } static void clusterManagerPrintNotClusterNodeError(clusterManagerNode *node, char *err) { char *msg = (err ? 
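/* Use the specific error message when one was provided by the caller. */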
err : "is not configured as a cluster node."); clusterManagerLogErr("[ERR] Node %s:%d %s\n", node->ip, node->port, msg); } /* Execute redis-cli in Cluster Manager mode */ static void clusterManagerMode(clusterManagerCommandProc *proc) { int argc = config.cluster_manager_command.argc; char **argv = config.cluster_manager_command.argv; cluster_manager.nodes = NULL; if (!proc(argc, argv)) goto cluster_manager_err; freeClusterManager(); exit(0); cluster_manager_err: freeClusterManager(); exit(1); } /* Cluster Manager Commands */ static int clusterManagerCommandCreate(int argc, char **argv) { int i, j, success = 1; cluster_manager.nodes = listCreate(); for (i = 0; i < argc; i++) { char *addr = argv[i]; char *c = strrchr(addr, '@'); if (c != NULL) *c = '\0'; c = strrchr(addr, ':'); if (c == NULL) { fprintf(stderr, "Invalid address format: %s\n", addr); return 0; } *c = '\0'; char *ip = addr; int port = atoi(++c); clusterManagerNode *node = clusterManagerNewNode(ip, port); if (!clusterManagerNodeConnect(node)) { freeClusterManagerNode(node); return 0; } char *err = NULL; if (!clusterManagerNodeIsCluster(node, &err)) { clusterManagerPrintNotClusterNodeError(node, err); if (err) zfree(err); freeClusterManagerNode(node); return 0; } err = NULL; if (!clusterManagerNodeLoadInfo(node, 0, &err)) { if (err) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, err); zfree(err); } freeClusterManagerNode(node); return 0; } err = NULL; if (!clusterManagerNodeIsEmpty(node, &err)) { clusterManagerPrintNotEmptyNodeError(node, err); if (err) zfree(err); freeClusterManagerNode(node); return 0; } listAddNodeTail(cluster_manager.nodes, node); } int node_len = cluster_manager.nodes->len; int replicas = config.cluster_manager_command.replicas; int masters_count = CLUSTER_MANAGER_MASTERS_COUNT(node_len, replicas); if (masters_count < 3) { clusterManagerLogErr( "*** ERROR: Invalid configuration for cluster creation.\n" "*** Redis Cluster requires at least 3 master nodes.\n" "*** This is not possible with %d nodes and %d replicas per node.", node_len, replicas); clusterManagerLogErr("\n*** At least %d nodes are required.\n", 3 * (replicas + 1)); return 0; } clusterManagerLogInfo(">>> Performing hash slots allocation " "on %d nodes...\n", node_len); int interleaved_len = 0, ip_count = 0; clusterManagerNode **interleaved = zcalloc(node_len*sizeof(**interleaved)); char **ips = zcalloc(node_len * sizeof(char*)); clusterManagerNodeArray *ip_nodes = zcalloc(node_len * sizeof(*ip_nodes)); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; int found = 0; for (i = 0; i < ip_count; i++) { char *ip = ips[i]; if (!strcmp(ip, n->ip)) { found = 1; break; } } if (!found) { ips[ip_count++] = n->ip; } clusterManagerNodeArray *node_array = &(ip_nodes[i]); if (node_array->nodes == NULL) clusterManagerNodeArrayInit(node_array, node_len); clusterManagerNodeArrayAdd(node_array, n); } while (interleaved_len < node_len) { for (i = 0; i < ip_count; i++) { clusterManagerNodeArray *node_array = &(ip_nodes[i]); if (node_array->count > 0) { clusterManagerNode *n = NULL; clusterManagerNodeArrayShift(node_array, &n); interleaved[interleaved_len++] = n; } } } clusterManagerNode **masters = interleaved; interleaved += masters_count; interleaved_len -= masters_count; float slots_per_node = CLUSTER_MANAGER_SLOTS / (float) masters_count; long first = 0; float cursor = 0.0f; for (i = 0; i < masters_count; i++) { clusterManagerNode *master = masters[i]; long last = lround(cursor 
+ slots_per_node - 1); if (last > CLUSTER_MANAGER_SLOTS || i == (masters_count - 1)) last = CLUSTER_MANAGER_SLOTS - 1; if (last < first) last = first; printf("Master[%d] -> Slots %ld - %ld\n", i, first, last); master->slots_count = 0; for (j = first; j <= last; j++) { master->slots[j] = 1; master->slots_count++; } master->dirty = 1; first = last + 1; cursor += slots_per_node; } /* Rotating the list sometimes helps to get better initial * anti-affinity before the optimizer runs. */ clusterManagerNode *first_node = interleaved[0]; for (i = 0; i < (interleaved_len - 1); i++) interleaved[i] = interleaved[i + 1]; interleaved[interleaved_len - 1] = first_node; int assign_unused = 0, available_count = interleaved_len; assign_replicas: for (i = 0; i < masters_count; i++) { clusterManagerNode *master = masters[i]; int assigned_replicas = 0; while (assigned_replicas < replicas) { if (available_count == 0) break; clusterManagerNode *found = NULL, *slave = NULL; int firstNodeIdx = -1; for (j = 0; j < interleaved_len; j++) { clusterManagerNode *n = interleaved[j]; if (n == NULL) continue; if (strcmp(n->ip, master->ip)) { found = n; interleaved[j] = NULL; break; } if (firstNodeIdx < 0) firstNodeIdx = j; } if (found) slave = found; else if (firstNodeIdx >= 0) { slave = interleaved[firstNodeIdx]; interleaved_len -= (firstNodeIdx + 1); interleaved += (firstNodeIdx + 1); } if (slave != NULL) { assigned_replicas++; available_count--; if (slave->replicate) sdsfree(slave->replicate); slave->replicate = sdsnew(master->name); slave->dirty = 1; } else break; printf("Adding replica %s:%d to %s:%d\n", slave->ip, slave->port, master->ip, master->port); if (assign_unused) break; } } if (!assign_unused && available_count > 0) { assign_unused = 1; printf("Adding extra replicas...\n"); goto assign_replicas; } for (i = 0; i < ip_count; i++) { clusterManagerNodeArray *node_array = ip_nodes + i; clusterManagerNodeArrayReset(node_array); } clusterManagerOptimizeAntiAffinity(ip_nodes, ip_count); clusterManagerShowNodes(); int ignore_force = 0; if (confirmWithYes("Can I set the above configuration?", ignore_force)) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; char *err = NULL; int flushed = clusterManagerFlushNodeConfig(node, &err); if (!flushed && node->dirty && !node->replicate) { if (err != NULL) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, err); zfree(err); } success = 0; goto cleanup; } else if (err != NULL) zfree(err); } clusterManagerLogInfo(">>> Nodes configuration updated\n"); clusterManagerLogInfo(">>> Assign a different config epoch to " "each node\n"); int config_epoch = 1; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; redisReply *reply = NULL; reply = CLUSTER_MANAGER_COMMAND(node, "cluster set-config-epoch %d", config_epoch++); if (reply != NULL) freeReplyObject(reply); } clusterManagerLogInfo(">>> Sending CLUSTER MEET messages to join " "the cluster\n"); clusterManagerNode *first = NULL; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; if (first == NULL) { first = node; continue; } redisReply *reply = NULL; reply = CLUSTER_MANAGER_COMMAND(node, "cluster meet %s %d", first->ip, first->port); int is_err = 0; if (reply != NULL) { if ((is_err = reply->type == REDIS_REPLY_ERROR)) CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, reply->str); freeReplyObject(reply); } else { is_err = 1; fprintf(stderr, "Failed to send 
CLUSTER MEET command.\n"); } if (is_err) { success = 0; goto cleanup; } } /* Give one second for the join to start, in order to avoid that * waiting for cluster join will find all the nodes agree about * the config as they are still empty with unassigned slots. */ sleep(1); clusterManagerWaitForClusterJoin(); /* Useful for the replicas */ listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; if (!node->dirty) continue; char *err = NULL; int flushed = clusterManagerFlushNodeConfig(node, &err); if (!flushed && !node->replicate) { if (err != NULL) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(node, err); zfree(err); } success = 0; goto cleanup; } } // Reset Nodes listRewind(cluster_manager.nodes, &li); clusterManagerNode *first_node = NULL; while ((ln = listNext(&li)) != NULL) { clusterManagerNode *node = ln->value; if (!first_node) first_node = node; else freeClusterManagerNode(node); } listEmpty(cluster_manager.nodes); if (!clusterManagerLoadInfoFromNode(first_node)) { success = 0; goto cleanup; } clusterManagerCheckCluster(0); } cleanup: /* Free everything */ zfree(masters); zfree(ips); for (i = 0; i < node_len; i++) { clusterManagerNodeArray *node_array = ip_nodes + i; CLUSTER_MANAGER_NODE_ARRAY_FREE(node_array); } zfree(ip_nodes); return success; } static int clusterManagerCommandAddNode(int argc, char **argv) { int success = 1; redisReply *reply = NULL; char *ref_ip = NULL, *ip = NULL; int ref_port = 0, port = 0; if (!getClusterHostFromCmdArgs(argc - 1, argv + 1, &ref_ip, &ref_port)) goto invalid_args; if (!getClusterHostFromCmdArgs(1, argv, &ip, &port)) goto invalid_args; clusterManagerLogInfo(">>> Adding node %s:%d to cluster %s:%d\n", ip, port, ref_ip, ref_port); // Check the existing cluster clusterManagerNode *refnode = clusterManagerNewNode(ref_ip, ref_port); if (!clusterManagerLoadInfoFromNode(refnode)) return 0; if (!clusterManagerCheckCluster(0)) return 0; /* If --cluster-master-id was specified, try to resolve it now so that we * abort before starting with the node configuration. 
*/ clusterManagerNode *master_node = NULL; if (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_SLAVE) { char *master_id = config.cluster_manager_command.master_id; if (master_id != NULL) { master_node = clusterManagerNodeByName(master_id); if (master_node == NULL) { clusterManagerLogErr("[ERR] No such master ID %s\n", master_id); return 0; } } else { master_node = clusterManagerNodeWithLeastReplicas(); assert(master_node != NULL); printf("Automatically selected master %s:%d\n", master_node->ip, master_node->port); } } // Add the new node clusterManagerNode *new_node = clusterManagerNewNode(ip, port); int added = 0; if (!clusterManagerNodeConnect(new_node)) { clusterManagerLogErr("[ERR] Sorry, can't connect to node %s:%d\n", ip, port); success = 0; goto cleanup; } char *err = NULL; if (!(success = clusterManagerNodeIsCluster(new_node, &err))) { clusterManagerPrintNotClusterNodeError(new_node, err); if (err) zfree(err); goto cleanup; } if (!clusterManagerNodeLoadInfo(new_node, 0, &err)) { if (err) { CLUSTER_MANAGER_PRINT_REPLY_ERROR(new_node, err); zfree(err); } success = 0; goto cleanup; } if (!(success = clusterManagerNodeIsEmpty(new_node, &err))) { clusterManagerPrintNotEmptyNodeError(new_node, err); if (err) zfree(err); goto cleanup; } clusterManagerNode *first = listFirst(cluster_manager.nodes)->value; listAddNodeTail(cluster_manager.nodes, new_node); added = 1; // Send CLUSTER MEET command to the new node clusterManagerLogInfo(">>> Send CLUSTER MEET to node %s:%d to make it " "join the cluster.\n", ip, port); reply = CLUSTER_MANAGER_COMMAND(new_node, "CLUSTER MEET %s %d", first->ip, first->port); if (!(success = clusterManagerCheckRedisReply(new_node, reply, NULL))) goto cleanup; /* Additional configuration is needed if the node is added as a slave. */ if (master_node) { sleep(1); clusterManagerWaitForClusterJoin(); clusterManagerLogInfo(">>> Configure node as replica of %s:%d.\n", master_node->ip, master_node->port); freeReplyObject(reply); reply = CLUSTER_MANAGER_COMMAND(new_node, "CLUSTER REPLICATE %s", master_node->name); if (!(success = clusterManagerCheckRedisReply(new_node, reply, NULL))) goto cleanup; } clusterManagerLogOk("[OK] New node added correctly.\n"); cleanup: if (!added && new_node) freeClusterManagerNode(new_node); if (reply) freeReplyObject(reply); return success; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandDeleteNode(int argc, char **argv) { UNUSED(argc); int success = 1; int port = 0; char *ip = NULL; if (!getClusterHostFromCmdArgs(1, argv, &ip, &port)) goto invalid_args; char *node_id = argv[1]; clusterManagerLogInfo(">>> Removing node %s from cluster %s:%d\n", node_id, ip, port); clusterManagerNode *ref_node = clusterManagerNewNode(ip, port); clusterManagerNode *node = NULL; // Load cluster information if (!clusterManagerLoadInfoFromNode(ref_node)) return 0; // Check if the node exists and is not empty node = clusterManagerNodeByName(node_id); if (node == NULL) { clusterManagerLogErr("[ERR] No such node ID %s\n", node_id); return 0; } if (node->slots_count != 0) { clusterManagerLogErr("[ERR] Node %s:%d is not empty! 
Reshard data " "away and try again.\n", node->ip, node->port); return 0; } // Send CLUSTER FORGET to all the nodes but the node to remove clusterManagerLogInfo(">>> Sending CLUSTER FORGET messages to the " "cluster...\n"); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n == node) continue; if (n->replicate && !strcasecmp(n->replicate, node_id)) { // Reconfigure the slave to replicate with some other node clusterManagerNode *master = clusterManagerNodeWithLeastReplicas(); assert(master != NULL); clusterManagerLogInfo(">>> %s:%d as replica of %s:%d\n", n->ip, n->port, master->ip, master->port); redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER REPLICATE %s", master->name); success = clusterManagerCheckRedisReply(n, r, NULL); if (r) freeReplyObject(r); if (!success) return 0; } redisReply *r = CLUSTER_MANAGER_COMMAND(n, "CLUSTER FORGET %s", node_id); success = clusterManagerCheckRedisReply(n, r, NULL); if (r) freeReplyObject(r); if (!success) return 0; } /* Finally send CLUSTER RESET to the node. */ clusterManagerLogInfo(">>> Sending CLUSTER RESET SOFT to the " "deleted node.\n"); redisReply *r = redisCommand(node->context, "CLUSTER RESET %s", "SOFT"); success = clusterManagerCheckRedisReply(node, r, NULL); if (r) freeReplyObject(r); return success; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandInfo(int argc, char **argv) { int port = 0; char *ip = NULL; if (!getClusterHostFromCmdArgs(argc, argv, &ip, &port)) goto invalid_args; clusterManagerNode *node = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(node)) return 0; clusterManagerShowClusterInfo(); return 1; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandCheck(int argc, char **argv) { int port = 0; char *ip = NULL; if (!getClusterHostFromCmdArgs(argc, argv, &ip, &port)) goto invalid_args; clusterManagerNode *node = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(node)) return 0; clusterManagerShowClusterInfo(); return clusterManagerCheckCluster(0); invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandFix(int argc, char **argv) { config.cluster_manager_command.flags |= CLUSTER_MANAGER_CMD_FLAG_FIX; return clusterManagerCommandCheck(argc, argv); } static int clusterManagerCommandReshard(int argc, char **argv) { int port = 0; char *ip = NULL; if (!getClusterHostFromCmdArgs(argc, argv, &ip, &port)) goto invalid_args; clusterManagerNode *node = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(node)) return 0; clusterManagerCheckCluster(0); if (cluster_manager.errors && listLength(cluster_manager.errors) > 0) { fflush(stdout); fprintf(stderr, "*** Please fix your cluster problems before resharding\n"); return 0; } int slots = config.cluster_manager_command.slots; if (!slots) { while (slots <= 0 || slots > CLUSTER_MANAGER_SLOTS) { printf("How many slots do you want to move (from 1 to %d)? 
", CLUSTER_MANAGER_SLOTS); fflush(stdout); char buf[6]; int nread = read(fileno(stdin),buf,6); if (nread <= 0) continue; int last_idx = nread - 1; if (buf[last_idx] != '\n') { int ch; while ((ch = getchar()) != '\n' && ch != EOF) {} } buf[last_idx] = '\0'; slots = atoi(buf); } } char buf[255]; char *to = config.cluster_manager_command.to, *from = config.cluster_manager_command.from; while (to == NULL) { printf("What is the receiving node ID? "); fflush(stdout); int nread = read(fileno(stdin),buf,255); if (nread <= 0) continue; int last_idx = nread - 1; if (buf[last_idx] != '\n') { int ch; while ((ch = getchar()) != '\n' && ch != EOF) {} } buf[last_idx] = '\0'; if (strlen(buf) > 0) to = buf; } int raise_err = 0; clusterManagerNode *target = clusterNodeForResharding(to, NULL, &raise_err); if (target == NULL) return 0; list *sources = listCreate(); list *table = NULL; int all = 0, result = 1; if (from == NULL) { printf("Please enter all the source node IDs.\n"); printf(" Type 'all' to use all the nodes as source nodes for " "the hash slots.\n"); printf(" Type 'done' once you entered all the source nodes IDs.\n"); while (1) { printf("Source node #%lu: ", listLength(sources) + 1); fflush(stdout); int nread = read(fileno(stdin),buf,255); if (nread <= 0) continue; int last_idx = nread - 1; if (buf[last_idx] != '\n') { int ch; while ((ch = getchar()) != '\n' && ch != EOF) {} } buf[last_idx] = '\0'; if (!strcmp(buf, "done")) break; else if (!strcmp(buf, "all")) { all = 1; break; } else { clusterManagerNode *src = clusterNodeForResharding(buf, target, &raise_err); if (src != NULL) listAddNodeTail(sources, src); else if (raise_err) { result = 0; goto cleanup; } } } } else { char *p; while((p = strchr(from, ',')) != NULL) { *p = '\0'; if (!strcmp(from, "all")) { all = 1; break; } else { clusterManagerNode *src = clusterNodeForResharding(from, target, &raise_err); if (src != NULL) listAddNodeTail(sources, src); else if (raise_err) { result = 0; goto cleanup; } } from = p + 1; } /* Check if there's still another source to process. */ if (!all && strlen(from) > 0) { if (!strcmp(from, "all")) all = 1; if (!all) { clusterManagerNode *src = clusterNodeForResharding(from, target, &raise_err); if (src != NULL) listAddNodeTail(sources, src); else if (raise_err) { result = 0; goto cleanup; } } } } listIter li; listNode *ln; if (all) { listEmpty(sources); listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE || n->replicate) continue; if (!sdscmp(n->name, target->name)) continue; listAddNodeTail(sources, n); } } if (listLength(sources) == 0) { fprintf(stderr, "*** No source nodes given, operation aborted.\n"); result = 0; goto cleanup; } printf("\nReady to move %d slots.\n", slots); printf(" Source nodes:\n"); listRewind(sources, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *src = ln->value; sds info = clusterManagerNodeInfo(src, 4); printf("%s\n", info); sdsfree(info); } printf(" Destination node:\n"); sds info = clusterManagerNodeInfo(target, 4); printf("%s\n", info); sdsfree(info); table = clusterManagerComputeReshardTable(sources, slots); printf(" Resharding plan:\n"); clusterManagerShowReshardTable(table); if (!(config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_YES)) { printf("Do you want to proceed with the proposed " "reshard plan (yes/no)? 
"); fflush(stdout); char buf[4]; int nread = read(fileno(stdin),buf,4); buf[3] = '\0'; if (nread <= 0 || strcmp("yes", buf) != 0) { result = 0; goto cleanup; } } int opts = CLUSTER_MANAGER_OPT_VERBOSE; listRewind(table, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerReshardTableItem *item = ln->value; char *err = NULL; result = clusterManagerMoveSlot(item->source, target, item->slot, opts, &err); if (!result) { if (err != NULL) { //clusterManagerLogErr("\n%s\n", err); zfree(err); } goto cleanup; } } cleanup: listRelease(sources); clusterManagerReleaseReshardTable(table); return result; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandRebalance(int argc, char **argv) { int port = 0; char *ip = NULL; clusterManagerNode **weightedNodes = NULL; list *involved = NULL; if (!getClusterHostFromCmdArgs(argc, argv, &ip, &port)) goto invalid_args; clusterManagerNode *node = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(node)) return 0; int result = 1, i; if (config.cluster_manager_command.weight != NULL) { for (i = 0; i < config.cluster_manager_command.weight_argc; i++) { char *name = config.cluster_manager_command.weight[i]; char *p = strchr(name, '='); if (p == NULL) { result = 0; goto cleanup; } *p = '\0'; float w = atof(++p); clusterManagerNode *n = clusterManagerNodeByAbbreviatedName(name); if (n == NULL) { clusterManagerLogErr("*** No such master node %s\n", name); result = 0; goto cleanup; } n->weight = w; } } float total_weight = 0; int nodes_involved = 0; int use_empty = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_EMPTYMASTER; involved = listCreate(); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); /* Compute the total cluster weight. */ while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE || n->replicate) continue; if (!use_empty && n->slots_count == 0) { n->weight = 0; continue; } total_weight += n->weight; nodes_involved++; listAddNodeTail(involved, n); } weightedNodes = zmalloc(nodes_involved * sizeof(clusterManagerNode *)); if (weightedNodes == NULL) goto cleanup; /* Check cluster, only proceed if it looks sane. */ clusterManagerCheckCluster(1); if (cluster_manager.errors && listLength(cluster_manager.errors) > 0) { clusterManagerLogErr("*** Please fix your cluster problems " "before rebalancing\n"); result = 0; goto cleanup; } /* Calculate the slots balance for each node. It's the number of * slots the node should lose (if positive) or gain (if negative) * in order to be balanced. */ int threshold_reached = 0, total_balance = 0; float threshold = config.cluster_manager_command.threshold; i = 0; listRewind(involved, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; weightedNodes[i++] = n; int expected = (int) (((float)CLUSTER_MANAGER_SLOTS / total_weight) * n->weight); n->balance = n->slots_count - expected; total_balance += n->balance; /* Compute the percentage of difference between the * expected number of slots and the real one, to see * if it's over the threshold specified by the user. */ int over_threshold = 0; if (threshold > 0) { if (n->slots_count > 0) { float err_perc = fabs((100-(100.0*expected/n->slots_count))); if (err_perc > threshold) over_threshold = 1; } else if (expected > 1) { over_threshold = 1; } } if (over_threshold) threshold_reached = 1; } if (!threshold_reached) { clusterManagerLogWarn("*** No rebalancing needed! 
" "All nodes are within the %.2f%% threshold.\n", config.cluster_manager_command.threshold); goto cleanup; } /* Because of rounding, it is possible that the balance of all nodes * summed does not give 0. Make sure that nodes that have to provide * slots are always matched by nodes receiving slots. */ while (total_balance > 0) { listRewind(involved, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->balance <= 0 && total_balance > 0) { n->balance--; total_balance--; } } } /* Sort nodes by their slots balance. */ qsort(weightedNodes, nodes_involved, sizeof(clusterManagerNode *), clusterManagerCompareNodeBalance); clusterManagerLogInfo(">>> Rebalancing across %d nodes. " "Total weight = %.2f\n", nodes_involved, total_weight); if (config.verbose) { for (i = 0; i < nodes_involved; i++) { clusterManagerNode *n = weightedNodes[i]; printf("%s:%d balance is %d slots\n", n->ip, n->port, n->balance); } } /* Now we have at the start of the 'sn' array nodes that should get * slots, at the end nodes that must give slots. * We take two indexes, one at the start, and one at the end, * incrementing or decrementing the indexes accordingly til we * find nodes that need to get/provide slots. */ int dst_idx = 0; int src_idx = nodes_involved - 1; int simulate = config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_SIMULATE; while (dst_idx < src_idx) { clusterManagerNode *dst = weightedNodes[dst_idx]; clusterManagerNode *src = weightedNodes[src_idx]; int db = abs(dst->balance); int sb = abs(src->balance); int numslots = (db < sb ? db : sb); if (numslots > 0) { printf("Moving %d slots from %s:%d to %s:%d\n", numslots, src->ip, src->port, dst->ip, dst->port); /* Actually move the slots. */ list *lsrc = listCreate(), *table = NULL; listAddNodeTail(lsrc, src); table = clusterManagerComputeReshardTable(lsrc, numslots); listRelease(lsrc); int table_len = (int) listLength(table); if (!table || table_len != numslots) { clusterManagerLogErr("*** Assertion failed: Reshard table " "!= number of slots"); result = 0; goto end_move; } if (simulate) { for (i = 0; i < table_len; i++) printf("#"); } else { int opts = CLUSTER_MANAGER_OPT_QUIET | CLUSTER_MANAGER_OPT_UPDATE; listRewind(table, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerReshardTableItem *item = ln->value; result = clusterManagerMoveSlot(item->source, dst, item->slot, opts, NULL); if (!result) goto end_move; printf("#"); fflush(stdout); } } printf("\n"); end_move: clusterManagerReleaseReshardTable(table); if (!result) goto cleanup; } /* Update nodes balance. 
*/ dst->balance += numslots; src->balance -= numslots; if (dst->balance == 0) dst_idx++; if (src->balance == 0) src_idx --; } cleanup: if (involved != NULL) listRelease(involved); if (weightedNodes != NULL) zfree(weightedNodes); return result; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandSetTimeout(int argc, char **argv) { UNUSED(argc); int port = 0; char *ip = NULL; if (!getClusterHostFromCmdArgs(1, argv, &ip, &port)) goto invalid_args; int timeout = atoi(argv[1]); if (timeout < 100) { fprintf(stderr, "Setting a node timeout of less than 100 " "milliseconds is a bad idea.\n"); return 0; } // Load cluster information clusterManagerNode *node = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(node)) return 0; int ok_count = 0, err_count = 0; clusterManagerLogInfo(">>> Reconfiguring node timeout in every " "cluster node...\n"); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; char *err = NULL; redisReply *reply = CLUSTER_MANAGER_COMMAND(n, "CONFIG %s %s %d", "SET", "cluster-node-timeout", timeout); if (reply == NULL) goto reply_err; int ok = clusterManagerCheckRedisReply(n, reply, &err); freeReplyObject(reply); if (!ok) goto reply_err; reply = CLUSTER_MANAGER_COMMAND(n, "CONFIG %s", "REWRITE"); if (reply == NULL) goto reply_err; ok = clusterManagerCheckRedisReply(n, reply, &err); freeReplyObject(reply); if (!ok) goto reply_err; clusterManagerLogWarn("*** New timeout set for %s:%d\n", n->ip, n->port); ok_count++; continue; reply_err:; int need_free = 0; if (err == NULL) err = ""; else need_free = 1; clusterManagerLogErr("ERR setting node-timeout for %s:%d: %s\n", n->ip, n->port, err); if (need_free) zfree(err); err_count++; } clusterManagerLogInfo(">>> New node timeout set. %d OK, %d ERR.\n", ok_count, err_count); return 1; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandImport(int argc, char **argv) { int success = 1; int port = 0, src_port = 0; char *ip = NULL, *src_ip = NULL; char *invalid_args_msg = NULL; sds cmdfmt = NULL; if (!getClusterHostFromCmdArgs(argc, argv, &ip, &port)) { invalid_args_msg = CLUSTER_MANAGER_INVALID_HOST_ARG; goto invalid_args; } if (config.cluster_manager_command.from == NULL) { invalid_args_msg = "[ERR] Option '--cluster-from' is required for " "subcommand 'import'.\n"; goto invalid_args; } char *src_host[] = {config.cluster_manager_command.from}; if (!getClusterHostFromCmdArgs(1, src_host, &src_ip, &src_port)) { invalid_args_msg = "[ERR] Invalid --cluster-from host. You need to " "pass a valid address (e.g. 127.0.0.1:7000).\n"; goto invalid_args; } clusterManagerLogInfo(">>> Importing data from %s:%d to cluster %s:%d\n", src_ip, src_port, ip, port); clusterManagerNode *refnode = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(refnode)) return 0; if (!clusterManagerCheckCluster(0)) return 0; char *reply_err = NULL; redisReply *src_reply = NULL; // Connect to the source node. redisContext *src_ctx = redisConnect(src_ip, src_port); if (src_ctx->err) { success = 0; fprintf(stderr,"Could not connect to Redis at %s:%d: %s.\n", src_ip, src_port, src_ctx->errstr); goto cleanup; } // Auth for the source node. 
char *from_user = config.cluster_manager_command.from_user; char *from_pass = config.cluster_manager_command.from_pass; if (cliAuth(src_ctx, from_user, from_pass) == REDIS_ERR) { success = 0; goto cleanup; } src_reply = reconnectingRedisCommand(src_ctx, "INFO"); if (!src_reply || src_reply->type == REDIS_REPLY_ERROR) { if (src_reply && src_reply->str) reply_err = src_reply->str; success = 0; goto cleanup; } if (getLongInfoField(src_reply->str, "cluster_enabled")) { clusterManagerLogErr("[ERR] The source node should not be a " "cluster node.\n"); success = 0; goto cleanup; } freeReplyObject(src_reply); src_reply = reconnectingRedisCommand(src_ctx, "DBSIZE"); if (!src_reply || src_reply->type == REDIS_REPLY_ERROR) { if (src_reply && src_reply->str) reply_err = src_reply->str; success = 0; goto cleanup; } int size = src_reply->integer, i; clusterManagerLogWarn("*** Importing %d keys from DB 0\n", size); // Build a slot -> node map clusterManagerNode *slots_map[CLUSTER_MANAGER_SLOTS]; memset(slots_map, 0, sizeof(slots_map)); listIter li; listNode *ln; for (i = 0; i < CLUSTER_MANAGER_SLOTS; i++) { listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if (n->flags & CLUSTER_MANAGER_FLAG_SLAVE) continue; if (n->slots_count == 0) continue; if (n->slots[i]) { slots_map[i] = n; break; } } } cmdfmt = sdsnew("MIGRATE %s %d %s %d %d"); if (config.auth) { if (config.user) { cmdfmt = sdscatfmt(cmdfmt," AUTH2 %s %s", config.user, config.auth); } else { cmdfmt = sdscatfmt(cmdfmt," AUTH %s", config.auth); } } if (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_COPY) cmdfmt = sdscat(cmdfmt," COPY"); if (config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_REPLACE) cmdfmt = sdscat(cmdfmt," REPLACE"); /* Use SCAN to iterate over the keys, migrating to the * right node as needed. 
*/ int cursor = -999, timeout = config.cluster_manager_command.timeout; while (cursor != 0) { if (cursor < 0) cursor = 0; freeReplyObject(src_reply); src_reply = reconnectingRedisCommand(src_ctx, "SCAN %d COUNT %d", cursor, 1000); if (!src_reply || src_reply->type == REDIS_REPLY_ERROR) { if (src_reply && src_reply->str) reply_err = src_reply->str; success = 0; goto cleanup; } assert(src_reply->type == REDIS_REPLY_ARRAY); assert(src_reply->elements >= 2); assert(src_reply->element[1]->type == REDIS_REPLY_ARRAY); if (src_reply->element[0]->type == REDIS_REPLY_STRING) cursor = atoi(src_reply->element[0]->str); else if (src_reply->element[0]->type == REDIS_REPLY_INTEGER) cursor = src_reply->element[0]->integer; int keycount = src_reply->element[1]->elements; for (i = 0; i < keycount; i++) { redisReply *kr = src_reply->element[1]->element[i]; assert(kr->type == REDIS_REPLY_STRING); char *key = kr->str; uint16_t slot = clusterManagerKeyHashSlot(key, kr->len); clusterManagerNode *target = slots_map[slot]; printf("Migrating %s to %s:%d: ", key, target->ip, target->port); redisReply *r = reconnectingRedisCommand(src_ctx, cmdfmt, target->ip, target->port, key, 0, timeout); if (!r || r->type == REDIS_REPLY_ERROR) { if (r && r->str) { clusterManagerLogErr("Source %s:%d replied with " "error:\n%s\n", src_ip, src_port, r->str); } success = 0; } freeReplyObject(r); if (!success) goto cleanup; clusterManagerLogOk("OK\n"); } } cleanup: if (reply_err) clusterManagerLogErr("Source %s:%d replied with error:\n%s\n", src_ip, src_port, reply_err); if (src_ctx) redisFree(src_ctx); if (src_reply) freeReplyObject(src_reply); if (cmdfmt) sdsfree(cmdfmt); return success; invalid_args: fprintf(stderr, "%s", invalid_args_msg); return 0; } static int clusterManagerCommandCall(int argc, char **argv) { int port = 0, i; char *ip = NULL; if (!getClusterHostFromCmdArgs(1, argv, &ip, &port)) goto invalid_args; clusterManagerNode *refnode = clusterManagerNewNode(ip, port); if (!clusterManagerLoadInfoFromNode(refnode)) return 0; argc--; argv++; size_t *argvlen = zmalloc(argc*sizeof(size_t)); clusterManagerLogInfo(">>> Calling"); for (i = 0; i < argc; i++) { argvlen[i] = strlen(argv[i]); printf(" %s", argv[i]); } printf("\n"); listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { clusterManagerNode *n = ln->value; if ((config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_MASTERS_ONLY) && (n->replicate != NULL)) continue; // continue if node is slave if ((config.cluster_manager_command.flags & CLUSTER_MANAGER_CMD_FLAG_SLAVES_ONLY) && (n->replicate == NULL)) continue; // continue if node is master if (!n->context && !clusterManagerNodeConnect(n)) continue; redisReply *reply = NULL; redisAppendCommandArgv(n->context, argc, (const char **) argv, argvlen); int status = redisGetReply(n->context, (void **)(&reply)); if (status != REDIS_OK || reply == NULL ) printf("%s:%d: Failed!\n", n->ip, n->port); else { sds formatted_reply = cliFormatReplyRaw(reply); printf("%s:%d: %s\n", n->ip, n->port, (char *) formatted_reply); sdsfree(formatted_reply); } if (reply != NULL) freeReplyObject(reply); } zfree(argvlen); return 1; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandBackup(int argc, char **argv) { UNUSED(argc); int success = 1, port = 0; char *ip = NULL; if (!getClusterHostFromCmdArgs(1, argv, &ip, &port)) goto invalid_args; clusterManagerNode *refnode = clusterManagerNewNode(ip, port); if 
(!clusterManagerLoadInfoFromNode(refnode)) return 0; int no_issues = clusterManagerCheckCluster(0); int cluster_errors_count = (no_issues ? 0 : listLength(cluster_manager.errors)); config.cluster_manager_command.backup_dir = argv[1]; /* TODO: check if backup_dir is a valid directory. */ sds json = sdsnew("[\n"); int first_node = 0; listIter li; listNode *ln; listRewind(cluster_manager.nodes, &li); while ((ln = listNext(&li)) != NULL) { if (!first_node) first_node = 1; else json = sdscat(json, ",\n"); clusterManagerNode *node = ln->value; sds node_json = clusterManagerNodeGetJSON(node, cluster_errors_count); json = sdscat(json, node_json); sdsfree(node_json); if (node->replicate) continue; clusterManagerLogInfo(">>> Node %s:%d -> Saving RDB...\n", node->ip, node->port); fflush(stdout); getRDB(node); } json = sdscat(json, "\n]"); sds jsonpath = sdsnew(config.cluster_manager_command.backup_dir); if (jsonpath[sdslen(jsonpath) - 1] != '/') jsonpath = sdscat(jsonpath, "/"); jsonpath = sdscat(jsonpath, "nodes.json"); fflush(stdout); clusterManagerLogInfo("Saving cluster configuration to: %s\n", jsonpath); FILE *out = fopen(jsonpath, "w+"); if (!out) { clusterManagerLogErr("Could not save nodes to: %s\n", jsonpath); success = 0; goto cleanup; } fputs(json, out); fclose(out); cleanup: sdsfree(json); sdsfree(jsonpath); if (success) { if (!no_issues) { clusterManagerLogWarn("*** Cluster seems to have some problems, " "please be aware of it if you're going " "to restore this backup.\n"); } clusterManagerLogOk("[OK] Backup created into: %s\n", config.cluster_manager_command.backup_dir); } else clusterManagerLogErr("[ERR] Failed to back up the cluster!\n"); return success; invalid_args: fprintf(stderr, CLUSTER_MANAGER_INVALID_HOST_ARG); return 0; } static int clusterManagerCommandHelp(int argc, char **argv) { UNUSED(argc); UNUSED(argv); int commands_count = sizeof(clusterManagerCommands) / sizeof(clusterManagerCommandDef); int i = 0, j; fprintf(stderr, "Cluster Manager Commands:\n"); int padding = 15; for (; i < commands_count; i++) { clusterManagerCommandDef *def = &(clusterManagerCommands[i]); int namelen = strlen(def->name), padlen = padding - namelen; fprintf(stderr, " %s", def->name); for (j = 0; j < padlen; j++) fprintf(stderr, " "); fprintf(stderr, "%s\n", (def->args ? 
def->args : "")); if (def->options != NULL) { int optslen = strlen(def->options); char *p = def->options, *eos = p + optslen; char *comma = NULL; while ((comma = strchr(p, ',')) != NULL) { int deflen = (int)(comma - p); char buf[255]; memcpy(buf, p, deflen); buf[deflen] = '\0'; for (j = 0; j < padding; j++) fprintf(stderr, " "); fprintf(stderr, " --cluster-%s\n", buf); p = comma + 1; if (p >= eos) break; } if (p < eos) { for (j = 0; j < padding; j++) fprintf(stderr, " "); fprintf(stderr, " --cluster-%s\n", p); } } } fprintf(stderr, "\nFor check, fix, reshard, del-node, set-timeout, " "info, rebalance, call, import, backup you " "can specify the host and port of any working node in " "the cluster.\n"); int options_count = sizeof(clusterManagerOptions) / sizeof(clusterManagerOptionDef); i = 0; fprintf(stderr, "\nCluster Manager Options:\n"); for (; i < options_count; i++) { clusterManagerOptionDef *def = &(clusterManagerOptions[i]); int namelen = strlen(def->name), padlen = padding - namelen; fprintf(stderr, " %s", def->name); for (j = 0; j < padlen; j++) fprintf(stderr, " "); fprintf(stderr, "%s\n", def->desc); } fprintf(stderr, "\n"); return 0; } /*------------------------------------------------------------------------------ * Latency and latency history modes *--------------------------------------------------------------------------- */ static void latencyModePrint(long long min, long long max, double avg, long long count) { if (config.output == OUTPUT_STANDARD) { printf("min: %lld, max: %lld, avg: %.2f (%lld samples)", min, max, avg, count); fflush(stdout); } else if (config.output == OUTPUT_CSV) { printf("%lld,%lld,%.2f,%lld\n", min, max, avg, count); } else if (config.output == OUTPUT_RAW) { printf("%lld %lld %.2f %lld\n", min, max, avg, count); } } #define LATENCY_SAMPLE_RATE 10 /* milliseconds. */ #define LATENCY_HISTORY_DEFAULT_INTERVAL 15000 /* milliseconds. */ static void latencyMode(void) { redisReply *reply; long long start, latency, min = 0, max = 0, tot = 0, count = 0; long long history_interval = config.interval ? config.interval/1000 : LATENCY_HISTORY_DEFAULT_INTERVAL; double avg; long long history_start = mstime(); /* Set a default for the interval in case of --latency option * with --raw, --csv or when it is redirected to non tty. */ if (config.interval == 0) { config.interval = 1000; } else { config.interval /= 1000; /* We need to convert to milliseconds. */ } if (!context) exit(1); while(1) { start = mstime(); reply = reconnectingRedisCommand(context,"PING"); if (reply == NULL) { fprintf(stderr,"\nI/O error\n"); exit(1); } latency = mstime()-start; freeReplyObject(reply); count++; if (count == 1) { min = max = tot = latency; avg = (double) latency; } else { if (latency < min) min = latency; if (latency > max) max = latency; tot += latency; avg = (double) tot/count; } if (config.output == OUTPUT_STANDARD) { printf("\x1b[0G\x1b[2K"); /* Clear the line. 
*/ latencyModePrint(min,max,avg,count); } else { if (config.latency_history) { latencyModePrint(min,max,avg,count); } else if (mstime()-history_start > config.interval) { latencyModePrint(min,max,avg,count); exit(0); } } if (config.latency_history && mstime()-history_start > history_interval) { printf(" -- %.2f seconds range\n", (float)(mstime()-history_start)/1000); history_start = mstime(); min = max = tot = count = 0; } usleep(LATENCY_SAMPLE_RATE * 1000); } } /*------------------------------------------------------------------------------ * Latency distribution mode -- requires 256 colors xterm *--------------------------------------------------------------------------- */ #define LATENCY_DIST_DEFAULT_INTERVAL 1000 /* milliseconds. */ /* Structure to store samples distribution. */ struct distsamples { long long max; /* Max latency to fit into this interval (usec). */ long long count; /* Number of samples in this interval. */ int character; /* Associated character in visualization. */ }; /* Helper function for latencyDistMode(). Performs the spectrum visualization * of the collected samples targeting an xterm 256 terminal. * * Takes an array of distsamples structures, ordered from smaller to bigger * 'max' value. Last sample max must be 0, to mean that it holds all the * samples greater than the previous one, and is also the stop sentinel. * * 'tot' is the total number of samples in the different buckets, so it * is the SUM(samples[i].count) for i from 0 up to the max sample. * * As a side effect the function sets all the buckets count to 0. */ void showLatencyDistSamples(struct distsamples *samples, long long tot) { int j; /* We convert samples into an index inside the palette * proportional to the percentage a given bucket represents. * This way intensity of the different parts of the spectrum * doesn't change relative to the number of requests, which avoids * polluting the visualization with non-latency related info. */ printf("\033[38;5;0m"); /* Set foreground color to black. */ for (j = 0; ; j++) { int coloridx = ceil((double) samples[j].count / tot * (spectrum_palette_size-1)); int color = spectrum_palette[coloridx]; printf("\033[48;5;%dm%c", (int)color, samples[j].character); samples[j].count = 0; if (samples[j].max == 0) break; /* Last sample. */ } printf("\033[0m\n"); fflush(stdout); } /* Show the legend: different buckets values and colors meaning, so * that the spectrum is more easily readable. */ void showLatencyDistLegend(void) { int j; printf("---------------------------------------------\n"); printf(". - * # .01 .125 .25 .5 milliseconds\n"); printf("1,2,3,...,9 from 1 to 9 milliseconds\n"); printf("A,B,C,D,E 10,20,30,40,50 milliseconds\n"); printf("F,G,H,I,J .1,.2,.3,.4,.5 seconds\n"); printf("K,L,M,N,O,P,Q,? 1,2,4,8,16,30,60,>60 seconds\n"); printf("From 0 to 100%%: "); for (j = 0; j < spectrum_palette_size; j++) { printf("\033[48;5;%dm ", spectrum_palette[j]); } printf("\033[0m\n"); printf("---------------------------------------------\n"); } static void latencyDistMode(void) { redisReply *reply; long long start, latency, count = 0; long long history_interval = config.interval ? config.interval/1000 : LATENCY_DIST_DEFAULT_INTERVAL; long long history_start = ustime(); int j, outputs = 0; struct distsamples samples[] = { /* We use a mostly logarithmic scale, with certain linear intervals * which are more interesting than others, like 1-10 milliseconds * range. 
*/ {10,0,'.'}, /* 0.01 ms */ {125,0,'-'}, /* 0.125 ms */ {250,0,'*'}, /* 0.25 ms */ {500,0,'#'}, /* 0.5 ms */ {1000,0,'1'}, /* 1 ms */ {2000,0,'2'}, /* 2 ms */ {3000,0,'3'}, /* 3 ms */ {4000,0,'4'}, /* 4 ms */ {5000,0,'5'}, /* 5 ms */ {6000,0,'6'}, /* 6 ms */ {7000,0,'7'}, /* 7 ms */ {8000,0,'8'}, /* 8 ms */ {9000,0,'9'}, /* 9 ms */ {10000,0,'A'}, /* 10 ms */ {20000,0,'B'}, /* 20 ms */ {30000,0,'C'}, /* 30 ms */ {40000,0,'D'}, /* 40 ms */ {50000,0,'E'}, /* 50 ms */ {100000,0,'F'}, /* 0.1 s */ {200000,0,'G'}, /* 0.2 s */ {300000,0,'H'}, /* 0.3 s */ {400000,0,'I'}, /* 0.4 s */ {500000,0,'J'}, /* 0.5 s */ {1000000,0,'K'}, /* 1 s */ {2000000,0,'L'}, /* 2 s */ {4000000,0,'M'}, /* 4 s */ {8000000,0,'N'}, /* 8 s */ {16000000,0,'O'}, /* 16 s */ {30000000,0,'P'}, /* 30 s */ {60000000,0,'Q'}, /* 1 minute */ {0,0,'?'}, /* > 1 minute */ }; if (!context) exit(1); while(1) { start = ustime(); reply = reconnectingRedisCommand(context,"PING"); if (reply == NULL) { fprintf(stderr,"\nI/O error\n"); exit(1); } latency = ustime()-start; freeReplyObject(reply); count++; /* Populate the relevant bucket. */ for (j = 0; ; j++) { if (samples[j].max == 0 || latency <= samples[j].max) { samples[j].count++; break; } } /* From time to time show the spectrum. */ if (count && (ustime()-history_start)/1000 > history_interval) { if ((outputs++ % 20) == 0) showLatencyDistLegend(); showLatencyDistSamples(samples,count); history_start = ustime(); count = 0; } usleep(LATENCY_SAMPLE_RATE * 1000); } } /*------------------------------------------------------------------------------ * Slave mode *--------------------------------------------------------------------------- */ #define RDB_EOF_MARK_SIZE 40 void sendReplconf(const char* arg1, const char* arg2) { fprintf(stderr, "sending REPLCONF %s %s\n", arg1, arg2); redisReply *reply = redisCommand(context, "REPLCONF %s %s", arg1, arg2); /* Handle any error conditions */ if(reply == NULL) { fprintf(stderr, "\nI/O error\n"); exit(1); } else if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "REPLCONF %s error: %s\n", arg1, reply->str); /* non fatal, old versions may not support it */ } freeReplyObject(reply); } void sendCapa() { sendReplconf("capa", "eof"); } void sendRdbOnly(void) { sendReplconf("rdb-only", "1"); } /* Read raw bytes through a redisContext. The read operation is not greedy * and may not fill the buffer entirely. */ static ssize_t readConn(redisContext *c, char *buf, size_t len) { return c->funcs->read(c, buf, len); } /* Sends SYNC and reads the number of bytes in the payload. Used both by * slaveMode() and getRDB(). * returns 0 in case an EOF marker is used. */ unsigned long long sendSync(redisContext *c, char *out_eof) { /* To start we need to send the SYNC command and return the payload. * The hiredis client lib does not understand this part of the protocol * and we don't want to mess with its buffers, so everything is performed * using direct low-level I/O. */ char buf[4096], *p; ssize_t nread; /* Send the SYNC command. 
*/ if (cliWriteConn(c, "SYNC\r\n", 6) != 6) { fprintf(stderr,"Error writing to master\n"); exit(1); } /* Read $<payload>\r\n, making sure to read just up to "\n" */ p = buf; while(1) { nread = readConn(c,p,1); if (nread <= 0) { fprintf(stderr,"Error reading bulk length while SYNCing\n"); exit(1); } if (*p == '\n' && p != buf) break; if (*p != '\n') p++; } *p = '\0'; if (buf[0] == '-') { fprintf(stderr, "SYNC with master failed: %s\n", buf); exit(1); } if (strncmp(buf+1,"EOF:",4) == 0 && strlen(buf+5) >= RDB_EOF_MARK_SIZE) { memcpy(out_eof, buf+5, RDB_EOF_MARK_SIZE); return 0; } return strtoull(buf+1,NULL,10); } static void slaveMode(void) { static char eofmark[RDB_EOF_MARK_SIZE]; static char lastbytes[RDB_EOF_MARK_SIZE]; static int usemark = 0; unsigned long long payload = sendSync(context,eofmark); char buf[1024]; int original_output = config.output; if (payload == 0) { payload = ULLONG_MAX; memset(lastbytes,0,RDB_EOF_MARK_SIZE); usemark = 1; fprintf(stderr,"SYNC with master, discarding " "bytes of bulk transfer until EOF marker...\n"); } else { fprintf(stderr,"SYNC with master, discarding %llu " "bytes of bulk transfer...\n", payload); } /* Discard the payload. */ while(payload) { ssize_t nread; nread = readConn(context,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"Error reading RDB payload while SYNCing\n"); exit(1); } payload -= nread; if (usemark) { /* Update the last bytes array, and check if it matches our delimiter.*/ if (nread >= RDB_EOF_MARK_SIZE) { memcpy(lastbytes,buf+nread-RDB_EOF_MARK_SIZE,RDB_EOF_MARK_SIZE); } else { int rem = RDB_EOF_MARK_SIZE-nread; memmove(lastbytes,lastbytes+nread,rem); memcpy(lastbytes+rem,buf,nread); } if (memcmp(lastbytes,eofmark,RDB_EOF_MARK_SIZE) == 0) break; } } if (usemark) { unsigned long long offset = ULLONG_MAX - payload; fprintf(stderr,"SYNC done after %llu bytes. Logging commands from master.\n", offset); /* put the slave online */ sleep(1); sendReplconf("ACK", "0"); } else fprintf(stderr,"SYNC done. Logging commands from master.\n"); /* Now we can use hiredis to read the incoming protocol. */ config.output = OUTPUT_CSV; while (cliReadReply(0) == REDIS_OK); config.output = original_output; } /*------------------------------------------------------------------------------ * RDB transfer mode *--------------------------------------------------------------------------- */ /* This function implements --rdb, so it uses the replication protocol in order * to fetch the RDB file from a remote server. */ static void getRDB(clusterManagerNode *node) { int fd; redisContext *s; char *filename; if (node != NULL) { assert(node->context); s = node->context; filename = clusterManagerGetNodeRDBFilename(node); } else { s = context; filename = config.rdb_filename; } static char eofmark[RDB_EOF_MARK_SIZE]; static char lastbytes[RDB_EOF_MARK_SIZE]; static int usemark = 0; unsigned long long payload = sendSync(s, eofmark); char buf[4096]; if (payload == 0) { payload = ULLONG_MAX; memset(lastbytes,0,RDB_EOF_MARK_SIZE); usemark = 1; fprintf(stderr,"SYNC sent to master, writing bytes of bulk transfer " "until EOF marker to '%s'\n", filename); } else { fprintf(stderr,"SYNC sent to master, writing %llu bytes to '%s'\n", payload, filename); } int write_to_stdout = !strcmp(filename,"-"); /* Write to file. 
*/ if (write_to_stdout) { fd = STDOUT_FILENO; } else { fd = open(filename, O_CREAT|O_WRONLY, 0644); if (fd == -1) { fprintf(stderr, "Error opening '%s': %s\n", filename, strerror(errno)); exit(1); } } while(payload) { ssize_t nread, nwritten; nread = readConn(s,buf,(payload > sizeof(buf)) ? sizeof(buf) : payload); if (nread <= 0) { fprintf(stderr,"I/O Error reading RDB payload from socket\n"); exit(1); } nwritten = write(fd, buf, nread); if (nwritten != nread) { fprintf(stderr,"Error writing data to file: %s\n", (nwritten == -1) ? strerror(errno) : "short write"); exit(1); } payload -= nread; if (usemark) { /* Update the last bytes array, and check if it matches our delimiter.*/ if (nread >= RDB_EOF_MARK_SIZE) { memcpy(lastbytes,buf+nread-RDB_EOF_MARK_SIZE,RDB_EOF_MARK_SIZE); } else { int rem = RDB_EOF_MARK_SIZE-nread; memmove(lastbytes,lastbytes+nread,rem); memcpy(lastbytes+rem,buf,nread); } if (memcmp(lastbytes,eofmark,RDB_EOF_MARK_SIZE) == 0) break; } } if (usemark) { payload = ULLONG_MAX - payload - RDB_EOF_MARK_SIZE; if (!write_to_stdout && ftruncate(fd, payload) == -1) fprintf(stderr,"ftruncate failed: %s.\n", strerror(errno)); fprintf(stderr,"Transfer finished with success after %llu bytes\n", payload); } else { fprintf(stderr,"Transfer finished with success.\n"); } redisFree(s); /* Close the connection ASAP as fsync() may take time. */ if (node) node->context = NULL; if (!write_to_stdout && fsync(fd) == -1) { fprintf(stderr,"Fail to fsync '%s': %s\n", filename, strerror(errno)); exit(1); } close(fd); if (node) { sdsfree(filename); return; } exit(0); } /*------------------------------------------------------------------------------ * Bulk import (pipe) mode *--------------------------------------------------------------------------- */ #define PIPEMODE_WRITE_LOOP_MAX_BYTES (128*1024) static void pipeMode(void) { long long errors = 0, replies = 0, obuf_len = 0, obuf_pos = 0; char obuf[1024*16]; /* Output buffer */ char aneterr[ANET_ERR_LEN]; redisReply *reply; int eof = 0; /* True once we consumed all the standard input. */ int done = 0; char magic[20]; /* Special reply we recognize. */ time_t last_read_time = time(NULL); srand(time(NULL)); /* Use non blocking I/O. */ if (anetNonBlock(aneterr,context->fd) == ANET_ERR) { fprintf(stderr, "Can't set the socket in non blocking mode: %s\n", aneterr); exit(1); } context->flags &= ~REDIS_BLOCK; /* Transfer raw protocol and read replies from the server at the same * time. */ while(!done) { int mask = AE_READABLE; if (!eof || obuf_len != 0) mask |= AE_WRITABLE; mask = aeWait(context->fd,mask,1000); /* Handle the readable state: we can read replies from the server. */ if (mask & AE_READABLE) { int read_error = 0; do { if (!read_error && redisBufferRead(context) == REDIS_ERR) { read_error = 1; } reply = NULL; if (redisGetReply(context, (void **) &reply) == REDIS_ERR) { fprintf(stderr, "Error reading replies from server\n"); exit(1); } if (reply) { last_read_time = time(NULL); if (reply->type == REDIS_REPLY_ERROR) { fprintf(stderr,"%s\n", reply->str); errors++; } else if (eof && reply->type == REDIS_REPLY_STRING && reply->len == 20) { /* Check if this is the reply to our final ECHO * command. If so everything was received * from the server. */ if (memcmp(reply->str,magic,20) == 0) { printf("Last reply received from server.\n"); done = 1; replies--; } } replies++; freeReplyObject(reply); } } while(reply); /* Abort on read errors. 
We abort here because it is important * to consume replies even after a read error: this way we can * show a potential problem to the user. */ if (read_error) exit(1); } /* Handle the writable state: we can send protocol to the server. */ if (mask & AE_WRITABLE) { ssize_t loop_nwritten = 0; while(1) { /* Transfer current buffer to server. */ if (obuf_len != 0) { ssize_t nwritten = cliWriteConn(context,obuf+obuf_pos,obuf_len); if (nwritten == -1) { if (errno != EAGAIN && errno != EINTR) { fprintf(stderr, "Error writing to the server: %s\n", strerror(errno)); exit(1); } else { nwritten = 0; } } obuf_len -= nwritten; obuf_pos += nwritten; loop_nwritten += nwritten; if (obuf_len != 0) break; /* Can't accept more data. */ } if (context->err) { fprintf(stderr, "Server I/O Error: %s\n", context->errstr); exit(1); } /* If buffer is empty, load from stdin. */ if (obuf_len == 0 && !eof) { ssize_t nread = read(STDIN_FILENO,obuf,sizeof(obuf)); if (nread == 0) { /* The ECHO sequence starts with a "\r\n" so that if there * is garbage in the protocol we read from stdin, the ECHO * will likely still be properly formatted. * CRLF is ignored by Redis, so it has no effects. */ char echo[] = "\r\n*2\r\n$4\r\nECHO\r\n$20\r\n01234567890123456789\r\n"; int j; eof = 1; /* Everything transferred, so we queue a special * ECHO command that we can match in the replies * to make sure everything was read from the server. */ for (j = 0; j < 20; j++) magic[j] = rand() & 0xff; memcpy(echo+21,magic,20); memcpy(obuf,echo,sizeof(echo)-1); obuf_len = sizeof(echo)-1; obuf_pos = 0; printf("All data transferred. Waiting for the last reply...\n"); } else if (nread == -1) { fprintf(stderr, "Error reading from stdin: %s\n", strerror(errno)); exit(1); } else { obuf_len = nread; obuf_pos = 0; } } if ((obuf_len == 0 && eof) || loop_nwritten > PIPEMODE_WRITE_LOOP_MAX_BYTES) break; } } /* Handle timeout, that is, we reached EOF, and we are not getting * replies from the server for a few seconds, nor the final ECHO is * received. 
*/ if (eof && config.pipe_timeout > 0 && time(NULL)-last_read_time > config.pipe_timeout) { fprintf(stderr,"No replies for %d seconds: exiting.\n", config.pipe_timeout); errors++; break; } } printf("errors: %lld, replies: %lld\n", errors, replies); if (errors) exit(1); else exit(0); } /*------------------------------------------------------------------------------ * Find big keys *--------------------------------------------------------------------------- */ static redisReply *sendScan(unsigned long long *it) { redisReply *reply; if (config.pattern) reply = redisCommand(context, "SCAN %llu MATCH %b", *it, config.pattern, sdslen(config.pattern)); else reply = redisCommand(context,"SCAN %llu",*it); /* Handle any error conditions */ if(reply == NULL) { fprintf(stderr, "\nI/O error\n"); exit(1); } else if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "SCAN error: %s\n", reply->str); exit(1); } else if(reply->type != REDIS_REPLY_ARRAY) { fprintf(stderr, "Non ARRAY response from SCAN!\n"); exit(1); } else if(reply->elements != 2) { fprintf(stderr, "Invalid element count from SCAN!\n"); exit(1); } /* Validate our types are correct */ assert(reply->element[0]->type == REDIS_REPLY_STRING); assert(reply->element[1]->type == REDIS_REPLY_ARRAY); /* Update iterator */ *it = strtoull(reply->element[0]->str, NULL, 10); return reply; } static int getDbSize(void) { redisReply *reply; int size; reply = redisCommand(context, "DBSIZE"); if (reply == NULL) { fprintf(stderr, "\nI/O error\n"); exit(1); } else if (reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "Couldn't determine DBSIZE: %s\n", reply->str); exit(1); } else if (reply->type != REDIS_REPLY_INTEGER) { fprintf(stderr, "Non INTEGER response from DBSIZE!\n"); exit(1); } /* Grab the number of keys and free our reply */ size = reply->integer; freeReplyObject(reply); return size; } typedef struct { char *name; char *sizecmd; char *sizeunit; unsigned long long biggest; unsigned long long count; unsigned long long totalsize; sds biggest_key; } typeinfo; typeinfo type_string = { "string", "STRLEN", "bytes" }; typeinfo type_list = { "list", "LLEN", "items" }; typeinfo type_set = { "set", "SCARD", "members" }; typeinfo type_hash = { "hash", "HLEN", "fields" }; typeinfo type_zset = { "zset", "ZCARD", "members" }; typeinfo type_stream = { "stream", "XLEN", "entries" }; typeinfo type_other = { "other", NULL, "?" 
}; static typeinfo* typeinfo_add(dict *types, char* name, typeinfo* type_template) { typeinfo *info = zmalloc(sizeof(typeinfo)); *info = *type_template; info->name = sdsnew(name); dictAdd(types, info->name, info); return info; } void type_free(dict *d, void* val) { typeinfo *info = val; UNUSED(d); if (info->biggest_key) sdsfree(info->biggest_key); sdsfree(info->name); zfree(info); } static dictType typeinfoDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor (owned by the value)*/ type_free, /* val destructor */ NULL /* allow to expand */ }; static void getKeyTypes(dict *types_dict, redisReply *keys, typeinfo **types) { redisReply *reply; unsigned int i; /* Pipeline TYPE commands */ for(i=0;i<keys->elements;i++) { const char* argv[] = {"TYPE", keys->element[i]->str}; size_t lens[] = {4, keys->element[i]->len}; redisAppendCommandArgv(context, 2, argv, lens); } /* Retrieve types */ for(i=0;i<keys->elements;i++) { if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { fprintf(stderr, "Error getting type for key '%s' (%d: %s)\n", keys->element[i]->str, context->err, context->errstr); exit(1); } else if(reply->type != REDIS_REPLY_STATUS) { if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "TYPE returned an error: %s\n", reply->str); } else { fprintf(stderr, "Invalid reply type (%d) for TYPE on key '%s'!\n", reply->type, keys->element[i]->str); } exit(1); } sds typereply = sdsnew(reply->str); dictEntry *de = dictFind(types_dict, typereply); sdsfree(typereply); typeinfo *type = NULL; if (de) type = dictGetVal(de); else if (strcmp(reply->str, "none")) /* create new types for modules, (but not for deleted keys) */ type = typeinfo_add(types_dict, reply->str, &type_other); types[i] = type; freeReplyObject(reply); } } static void getKeySizes(redisReply *keys, typeinfo **types, unsigned long long *sizes, int memkeys, unsigned memkeys_samples) { redisReply *reply; unsigned int i; /* Pipeline size commands */ for(i=0;i<keys->elements;i++) { /* Skip keys that disappeared between SCAN and TYPE (or unknown types when not in memkeys mode) */ if(!types[i] || (!types[i]->sizecmd && !memkeys)) continue; if (!memkeys) { const char* argv[] = {types[i]->sizecmd, keys->element[i]->str}; size_t lens[] = {strlen(types[i]->sizecmd), keys->element[i]->len}; redisAppendCommandArgv(context, 2, argv, lens); } else if (memkeys_samples==0) { const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str}; size_t lens[] = {6, 5, keys->element[i]->len}; redisAppendCommandArgv(context, 3, argv, lens); } else { sds samplesstr = sdsfromlonglong(memkeys_samples); const char* argv[] = {"MEMORY", "USAGE", keys->element[i]->str, "SAMPLES", samplesstr}; size_t lens[] = {6, 5, keys->element[i]->len, 7, sdslen(samplesstr)}; redisAppendCommandArgv(context, 5, argv, lens); sdsfree(samplesstr); } } /* Retrieve sizes */ for(i=0;i<keys->elements;i++) { /* Skip keys that disappeared between SCAN and TYPE (or unknown types when not in memkeys mode) */ if(!types[i] || (!types[i]->sizecmd && !memkeys)) { sizes[i] = 0; continue; } /* Retrieve size */ if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { fprintf(stderr, "Error getting size for key '%s' (%d: %s)\n", keys->element[i]->str, context->err, context->errstr); exit(1); } else if(reply->type != REDIS_REPLY_INTEGER) { /* Theoretically the key could have been removed and * added as a different type between TYPE and SIZE */ fprintf(stderr, "Warning: %s on '%s' failed (may have changed 
type)\n", !memkeys? types[i]->sizecmd: "MEMORY USAGE", keys->element[i]->str); sizes[i] = 0; } else { sizes[i] = reply->integer; } freeReplyObject(reply); } } static void findBigKeys(int memkeys, unsigned memkeys_samples) { unsigned long long sampled = 0, total_keys, totlen=0, *sizes=NULL, it=0; redisReply *reply, *keys; unsigned int arrsize=0, i; dictIterator *di; dictEntry *de; typeinfo **types = NULL; double pct; dict *types_dict = dictCreate(&typeinfoDictType); typeinfo_add(types_dict, "string", &type_string); typeinfo_add(types_dict, "list", &type_list); typeinfo_add(types_dict, "set", &type_set); typeinfo_add(types_dict, "hash", &type_hash); typeinfo_add(types_dict, "zset", &type_zset); typeinfo_add(types_dict, "stream", &type_stream); /* Total keys pre scanning */ total_keys = getDbSize(); /* Status message */ printf("\n# Scanning the entire keyspace to find biggest keys as well as\n"); printf("# average sizes per key type. You can use -i 0.1 to sleep 0.1 sec\n"); printf("# per 100 SCAN commands (not usually needed).\n\n"); /* SCAN loop */ do { /* Calculate approximate percentage completion */ pct = 100 * (double)sampled/total_keys; /* Grab some keys and point to the keys array */ reply = sendScan(&it); keys = reply->element[1]; /* Reallocate our type and size array if we need to */ if(keys->elements > arrsize) { types = zrealloc(types, sizeof(typeinfo*)*keys->elements); sizes = zrealloc(sizes, sizeof(unsigned long long)*keys->elements); if(!types || !sizes) { fprintf(stderr, "Failed to allocate storage for keys!\n"); exit(1); } arrsize = keys->elements; } /* Retrieve types and then sizes */ getKeyTypes(types_dict, keys, types); getKeySizes(keys, types, sizes, memkeys, memkeys_samples); /* Now update our stats */ for(i=0;i<keys->elements;i++) { typeinfo *type = types[i]; /* Skip keys that disappeared between SCAN and TYPE */ if(!type) continue; type->totalsize += sizes[i]; type->count++; totlen += keys->element[i]->len; sampled++; if(type->biggest<sizes[i]) { /* Keep track of biggest key name for this type */ if (type->biggest_key) sdsfree(type->biggest_key); type->biggest_key = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); if(!type->biggest_key) { fprintf(stderr, "Failed to allocate memory for key!\n"); exit(1); } printf( "[%05.2f%%] Biggest %-6s found so far '%s' with %llu %s\n", pct, type->name, type->biggest_key, sizes[i], !memkeys? type->sizeunit: "bytes"); /* Keep track of the biggest size for this type */ type->biggest = sizes[i]; } /* Update overall progress */ if(sampled % 1000000 == 0) { printf("[%05.2f%%] Sampled %llu keys so far\n", pct, sampled); } } /* Sleep if we've been directed to do so */ if(sampled && (sampled %100) == 0 && config.interval) { usleep(config.interval); } freeReplyObject(reply); } while(it != 0); if(types) zfree(types); if(sizes) zfree(sizes); /* We're done */ printf("\n-------- summary -------\n\n"); printf("Sampled %llu keys in the keyspace!\n", sampled); printf("Total key length in bytes is %llu (avg len %.2f)\n\n", totlen, totlen ? (double)totlen/sampled : 0); /* Output the biggest keys we found, for types we did find */ di = dictGetIterator(types_dict); while ((de = dictNext(di))) { typeinfo *type = dictGetVal(de); if(type->biggest_key) { printf("Biggest %6s found '%s' has %llu %s\n", type->name, type->biggest_key, type->biggest, !memkeys? 
type->sizeunit: "bytes"); } } dictReleaseIterator(di); printf("\n"); di = dictGetIterator(types_dict); while ((de = dictNext(di))) { typeinfo *type = dictGetVal(de); printf("%llu %ss with %llu %s (%05.2f%% of keys, avg size %.2f)\n", type->count, type->name, type->totalsize, !memkeys? type->sizeunit: "bytes", sampled ? 100 * (double)type->count/sampled : 0, type->count ? (double)type->totalsize/type->count : 0); } dictReleaseIterator(di); dictRelease(types_dict); /* Success! */ exit(0); } static void getKeyFreqs(redisReply *keys, unsigned long long *freqs) { redisReply *reply; unsigned int i; /* Pipeline OBJECT freq commands */ for(i=0;i<keys->elements;i++) { const char* argv[] = {"OBJECT", "FREQ", keys->element[i]->str}; size_t lens[] = {6, 4, keys->element[i]->len}; redisAppendCommandArgv(context, 3, argv, lens); } /* Retrieve freqs */ for(i=0;i<keys->elements;i++) { if(redisGetReply(context, (void**)&reply)!=REDIS_OK) { sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); fprintf(stderr, "Error getting freq for key '%s' (%d: %s)\n", keyname, context->err, context->errstr); sdsfree(keyname); exit(1); } else if(reply->type != REDIS_REPLY_INTEGER) { if(reply->type == REDIS_REPLY_ERROR) { fprintf(stderr, "Error: %s\n", reply->str); exit(1); } else { sds keyname = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); fprintf(stderr, "Warning: OBJECT freq on '%s' failed (may have been deleted)\n", keyname); sdsfree(keyname); freqs[i] = 0; } } else { freqs[i] = reply->integer; } freeReplyObject(reply); } } #define HOTKEYS_SAMPLE 16 static void findHotKeys(void) { redisReply *keys, *reply; unsigned long long counters[HOTKEYS_SAMPLE] = {0}; sds hotkeys[HOTKEYS_SAMPLE] = {NULL}; unsigned long long sampled = 0, total_keys, *freqs = NULL, it = 0; unsigned int arrsize = 0, i, k; double pct; /* Total keys pre scanning */ total_keys = getDbSize(); /* Status message */ printf("\n# Scanning the entire keyspace to find hot keys as well as\n"); printf("# average sizes per key type. 
You can use -i 0.1 to sleep 0.1 sec\n"); printf("# per 100 SCAN commands (not usually needed).\n\n"); /* SCAN loop */ do { /* Calculate approximate percentage completion */ pct = 100 * (double)sampled/total_keys; /* Grab some keys and point to the keys array */ reply = sendScan(&it); keys = reply->element[1]; /* Reallocate our freqs array if we need to */ if(keys->elements > arrsize) { freqs = zrealloc(freqs, sizeof(unsigned long long)*keys->elements); if(!freqs) { fprintf(stderr, "Failed to allocate storage for keys!\n"); exit(1); } arrsize = keys->elements; } getKeyFreqs(keys, freqs); /* Now update our stats */ for(i=0;i<keys->elements;i++) { sampled++; /* Update overall progress */ if(sampled % 1000000 == 0) { printf("[%05.2f%%] Sampled %llu keys so far\n", pct, sampled); } /* Use eviction pool here */ k = 0; while (k < HOTKEYS_SAMPLE && freqs[i] > counters[k]) k++; if (k == 0) continue; k--; if (k == 0 || counters[k] == 0) { sdsfree(hotkeys[k]); } else { sdsfree(hotkeys[0]); memmove(counters,counters+1,sizeof(counters[0])*k); memmove(hotkeys,hotkeys+1,sizeof(hotkeys[0])*k); } counters[k] = freqs[i]; hotkeys[k] = sdscatrepr(sdsempty(), keys->element[i]->str, keys->element[i]->len); printf( "[%05.2f%%] Hot key '%s' found so far with counter %llu\n", pct, hotkeys[k], freqs[i]); } /* Sleep if we've been directed to do so */ if(sampled && (sampled %100) == 0 && config.interval) { usleep(config.interval); } freeReplyObject(reply); } while(it != 0); if (freqs) zfree(freqs); /* We're done */ printf("\n-------- summary -------\n\n"); printf("Sampled %llu keys in the keyspace!\n", sampled); for (i=1; i<= HOTKEYS_SAMPLE; i++) { k = HOTKEYS_SAMPLE - i; if(counters[k]>0) { printf("hot key found with counter: %llu\tkeyname: %s\n", counters[k], hotkeys[k]); sdsfree(hotkeys[k]); } } exit(0); } /*------------------------------------------------------------------------------ * Stats mode *--------------------------------------------------------------------------- */ /* Return the specified INFO field from the INFO command output "info". * A new buffer is allocated for the result, that needs to be free'd. * If the field is not found NULL is returned. */ static char *getInfoField(char *info, char *field) { char *p = strstr(info,field); char *n1, *n2; char *result; if (!p) return NULL; p += strlen(field)+1; n1 = strchr(p,'\r'); n2 = strchr(p,','); if (n2 && n2 < n1) n1 = n2; result = zmalloc(sizeof(char)*(n1-p)+1); memcpy(result,p,(n1-p)); result[n1-p] = '\0'; return result; } /* Like the above function but automatically convert the result into * a long. On error (missing field) LONG_MIN is returned. */ static long getLongInfoField(char *info, char *field) { char *value = getInfoField(info,field); long l; if (!value) return LONG_MIN; l = strtol(value,NULL,10); zfree(value); return l; } /* Convert number of bytes into a human readable string of the form: * 100B, 2G, 100M, 4K, and so forth. 
*/ void bytesToHuman(char *s, long long n) { double d; if (n < 0) { *s = '-'; s++; n = -n; } if (n < 1024) { /* Bytes */ sprintf(s,"%lldB",n); return; } else if (n < (1024*1024)) { d = (double)n/(1024); sprintf(s,"%.2fK",d); } else if (n < (1024LL*1024*1024)) { d = (double)n/(1024*1024); sprintf(s,"%.2fM",d); } else if (n < (1024LL*1024*1024*1024)) { d = (double)n/(1024LL*1024*1024); sprintf(s,"%.2fG",d); } } static void statMode(void) { redisReply *reply; long aux, requests = 0; int i = 0; while(1) { char buf[64]; int j; reply = reconnectingRedisCommand(context,"INFO"); if (reply->type == REDIS_REPLY_ERROR) { printf("ERROR: %s\n", reply->str); exit(1); } if ((i++ % 20) == 0) { printf( "------- data ------ --------------------- load -------------------- - child -\n" "keys mem clients blocked requests connections \n"); } /* Keys */ aux = 0; for (j = 0; j < 20; j++) { long k; sprintf(buf,"db%d:keys",j); k = getLongInfoField(reply->str,buf); if (k == LONG_MIN) continue; aux += k; } sprintf(buf,"%ld",aux); printf("%-11s",buf); /* Used memory */ aux = getLongInfoField(reply->str,"used_memory"); bytesToHuman(buf,aux); printf("%-8s",buf); /* Clients */ aux = getLongInfoField(reply->str,"connected_clients"); sprintf(buf,"%ld",aux); printf(" %-8s",buf); /* Blocked (BLPOPPING) Clients */ aux = getLongInfoField(reply->str,"blocked_clients"); sprintf(buf,"%ld",aux); printf("%-8s",buf); /* Requests */ aux = getLongInfoField(reply->str,"total_commands_processed"); sprintf(buf,"%ld (+%ld)",aux,requests == 0 ? 0 : aux-requests); printf("%-19s",buf); requests = aux; /* Connections */ aux = getLongInfoField(reply->str,"total_connections_received"); sprintf(buf,"%ld",aux); printf(" %-12s",buf); /* Children */ aux = getLongInfoField(reply->str,"bgsave_in_progress"); aux |= getLongInfoField(reply->str,"aof_rewrite_in_progress") << 1; aux |= getLongInfoField(reply->str,"loading") << 2; switch(aux) { case 0: break; case 1: printf("SAVE"); break; case 2: printf("AOF"); break; case 3: printf("SAVE+AOF"); break; case 4: printf("LOAD"); break; } printf("\n"); freeReplyObject(reply); usleep(config.interval); } } /*------------------------------------------------------------------------------ * Scan mode *--------------------------------------------------------------------------- */ static void scanMode(void) { redisReply *reply; unsigned long long cur = 0; do { reply = sendScan(&cur); for (unsigned int j = 0; j < reply->element[1]->elements; j++) { if (config.output == OUTPUT_STANDARD) { sds out = sdscatrepr(sdsempty(), reply->element[1]->element[j]->str, reply->element[1]->element[j]->len); printf("%s\n", out); sdsfree(out); } else { printf("%s\n", reply->element[1]->element[j]->str); } } freeReplyObject(reply); } while(cur != 0); exit(0); } /*------------------------------------------------------------------------------ * LRU test mode *--------------------------------------------------------------------------- */ /* Return an integer from min to max (both inclusive) using a power-law * distribution, depending on the value of alpha: the greater the alpha * the more bias towards lower values. * * With alpha = 6.2 the output follows the 80-20 rule where 20% of * the returned numbers will account for 80% of the frequency. 
*/ long long powerLawRand(long long min, long long max, double alpha) { double pl, r; max += 1; r = ((double)rand()) / RAND_MAX; pl = pow( ((pow(max,alpha+1) - pow(min,alpha+1))*r + pow(min,alpha+1)), (1.0/(alpha+1))); return (max-1-(long long)pl)+min; } /* Generates a key name among a set of lru_test_sample_size keys, using * an 80-20 distribution. */ void LRUTestGenKey(char *buf, size_t buflen) { snprintf(buf, buflen, "lru:%lld", powerLawRand(1, config.lru_test_sample_size, 6.2)); } #define LRU_CYCLE_PERIOD 1000 /* 1000 milliseconds. */ #define LRU_CYCLE_PIPELINE_SIZE 250 static void LRUTestMode(void) { redisReply *reply; char key[128]; long long start_cycle; int j; srand(time(NULL)^getpid()); while(1) { /* Perform cycles of 1 second with 50% writes and 50% reads. * We use pipelining batching writes / reads N times per cycle in order * to fill the target instance easily. */ start_cycle = mstime(); long long hits = 0, misses = 0; while(mstime() - start_cycle < LRU_CYCLE_PERIOD) { /* Write cycle. */ for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) { char val[6]; val[5] = '\0'; for (int i = 0; i < 5; i++) val[i] = 'A'+rand()%('z'-'A'); LRUTestGenKey(key,sizeof(key)); redisAppendCommand(context, "SET %s %s",key,val); } for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) redisGetReply(context, (void**)&reply); /* Read cycle. */ for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) { LRUTestGenKey(key,sizeof(key)); redisAppendCommand(context, "GET %s",key); } for (j = 0; j < LRU_CYCLE_PIPELINE_SIZE; j++) { if (redisGetReply(context, (void**)&reply) == REDIS_OK) { switch(reply->type) { case REDIS_REPLY_ERROR: printf("%s\n", reply->str); break; case REDIS_REPLY_NIL: misses++; break; default: hits++; break; } } } if (context->err) { fprintf(stderr,"I/O error during LRU test\n"); exit(1); } } /* Print stats. */ printf( "%lld Gets/sec | Hits: %lld (%.2f%%) | Misses: %lld (%.2f%%)\n", hits+misses, hits, (double)hits/(hits+misses)*100, misses, (double)misses/(hits+misses)*100); } exit(0); } /*------------------------------------------------------------------------------ * Intrinsic latency mode. * * Measure max latency of a running process that does not result from * syscalls. Basically this software should provide a hint about how much * time the kernel leaves the process without a chance to run. *--------------------------------------------------------------------------- */ /* This is just some computation the compiler can't optimize out. * Should run in less than 100-200 microseconds even using very * slow hardware. Runs in less than 10 microseconds in modern HW. 
*/ unsigned long compute_something_fast(void) { unsigned char s[256], i, j, t; int count = 1000, k; unsigned long output = 0; for (k = 0; k < 256; k++) s[k] = k; i = 0; j = 0; while(count--) { i++; j = j + s[i]; t = s[i]; s[i] = s[j]; s[j] = t; output += s[(s[i]+s[j])&255]; } return output; } static void intrinsicLatencyModeStop(int s) { UNUSED(s); force_cancel_loop = 1; } static void sigIntHandler(int s) { UNUSED(s); if (config.monitor_mode || config.pubsub_mode) { close(context->fd); context->fd = REDIS_INVALID_FD; config.blocking_state_aborted = 1; } else { exit(1); } } static void intrinsicLatencyMode(void) { long long test_end, run_time, max_latency = 0, runs = 0; run_time = (long long)config.intrinsic_latency_duration * 1000000; test_end = ustime() + run_time; signal(SIGINT, intrinsicLatencyModeStop); while(1) { long long start, end, latency; start = ustime(); compute_something_fast(); end = ustime(); latency = end-start; runs++; if (latency <= 0) continue; /* Reporting */ if (latency > max_latency) { max_latency = latency; printf("Max latency so far: %lld microseconds.\n", max_latency); } double avg_us = (double)run_time/runs; double avg_ns = avg_us * 1e3; if (force_cancel_loop || end > test_end) { printf("\n%lld total runs " "(avg latency: " "%.4f microseconds / %.2f nanoseconds per run).\n", runs, avg_us, avg_ns); printf("Worst run took %.0fx longer than the average latency.\n", max_latency / avg_us); exit(0); } } } static sds askPassword(const char *msg) { linenoiseMaskModeEnable(); sds auth = linenoise(msg); linenoiseMaskModeDisable(); return auth; } /*------------------------------------------------------------------------------ * Program main() *--------------------------------------------------------------------------- */ int main(int argc, char **argv) { int firstarg; struct timeval tv; memset(&config.sslconfig, 0, sizeof(config.sslconfig)); config.hostip = sdsnew("127.0.0.1"); config.hostport = 6379; config.hostsocket = NULL; config.repeat = 1; config.interval = 0; config.dbnum = 0; config.input_dbnum = 0; config.interactive = 0; config.shutdown = 0; config.monitor_mode = 0; config.pubsub_mode = 0; config.blocking_state_aborted = 0; config.latency_mode = 0; config.latency_dist_mode = 0; config.latency_history = 0; config.lru_test_mode = 0; config.lru_test_sample_size = 0; config.cluster_mode = 0; config.cluster_send_asking = 0; config.slave_mode = 0; config.getrdb_mode = 0; config.stat_mode = 0; config.scan_mode = 0; config.intrinsic_latency_mode = 0; config.pattern = NULL; config.rdb_filename = NULL; config.pipe_mode = 0; config.pipe_timeout = REDIS_CLI_DEFAULT_PIPE_TIMEOUT; config.bigkeys = 0; config.hotkeys = 0; config.stdinarg = 0; config.auth = NULL; config.askpass = 0; config.user = NULL; config.eval = NULL; config.eval_ldb = 0; config.eval_ldb_end = 0; config.eval_ldb_sync = 0; config.enable_ldb_on_eval = 0; config.last_cmd_type = -1; config.verbose = 0; config.set_errcode = 0; config.no_auth_warning = 0; config.in_multi = 0; config.cluster_manager_command.name = NULL; config.cluster_manager_command.argc = 0; config.cluster_manager_command.argv = NULL; config.cluster_manager_command.flags = 0; config.cluster_manager_command.replicas = 0; config.cluster_manager_command.from = NULL; config.cluster_manager_command.to = NULL; config.cluster_manager_command.from_user = NULL; config.cluster_manager_command.from_pass = NULL; config.cluster_manager_command.from_askpass = 0; config.cluster_manager_command.weight = NULL; config.cluster_manager_command.weight_argc = 0; 
config.cluster_manager_command.slots = 0; config.cluster_manager_command.timeout = CLUSTER_MANAGER_MIGRATE_TIMEOUT; config.cluster_manager_command.pipeline = CLUSTER_MANAGER_MIGRATE_PIPELINE; config.cluster_manager_command.threshold = CLUSTER_MANAGER_REBALANCE_THRESHOLD; config.cluster_manager_command.backup_dir = NULL; pref.hints = 1; spectrum_palette = spectrum_palette_color; spectrum_palette_size = spectrum_palette_color_size; if (!isatty(fileno(stdout)) && (getenv("FAKETTY") == NULL)) { config.output = OUTPUT_RAW; config.push_output = 0; } else { config.output = OUTPUT_STANDARD; config.push_output = 1; } config.mb_delim = sdsnew("\n"); config.cmd_delim = sdsnew("\n"); firstarg = parseOptions(argc,argv); argc -= firstarg; argv += firstarg; parseEnv(); if (config.askpass) { config.auth = askPassword("Please input password: "); } if (config.cluster_manager_command.from_askpass) { config.cluster_manager_command.from_pass = askPassword( "Please input import source node password: "); } #ifdef USE_OPENSSL if (config.tls) { cliSecureInit(); } #endif gettimeofday(&tv, NULL); init_genrand64(((long long) tv.tv_sec * 1000000 + tv.tv_usec) ^ getpid()); /* Cluster Manager mode */ if (CLUSTER_MANAGER_MODE()) { clusterManagerCommandProc *proc = validateClusterManagerCommand(); if (!proc) { exit(1); } clusterManagerMode(proc); } /* Latency mode */ if (config.latency_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); latencyMode(); } /* Latency distribution mode */ if (config.latency_dist_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); latencyDistMode(); } /* Slave mode */ if (config.slave_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); sendCapa(); slaveMode(); } /* Get RDB mode. */ if (config.getrdb_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); sendCapa(); sendRdbOnly(); getRDB(NULL); } /* Pipe mode */ if (config.pipe_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); pipeMode(); } /* Find big keys */ if (config.bigkeys) { if (cliConnect(0) == REDIS_ERR) exit(1); findBigKeys(0, 0); } /* Find large keys */ if (config.memkeys) { if (cliConnect(0) == REDIS_ERR) exit(1); findBigKeys(1, config.memkeys_samples); } /* Find hot keys */ if (config.hotkeys) { if (cliConnect(0) == REDIS_ERR) exit(1); findHotKeys(); } /* Stat mode */ if (config.stat_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); if (config.interval == 0) config.interval = 1000000; statMode(); } /* Scan mode */ if (config.scan_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); scanMode(); } /* LRU test mode */ if (config.lru_test_mode) { if (cliConnect(0) == REDIS_ERR) exit(1); LRUTestMode(); } /* Intrinsic latency mode */ if (config.intrinsic_latency_mode) intrinsicLatencyMode(); /* Start interactive mode when no command is provided */ if (argc == 0 && !config.eval) { /* Ignore SIGPIPE in interactive mode to force a reconnect */ signal(SIGPIPE, SIG_IGN); signal(SIGINT, sigIntHandler); /* Note that in repl mode we don't abort on connection error. * A new attempt will be performed for every command send. */ cliConnect(0); repl(); } /* Otherwise, we have some arguments to execute */ if (cliConnect(0) != REDIS_OK) exit(1); if (config.eval) { return evalMode(argc,argv); } else { return noninteractive(argc,argv); } }
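The LRU test above relies on powerLawRand() with alpha 6.2 to produce the "80-20" key distribution mentioned in its comment. The following standalone sketch is an illustration only, not part of redis-cli: it copies the same formula and checks how strongly accesses concentrate on the lowest-numbered keys. The 100000-key space and one million draws are arbitrary demo values.

/* Standalone demo: sample the power-law key generator used by the LRU test
 * and report what fraction of draws lands in the hottest 20% of keys. */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Copied from the file above. */
static long long powerLawRand(long long min, long long max, double alpha) {
    double pl, r;

    max += 1;
    r = ((double)rand()) / RAND_MAX;
    pl = pow(((pow(max,alpha+1) - pow(min,alpha+1))*r + pow(min,alpha+1)),
             (1.0/(alpha+1)));
    return (max-1-(long long)pl)+min;
}

int main(void) {
    const long long keyspace = 100000;  /* stand-in for config.lru_test_sample_size */
    const long long draws = 1000000;
    long long in_top20 = 0;

    srand((unsigned)time(NULL));
    for (long long i = 0; i < draws; i++) {
        long long k = powerLawRand(1, keyspace, 6.2);
        if (k <= keyspace / 5) in_top20++;  /* keys 1..keyspace/5 are the hot set */
    }
    printf("%.1f%% of accesses hit the hottest 20%% of keys\n",
           (double)in_top20 / draws * 100);
    return 0;
}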
794120.c
/* * Copyright (c) 2015-2019 Intel Corporation. * All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * */ #include "gcpu.h" #include "guest.h" #include "vmx_cap.h" #include "vmm_base.h" #include "vmm_asm.h" #include "event.h" #include "heap.h" #include "gpm.h" #include "apic_regs.h" #include "modules/msr_monitor.h" #include "modules/vmcall.h" #include "modules/virtual_apic.h" #include "modules/instr_decode.h" #include "lib/util.h" #ifdef MODULE_INTERRUPT_IPI #error "MODULE_INTERRUPT_IPI has conflict with MODULE_VIRTUAL_APIC" #endif #ifndef MODULE_EXT_INTR #error "MODULE_EXT_INTR should be used with MODULE_VIRTUAL_APIC" #endif #define MSR_X2APIC_BASE 0x800 #define LAPIC_ENABLED (1ULL << 11) #define LAPIC_X2_ENABLED (1ULL << 10) /* access type for VMExit of virtual apic page access */ #define LN_ACCESS_DATA_READ 0x0 #define LN_ACCESS_DATA_WRITE 0x1 #define LN_ACCESS_INSTR_FETCH 0x2 #define LN_ACCESS_EVENT_DELIVERY 0x3 #define GP_ACCESS_EVENT_DELIVERY 0xa #define GP_ACCESS_INSTR_FETCH 0xf #define MODULE_POST_INTERRUPT /* 0xf2 is the vector saved for KVM to deliver posted interrupt IPI. * which is also used in eVMM. */ #define POST_NOTIFY_VECTOR 0xf2 typedef union { struct { uint16_t rvi:8; // low byte: the vector with the highest priority that is requesting service. uint16_t svi:8; // high byte: the vector with the highest priority that is in service. }bits; uint16_t uint16; } g_intr_status_t; static uint8_t get_highest_pending_intr(irr_t *p_irr) { int seg, off; //if unsgined, seg>=0 is always true, can't cover intr 0x00~0x20. VMM_ASSERT(p_irr != NULL); for (seg=(APIC_IRR_NR-1); seg>=0; seg--) { if (p_irr->intr[seg] != 0) { off = (int)asm_bsr32(p_irr->intr[seg]); return (uint8_t)(seg*32 + off); } } return 0; } void vapic_set_pending_intr(guest_cpu_handle_t gcpu, uint8_t vector) { g_intr_status_t g_intr_status; uint64_t vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); uint32_t seg, off; /* set vIRR */ seg = (vector/32) << 4; off = (vector%32); asm_bts64((uint64_t*)(vapic_page + APIC_IRR + seg), off); /* update RVI conditionally */ g_intr_status.uint16 = (uint16_t)vmcs_read(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS); if (g_intr_status.bits.rvi < vector) { g_intr_status.bits.rvi = vector; vmcs_write(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS, (uint64_t)g_intr_status.uint16); } } uint8_t vapic_get_pending_intr(guest_cpu_handle_t gcpu) { irr_t virr; vapic_get_virr(gcpu, &virr); return get_highest_pending_intr(&virr); } void vapic_clear_pending_intr(guest_cpu_handle_t gcpu, uint8_t vector) { g_intr_status_t g_intr_status; uint64_t vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); uint32_t seg, off; /* clear vIRR */ seg = (vector/32) << 4; off = (vector%32); asm_btr64((uint64_t*)(vapic_page + APIC_IRR + seg), off); /* update RVI conditionally */ g_intr_status.uint16 = (uint16_t)vmcs_read(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS); if (g_intr_status.bits.rvi <= vector) { g_intr_status.bits.rvi = vapic_get_pending_intr(gcpu); vmcs_write(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS, (uint64_t)(g_intr_status.uint16)); } } void vapic_get_virr(guest_cpu_handle_t gcpu, irr_t * p_virr) { uint64_t vapic_page; uint64_t addr; uint8_t i; VMM_ASSERT(p_virr != NULL); vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); addr = (uint64_t)(vapic_page + APIC_IRR); for (i=0; i<APIC_IRR_NR; i++) { p_virr->intr[i] = *(uint32_t *)(addr); addr += 0x0010; } } void vapic_merge_virr(guest_cpu_handle_t gcpu, irr_t *p_virr) { g_intr_status_t g_intr_status; uint64_t vapic_page; uint64_t addr; uint8_t 
i; uint8_t vector; VMM_ASSERT(p_virr != NULL); /* merge vIRR unconditionally */ vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); addr = (uint64_t)(vapic_page + APIC_IRR); for (i=0; i<APIC_IRR_NR; i++) { *(uint32_t *)(addr) |= p_virr->intr[i]; addr += 0x0010; } /* merge RVI conditionally */ vector = get_highest_pending_intr(p_virr); g_intr_status.uint16 = (uint16_t)vmcs_read(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS); if (g_intr_status.bits.rvi < vector) { g_intr_status.bits.rvi = vector; vmcs_write(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS, (uint64_t)(g_intr_status.uint16)); } } void vapic_clear_virr(guest_cpu_handle_t gcpu) { uint64_t vapic_page; uint16_t int_status; /* clear RVI */ int_status = (uint16_t)vmcs_read(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS); vmcs_write(gcpu->vmcs, VMCS_GUEST_INTERRUPT_STATUS, (uint64_t)(int_status & 0xFF00)); /* clear vIRR */ vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); memset((uint8_t *)(vapic_page + APIC_IRR), 0, APIC_IRR_NR * 0x10); } static inline void vapic_set_reg(uint64_t apic_page, uint32_t offset, uint32_t val) { *((volatile uint32_t *)(apic_page + offset)) = val; } static inline void lapic_set_reg(uint32_t offset, uint32_t val) { uint64_t apic_base; apic_base = asm_rdmsr(MSR_APIC_BASE); if (!(apic_base & LAPIC_ENABLED)) return; if (apic_base & LAPIC_X2_ENABLED) { /* x2APIC */ asm_wrmsr(MSR_X2APIC_BASE + (offset >> 4), val); }else { /* xAPIC */ *((volatile uint32_t *)(uint64_t)((apic_base & (~PAGE_4K_MASK)) + offset)) = val; } } static void setup_virtual_apic_page(guest_cpu_handle_t gcpu) { uint64_t vapic_page_hpa; uint64_t *vapic_page = page_alloc(1); VMM_ASSERT_EX(hmm_hva_to_hpa((uint64_t)vapic_page, &vapic_page_hpa, NULL), "fail to convert hva %p to hpa\n", vapic_page); vmcs_write(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR, vapic_page_hpa); memset(vapic_page, 0, PAGE_4K_SIZE); memcpy(vapic_page, (void *)0xFEE00000, PAGE_4K_SIZE); asm_wbinvd(); } #ifdef MODULE_POST_INTERRUPT static void setup_post_interrupt(guest_cpu_handle_t gcpu) { uint64_t post_interrupt_desc_hpa; uint64_t *post_interrupt_desc = page_alloc(1); VMM_ASSERT_EX(hmm_hva_to_hpa((uint64_t)post_interrupt_desc, &post_interrupt_desc_hpa, NULL), "fail to convert hva %p to hpa\n", post_interrupt_desc); memset(post_interrupt_desc, 0, PAGE_4K_SIZE); vmcs_write(gcpu->vmcs, VMCS_POST_INTR_DESC_ADDR, post_interrupt_desc_hpa); vmcs_write(gcpu->vmcs, VMCS_POST_INTR_NOTI_VECTOR, POST_NOTIFY_VECTOR); } #endif static void virtual_apic_gcpu_init(guest_cpu_handle_t gcpu, UNUSED void *pv) { uint32_t proc_ctrl, proc2_ctrl; uint32_t pin_ctrl; uint32_t exit_ctrl; uint64_t apic_base; proc_ctrl = vmcs_read(gcpu->vmcs, VMCS_PROC_CTRL1); proc_ctrl |= (PROC_TPR_SHADOW); proc_ctrl |= (PROC_SECONDARY_CTRL); vmcs_write(gcpu->vmcs, VMCS_PROC_CTRL1, proc_ctrl); proc2_ctrl = vmcs_read(gcpu->vmcs, VMCS_PROC_CTRL2); proc2_ctrl |= (PROC2_VAPIC_ACCESSES | PROC2_APIC_REG_VIRTUALIZE | PROC2_VINT_DELIVERY); vmcs_write(gcpu->vmcs, VMCS_PROC_CTRL2, proc2_ctrl); exit_ctrl = EXIT_ACK_INT_EXIT | vmcs_read(gcpu->vmcs, VMCS_EXIT_CTRL); vmcs_write(gcpu->vmcs, VMCS_EXIT_CTRL, exit_ctrl); pin_ctrl = (~PIN_PROC_POSTED_INT) & vmcs_read(gcpu->vmcs, VMCS_PIN_CTRL); vmcs_write(gcpu->vmcs, VMCS_PIN_CTRL, pin_ctrl); #ifdef MODULE_POST_INTERRUPT pin_ctrl = PIN_PROC_POSTED_INT | vmcs_read(gcpu->vmcs, VMCS_PIN_CTRL); vmcs_write(gcpu->vmcs, VMCS_PIN_CTRL, pin_ctrl); setup_post_interrupt(gcpu); print_trace("Guest%d, GCPU%d Post-int notify vector 0x%x, PIR=0x%llx\n", gcpu->guest->id, gcpu->id, vmcs_read(gcpu->vmcs, 
VMCS_POST_INTR_NOTI_VECTOR), vmcs_read(gcpu->vmcs, VMCS_POST_INTR_DESC_ADDR)); #endif setup_virtual_apic_page(gcpu); apic_base = asm_rdmsr(MSR_APIC_BASE); vmcs_write(gcpu->vmcs, VMCS_TPR_THRESHOLD, 0); vmcs_write(gcpu->vmcs, VMCS_APIC_ACCESS_ADDR, apic_base & (~PAGE_4K_MASK)); print_trace("Guest%d, GCPU%d vapic inited vpage=%x abase=%llx\n", gcpu->guest->id, gcpu->id, vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR), vmcs_read(gcpu->vmcs, VMCS_APIC_ACCESS_ADDR)); } static void virtual_apic_write(guest_cpu_handle_t gcpu) { vmx_exit_qualification_t qual; uint32_t offset; uint32_t val; uint32_t val_h = 0; uint64_t vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); qual.uint64 = vmcs_read(gcpu->vmcs, VMCS_EXIT_QUAL); offset = qual.uint64 & 0xfff; val = *((uint32_t *)(vapic_page+offset)); /* a write to ICR_H causes no vmexit, so we need to fill it in manually */ if (offset == APIC_ICR_L) { val_h = *((uint32_t *)(vapic_page + APIC_ICR_H)); lapic_set_reg(APIC_ICR_H, val_h); } lapic_set_reg(offset, val); } static void set_pending_intr_to_gcpu(guest_cpu_handle_t gcpu, void *pv) { uint8_t vector; boolean_t *handled = (boolean_t *)pv; vmcs_write(gcpu->vmcs, VMCS_PROC_CTRL1, vmcs_read(gcpu->vmcs, VMCS_PROC_CTRL1) & ~(PROC_INT_WINDOW_EXIT)); for(vector = gcpu_get_pending_intr(gcpu); vector >= 0x20; vector = gcpu_get_pending_intr(gcpu)) { /* set vIRR and update RVI */ vapic_set_pending_intr(gcpu, vector); /* clear buffered interrupt */ gcpu_clear_pending_intr(gcpu, vector); /* perform EOI to local APIC */ lapic_set_reg(APIC_EOI, APIC_EOI_ACK); } *handled = TRUE; } static void virtual_apic_access(guest_cpu_handle_t gcpu) { vmx_exit_qualification_t qual; uint64_t hva; pf_ec_t pfec; pf_info_t pfinfo; uint8_t instr[17] = {0}; uint64_t val; uint32_t reg_id; uint32_t op_size; uint64_t vapic_page = vmcs_read(gcpu->vmcs, VMCS_VIRTUAL_APIC_ADDR); uint64_t g_rip = vmcs_read(gcpu->vmcs, VMCS_GUEST_RIP); if (FALSE == gcpu_gva_to_hva(gcpu, g_rip, GUEST_CAN_READ, &hva, &pfec)) { print_panic("vapic access: fail to convert gva to hva!\n"); return; } if (FALSE == gcpu_copy_from_gva(gcpu, g_rip, (uint64_t)instr, 17, &pfinfo)) { print_panic("vapic access: fail to copy instruction from gva!\n"); return; } qual.uint64 = vmcs_read(gcpu->vmcs, VMCS_EXIT_QUAL); switch (qual.apic_access.access_type) { case LN_ACCESS_DATA_READ: if (TRUE == decode_mov_from_mem(gcpu, &reg_id, &op_size)) { val = *((volatile uint64_t *)(vapic_page + qual.apic_access.offset)); gcpu_set_gp_reg(gcpu, reg_id, val); gcpu_skip_instruction(gcpu); } break; case LN_ACCESS_DATA_WRITE: if (TRUE == decode_mov_to_mem(gcpu, &val, &op_size)) { vapic_set_reg(vapic_page, qual.apic_access.offset, val); lapic_set_reg(qual.apic_access.offset, val); gcpu_skip_instruction(gcpu); } break; case LN_ACCESS_INSTR_FETCH: case LN_ACCESS_EVENT_DELIVERY: case GP_ACCESS_EVENT_DELIVERY: case GP_ACCESS_INSTR_FETCH: print_info("unsupported qual %llx\n", qual.uint64); D(VMM_ASSERT(0)); break; default: print_info("wrong qual %llx\n", qual.uint64); D(VMM_ASSERT(0)); break; } } static void msr_apic_base_write_handler(guest_cpu_handle_t gcpu, uint32_t msr_id) { uint64_t msr_value = get_val_for_wrmsr(gcpu); vmcs_write(gcpu->vmcs, VMCS_APIC_ACCESS_ADDR, msr_value & (~PAGE_4K_MASK)); asm_wrmsr(msr_id, msr_value); gcpu_skip_instruction(gcpu); } void virtual_apic_init(void) { event_register(EVENT_GCPU_MODULE_INIT, virtual_apic_gcpu_init); monitor_msr_write(0, MSR_APIC_BASE, msr_apic_base_write_handler); monitor_msr_write(1, MSR_APIC_BASE, msr_apic_base_write_handler);
vmexit_install_handler(virtual_apic_write, REASON_56_APIC_WRITE); vmexit_install_handler(virtual_apic_access, REASON_44_APIC_ACCESS); event_register(EVENT_INJECT_INTR, set_pending_intr_to_gcpu); }
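A quick illustration of the vIRR indexing used by vapic_set_pending_intr() and vapic_clear_pending_intr() above: the virtual-APIC page holds eight 32-bit IRR registers spaced 0x10 bytes apart, so seg = (vector/32) << 4 is a byte offset and off = vector%32 is a bit index. The sketch below is standalone and assumes the conventional APIC_IRR offset of 0x200; the real value comes from apic_regs.h, which is not shown here.

/* Illustration only (not part of the hypervisor module): map a vector to its
 * IRR register/bit and mirror get_highest_pending_intr() on a local array. */
#include <stdint.h>
#include <stdio.h>

#define APIC_IRR    0x200   /* assumed; defined in apic_regs.h in the source */
#define APIC_IRR_NR 8

int main(void) {
    uint8_t vector = 0x41;

    uint32_t seg = (vector / 32) << 4;  /* byte offset of the IRR sub-register */
    uint32_t off = vector % 32;         /* bit inside that 32-bit register */
    printf("vector 0x%02x -> vapic_page + 0x%x, bit %u\n",
           (unsigned)vector, APIC_IRR + seg, off);   /* 0x41 -> +0x220, bit 1 */

    /* get_highest_pending_intr() walks the 8 registers from high to low and
     * returns 32*seg + bsr(reg) for the first non-zero one. */
    uint32_t irr[APIC_IRR_NR] = {0};
    irr[vector / 32] |= 1u << off;
    for (int s = APIC_IRR_NR - 1; s >= 0; s--) {
        if (irr[s]) {
            int b = 31 - __builtin_clz(irr[s]);      /* stand-in for asm_bsr32 */
            printf("highest pending vector = 0x%02x\n", (unsigned)(s * 32 + b));
            break;
        }
    }
    return 0;
}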
887682.c
/* * Copyright (c) 2018 Qualcomm Technologies, Inc. * All Rights Reserved. */ // Copyright (c) 2018 Qualcomm Technologies, Inc. // All rights reserved. // Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) // provided that the following conditions are met: // Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. // Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, // BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. // IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, // OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <string.h> #include "qsCommon.h" #include "qsQuartz.h" #include "qapi_zb.h" #include "qapi_zb_cl_common.h" #include "qapi_zb_cl_qz_cb_mnl.h" #include "qapi_zb_cl_qz_cb.h" void QZ_qapi_ZB_CL_Event_CB_t_Handler(qapi_ZB_Handle_t ZB_Handle, qapi_ZB_Cluster_t Cluster, const qapi_ZB_CL_Event_Data_t *Event_Data, uint32_t CB_Param) { PackedBuffer_t qsInputBuffer = { NULL, 0, 0, 0, NULL, NULL }; SerStatus_t qsResult = ssSuccess; uint32_t qsIndex = 0; uint16_t qsSize = 0; UNUSED(qsIndex); /* Calculate size of packed function arguments. */ qsSize = (12 + CalcPackedSize_qapi_ZB_CL_Event_Data_t((qapi_ZB_CL_Event_Data_t *)Event_Data) + (QS_POINTER_HEADER_SIZE * 1)); if(AllocatePackedBuffer(NULL, QS_ASYNC_E, MODULE_ZB, QAPI_ZB_CL_FILE_ID, QAPI_ZB_CL_EVENT_CB_T_CALLBACK_ID, &qsInputBuffer, qsSize)) { /* Write arguments packed. */ if(qsResult == ssSuccess) qsResult = PackedWrite_32(&qsInputBuffer, (uint32_t *)&ZB_Handle); if(qsResult == ssSuccess) qsResult = PackedWrite_32(&qsInputBuffer, (uint32_t *)&Cluster); if(qsResult == ssSuccess) qsResult = PackedWrite_32(&qsInputBuffer, (uint32_t *)&CB_Param); if(qsResult == ssSuccess) qsResult = PackedWrite_PointerHeader(&qsInputBuffer, (void *)Event_Data); if((qsResult == ssSuccess) && (Event_Data != NULL)) { qsResult = PackedWrite_qapi_ZB_CL_Event_Data_t(&qsInputBuffer, (qapi_ZB_CL_Event_Data_t *)Event_Data); } /* Send the event. 
*/ if(qsResult == ssSuccess) qsResult = SendEvent(&qsInputBuffer); } else qsResult = ssAllocationError; #ifdef ENABLE_DEBUG_CALLBACKS if(qsResult != ssSuccess) SendErrorCallback(MODULE_ZB, QAPI_ZB_CL_FILE_ID, QAPI_ZB_CL_EVENT_CB_T_CALLBACK_ID); #endif // ENABLE_DEBUG_CALLBACKS } void QZ_qapi_ZB_CL_Custom_Cluster_Event_CB_t_Handler(qapi_ZB_Handle_t ZB_Handle, qapi_ZB_Cluster_t Cluster, qapi_ZB_CL_Custom_Cluster_Event_Data_t *Event_Data, uint32_t CB_Param) { Mnl_QZ_qapi_ZB_CL_Custom_Cluster_Event_CB_t_Handler(ZB_Handle, Cluster, Event_Data, CB_Param); }
84444.c
/* barChartSample.c was originally generated by the autoSql program, which also * generated barChartSample.h and barChartSample.sql. This module links the database and * the RAM representation of objects. */ #include "common.h" #include "linefile.h" #include "dystring.h" #include "jksql.h" #include "barChartSample.h" char *barChartSampleCommaSepFieldNames = "sample,category"; void barChartSampleStaticLoad(char **row, struct barChartSample *ret) /* Load a row from barChartSample table into ret. The contents of ret will * be replaced at the next call to this function. */ { ret->sample = row[0]; ret->category = row[1]; } struct barChartSample *barChartSampleLoadByQuery(struct sqlConnection *conn, char *query) /* Load all barChartSample from table that satisfy the query given. * Where query is of the form 'select * from example where something=something' * or 'select example.* from example, anotherTable where example.something = * anotherTable.something'. * Dispose of this with barChartSampleFreeList(). */ { struct barChartSample *list = NULL, *el; struct sqlResult *sr; char **row; sr = sqlGetResult(conn, query); while ((row = sqlNextRow(sr)) != NULL) { el = barChartSampleLoad(row); slAddHead(&list, el); } slReverse(&list); sqlFreeResult(&sr); return list; } void barChartSampleSaveToDb(struct sqlConnection *conn, struct barChartSample *el, char *tableName, int updateSize) /* Save barChartSample as a row to the table specified by tableName. * As blob fields may be arbitrary size updateSize specifies the approx size * of a string that would contain the entire query. Arrays of native types are * converted to comma separated strings and loaded as such, User defined types are * inserted as NULL. This function automatically escapes quoted strings for mysql. */ { struct dyString *update = newDyString(updateSize); sqlDyStringPrintf(update, "insert into %s values ( '%s','%s')", tableName, el->sample, el->category); sqlUpdate(conn, update->string); freeDyString(&update); } struct barChartSample *barChartSampleLoad(char **row) /* Load a barChartSample from row fetched with select * from barChartSample * from database. Dispose of this with barChartSampleFree(). */ { struct barChartSample *ret; AllocVar(ret); ret->sample = cloneString(row[0]); ret->category = cloneString(row[1]); return ret; } struct barChartSample *barChartSampleLoadAll(char *fileName) /* Load all barChartSample from a whitespace-separated file. * Dispose of this with barChartSampleFreeList(). */ { struct barChartSample *list = NULL, *el; struct lineFile *lf = lineFileOpen(fileName, TRUE); char *row[2]; while (lineFileRow(lf, row)) { el = barChartSampleLoad(row); slAddHead(&list, el); } lineFileClose(&lf); slReverse(&list); return list; } struct barChartSample *barChartSampleLoadAllByChar(char *fileName, char chopper) /* Load all barChartSample from a chopper separated file. * Dispose of this with barChartSampleFreeList(). */ { struct barChartSample *list = NULL, *el; struct lineFile *lf = lineFileOpen(fileName, TRUE); char *row[2]; while (lineFileNextCharRow(lf, chopper, row, ArraySize(row))) { el = barChartSampleLoad(row); slAddHead(&list, el); } lineFileClose(&lf); slReverse(&list); return list; } struct barChartSample *barChartSampleCommaIn(char **pS, struct barChartSample *ret) /* Create a barChartSample out of a comma separated string. 
* This will fill in ret if non-null, otherwise will * return a new barChartSample */ { char *s = *pS; if (ret == NULL) AllocVar(ret); ret->sample = sqlStringComma(&s); ret->category = sqlStringComma(&s); *pS = s; return ret; } void barChartSampleFree(struct barChartSample **pEl) /* Free a single dynamically allocated barChartSample such as created * with barChartSampleLoad(). */ { struct barChartSample *el; if ((el = *pEl) == NULL) return; freeMem(el->sample); freeMem(el->category); freez(pEl); } void barChartSampleFreeList(struct barChartSample **pList) /* Free a list of dynamically allocated barChartSample's */ { struct barChartSample *el, *next; for (el = *pList; el != NULL; el = next) { next = el->next; barChartSampleFree(&el); } *pList = NULL; } void barChartSampleOutput(struct barChartSample *el, FILE *f, char sep, char lastSep) /* Print out barChartSample. Separate fields with sep. Follow last field with lastSep. */ { if (sep == ',') fputc('"',f); fprintf(f, "%s", el->sample); if (sep == ',') fputc('"',f); fputc(sep,f); if (sep == ',') fputc('"',f); fprintf(f, "%s", el->category); if (sep == ',') fputc('"',f); fputc(lastSep,f); } /* -------------------------------- End autoSql Generated Code -------------------------------- */
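A minimal usage sketch for the generated accessors above, assuming the kent/src headers that declare them; the row values are made up. It loads one row into RAM and prints it with the two separator conventions barChartSampleOutput() supports (tab-separated, and quoted comma-separated).

#include <stdio.h>
#include "common.h"
#include "barChartSample.h"

int main(void) {
    /* Hypothetical sample/category values for illustration. */
    char *row[] = {"GTEX-1117F-0226-SM-5GZZ7", "muscle"};
    struct barChartSample *el = barChartSampleLoad(row);

    barChartSampleOutput(el, stdout, '\t', '\n');   /* sample<TAB>category */
    barChartSampleOutput(el, stdout, ',', '\n');    /* "sample","category" */
    barChartSampleFree(&el);
    return 0;
}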
118542.c
inherit "obj/monster"; reset(arg) { object robe; object sword; object sword2; ::reset(arg); if(arg) { return; } set_gender(1); set_level(random(5)+50); set_str(random(10)+350); set_max_hp(query_hp() + 8000); set_name("knight"); set_alias("human"); set_race("human"); set_short("Valiant looking knight"); set_long("A valiant looking knight. He is wearing a black robe and\n"+ "some underwear under the robe. He has blue eyes and a very\n"+ "strong looking body.\n"); //tuned by Moonstar 17.5.2004 set_al(10); set_aggressive(0); set_skills("blades", 100); set_skills("slash", 80); set_skills("two weapons combat", 100); set_skills("critical",100); set_skills("doublehit", 100); set_skills("find weakness", 80); set_skills("stun",100); set_skills("double strike",100); set_skills("disarm",70); set_skill_chance("disarm", 70); set_skills("reflect spell",15); set_skills("strike",100); set_skill_chance("strike", 50); robe = clone_object("/wizards/jenny/linnake/rojut/knight_robe.c"); move_object(robe, this_object()); init_command("wear robe"); sword = clone_object("/wizards/jenny/linnake/rojut/knight_sword.c"); move_object(sword, this_object()); init_command("wield sword"); sword2 = clone_object("/wizards/jenny/linnake/rojut/knight_sword.c"); move_object(sword2, this_object()); init_command("wield sword"); }
617249.c
/* * This file was ported to MPlayer from xine CVS asmrp.c,v 1.2 2002/12/17 16:49:48 */ /* * Copyright (C) 2002 the xine project * * This file is part of xine, a free video player. * * xine is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * xine is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA * * * a parser for real's asm rules * * grammar for these rules: * rule_book = { rule } rule = ( '#' condition { ',' assignment } | [ assignment {',' assignment} ]) ';' assignment = id '=' const const = ( number | string ) condition = comp_expr { ( '&&' | '||' ) comp_expr } comp_expr = operand { ( '<' | '<=' | '==' | '>=' | '>' ) operand } operand = ( '$' id | num | '(' condition ')' ) */ #include "config.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include "mp_msg.h" #include "asmrp.h" /* #define LOG */ #define ASMRP_SYM_NONE 0 #define ASMRP_SYM_EOF 1 #define ASMRP_SYM_NUM 2 #define ASMRP_SYM_ID 3 #define ASMRP_SYM_STRING 4 #define ASMRP_SYM_HASH 10 #define ASMRP_SYM_SEMICOLON 11 #define ASMRP_SYM_COMMA 12 #define ASMRP_SYM_EQUALS 13 #define ASMRP_SYM_AND 14 #define ASMRP_SYM_OR 15 #define ASMRP_SYM_LESS 16 #define ASMRP_SYM_LEQ 17 #define ASMRP_SYM_GEQ 18 #define ASMRP_SYM_GREATER 19 #define ASMRP_SYM_DOLLAR 20 #define ASMRP_SYM_LPAREN 21 #define ASMRP_SYM_RPAREN 22 #define ASMRP_MAX_ID 1024 #define ASMRP_MAX_SYMTAB 10 typedef struct { char *id; int v; } asmrp_sym_t; typedef struct { /* public part */ int sym; int num; char str[ASMRP_MAX_ID]; /* private part */ char *buf; int pos; char ch; asmrp_sym_t sym_tab[ASMRP_MAX_SYMTAB]; int sym_tab_num; } asmrp_t; static asmrp_t *asmrp_new (void) { asmrp_t *p; p = malloc (sizeof (asmrp_t)); p->sym_tab_num = 0; p->sym = ASMRP_SYM_NONE; return p; } static void asmrp_dispose (asmrp_t *p) { int i; for (i=0; i<p->sym_tab_num; i++) free (p->sym_tab[i].id); free (p); } static void asmrp_getch (asmrp_t *p) { p->ch = p->buf[p->pos]; p->pos++; #ifdef LOG printf ("%c\n", p->ch); #endif } static void asmrp_init (asmrp_t *p, const char *str) { p->buf = strdup (str); p->pos = 0; asmrp_getch (p); } static void asmrp_number (asmrp_t *p) { int num; num = 0; while ( (p->ch>='0') && (p->ch<='9') ) { num = num*10 + (p->ch - '0'); asmrp_getch (p); } p->sym = ASMRP_SYM_NUM; p->num = num; } static void asmrp_string (asmrp_t *p) { int l; l = 0; while ( (p->ch!='"') && (p->ch>=32) ) { if(l < ASMRP_MAX_ID - 1) p->str[l++] = p->ch; else mp_msg(MSGT_STREAM, MSGL_ERR, "error: string too long, ignoring char %c.\n", p->ch); asmrp_getch (p); } p->str[l]=0; if (p->ch=='"') asmrp_getch (p); p->sym = ASMRP_SYM_STRING; } static void asmrp_identifier (asmrp_t *p) { int l; l = 0; while ( ((p->ch>='A') && (p->ch<='z')) || ((p->ch>='0') && (p->ch<='9'))) { if(l < ASMRP_MAX_ID - 1) p->str[l++] = p->ch; else mp_msg(MSGT_STREAM, MSGL_ERR, "error: identifier too long, ignoring char %c.\n", p->ch); asmrp_getch (p); } p->str[l]=0; p->sym = ASMRP_SYM_ID; } #ifdef LOG static void asmrp_print_sym (asmrp_t *p) { printf ("symbol: 
"); switch (p->sym) { case ASMRP_SYM_NONE: printf ("NONE\n"); break; case ASMRP_SYM_EOF: printf ("EOF\n"); break; case ASMRP_SYM_NUM: printf ("NUM %d\n", p->num); break; case ASMRP_SYM_ID: printf ("ID '%s'\n", p->str); break; case ASMRP_SYM_STRING: printf ("STRING \"%s\"\n", p->str); break; case ASMRP_SYM_HASH: printf ("#\n"); break; case ASMRP_SYM_SEMICOLON: printf (";\n"); break; case ASMRP_SYM_COMMA: printf (",\n"); break; case ASMRP_SYM_EQUALS: printf ("==\n"); break; case ASMRP_SYM_AND: printf ("&&\n"); break; case ASMRP_SYM_OR: printf ("||\n"); break; case ASMRP_SYM_LESS: printf ("<\n"); break; case ASMRP_SYM_LEQ: printf ("<=\n"); break; case ASMRP_SYM_GEQ: printf (">=\n"); break; case ASMRP_SYM_GREATER: printf (">\n"); break; case ASMRP_SYM_DOLLAR: printf ("$\n"); break; case ASMRP_SYM_LPAREN: printf ("(\n"); break; case ASMRP_SYM_RPAREN: printf (")\n"); break; default: printf ("unknown symbol %d\n", p->sym); } } #endif static void asmrp_get_sym (asmrp_t *p) { while (p->ch <= 32) { if (p->ch == 0) { p->sym = ASMRP_SYM_EOF; return; } asmrp_getch (p); } if (p->ch == '\\') asmrp_getch (p); switch (p->ch) { case '#': p->sym = ASMRP_SYM_HASH; asmrp_getch (p); break; case ';': p->sym = ASMRP_SYM_SEMICOLON; asmrp_getch (p); break; case ',': p->sym = ASMRP_SYM_COMMA; asmrp_getch (p); break; case '=': p->sym = ASMRP_SYM_EQUALS; asmrp_getch (p); if (p->ch=='=') asmrp_getch (p); break; case '&': p->sym = ASMRP_SYM_AND; asmrp_getch (p); if (p->ch=='&') asmrp_getch (p); break; case '|': p->sym = ASMRP_SYM_OR; asmrp_getch (p); if (p->ch=='|') asmrp_getch (p); break; case '<': p->sym = ASMRP_SYM_LESS; asmrp_getch (p); if (p->ch=='=') { p->sym = ASMRP_SYM_LEQ; asmrp_getch (p); } break; case '>': p->sym = ASMRP_SYM_GREATER; asmrp_getch (p); if (p->ch=='=') { p->sym = ASMRP_SYM_GEQ; asmrp_getch (p); } break; case '$': p->sym = ASMRP_SYM_DOLLAR; asmrp_getch (p); break; case '(': p->sym = ASMRP_SYM_LPAREN; asmrp_getch (p); break; case ')': p->sym = ASMRP_SYM_RPAREN; asmrp_getch (p); break; case '"': asmrp_getch (p); asmrp_string (p); break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': asmrp_number (p); break; default: asmrp_identifier (p); } #ifdef LOG asmrp_print_sym (p); #endif } static int asmrp_find_id (asmrp_t *p, char *s) { int i; for (i=0; i<p->sym_tab_num; i++) { if (!strcmp (s, p->sym_tab[i].id)) return i; } return -1; } static int asmrp_set_id (asmrp_t *p, char *s, int v) { int i; i = asmrp_find_id (p, s); if (i<0) { if (p->sym_tab_num == ASMRP_MAX_SYMTAB - 1) { mp_msg(MSGT_STREAM, MSGL_ERR, "sym_tab overflow, ignoring identifier %s\n", s); return 0; } i = p->sym_tab_num; p->sym_tab_num++; p->sym_tab[i].id = strdup (s); #ifdef LOG printf ("new symbol '%s'\n", s); #endif } p->sym_tab[i].v = v; #ifdef LOG printf ("symbol '%s' assigned %d\n", s, v); #endif return i; } static int asmrp_condition (asmrp_t *p) ; static int asmrp_operand (asmrp_t *p) { int i, ret; #ifdef LOG printf ("operand\n"); #endif ret = 0; switch (p->sym) { case ASMRP_SYM_DOLLAR: asmrp_get_sym (p); if (p->sym != ASMRP_SYM_ID) { mp_msg(MSGT_STREAM, MSGL_ERR, "error: identifier expected.\n"); break; } i = asmrp_find_id (p, p->str); if (i<0) { mp_msg(MSGT_STREAM, MSGL_ERR, "error: unknown identifier %s\n", p->str); } else ret = p->sym_tab[i].v; asmrp_get_sym (p); break; case ASMRP_SYM_NUM: ret = p->num; asmrp_get_sym (p); break; case ASMRP_SYM_LPAREN: asmrp_get_sym (p); ret = asmrp_condition (p); if (p->sym != ASMRP_SYM_RPAREN) { mp_msg(MSGT_STREAM, MSGL_ERR, "error: ) 
expected.\n"); break; } asmrp_get_sym (p); break; default: mp_msg(MSGT_STREAM, MSGL_ERR, "syntax error, $ number or ( expected\n"); } #ifdef LOG printf ("operand done, =%d\n", ret); #endif return ret; } static int asmrp_comp_expression (asmrp_t *p) { int a; #ifdef LOG printf ("comp_expression\n"); #endif a = asmrp_operand (p); while ( (p->sym == ASMRP_SYM_LESS) || (p->sym == ASMRP_SYM_LEQ) || (p->sym == ASMRP_SYM_EQUALS) || (p->sym == ASMRP_SYM_GEQ) || (p->sym == ASMRP_SYM_GREATER) ) { int op = p->sym; int b; asmrp_get_sym (p); b = asmrp_operand (p); switch (op) { case ASMRP_SYM_LESS: a = a<b; break; case ASMRP_SYM_LEQ: a = a<=b; break; case ASMRP_SYM_EQUALS: a = a==b; break; case ASMRP_SYM_GEQ: a = a>=b; break; case ASMRP_SYM_GREATER: a = a>b; break; } } #ifdef LOG printf ("comp_expression done = %d\n", a); #endif return a; } static int asmrp_condition (asmrp_t *p) { int a; #ifdef LOG printf ("condition\n"); #endif a = asmrp_comp_expression (p); while ( (p->sym == ASMRP_SYM_AND) || (p->sym == ASMRP_SYM_OR) ) { int op, b; op = p->sym; asmrp_get_sym (p); b = asmrp_comp_expression (p); switch (op) { case ASMRP_SYM_AND: a = a & b; break; case ASMRP_SYM_OR: a = a | b; break; } } #ifdef LOG printf ("condition done = %d\n", a); #endif return a; } static void asmrp_assignment (asmrp_t *p) { #ifdef LOG printf ("assignment\n"); #endif if (p->sym == ASMRP_SYM_COMMA || p->sym == ASMRP_SYM_SEMICOLON) { #ifdef LOG printf ("empty assignment\n"); #endif return; } if (p->sym != ASMRP_SYM_ID) { mp_msg(MSGT_STREAM, MSGL_ERR, "error: identifier expected\n"); return; } asmrp_get_sym (p); if (p->sym != ASMRP_SYM_EQUALS) { mp_msg(MSGT_STREAM, MSGL_ERR, "error: = expected\n"); return; } asmrp_get_sym (p); if ( (p->sym != ASMRP_SYM_NUM) && (p->sym != ASMRP_SYM_STRING) && (p->sym != ASMRP_SYM_ID)) { mp_msg(MSGT_STREAM, MSGL_ERR, "error: number or string expected\n"); return; } asmrp_get_sym (p); #ifdef LOG printf ("assignment done\n"); #endif } static int asmrp_rule (asmrp_t *p) { int ret; #ifdef LOG printf ("rule\n"); #endif ret = 1; if (p->sym == ASMRP_SYM_HASH) { asmrp_get_sym (p); ret = asmrp_condition (p); while (p->sym == ASMRP_SYM_COMMA) { asmrp_get_sym (p); asmrp_assignment (p); } } else if (p->sym != ASMRP_SYM_SEMICOLON) { asmrp_assignment (p); while (p->sym == ASMRP_SYM_COMMA) { asmrp_get_sym (p); asmrp_assignment (p); } } #ifdef LOG printf ("rule done = %d\n", ret); #endif if (p->sym != ASMRP_SYM_SEMICOLON) { mp_msg(MSGT_STREAM, MSGL_ERR, "semicolon expected.\n"); return ret; } asmrp_get_sym (p); return ret; } static int asmrp_eval (asmrp_t *p, int *matches) { int rule_num, num_matches; #ifdef LOG printf ("eval\n"); #endif asmrp_get_sym (p); rule_num = 0; num_matches = 0; while (p->sym != ASMRP_SYM_EOF) { if (asmrp_rule (p)) { #ifdef LOG printf ("rule #%d is true\n", rule_num); #endif if(num_matches < MAX_RULEMATCHES - 1) matches[num_matches++] = rule_num; else mp_msg(MSGT_STREAM, MSGL_ERR, "Ignoring matched asm rule %d, too many matched rules.\n", rule_num); } rule_num++; } matches[num_matches] = -1; return num_matches; } int asmrp_match (const char *rules, int bandwidth, int *matches) { asmrp_t *p; int num_matches; p = asmrp_new (); asmrp_init (p, rules); asmrp_set_id (p, "Bandwidth", bandwidth); asmrp_set_id (p, "OldPNMPlayer", 0); num_matches = asmrp_eval (p, matches); asmrp_dispose (p); return num_matches; }
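A small usage sketch for the parser above. The entry point asmrp_match() seeds the symbol table with Bandwidth and OldPNMPlayer and returns the indices of the rules that evaluate to true. The rulebook string below is a made-up example written in the grammar from the header comment, and the 16-entry matches array assumes MAX_RULEMATCHES from asmrp.h is at least that large.

#include <stdio.h>
#include "asmrp.h"

int main(void) {
    /* Hypothetical rulebook: rule 0 applies below 67959 bps, rule 1 above. */
    const char *rules =
        "#($Bandwidth < 67959),AverageBandwidth=32000,Priority=5;"
        "#($Bandwidth >= 67959),AverageBandwidth=67959,Priority=9;";
    int matches[16];   /* assumed to be >= MAX_RULEMATCHES */
    int n, i;

    n = asmrp_match(rules, 80000, matches);     /* evaluate with Bandwidth=80000 */
    for (i = 0; i < n; i++)
        printf("rule %d matched\n", matches[i]); /* expected output: rule 1 matched */
    return 0;
}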
491193.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "functions.h" #include "STACKImplementation.h" char operators[2][4]={{'+','-','*','/'},{'0','0','1','1'}}; int main(void){ Stack operator_stack,operand_stack; char expresion[100],token,garbage,op,op1,op2,result,res; printf("Give an infix expresion, so we can evaluate it\n"); scanf("%s",expresion); int length=strlen(expresion); InitializeStack(&operand_stack); InitializeStack(&operator_stack); for(int i=0;i<length;i++){ token = expresion[i]; if (token==' ') continue; if ((token >= '0') && (token <= '9')) push(&operand_stack,token); else if (token == '(' ) push(&operator_stack,token); else if ((token == '+') || (token == '-') || (token == '*') || (token == '/') ){ char top=get_top(&operator_stack,1); while(!Empty(&operator_stack) && has_precendence(token,top)){ op1=0;op2=0; pop(&operator_stack,&op); pop(&operand_stack,&op1); pop(&operand_stack,&op2); res=calculate(op1,op,op2); push(&operand_stack,res); top=get_top(&operator_stack,1); } push(&operator_stack,token); } if(token==')'){ char top=get_top(&operator_stack,1); while(top!='('){ pop(&operator_stack,&op); pop(&operand_stack,&op1); pop(&operand_stack,&op2); res=calculate(op1,op,op2); push(&operand_stack,res); top=get_top(&operator_stack,1); } pop(&operator_stack,&garbage); } } while(!Empty(&operator_stack)){ pop(&operator_stack,&op); pop(&operand_stack,&op1); pop(&operand_stack,&op2); res=calculate(op1,op,op2); push(&operand_stack,res); } char final; pop(&operand_stack,&final); printf("The result is %d\n",final); return 0; }
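The evaluator above implements the classic two-stack infix evaluation: operands go on one stack, operators on the other, and an operator is only pushed after every higher-or-equal-precedence operator already on the stack has been applied. Since has_precendence() and calculate() live in functions.h and are not shown, here is a self-contained sketch of the same idea for single-digit operands, with stand-in prec() and apply() helpers.

/* Standalone two-stack infix evaluator for single-digit operands. */
#include <stdio.h>
#include <string.h>

static int prec(char op) {
    return (op == '*' || op == '/') ? 2 : (op == '+' || op == '-') ? 1 : 0;
}
static int apply(int a, char op, int b) {
    switch (op) {
    case '+': return a + b;
    case '-': return a - b;
    case '*': return a * b;
    default:  return a / b;
    }
}

int main(void) {
    const char *expr = "3+4*(2-1)";
    int vals[100], vtop = -1;       /* operand stack */
    char ops[100]; int otop = -1;   /* operator stack */

    for (size_t i = 0; i < strlen(expr); i++) {
        char t = expr[i];
        if (t >= '0' && t <= '9') {
            vals[++vtop] = t - '0';
        } else if (t == '(') {
            ops[++otop] = t;
        } else if (t == ')') {
            while (ops[otop] != '(') {          /* reduce back to the matching '(' */
                int b = vals[vtop--], a = vals[vtop--];
                vals[++vtop] = apply(a, ops[otop--], b);
            }
            otop--;                             /* discard '(' */
        } else {                                /* one of + - * / */
            while (otop >= 0 && prec(ops[otop]) >= prec(t)) {
                int b = vals[vtop--], a = vals[vtop--];
                vals[++vtop] = apply(a, ops[otop--], b);
            }
            ops[++otop] = t;
        }
    }
    while (otop >= 0) {                         /* apply whatever is left */
        int b = vals[vtop--], a = vals[vtop--];
        vals[++vtop] = apply(a, ops[otop--], b);
    }
    printf("%s = %d\n", expr, vals[vtop]);      /* prints 3+4*(2-1) = 7 */
    return 0;
}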
358710.c
/**********************************************************************/ /* ____ ____ */ /* / /\/ / */ /* /___/ \ / */ /* \ \ \/ */ /* \ \ Copyright (c) 2003-2009 Xilinx, Inc. */ /* / / All Right Reserved. */ /* /---/ /\ */ /* \ \ / \ */ /* \___\/\___\ */ /***********************************************************************/ /* This file is designed for use with ISim build 0x7708f090 */ #define XSI_HIDE_SYMBOL_SPEC true #include "xsi.h" #include <memory.h> #ifdef __GNUC__ #include <stdlib.h> #else #include <malloc.h> #define alloca _alloca #endif extern char *IEEE_P_2592010699; extern char *SIMPRIM_P_0947159679; extern char *IEEE_P_2717149903; extern char *IEEE_P_1367372525; extern char *SIMPRIM_P_4208868169; unsigned char ieee_p_2592010699_sub_1388759734_503743352(char *, unsigned char ); unsigned char ieee_p_2592010699_sub_1605435078_503743352(char *, unsigned char , unsigned char ); unsigned char ieee_p_2592010699_sub_1690584930_503743352(char *, unsigned char ); unsigned char ieee_p_2592010699_sub_2507238156_503743352(char *, unsigned char , unsigned char ); unsigned char ieee_p_2592010699_sub_2545490612_503743352(char *, unsigned char , unsigned char ); void ieee_p_2717149903_sub_2486506143_2101202839(char *, char *, char *, unsigned int , unsigned int , char *, char *, char *, char *, unsigned char , char *, char *, char *, unsigned char , unsigned char , unsigned char , unsigned char , unsigned char , unsigned char , unsigned char ); void ieee_p_2717149903_sub_2603698110_2101202839(char *, char *, char *, char *, char *, unsigned int , unsigned int , char *, char *, int64 , char *, unsigned int , unsigned int , char *, char *, int64 , int64 , int64 , unsigned char , unsigned char , unsigned char , char *, char *, unsigned char , unsigned char , unsigned char , unsigned char , unsigned char , unsigned char , unsigned char ); void ieee_p_2717149903_sub_3797369404_2101202839(char *, char *, char *, char *, char *, unsigned int , unsigned int , char *, char *, int64 , char *, unsigned int , unsigned int , char *, char *, int64 , int64 , int64 , int64 , int64 , unsigned char , unsigned char , char *, char *, unsigned char , unsigned char , unsigned char , unsigned char , unsigned char , unsigned char , unsigned char ); void ieee_p_2717149903_sub_539877840_2101202839(char *, char *, char *, unsigned int , unsigned int , char *, char *, unsigned int , unsigned int , char *); void ieee_p_2717149903_sub_649313994_2101202839(char *, char *, char *, unsigned int , unsigned int , char *, char *, unsigned int , unsigned int , int64 ); void ieee_p_2717149903_sub_756322403_2101202839(char *, char *, char *, char *, char *, unsigned int , unsigned int , char *, char *, int64 , int64 , int64 , int64 , unsigned char , char *, char *, unsigned char , unsigned char , unsigned char ); static void simprim_a_4130118134_1564065396_p_0(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: t1 = (t0 + 1880U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 19768); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19272); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_1(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: 
t1 = (t0 + 2040U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 19832); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19288); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_2(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: t1 = (t0 + 1720U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 19896); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19304); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_3(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: t1 = (t0 + 2360U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 19960); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19320); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_4(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: t1 = (t0 + 2200U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 20024); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19336); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_5(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: t1 = ((SIMPRIM_P_0947159679) + 1032U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 20088); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19352); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_6(char *t0) { char *t1; char *t2; unsigned char t3; unsigned char t4; char *t5; char *t6; char *t7; char *t8; char *t9; LAB0: LAB3: t1 = ((SIMPRIM_P_0947159679) + 1992U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t4 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t1 = (t0 + 20152); t5 = (t1 + 56U); t6 = *((char **)t5); t7 = (t6 + 56U); t8 = *((char **)t7); *((unsigned char *)t8) = t4; xsi_driver_first_trans_fast(t1); LAB2: t9 = (t0 + 19368); *((int *)t9) = 1; LAB1: return; LAB4: goto LAB2; } static void simprim_a_4130118134_1564065396_p_7(char *t0) { char t7[16]; char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; LAB0: t1 = (t0 + 16280); t2 = (t0 + 3600U); t3 = (t0 + 20216); t4 = (t0 + 2800U); t5 = (t0 + 5856U); t6 = *((char **)t5); memcpy(t7, t6, 16U); ieee_p_2717149903_sub_539877840_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = 
(t0 + 19384); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_8(char *t0) { char t7[16]; char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; LAB0: t1 = (t0 + 16528); t2 = (t0 + 3760U); t3 = (t0 + 20280); t4 = (t0 + 2480U); t5 = (t0 + 5976U); t6 = *((char **)t5); memcpy(t7, t6, 16U); ieee_p_2717149903_sub_539877840_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19400); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_9(char *t0) { char t7[16]; char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; LAB0: t1 = (t0 + 16776); t2 = (t0 + 3920U); t3 = (t0 + 20344); t4 = (t0 + 2640U); t5 = (t0 + 6096U); t6 = *((char **)t5); memcpy(t7, t6, 16U); ieee_p_2717149903_sub_539877840_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19416); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_10(char *t0) { char t7[16]; char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; LAB0: t1 = (t0 + 17024); t2 = (t0 + 4080U); t3 = (t0 + 20408); t4 = (t0 + 3440U); t5 = (t0 + 6216U); t6 = *((char **)t5); memcpy(t7, t6, 16U); ieee_p_2717149903_sub_539877840_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19432); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_11(char *t0) { char t7[16]; char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; LAB0: t1 = (t0 + 17272); t2 = (t0 + 4240U); t3 = (t0 + 20472); t4 = (t0 + 3280U); t5 = (t0 + 6336U); t6 = *((char **)t5); memcpy(t7, t6, 16U); ieee_p_2717149903_sub_539877840_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19448); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_12(char *t0) { char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; int64 t7; LAB0: t1 = (t0 + 17520); t2 = (t0 + 4400U); t3 = (t0 + 20536); t4 = (t0 + 3600U); t5 = (t0 + 8616U); t6 = *((char **)t5); t7 = *((int64 *)t6); ieee_p_2717149903_sub_649313994_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19464); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_13(char *t0) { char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; int64 t7; LAB0: t1 = (t0 + 17768); t2 = (t0 + 4560U); t3 = (t0 + 20600); t4 = (t0 + 3760U); t5 = (t0 + 8496U); t6 = *((char **)t5); t7 = *((int64 *)t6); ieee_p_2717149903_sub_649313994_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19480); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_14(char *t0) { char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; int64 t7; LAB0: t1 = (t0 + 18016); t2 = (t0 + 4720U); t3 = (t0 + 20664); t4 = (t0 + 3920U); t5 = (t0 + 8736U); t6 = *((char **)t5); t7 = *((int64 *)t6); ieee_p_2717149903_sub_649313994_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19496); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_15(char *t0) { char *t1; char *t2; char *t3; char *t4; char *t5; char *t6; int64 t7; LAB0: t1 = (t0 + 18264); t2 = (t0 + 4880U); t3 = (t0 + 20728); t4 = (t0 + 4080U); t5 = (t0 + 8856U); t6 = *((char **)t5); t7 = *((int64 *)t6); ieee_p_2717149903_sub_649313994_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19512); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_16(char *t0) { char *t1; char *t2; char 
*t3; char *t4; char *t5; char *t6; int64 t7; LAB0: t1 = (t0 + 18512); t2 = (t0 + 5040U); t3 = (t0 + 20792); t4 = (t0 + 4240U); t5 = (t0 + 8976U); t6 = *((char **)t5); t7 = *((int64 *)t6); ieee_p_2717149903_sub_649313994_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t3, t4, 0U, 0U, t7); t5 = (t0 + 19528); *((int *)t5) = 1; LAB1: return; } static void simprim_a_4130118134_1564065396_p_17(char *t0) { char t17[16]; char t25[16]; char t50[16]; char t59[288]; char *t1; char *t2; unsigned char t3; char *t4; unsigned char t5; unsigned char t6; char *t7; int64 t8; int64 t9; int64 t10; int64 t11; int64 t12; char *t13; char *t14; char *t15; char *t16; char *t18; char *t19; int t20; unsigned int t21; char *t22; char *t23; char *t24; char *t26; char *t27; int t28; char *t29; char *t30; char *t31; char *t32; char *t33; int64 t34; char *t35; unsigned char t36; char *t37; unsigned char t38; unsigned char t39; char *t40; unsigned char t41; unsigned char t42; unsigned char t43; unsigned char t44; unsigned char t45; char *t46; unsigned char t47; unsigned char t48; char *t49; char *t51; char *t52; int t53; char *t54; unsigned char t55; char *t56; unsigned char t57; unsigned char t58; unsigned int t60; unsigned int t61; unsigned int t62; unsigned char t63; unsigned char t64; unsigned char t65; char *t66; unsigned char t67; unsigned char t68; char *t69; unsigned char t70; unsigned char t71; int t72; unsigned int t73; unsigned int t74; char *t75; char *t76; char *t77; char *t78; char *t79; unsigned char t80; char *t81; char *t82; unsigned char t83; unsigned char t84; char *t85; unsigned char t86; unsigned char t87; int t88; unsigned int t89; unsigned int t90; char *t91; char *t92; char *t93; char *t94; char *t95; int t96; unsigned int t97; unsigned int t98; char *t99; char *t100; char *t101; char *t102; char *t103; char *t104; char *t105; char *t106; int t107; unsigned int t108; char *t109; char *t110; unsigned char t111; char *t112; unsigned char t113; LAB0: t1 = (t0 + 12216U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); if (t3 != 0) goto LAB2; LAB4: LAB3: t1 = (t0 + 5376U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); if (t3 != 0) goto LAB40; LAB42: LAB41: t1 = (t0 + 11136U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t1 = (t0 + 10296U); t4 = *((char **)t1); t5 = *((unsigned char *)t4); t6 = ieee_p_2592010699_sub_2545490612_503743352(IEEE_P_2592010699, t3, t5); t1 = (t0 + 11016U); t7 = *((char **)t1); t36 = *((unsigned char *)t7); t38 = ieee_p_2592010699_sub_2545490612_503743352(IEEE_P_2592010699, t6, t36); t1 = (t0 + 11256U); t13 = *((char **)t1); t39 = *((unsigned char *)t13); t41 = ieee_p_2592010699_sub_2545490612_503743352(IEEE_P_2592010699, t38, t39); t1 = (t0 + 11376U); t14 = *((char **)t1); t42 = *((unsigned char *)t14); t43 = ieee_p_2592010699_sub_2545490612_503743352(IEEE_P_2592010699, t41, t42); t1 = (t0 + 10416U); t15 = *((char **)t1); t44 = *((unsigned char *)t15); t45 = ieee_p_2592010699_sub_2545490612_503743352(IEEE_P_2592010699, t43, t44); t1 = (t0 + 10176U); t16 = *((char **)t1); t47 = *((unsigned char *)t16); t48 = ieee_p_2592010699_sub_2545490612_503743352(IEEE_P_2592010699, t45, t47); t1 = (t0 + 11496U); t18 = *((char **)t1); t1 = (t18 + 0); *((unsigned char *)t1) = t48; t1 = (t0 + 3000U); t2 = *((char **)t1); t5 = *((unsigned char *)t2); t6 = (t5 == (unsigned char)3); if (t6 == 1) goto LAB64; LAB65: t1 = (t0 + 3160U); t4 = *((char **)t1); t36 = *((unsigned char *)t4); t38 = (t36 == (unsigned char)3); t3 = t38; LAB66: if (t3 != 0) goto LAB61; LAB63: t1 = (t0 + 5080U); t2 = 
*((char **)t1); t3 = *((unsigned char *)t2); t1 = (t0 + 12336U); t4 = *((char **)t1); t1 = (t4 + 0); *((unsigned char *)t1) = t3; t1 = (t0 + 4920U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t1 = (t0 + 12456U); t4 = *((char **)t1); t1 = (t4 + 0); *((unsigned char *)t1) = t3; LAB62: t1 = (t0 + 18760); t2 = (t0 + 11736U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 11616U); t13 = *((char **)t7); t7 = (t0 + 30152U); t14 = ((SIMPRIM_P_4208868169) + 1168U); t15 = *((char **)t14); memcpy(t59, t15, 288U); t14 = ((SIMPRIM_P_4208868169) + 8120U); t16 = xsi_get_transient_memory(6U); memset(t16, 0, 6U); t18 = t16; t19 = (t0 + 4600U); t22 = *((char **)t19); t3 = *((unsigned char *)t22); *((unsigned char *)t18) = t3; t18 = (t18 + 1U); t19 = (t0 + 4760U); t23 = *((char **)t19); t5 = *((unsigned char *)t23); *((unsigned char *)t18) = t5; t18 = (t18 + 1U); t19 = (t0 + 4440U); t24 = *((char **)t19); t6 = *((unsigned char *)t24); *((unsigned char *)t18) = t6; t18 = (t18 + 1U); t19 = (t0 + 12336U); t26 = *((char **)t19); t36 = *((unsigned char *)t26); *((unsigned char *)t18) = t36; t18 = (t18 + 1U); t19 = (t0 + 12456U); t27 = *((char **)t19); t38 = *((unsigned char *)t27); *((unsigned char *)t18) = t38; t18 = (t18 + 1U); t19 = (t0 + 11736U); t29 = *((char **)t19); t39 = *((unsigned char *)t29); *((unsigned char *)t18) = t39; t19 = (t17 + 0U); t30 = (t19 + 0U); *((int *)t30) = 0; t30 = (t19 + 4U); *((int *)t30) = 5; t30 = (t19 + 8U); *((int *)t30) = 1; t20 = (5 - 0); t21 = (t20 * 1); t21 = (t21 + 1); t30 = (t19 + 12U); *((unsigned int *)t30) = t21; ieee_vital_primitives_vitalstatetable(IEEE_P_1367372525, t1, t2, t13, t7, t59, t14, t16, t17); t1 = (t0 + 11496U); t2 = *((char **)t1); t3 = *((unsigned char *)t2); t1 = (t0 + 11736U); t4 = *((char **)t1); t5 = *((unsigned char *)t4); t6 = ieee_p_2592010699_sub_2507238156_503743352(IEEE_P_2592010699, t3, t5); t1 = (t0 + 11736U); t7 = *((char **)t1); t1 = (t7 + 0); *((unsigned char *)t1) = t6; t1 = (t0 + 18760); t2 = (t0 + 1520U); t4 = (t0 + 20856); t7 = (t0 + 11856U); t13 = *((char **)t7); t7 = (t0 + 32084); t15 = (t17 + 0U); t16 = (t15 + 0U); *((int *)t16) = 1; t16 = (t15 + 4U); *((int *)t16) = 1; t16 = (t15 + 8U); *((int *)t16) = 1; t20 = (1 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t16 = (t15 + 12U); *((unsigned int *)t16) = t21; t16 = (t0 + 11736U); t18 = *((char **)t16); t3 = *((unsigned char *)t18); t16 = xsi_get_transient_memory(160U); memset(t16, 0, 160U); t19 = t16; t28 = (0 - 0); t21 = (t28 * 1); t60 = (32U * t21); t22 = (t19 + t60); t23 = t22; t24 = (t0 + 4560U); t8 = xsi_signal_get_last_event(t24); *((int64 *)t23) = t8; t26 = (t22 + 8U); t27 = (t0 + 6456U); t29 = *((char **)t27); memcpy(t26, t29, 16U); t27 = (t22 + 24U); t30 = (t0 + 4440U); t31 = *((char **)t30); t39 = *((unsigned char *)t31); t41 = (t39 != (unsigned char)2); if (t41 == 1) goto LAB79; LAB80: t38 = (unsigned char)0; LAB81: if (t38 == 1) goto LAB76; LAB77: t36 = (unsigned char)0; LAB78: if (t36 == 1) goto LAB73; LAB74: t6 = (unsigned char)0; LAB75: if (t6 == 1) goto LAB70; LAB71: t5 = (unsigned char)0; LAB72: *((unsigned char *)t27) = t5; t53 = (1 - 0); t61 = (t53 * 1); t62 = (32U * t61); t30 = (t19 + t62); t40 = t30; t46 = (t0 + 5040U); t9 = xsi_signal_get_last_event(t46); *((int64 *)t40) = t9; t49 = (t30 + 8U); t51 = (t0 + 6696U); t52 = *((char **)t51); memcpy(t49, t52, 16U); t51 = (t30 + 24U); t54 = (t0 + 4920U); t56 = *((char **)t54); t64 = *((unsigned char *)t56); t65 = (t64 != (unsigned char)3); if (t65 == 1) goto LAB85; LAB86: t63 = (unsigned char)0; LAB87: if (t63 == 1) 
goto LAB82; LAB83: t58 = (unsigned char)0; LAB84: *((unsigned char *)t51) = t58; t72 = (2 - 0); t73 = (t72 * 1); t74 = (32U * t73); t54 = (t19 + t74); t75 = t54; t76 = (t0 + 4880U); t10 = xsi_signal_get_last_event(t76); *((int64 *)t75) = t10; t77 = (t54 + 8U); t78 = (t0 + 6576U); t79 = *((char **)t78); memcpy(t77, t79, 16U); t78 = (t54 + 24U); t81 = (t0 + 3000U); t82 = *((char **)t81); t83 = *((unsigned char *)t82); t84 = (t83 == (unsigned char)2); if (t84 == 1) goto LAB88; LAB89: t80 = (unsigned char)0; LAB90: *((unsigned char *)t78) = t80; t88 = (3 - 0); t89 = (t88 * 1); t90 = (32U * t89); t81 = (t19 + t90); t91 = t81; t92 = (t0 + 2960U); t11 = xsi_signal_get_last_event(t92); *((int64 *)t91) = t11; t93 = (t81 + 8U); t94 = (t0 + 9696U); t95 = *((char **)t94); memcpy(t93, t95, 16U); t94 = (t81 + 24U); *((unsigned char *)t94) = (unsigned char)1; t96 = (4 - 0); t97 = (t96 * 1); t98 = (32U * t97); t99 = (t19 + t98); t100 = t99; t101 = (t0 + 3120U); t12 = xsi_signal_get_last_event(t101); *((int64 *)t100) = t12; t102 = (t99 + 8U); t103 = (t0 + 9696U); t104 = *((char **)t103); memcpy(t102, t104, 16U); t103 = (t99 + 24U); *((unsigned char *)t103) = (unsigned char)1; t105 = (t25 + 0U); t106 = (t105 + 0U); *((int *)t106) = 0; t106 = (t105 + 4U); *((int *)t106) = 4; t106 = (t105 + 8U); *((int *)t106) = 1; t107 = (4 - 0); t108 = (t107 * 1); t108 = (t108 + 1); t106 = (t105 + 12U); *((unsigned int *)t106) = t108; t106 = ((IEEE_P_2717149903) + 1288U); t109 = *((char **)t106); memcpy(t50, t109, 16U); t106 = (t0 + 5496U); t110 = *((char **)t106); t111 = *((unsigned char *)t110); t106 = (t0 + 5616U); t112 = *((char **)t106); t113 = *((unsigned char *)t112); ieee_p_2717149903_sub_2486506143_2101202839(IEEE_P_2717149903, t1, t2, 0U, 0U, t4, t13, t7, t17, t3, t16, t25, t50, (unsigned char)3, t111, t113, (unsigned char)1, (unsigned char)0, (unsigned char)0, (unsigned char)0); t1 = (t0 + 19544); *((int *)t1) = 1; LAB1: return; LAB2: t1 = (t0 + 5736U); t4 = *((char **)t1); t5 = *((unsigned char *)t4); t6 = (t5 == (unsigned char)1); if (t6 != 0) goto LAB5; LAB7: t1 = (t0 + 12336U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)2; t1 = (t0 + 12456U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)3; LAB6: t1 = (t0 + 7776U); t2 = *((char **)t1); t8 = *((int64 *)t2); t9 = (0 * 1LL); t5 = (t8 == t9); if (t5 == 1) goto LAB11; LAB12: t3 = (unsigned char)0; LAB13: if (t3 != 0) goto LAB8; LAB10: t1 = (t0 + 7776U); t2 = *((char **)t1); t8 = *((int64 *)t2); t9 = (0 * 1LL); t5 = (t8 != t9); if (t5 == 1) goto LAB16; LAB17: t3 = (unsigned char)0; LAB18: if (t3 != 0) goto LAB14; LAB15: t1 = (t0 + 7776U); t2 = *((char **)t1); t8 = *((int64 *)t2); t9 = (0 * 1LL); t5 = (t8 != t9); if (t5 == 1) goto LAB21; LAB22: t3 = (unsigned char)0; LAB23: if (t3 != 0) goto LAB19; LAB20: LAB9: t1 = (t0 + 7896U); t2 = *((char **)t1); t8 = *((int64 *)t2); t9 = (0 * 1LL); t5 = (t8 == t9); if (t5 == 1) goto LAB27; LAB28: t3 = (unsigned char)0; LAB29: if (t3 != 0) goto LAB24; LAB26: t1 = (t0 + 7896U); t2 = *((char **)t1); t8 = *((int64 *)t2); t9 = (0 * 1LL); t5 = (t8 != t9); if (t5 == 1) goto LAB32; LAB33: t3 = (unsigned char)0; LAB34: if (t3 != 0) goto LAB30; LAB31: t1 = (t0 + 7896U); t2 = *((char **)t1); t8 = *((int64 *)t2); t9 = (0 * 1LL); t5 = (t8 != t9); if (t5 == 1) goto LAB37; LAB38: t3 = (unsigned char)0; LAB39: if (t3 != 0) goto LAB35; LAB36: LAB25: t1 = (t0 + 12216U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)0; goto LAB3; LAB5: t1 = (t0 + 12336U); 
t7 = *((char **)t1); t1 = (t7 + 0); *((unsigned char *)t1) = (unsigned char)3; t1 = (t0 + 12456U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)2; goto LAB6; LAB8: t1 = (t0 + 8376U); t7 = *((char **)t1); t12 = *((int64 *)t7); t1 = (t0 + 11976U); t13 = *((char **)t1); t1 = (t13 + 0); *((int64 *)t1) = t12; goto LAB9; LAB11: t1 = (t0 + 8376U); t4 = *((char **)t1); t10 = *((int64 *)t4); t11 = (0 * 1LL); t6 = (t10 != t11); t3 = t6; goto LAB13; LAB14: t1 = (t0 + 7776U); t7 = *((char **)t1); t12 = *((int64 *)t7); t1 = (t0 + 11976U); t13 = *((char **)t1); t1 = (t13 + 0); *((int64 *)t1) = t12; goto LAB9; LAB16: t1 = (t0 + 8376U); t4 = *((char **)t1); t10 = *((int64 *)t4); t11 = (0 * 1LL); t6 = (t10 == t11); t3 = t6; goto LAB18; LAB19: t1 = (t0 + 8376U); t7 = *((char **)t1); t12 = *((int64 *)t7); t1 = (t0 + 11976U); t13 = *((char **)t1); t1 = (t13 + 0); *((int64 *)t1) = t12; goto LAB9; LAB21: t1 = (t0 + 8376U); t4 = *((char **)t1); t10 = *((int64 *)t4); t11 = (0 * 1LL); t6 = (t10 != t11); t3 = t6; goto LAB23; LAB24: t1 = (t0 + 8256U); t7 = *((char **)t1); t12 = *((int64 *)t7); t1 = (t0 + 12096U); t13 = *((char **)t1); t1 = (t13 + 0); *((int64 *)t1) = t12; goto LAB25; LAB27: t1 = (t0 + 8256U); t4 = *((char **)t1); t10 = *((int64 *)t4); t11 = (0 * 1LL); t6 = (t10 != t11); t3 = t6; goto LAB29; LAB30: t1 = (t0 + 7896U); t7 = *((char **)t1); t12 = *((int64 *)t7); t1 = (t0 + 12096U); t13 = *((char **)t1); t1 = (t13 + 0); *((int64 *)t1) = t12; goto LAB25; LAB32: t1 = (t0 + 8256U); t4 = *((char **)t1); t10 = *((int64 *)t4); t11 = (0 * 1LL); t6 = (t10 == t11); t3 = t6; goto LAB34; LAB35: t1 = (t0 + 8256U); t7 = *((char **)t1); t12 = *((int64 *)t7); t1 = (t0 + 12096U); t13 = *((char **)t1); t1 = (t13 + 0); *((int64 *)t1) = t12; goto LAB25; LAB37: t1 = (t0 + 8256U); t4 = *((char **)t1); t10 = *((int64 *)t4); t11 = (0 * 1LL); t6 = (t10 != t11); t3 = t6; goto LAB39; LAB40: t1 = (t0 + 18760); t4 = (t0 + 11136U); t7 = *((char **)t4); t4 = (t7 + 0); t13 = (t0 + 10656U); t14 = *((char **)t13); t13 = (t0 + 4720U); t15 = (t0 + 32019); t18 = (t17 + 0U); t19 = (t18 + 0U); *((int *)t19) = 1; t19 = (t18 + 4U); *((int *)t19) = 1; t19 = (t18 + 8U); *((int *)t19) = 1; t20 = (1 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t19 = (t18 + 12U); *((unsigned int *)t19) = t21; t19 = (t0 + 8736U); t22 = *((char **)t19); t8 = *((int64 *)t22); t19 = (t0 + 4560U); t23 = (t0 + 32020); t26 = (t25 + 0U); t27 = (t26 + 0U); *((int *)t27) = 1; t27 = (t26 + 4U); *((int *)t27) = 3; t27 = (t26 + 8U); *((int *)t27) = 1; t28 = (3 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t27 = (t26 + 12U); *((unsigned int *)t27) = t21; t27 = (t0 + 8496U); t29 = *((char **)t27); t9 = *((int64 *)t29); t27 = (t0 + 6816U); t30 = *((char **)t27); t10 = *((int64 *)t30); t27 = (t0 + 6936U); t31 = *((char **)t27); t11 = *((int64 *)t31); t27 = (t0 + 7416U); t32 = *((char **)t27); t12 = *((int64 *)t32); t27 = (t0 + 7296U); t33 = *((char **)t27); t34 = *((int64 *)t33); t27 = (t0 + 4920U); t35 = *((char **)t27); t6 = *((unsigned char *)t35); t36 = ieee_p_2592010699_sub_1690584930_503743352(IEEE_P_2592010699, t6); t27 = (t0 + 4440U); t37 = *((char **)t27); t38 = *((unsigned char *)t37); t39 = ieee_p_2592010699_sub_1605435078_503743352(IEEE_P_2592010699, t36, t38); t27 = (t0 + 5080U); t40 = *((char **)t27); t41 = *((unsigned char *)t40); t42 = ieee_p_2592010699_sub_1690584930_503743352(IEEE_P_2592010699, t41); t43 = ieee_p_2592010699_sub_1605435078_503743352(IEEE_P_2592010699, t39, t42); t44 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, 
t43); t45 = (t44 != (unsigned char)2); if (t45 == 1) goto LAB43; LAB44: t5 = (unsigned char)0; LAB45: t27 = (t0 + 32023); t51 = (t50 + 0U); t52 = (t51 + 0U); *((int *)t52) = 1; t52 = (t51 + 4U); *((int *)t52) = 5; t52 = (t51 + 8U); *((int *)t52) = 1; t53 = (5 - 1); t21 = (t53 * 1); t21 = (t21 + 1); t52 = (t51 + 12U); *((unsigned int *)t52) = t21; t52 = (t0 + 5496U); t54 = *((char **)t52); t55 = *((unsigned char *)t54); t52 = (t0 + 5616U); t56 = *((char **)t52); t57 = *((unsigned char *)t56); ieee_p_2717149903_sub_3797369404_2101202839(IEEE_P_2717149903, t1, t4, t14, t13, 0U, 0U, t15, t17, t8, t19, 0U, 0U, t23, t25, t9, t10, t11, t12, t34, t5, (unsigned char)8, t27, t50, t55, t57, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1); t1 = (t0 + 18760); t2 = (t0 + 11016U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 10536U); t13 = *((char **)t7); t7 = (t0 + 4400U); t14 = (t0 + 32028); t16 = (t17 + 0U); t18 = (t16 + 0U); *((int *)t18) = 1; t18 = (t16 + 4U); *((int *)t18) = 2; t18 = (t16 + 8U); *((int *)t18) = 1; t20 = (2 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t18 = (t16 + 12U); *((unsigned int *)t18) = t21; t18 = (t0 + 8616U); t19 = *((char **)t18); t8 = *((int64 *)t19); t18 = (t0 + 4560U); t22 = (t0 + 32030); t24 = (t25 + 0U); t26 = (t24 + 0U); *((int *)t26) = 1; t26 = (t24 + 4U); *((int *)t26) = 3; t26 = (t24 + 8U); *((int *)t26) = 1; t28 = (3 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t26 = (t24 + 12U); *((unsigned int *)t26) = t21; t26 = (t0 + 8496U); t27 = *((char **)t26); t9 = *((int64 *)t27); t26 = (t0 + 7056U); t29 = *((char **)t26); t10 = *((int64 *)t29); t26 = (t0 + 7176U); t30 = *((char **)t26); t11 = *((int64 *)t30); t26 = (t0 + 7656U); t31 = *((char **)t26); t12 = *((int64 *)t31); t26 = (t0 + 7536U); t32 = *((char **)t26); t34 = *((int64 *)t32); t26 = (t0 + 4920U); t33 = *((char **)t26); t5 = *((unsigned char *)t33); t6 = ieee_p_2592010699_sub_1690584930_503743352(IEEE_P_2592010699, t5); t26 = (t0 + 11736U); t35 = *((char **)t26); t36 = *((unsigned char *)t35); t26 = (t0 + 4760U); t37 = *((char **)t26); t38 = *((unsigned char *)t37); t39 = ieee_p_2592010699_sub_2507238156_503743352(IEEE_P_2592010699, t36, t38); t41 = ieee_p_2592010699_sub_1605435078_503743352(IEEE_P_2592010699, t6, t39); t26 = (t0 + 5080U); t40 = *((char **)t26); t42 = *((unsigned char *)t40); t43 = ieee_p_2592010699_sub_1690584930_503743352(IEEE_P_2592010699, t42); t44 = ieee_p_2592010699_sub_1605435078_503743352(IEEE_P_2592010699, t41, t43); t45 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t44); t47 = (t45 != (unsigned char)2); if (t47 == 1) goto LAB46; LAB47: t3 = (unsigned char)0; LAB48: t26 = (t0 + 32033); t51 = (t50 + 0U); t52 = (t51 + 0U); *((int *)t52) = 1; t52 = (t51 + 4U); *((int *)t52) = 5; t52 = (t51 + 8U); *((int *)t52) = 1; t53 = (5 - 1); t21 = (t53 * 1); t21 = (t21 + 1); t52 = (t51 + 12U); *((unsigned int *)t52) = t21; t52 = (t0 + 5496U); t54 = *((char **)t52); t57 = *((unsigned char *)t54); t52 = (t0 + 5616U); t56 = *((char **)t52); t58 = *((unsigned char *)t56); ieee_p_2717149903_sub_3797369404_2101202839(IEEE_P_2717149903, t1, t2, t13, t7, 0U, 0U, t14, t17, t8, t18, 0U, 0U, t22, t25, t9, t10, t11, t12, t34, t3, (unsigned char)8, t26, t50, t57, t58, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1); t1 = (t0 + 18760); t2 = (t0 + 11256U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 10776U); t13 = *((char **)t7); t7 = (t0 + 4880U); t14 = (t0 + 32038); t16 = (t17 + 0U); t18 = (t16 + 0U); *((int *)t18) 
= 1; t18 = (t16 + 4U); *((int *)t18) = 3; t18 = (t16 + 8U); *((int *)t18) = 1; t20 = (3 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t18 = (t16 + 12U); *((unsigned int *)t18) = t21; t18 = (t0 + 8856U); t19 = *((char **)t18); t8 = *((int64 *)t19); t18 = (t0 + 4560U); t22 = (t0 + 32041); t24 = (t25 + 0U); t26 = (t24 + 0U); *((int *)t26) = 1; t26 = (t24 + 4U); *((int *)t26) = 3; t26 = (t24 + 8U); *((int *)t26) = 1; t28 = (3 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t26 = (t24 + 12U); *((unsigned int *)t26) = t21; t26 = (t0 + 8496U); t27 = *((char **)t26); t9 = *((int64 *)t27); t26 = (t0 + 8136U); t29 = *((char **)t26); t10 = *((int64 *)t29); t26 = (t0 + 11976U); t30 = *((char **)t26); t11 = *((int64 *)t30); t26 = (t0 + 4440U); t31 = *((char **)t26); t6 = *((unsigned char *)t31); t36 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t6); t38 = (t36 != (unsigned char)2); if (t38 == 1) goto LAB52; LAB53: t5 = (unsigned char)0; LAB54: if (t5 == 1) goto LAB49; LAB50: t3 = (unsigned char)0; LAB51: t26 = (t0 + 32044); t37 = (t50 + 0U); t40 = (t37 + 0U); *((int *)t40) = 1; t40 = (t37 + 4U); *((int *)t40) = 5; t40 = (t37 + 8U); *((int *)t40) = 1; t53 = (5 - 1); t21 = (t53 * 1); t21 = (t21 + 1); t40 = (t37 + 12U); *((unsigned int *)t40) = t21; t40 = (t0 + 5496U); t46 = *((char **)t40); t44 = *((unsigned char *)t46); t40 = (t0 + 5616U); t49 = *((char **)t40); t45 = *((unsigned char *)t49); ieee_p_2717149903_sub_2603698110_2101202839(IEEE_P_2717149903, t1, t2, t13, t7, 0U, 0U, t14, t17, t8, t18, 0U, 0U, t22, t25, t9, t10, t11, (unsigned char)0, t3, (unsigned char)8, t26, t50, t44, t45, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1); t1 = (t0 + 18760); t2 = (t0 + 11376U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 10896U); t13 = *((char **)t7); t7 = (t0 + 5040U); t14 = (t0 + 32049); t16 = (t17 + 0U); t18 = (t16 + 0U); *((int *)t18) = 1; t18 = (t16 + 4U); *((int *)t18) = 3; t18 = (t16 + 8U); *((int *)t18) = 1; t20 = (3 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t18 = (t16 + 12U); *((unsigned int *)t18) = t21; t18 = (t0 + 8976U); t19 = *((char **)t18); t8 = *((int64 *)t19); t18 = (t0 + 4560U); t22 = (t0 + 32052); t24 = (t25 + 0U); t26 = (t24 + 0U); *((int *)t26) = 1; t26 = (t24 + 4U); *((int *)t26) = 3; t26 = (t24 + 8U); *((int *)t26) = 1; t28 = (3 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t26 = (t24 + 12U); *((unsigned int *)t26) = t21; t26 = (t0 + 8496U); t27 = *((char **)t26); t9 = *((int64 *)t27); t26 = (t0 + 8016U); t29 = *((char **)t26); t10 = *((int64 *)t29); t26 = (t0 + 12096U); t30 = *((char **)t26); t11 = *((int64 *)t30); t26 = (t0 + 4920U); t31 = *((char **)t26); t6 = *((unsigned char *)t31); t36 = ieee_p_2592010699_sub_1690584930_503743352(IEEE_P_2592010699, t6); t26 = (t0 + 4440U); t32 = *((char **)t26); t38 = *((unsigned char *)t32); t39 = ieee_p_2592010699_sub_1605435078_503743352(IEEE_P_2592010699, t36, t38); t41 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t39); t42 = (t41 != (unsigned char)2); if (t42 == 1) goto LAB58; LAB59: t5 = (unsigned char)0; LAB60: if (t5 == 1) goto LAB55; LAB56: t3 = (unsigned char)0; LAB57: t26 = (t0 + 32055); t40 = (t50 + 0U); t46 = (t40 + 0U); *((int *)t46) = 1; t46 = (t40 + 4U); *((int *)t46) = 5; t46 = (t40 + 8U); *((int *)t46) = 1; t53 = (5 - 1); t21 = (t53 * 1); t21 = (t21 + 1); t46 = (t40 + 12U); *((unsigned int *)t46) = t21; t46 = (t0 + 5496U); t49 = *((char **)t46); t48 = *((unsigned char *)t49); t46 = (t0 + 5616U); t51 = *((char **)t46); t55 = *((unsigned char *)t51); 
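/* The call below follows the same generated pattern as the ones above: the
 * int64 time/delay values read from the process state (t8..t11), the
 * condition flag t3 produced by the LAB55/LAB56/LAB58 chain, what appears to
 * be a 5-character check name at offset 32055 together with its (1 to 5)
 * bounds record built in t50, and the two mode flags fetched from offsets
 * 5496U and 5616U are handed to a name-mangled subprogram of the precompiled
 * IEEE packages (the ieee_p_* symbols); this is presumably the generator's
 * expansion of a VITAL-style timing check for this primitive. */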
ieee_p_2717149903_sub_2603698110_2101202839(IEEE_P_2717149903, t1, t2, t13, t7, 0U, 0U, t14, t17, t8, t18, 0U, 0U, t22, t25, t9, t10, t11, (unsigned char)0, t3, (unsigned char)8, t26, t50, t48, t55, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1, (unsigned char)1); t1 = (t0 + 18760); t2 = (t0 + 10176U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 9816U); t13 = *((char **)t7); t7 = (t0 + 4560U); t14 = (t0 + 32060); t16 = (t17 + 0U); t18 = (t16 + 0U); *((int *)t18) = 1; t18 = (t16 + 4U); *((int *)t18) = 3; t18 = (t16 + 8U); *((int *)t18) = 1; t20 = (3 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t18 = (t16 + 12U); *((unsigned int *)t18) = t21; t8 = (0 * 1LL); t18 = (t0 + 9096U); t19 = *((char **)t18); t9 = *((int64 *)t19); t18 = (t0 + 9216U); t22 = *((char **)t18); t10 = *((int64 *)t22); t18 = (t0 + 9336U); t23 = *((char **)t18); t11 = *((int64 *)t23); t18 = (t0 + 4440U); t24 = *((char **)t18); t3 = *((unsigned char *)t24); t5 = ieee_p_2592010699_sub_1388759734_503743352(IEEE_P_2592010699, t3); t6 = (t5 != (unsigned char)2); t18 = (t0 + 32063); t27 = (t25 + 0U); t29 = (t27 + 0U); *((int *)t29) = 1; t29 = (t27 + 4U); *((int *)t29) = 5; t29 = (t27 + 8U); *((int *)t29) = 1; t28 = (5 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t29 = (t27 + 12U); *((unsigned int *)t29) = t21; t29 = (t0 + 5496U); t30 = *((char **)t29); t36 = *((unsigned char *)t30); t29 = (t0 + 5616U); t31 = *((char **)t29); t38 = *((unsigned char *)t31); ieee_p_2717149903_sub_756322403_2101202839(IEEE_P_2717149903, t1, t2, t13, t7, 0U, 0U, t14, t17, t8, t9, t10, t11, t6, t18, t25, t36, t38, (unsigned char)1); t1 = (t0 + 18760); t2 = (t0 + 10296U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 9936U); t13 = *((char **)t7); t7 = (t0 + 4880U); t14 = (t0 + 32068); t16 = (t17 + 0U); t18 = (t16 + 0U); *((int *)t18) = 1; t18 = (t16 + 4U); *((int *)t18) = 3; t18 = (t16 + 8U); *((int *)t18) = 1; t20 = (3 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t18 = (t16 + 12U); *((unsigned int *)t18) = t21; t8 = (0 * 1LL); t9 = (0 * 1LL); t18 = (t0 + 9456U); t19 = *((char **)t18); t10 = *((int64 *)t19); t11 = (0 * 1LL); t18 = (t0 + 32071); t23 = (t25 + 0U); t24 = (t23 + 0U); *((int *)t24) = 1; t24 = (t23 + 4U); *((int *)t24) = 5; t24 = (t23 + 8U); *((int *)t24) = 1; t28 = (5 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t24 = (t23 + 12U); *((unsigned int *)t24) = t21; t24 = (t0 + 5496U); t26 = *((char **)t24); t3 = *((unsigned char *)t26); t24 = (t0 + 5616U); t27 = *((char **)t24); t5 = *((unsigned char *)t27); ieee_p_2717149903_sub_756322403_2101202839(IEEE_P_2717149903, t1, t2, t13, t7, 0U, 0U, t14, t17, t8, t9, t10, t11, (unsigned char)1, t18, t25, t3, t5, (unsigned char)1); t1 = (t0 + 18760); t2 = (t0 + 10416U); t4 = *((char **)t2); t2 = (t4 + 0); t7 = (t0 + 10056U); t13 = *((char **)t7); t7 = (t0 + 5040U); t14 = (t0 + 32076); t16 = (t17 + 0U); t18 = (t16 + 0U); *((int *)t18) = 1; t18 = (t16 + 4U); *((int *)t18) = 3; t18 = (t16 + 8U); *((int *)t18) = 1; t20 = (3 - 1); t21 = (t20 * 1); t21 = (t21 + 1); t18 = (t16 + 12U); *((unsigned int *)t18) = t21; t8 = (0 * 1LL); t9 = (0 * 1LL); t18 = (t0 + 9576U); t19 = *((char **)t18); t10 = *((int64 *)t19); t11 = (0 * 1LL); t18 = (t0 + 32079); t23 = (t25 + 0U); t24 = (t23 + 0U); *((int *)t24) = 1; t24 = (t23 + 4U); *((int *)t24) = 5; t24 = (t23 + 8U); *((int *)t24) = 1; t28 = (5 - 1); t21 = (t28 * 1); t21 = (t21 + 1); t24 = (t23 + 12U); *((unsigned int *)t24) = t21; t24 = (t0 + 5496U); t26 = *((char **)t24); t3 = *((unsigned char *)t26); t24 = (t0 + 5616U); t27 = *((char **)t24); t5 = *((unsigned 
char *)t27); ieee_p_2717149903_sub_756322403_2101202839(IEEE_P_2717149903, t1, t2, t13, t7, 0U, 0U, t14, t17, t8, t9, t10, t11, (unsigned char)1, t18, t25, t3, t5, (unsigned char)1); goto LAB41; LAB43: t27 = (t0 + 3000U); t46 = *((char **)t27); t47 = *((unsigned char *)t46); t48 = (t47 == (unsigned char)2); t5 = t48; goto LAB45; LAB46: t26 = (t0 + 3000U); t46 = *((char **)t26); t48 = *((unsigned char *)t46); t55 = (t48 == (unsigned char)2); t3 = t55; goto LAB48; LAB49: t26 = (t0 + 3000U); t33 = *((char **)t26); t42 = *((unsigned char *)t33); t43 = (t42 == (unsigned char)2); t3 = t43; goto LAB51; LAB52: t26 = (t0 + 4760U); t32 = *((char **)t26); t39 = *((unsigned char *)t32); t41 = (t39 != (unsigned char)2); t5 = t41; goto LAB54; LAB55: t26 = (t0 + 3000U); t35 = *((char **)t26); t45 = *((unsigned char *)t35); t47 = (t45 == (unsigned char)2); t3 = t47; goto LAB57; LAB58: t26 = (t0 + 4760U); t33 = *((char **)t26); t43 = *((unsigned char *)t33); t44 = (t43 != (unsigned char)3); t5 = t44; goto LAB60; LAB61: t1 = (t0 + 5736U); t7 = *((char **)t1); t39 = *((unsigned char *)t7); t41 = (t39 == (unsigned char)1); if (t41 != 0) goto LAB67; LAB69: t1 = (t0 + 12336U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)2; t1 = (t0 + 12456U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)3; LAB68: goto LAB62; LAB64: t3 = (unsigned char)1; goto LAB66; LAB67: t1 = (t0 + 12336U); t13 = *((char **)t1); t1 = (t13 + 0); *((unsigned char *)t1) = (unsigned char)3; t1 = (t0 + 12456U); t2 = *((char **)t1); t1 = (t2 + 0); *((unsigned char *)t1) = (unsigned char)2; goto LAB68; LAB70: t30 = (t0 + 3160U); t37 = *((char **)t30); t55 = *((unsigned char *)t37); t57 = (t55 == (unsigned char)2); t5 = t57; goto LAB72; LAB73: t30 = (t0 + 3000U); t35 = *((char **)t30); t47 = *((unsigned char *)t35); t48 = (t47 == (unsigned char)2); t6 = t48; goto LAB75; LAB76: t30 = (t0 + 5080U); t33 = *((char **)t30); t44 = *((unsigned char *)t33); t45 = (t44 != (unsigned char)3); t36 = t45; goto LAB78; LAB79: t30 = (t0 + 4920U); t32 = *((char **)t30); t42 = *((unsigned char *)t32); t43 = (t42 != (unsigned char)3); t38 = t43; goto LAB81; LAB82: t54 = (t0 + 3160U); t69 = *((char **)t54); t70 = *((unsigned char *)t69); t71 = (t70 == (unsigned char)2); t58 = t71; goto LAB84; LAB85: t54 = (t0 + 3000U); t66 = *((char **)t54); t67 = *((unsigned char *)t66); t68 = (t67 == (unsigned char)2); t63 = t68; goto LAB87; LAB88: t81 = (t0 + 3160U); t85 = *((char **)t81); t86 = *((unsigned char *)t85); t87 = (t86 == (unsigned char)2); t80 = t87; goto LAB90; } extern void simprim_a_4130118134_1564065396_0433961640_init() { static char *pe[] = {(void *)simprim_a_4130118134_1564065396_p_0,(void *)simprim_a_4130118134_1564065396_p_1,(void *)simprim_a_4130118134_1564065396_p_2,(void *)simprim_a_4130118134_1564065396_p_3,(void *)simprim_a_4130118134_1564065396_p_4,(void *)simprim_a_4130118134_1564065396_p_5,(void *)simprim_a_4130118134_1564065396_p_6,(void *)simprim_a_4130118134_1564065396_p_7,(void *)simprim_a_4130118134_1564065396_p_8,(void *)simprim_a_4130118134_1564065396_p_9,(void *)simprim_a_4130118134_1564065396_p_10,(void *)simprim_a_4130118134_1564065396_p_11,(void *)simprim_a_4130118134_1564065396_p_12,(void *)simprim_a_4130118134_1564065396_p_13,(void *)simprim_a_4130118134_1564065396_p_14,(void *)simprim_a_4130118134_1564065396_p_15,(void *)simprim_a_4130118134_1564065396_p_16,(void *)simprim_a_4130118134_1564065396_p_17}; xsi_register_didat("simprim_a_4130118134_1564065396_0433961640", 
"isim/tb_IDE_control_unit_isim_translate.exe.sim/simprim/a_4130118134_1564065396_0433961640.didat"); xsi_register_executes(pe); }
276023.c
/******** Vtest C Program Source Code File (.C) ********/ /* */ /* PROGRAM NAME: Vtest Application (Vtest) */ /* ------------- */ /* Generated by GPF (Gui Programming Facility) Version 2.1 Level(B2) */ /* */ /* SUBSYSTEM: */ /* ---------- */ /* Presentation Manager (32-Bit) - IBM C VisualAge */ /* */ /* DATABASE NAME: No DBM */ /* ------------- */ /* */ /* DATE AND TIME: Fri Nov 9 19:01:03 2001 */ /* ------------- */ /* */ /* COPYRIGHT: */ /* ---------- */ /* (C) 2001 Elipse Limited */ /* */ /* REVISION LEVEL: 1.0 */ /* --------------- */ /* */ /* WHAT YOU NEED TO COMPILE THIS PROGRAM: */ /* -------------------------------------- */ /* */ /* REQUIRED FILES: */ /* --------------- */ /* */ /* Vtest.C - Source code */ /* Vtest.Cmd - Command file to build this program */ /* Vtest.Mak - Make file for this program */ /* Vtest.Def - Module definition file */ /* Vtest.Ext - External definition file */ /* Vtest.H - Application header file */ /* Vtest.Ids - Pm header file */ /* Vtest.L - Linker automatic response file */ /* Vtest.Rc - Resource file */ /* */ /* OS2.H - Presentation Manager include file */ /* STDLIB.H - Miscellaneous function declarations */ /* STDIO.H - Declarations for standard I/O routines */ /* STRING.H - String function declarations */ /* MALLOC.H - Definitions for memory allocation functions */ /* PROCESS.H - Definitions for process control functions */ /* */ /* REQUIRED LIBRARIES: */ /* ------------------- */ /* */ /* OS2386.LIB - Presentation Manager/OS2 library */ /* */ /* REQUIRED PROGRAMS: */ /* ------------------ */ /* */ /* C Compiler (32-Bit) */ /* Linker (32-Bit) */ /* Resource Compiler (32-Bit) */ /* */ /**********************************************************************/ /**********************************************************************/ /* */ /* Include relevant sections of the DOS header file. */ /* */ /**********************************************************************/ /*====================================================================*/ /* */ /* #define: To include: */ /* */ /* + INCL_DOSPROCESS Process and thread support */ /* + INCL_DOSFILEMGR File Management */ /* + INCL_DOSMEMMGR Memory Management */ /* + INCL_DOSSEMAPHORES Semaphore support */ /* + INCL_DOSDATETIME Date/Time and Timer support */ /* INCL_DOSMODULEMGR Module manager */ /* + INCL_DOSRESOURCES Resource support */ /* INCL_DOSNLS National Language Support */ /* INCL_DOSSIGNALS Signals */ /* INCL_DOSMISC Miscellaneous */ /* INCL_DOSMONITORS Monitors */ /* INCL_DOSQUEUES Queues */ /* INCL_DOSSESMGR Session Manager Support */ /* INCL_DOSDEVICES Device specific, ring 2 support */ /* INCL_DOSNMPIPES Named Pipes Support */ /* INCL_DOSPROFILE DosProfile API */ /* INCL_DOSMVDM MVDM support */ /* INCL_DOSEXCEPTIONS Exception Management Support */ /**********************************************************************/ /**********************************************************************/ /* */ /* Include relevant sections of the PM header file. 
*/ /* */ /**********************************************************************/ /*====================================================================*/ /* */ /* #define: To include: */ /* */ /* + INCL_WINWINDOWMGR General window management */ /* + INCL_WINMESSAGEMGR Message management */ /* + INCL_WININPUT Mouse and keyboard input */ /* + INCL_WINDIALOGS Dialog boxes */ /* + INCL_WINSTATICS Static controls */ /* + INCL_WINBUTTONS Button controls */ /* + INCL_WINENTRYFIELDS Entry Fields */ /* INCL_WINMLE Multiple Line Entry Fields */ /* + INCL_WINLISTBOXES List box controls */ /* + INCL_WINMENUS Menu controls */ /* + INCL_WINSCROLLBARS Scroll bar controls */ /* + INCL_WINFRAMEMGR Frame manager */ /* INCL_WINFRAMECTLS Frame controls (title bars & size border)*/ /* INCL_WINRECTANGLES Rectangle routines */ /* INCL_WINSYS System values (and colors) */ /* INCL_WINTIMER Timer routines */ /* + INCL_WINACCELERATORS Keyboard accelerators */ /* INCL_WINTRACKRECT WinTrackRect() function */ /* INCL_WINCLIPBOARD Clipboard manager */ /* + INCL_WINCURSORS Text cursors */ /* + INCL_WINPOINTERS Mouse pointers */ /* INCL_WINHOOKS Hook manager */ /* + INCL_WINSWITCHLIST Shell Switch List API */ /* INCL_WINPROGRAMLIST Shell Program List API */ /* INCL_WINSHELLDATA Shell Data (?) */ /* INCL_WINCOUNTRY Country support */ /* INCL_WINATOM Atom Manager */ /* INCL_WINCATCHTHROW WinCatch/WinThrow support */ /* INCL_WINERRORS Error code definitions */ /* INCL_NLS DBCS window manager definition */ /* + INCL_WINHELP Help Manager definitions */ /* INCL_WINSEI Set Error Info API */ /* INCL_WINLOAD Load/Delete Library/Procedure */ /* INCL_WINTYPES Definitions for Datatypes */ /* INCL_WINSTDDLGS OS/2 Standard Dialog definitions */ /* */ /* ===================================================================*/ /* */ /* INCL_GPI Include all of the GPI */ /* INCL_GPICONTROL Basic PS control */ /* INCL_GPICORRELATION Picking, Boundary and Correlation */ /* INCL_GPISEGMENTS Segment Control and Drawing */ /* INCL_GPISEGEDITING Segment Editing via Elements */ /* INCL_GPITRANSFORMS Transform and Transform Conversion */ /* INCL_GPIPATHS Paths and Clipping with Paths */ /* INCL_GPILOGCOLORTABLE Logical Color Tables */ /* INCL_GPIPRIMITIVES Drawing Primitives and Primitive Attributes*/ /* INCL_GPILCIDS Phyical and Logical Fonts with Lcids */ /* INCL_GPIBITMAPS Bitmaps and Pel Operations */ /* INCL_GPIREGIONS Regions and Clipping with Regions */ /* INCL_GPIMETAFILES Metafiles */ /* INCL_GPIDEFAULTS Default Primitive Attributes */ /* INCL_GPIERRORS defined if INCL_ERRORS defined */ /* */ /**********************************************************************/ #define INCL_DOS /* Need DOS support */ #define INCL_DOSERRORS #define INCL_DOSEXCEPTIONS /* Exception Management Support */ #define INCL_WINWINDOWMGR /* General window management */ #define INCL_WINMESSAGEMGR /* Message management */ #define INCL_WININPUT /* Mouse and keyboard input */ #define INCL_WINDIALOGS /* Dialog boxes */ #define INCL_WINSTATICS /* Static controls */ #define INCL_WINBUTTONS /* Button controls */ #define INCL_WINENTRYFIELDS /* Entry Fields */ #define INCL_WINMLE /* Multiple Line Entry Fields */ #define INCL_WINLISTBOXES /* List box controls */ #define INCL_WINMENUS /* Menu controls */ #define INCL_WINSCROLLBARS /* Scroll bar controls */ #define INCL_WINFRAMEMGR /* Frame manager */ #define INCL_WINRECTANGLES /* Rectangle routines */ #define INCL_WINSYS /* System values (and colors) */ #define INCL_WINACCELERATORS /* Keyboard accelerators */ #define 
INCL_WINCLIPBOARD /* Clipboard manager */ #define INCL_WINCURSORS /* Text cursors */ #define INCL_WINPOINTERS /* Mouse pointers */ #define INCL_WINSWITCHLIST /* Shell Switch List API */ #define INCL_WINPROGRAMLIST /* Shell Program List API */ #define INCL_WINSHELLDATA /* Shell Data (?) */ #define INCL_WINERRORS /* Error code definitions */ #define INCL_WINHELP /* Help Manager definitions */ #define INCL_WINTIMER /* Timer routines */ #define INCL_WINSTDDLGS /* OS/2 Standard Dialog definitions */ #define INCL_GPIPRIMITIVES /* Drawing Primitives and Attributes*/ #define INCL_GPILCIDS /* Phyical and Logical Fonts Lcids */ #define INCL_GPIBITMAPS /* Bitmaps and Pel Operations */ #include <os2.h> /* Presentation Manager include file */ /**********************************************************************/ /* */ /* Include C library routine header files */ /* */ /**********************************************************************/ #include <stdlib.h> /* Miscellaneous function declarations */ #include <stdio.h> /* Declarations for standard I/O routines */ #include <string.h> /* String function declarations */ #include <malloc.h> /* Definitions for memory allocation */ #include <process.h> /* Definitions for process control functions */ /**********************************************************************/ /* */ /* Miscellaneous constants */ /* */ /**********************************************************************/ #define EXTRAWORDS (4 * sizeof(PVOID)) /* Extra window words */ #define GPF_P2C_LEVEL 2 /* GpfP2C.lib Level */ #define COMMAND 0 /* Process commands received from the user */ #define EXTERN #define STACKSIZE 40960 /**********************************************************************/ /* */ /* Miscellaneous constants Ids */ /* */ /**********************************************************************/ #include "Vtest.Ids" /**********************************************************************/ /* */ /* Function prototypes for C functions */ /* */ /**********************************************************************/ #include "Vtest.Ext" /**********************************************************************/ /* */ /* STATIC Function prototypes for private C functions */ /* */ /**********************************************************************/ static VOID InitializeApplication( VOID ); static VOID EndApplication( VOID ); static VOID ActionPushButtonSend( PGPFPARMS pGpfParms ); static VOID ActionPushButtonExit( PGPFPARMS pGpfParms ); static MRESULT ProcessMainWindow( PGPFPARMS pGpfParms ); static VOID CommandMainWindow( PGPFPARMS pGpfParms ); /**********************************************************************/ /* */ /* Miscellaneous constants Vtest.H */ /* */ /**********************************************************************/ #include "Vtest.H" /****************** Start of main procedure ***************************/ /**********************************************************************/ /* */ /* FUNCTION: main */ /* */ /* This is a typical PM main function. */ /* It initializes PM, creates a message queue, loads some strings , */ /* registers some window classes, creates a main window, gets and */ /* dispatches messages to its window procedure until the time to */ /* quit, and then tidies up before terminating. 
*/ /* */ /* It sets the values of the following globals:- */ /* */ /* habMainTask, hwndFrameMainWindow, hwndMainWindow */ /* */ /**********************************************************************/ INT _CENTRY main( LONG argc, PCHAR *argv ) { QMSG qmsg; /* Message structure */ PID Pid; /* Process Id */ TID Tid; /* Thread Id */ HELPINIT hinit; /* Help init */ HPOINTER hptrIcon; /* Handle of the Icon */ Argc = argc; Argv = argv; hmodVtest = (HMODULE)NULL; /* hMod Inline */ hwndHelp = (HWND)0; /* Help Hwnd */ szTaskList = (PCHAR)"Vtest"; EndOfAppl = FALSE; /* Switch Accept End Application */ Vtest_Level = (PCHAR)"1.0"; /********************************************************************/ /* Initialize PM and Create application msg queue */ /********************************************************************/ /* Initialize PM */ habMainTask = WinInitialize( 0 ); /* Create application msg queue */ hmqMainTask = WinCreateMsgQueue( habMainTask, 1024 ); WinSetPointer( HWND_DESKTOP, WinQuerySysPointer( HWND_DESKTOP, SPTR_WAIT,FALSE ) ); /********************************************************************/ /* Query System preferences set by the user from the Control Panel */ /********************************************************************/ LogoTimer = PrfQueryProfileInt( HINI_PROFILE, (PSZ)"PM_ControlPanel", (PSZ)"LogoDisplayTime", -1 ); PrfQueryProfileString( HINI_PROFILE, (PSZ)"PM_National", (PSZ)"sDecimal", (PSZ)".", sDecimal, sizeof(sDecimal) ); strncpy( Swctl.szSwtitle, szTaskList, sizeof(Swctl.szSwtitle)-1 ); /********************************************************************/ /* Initialize Application */ /********************************************************************/ InitializeApplication(); /********************************************************************/ /* Initialize the help manager initialization structure, create an */ /* instance of the help manager, and associate that instance with */ /* the frame. */ /********************************************************************/ /* size of initialization structure */ hinit.cb = sizeof(hinit); /* store HM return code from init. */ hinit.ulReturnCode = (ULONG)NULL; /* no tutorial program */ hinit.pszTutorialName = (PSZ)NULL; /* indicates help table is defined */ /* in the RC file. */ hinit.phtHelpTable = (PHELPTABLE)(0xFFFF0000 | ID_VTEST); /* help table in Exe or a DLL */ hinit.hmodHelpTableModule = hmodVtest; hinit.hmodAccelActionBarModule = (HMODULE)NULL; hinit.idAccelTable = (USHORT)NULL; /* action bar is not tailored */ hinit.idActionBar = (USHORT)NULL; /* help window title */ hinit.pszHelpWindowTitle = (PSZ)szTaskList; /* help panels ID is not displayed */ hinit.fShowPanelId = CMIC_HIDE_PANEL_ID; /* library with help panels */ hinit.pszHelpLibraryName = (PSZ)"Vtest.Hlp"; /********************************************************************/ /* Control Gpf Library Level */ /********************************************************************/ if (GpfP2CLevel() != GPF_P2C_LEVEL) { WinMessageBox( HWND_DESKTOP, HWND_DESKTOP, (PSZ) "Gpf Verion Error", (PSZ) "Invalid P2C Level", 1, MB_OK | MB_APPLMODAL | MB_MOVEABLE ); WinDestroyMsgQueue( hmqMainTask ); WinTerminate( habMainTask ); DosExit( EXIT_PROCESS, 0 ); } /********************************************************************/ /* Create Instance of IPF, pass Anchor Block handle and address of */ /* of IPF initialization structure, and check that creation was */ /* successful. 
*/ /********************************************************************/ hwndHelp = WinCreateHelpInstance( habMainTask, &hinit ); if (!hwndHelp) { WinMessageBox( HWND_DESKTOP, HWND_DESKTOP, (PSZ) "Help Not Available", (PSZ) "Help Creation Error", 1, MB_OK | MB_APPLMODAL | MB_MOVEABLE ); } else { if (hinit.ulReturnCode) { WinMessageBox( HWND_DESKTOP, HWND_DESKTOP, (PSZ) "Help Terminated Due to Error", (PSZ) "Help Creation Error", 1, MB_OK | MB_APPLMODAL | MB_MOVEABLE ); WinDestroyHelpInstance( hwndHelp ); hwndHelp = (HWND)NULL; } } /********************************************************************/ /* Create the window "MainWindow". */ /********************************************************************/ CreateMainWindow( (PVOID) NULL ); /********************************************************************/ /* Make sure the window was created. */ /********************************************************************/ if ((!hwndFrameMainWindow) || (!hwndMainWindow)) { WinDestroyMsgQueue( hmqMainTask ); WinTerminate( habMainTask ); DosExit( EXIT_PROCESS, 0 ); } hptrIcon = (HPOINTER) WinSendMsg( hwndFrameMainWindow, WM_QUERYICON, (MPARAM)NULL, (MPARAM)NULL ); /********************************************************************/ /* Set the window title (Task List) */ /********************************************************************/ WinQueryWindowProcess( hwndFrameMainWindow, &Pid, &Tid ); Swctl.hwnd = hwndFrameMainWindow; Swctl.hwndIcon = hptrIcon; Swctl.hprog = (HPROGRAM)NULL; Swctl.idProcess = Pid; Swctl.idSession = (USHORT)NULL; Swctl.uchVisibility = SWL_VISIBLE; Swctl.fbJump = SWL_JUMPABLE; strncpy( Swctl.szSwtitle, szTaskList, sizeof(Swctl.szSwtitle)-1 ); hSwitch = WinAddSwitchEntry( &Swctl ); /********************************************************************/ /* Show the window "MainWindow". */ /********************************************************************/ ShowMainWindow( (PVOID) NULL ); /* Show Main Window */ /********************************************************************/ /* Main message-processing loop - get and dispatch messages until */ /* WM_QUIT received and Accepted (EndOfAppl = TRUE) */ /********************************************************************/ while(!EndOfAppl) { if (! WinGetMsg( habMainTask, &qmsg, (HWND)NULL, 0, 0 ) ) EndApplication(); else WinDispatchMsg( habMainTask, &qmsg ); } /********************************************************************/ /* Tidy up and terminate */ /********************************************************************/ WinDestroyWindow( hwndFrameMainWindow ); if (hwndHelp != (HWND)NULL) WinDestroyHelpInstance( hwndHelp ); WinDestroyMsgQueue( hmqMainTask ); WinTerminate( habMainTask ); /********************************************************************/ /* Dos Exit */ /********************************************************************/ DosExit( EXIT_PROCESS, 0 ); return(0); } /**********************************************************************/ /* End of main procedure */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* WinProc: GpfDefWindowProc */ /* */ /* Controls the "DefaultWindowProc" Area. 
*/ /* */ /* */ /**********************************************************************/ MRESULT EXPENTRY GpfDefWindowProc( HWND hwnd, ULONG msg, MPARAM mp1, MPARAM mp2 ) { LONG Color; /* Work BackGround Color */ PGPFPARMS pGpfParms; /* Far Pointer */ SHORT Short; /* Work Short */ /********************************************************************/ /* Initialize Gpf Parameters */ /********************************************************************/ pGpfParms = (PGPFPARMS) WinQueryWindowULong( hwnd, QWL_USER ); if (!pGpfParms) return WinDefWindowProc( hwnd, msg, mp1, mp2 ); switch(msg) { /**************************************************************/ /* Things to do when the Window is first initalized */ /**************************************************************/ case WM_INITDLG: /**************************************************************/ /* Save CreateUserParms */ /**************************************************************/ pGpfParms->pCrtParms = (PVOID)mp2; /**************************************************************/ /* Associate Help Instance */ /**************************************************************/ WinAssociateHelpInstance( hwndHelp, GpfGetHwndFrame( hwnd ) ); /**************************************************************/ /* Save Default Button */ /**************************************************************/ pGpfParms->hwndDefaultBtn = GpfQueryDefaultButton( hwnd ); /**************************************************************/ /* Set Default BackGroundColor */ /**************************************************************/ pGpfParms->BgColor = SYSCLR_WINDOW; /**************************************************************/ /* Query Presentation Parameters BackGroundColor */ /**************************************************************/ if (WinQueryPresParam( hwnd, PP_BACKGROUNDCOLOR, (ULONG)NULL, (PULONG)NULL, 4L, &Color, (USHORT)NULL ) ) pGpfParms->BgColor = Color; /**************************************************************/ /* Query Presentation Parameters BackGroundColor Index */ /**************************************************************/ if (WinQueryPresParam( hwnd, (ULONG)NULL, PP_BACKGROUNDCOLORINDEX, (PULONG)NULL, 4L, &Color, QPF_ID1COLORINDEX)) pGpfParms->BgColor = Color; /**************************************************************/ /* Set Default ForeGroundColor */ /**************************************************************/ pGpfParms->FgColor = SYSCLR_WINDOWTEXT; /**************************************************************/ /* Query Presentation Parameters ForeGroundColor */ /**************************************************************/ if (WinQueryPresParam( hwnd, PP_FOREGROUNDCOLOR, (ULONG)NULL, (PULONG)NULL, 4L, &Color, (USHORT)NULL ) ) pGpfParms->FgColor = Color; /**************************************************************/ /* Query Presentation Parameters ForeGroundColor Index */ /**************************************************************/ if (WinQueryPresParam( hwnd, (ULONG)NULL, PP_FOREGROUNDCOLORINDEX, (PULONG)NULL, 4L, &Color, QPF_ID1COLORINDEX ) ) pGpfParms->FgColor = Color; /**************************************************************/ /* Return pGpfParms Mresult */ /**************************************************************/ return (pGpfParms->mresult); /**************************************************************/ /* The application has asked for the window to be destroyed. 
*/ /**************************************************************/ case WM_DESTROY: GpfDestroyTemplate( hwnd ); /* Destroy Template */ WinSetWindowULong( hwnd, QWL_USER, (ULONG)NULL ); /* Extract selector from far pointer */ free( pGpfParms ); /* FreeMain (GpfParms) */ break; /**************************************************************/ /* Let frame control erase background for us */ /**************************************************************/ case WM_ERASEBACKGROUND: /* The client window is cleared to SYSCLR_WINDOW. */ return (MRESULT)(FALSE); /* No background windows. */ /***************************************************************/ /* The window needs painting. */ /***************************************************************/ case WM_PAINT: Short = pGpfParms->ShortMsgId; /* Save MsgId */ pGpfParms->ShortMsgId = -1; /* Reset MsgId */ GpfSendMsgInformation( pGpfParms, Short ); return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /**************************************************************/ /* Resize & Reposition all the BASE windows. */ /* WM_SIZE message was generated when window was created. */ /**************************************************************/ case WM_SIZE: if (pGpfParms->Options & GPF_OP_ADJUSTSIZE) GpfAdjustControlSize( hwnd, mp1, mp2 ); /* Adjust Size */ return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /**************************************************************/ /* Receive Char ( Set Tabulation ) */ /**************************************************************/ case WM_CHAR: pGpfParms->mresult = GpfSetTabulation( hwnd, msg, mp1, mp2 ); Short = WinQueryWindowUShort( WinQueryFocus( HWND_DESKTOP ), QWS_ID ); if (!pGpfParms->mresult) { if (pGpfParms->pGpfPage) WinSendMsg(pGpfParms->pGpfPage->hwndNoteBook, msg, mp1, mp2 ); } GpfSendMsgInformation( pGpfParms, Short ); return (pGpfParms->mresult); /**************************************************************/ /* Process MouseMove for Information Msg */ /**************************************************************/ case WM_MOUSEMOVE: Short = WinQueryWindowUShort( GpfGetHwndFrame( hwnd ),QWS_ID ); GpfSendMsgInformation( pGpfParms, Short ); return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /**************************************************************/ /* Process ControlPointer for Information Msg */ /**************************************************************/ case WM_CONTROLPOINTER: if (WinWindowFromID( hwnd, SHORT1FROMMP(mp1) ) ) GpfSendMsgInformation( pGpfParms, SHORT1FROMMP(mp1)); return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /**************************************************************/ /* Process MenuSelected for Information Msg */ /**************************************************************/ case WM_MENUSELECT: GpfSendMsgInformation( pGpfParms, SHORT1FROMMP(mp1)); return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /**************************************************************/ /* Process MenuEnd for Information Msg */ /**************************************************************/ case WM_MENUEND: if (SHORT1FROMMP(mp1) == FID_MENU) { pGpfParms->ShortMsgId = -1; /* Reset MsgId */ GpfSendMsgInformation( pGpfParms, 0); } return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /**************************************************************/ /* Things to do when the Window is Moved */ /**************************************************************/ case WM_MOVE: return WinDefWindowProc( hwnd, msg, mp1, mp2 ); /***************************************************************/ /* The List Box needs to know 
how big to make an Item. */ /***************************************************************/ case WM_MEASUREITEM: return (MRESULT)GpfMeasureItem( hwnd, mp1, mp2 ); /**************************************************************/ /* The List Box wants us to Draw an Item */ /**************************************************************/ case WM_DRAWITEM: return (MRESULT)GpfDrawItem( hwnd, mp1, mp2 ); /**************************************************************/ /* Things to do when the Window is End LogoTimer */ /**************************************************************/ case WM_TIMER: if (SHORT1FROMMP(mp1) != TID_USERMAX) /* LogoTimer... */ break; /* No... Ignore Timer */ WinStopTimer( WinQueryAnchorBlock( hwnd ), hwnd, TID_USERMAX ); /* Stop LogoTimer */ /**************************************************************/ /* Simulate CANCEL - Post WM_CLOSE to Window */ /**************************************************************/ WinPostMsg( hwnd, WM_CLOSE, (MPARAM)NULL, (MPARAM)NULL ); break; case HM_HELPSUBITEM_NOT_FOUND: /**************************************************************/ /* Help manager couldn't find a help panel for an item. */ /* Returning FALSE will display the extended help for that */ /* window. */ /* Returning TRUE Tells the Help Manager to do nothing. */ /**************************************************************/ Short = WinQueryWindowUShort( GpfGetHwndFrame( hwnd ),QWS_ID ); if (SHORT1FROMMP(mp1) == 0xFFFD) /* HLPM_MENU */ { if (WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT(SHORT1FROMMP(mp2)), MPFROMSHORT(HM_RESOURCEID) )) if (WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT(Short), MPFROMSHORT(HM_RESOURCEID) )) WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT( ID_VTEST ), MPFROMSHORT(HM_RESOURCEID) ); } else { if (WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT(SHORT1FROMMP(mp2)), MPFROMSHORT(HM_RESOURCEID) )) GpfDisplayHelp( WinQueryFocus( HWND_DESKTOP )); } return (MRESULT)(TRUE); case HM_QUERY_KEYS_HELP: /**************************************************************/ /* Return Keys help panel ID for the help manager */ /**************************************************************/ /**************************************************************/ /* QUERY_KEYS_HELP */ /* */ /* If the user requests KEYS HELP from the help pull-down, */ /* IPF sends the HM_QUERY_KEYS_HELP message to the application*/ /* which should return the panel id of the keys_help panel, */ /* or a 0 to tell IPF to do nothing. */ /**************************************************************/ return (MRESULT)(ID_VTEST - 1); case HM_EXT_HELP_UNDEFINED: /*************************************************************/ /* This message is sent to the application by Help Manager */ /* to notify it that an extended help panel has not been */ /* defined. */ /*************************************************************/ WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT( ID_VTEST ), MPFROMSHORT( HM_RESOURCEID ) ); return ((MRESULT) FALSE); case HM_ERROR: /*************************************************************/ /* If an error occurs using IPF, an HM_ERROR msg will be */ /* sent to the application. 
*/ /*************************************************************/ if ( (ULONG) mp1 == HMERR_NO_MEMORY ) { WinMessageBox( HWND_DESKTOP, hwnd, (PSZ) "Help Terminated Due to Error", (PSZ) "Help Error", 1, MB_OK | MB_APPLMODAL | MB_MOVEABLE ); WinDestroyHelpInstance( hwndHelp ); hwndHelp = (HWND)NULL; } return ((MRESULT) FALSE); /***************************************************************/ /* Other messages are handled by the default window procedure. */ /***************************************************************/ default: /* Pass all other messages to the default window procedure */ return WinDefWindowProc( hwnd, msg, mp1, mp2 ); } return ((MRESULT) FALSE); } /**********************************************************************/ /* End of window procedure - WinProc: GpfDefWindowProc */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* FUNCTION : GpfDisplayHelp */ /* */ /**********************************************************************/ VOID APIENTRY GpfDisplayHelp( HWND hwnd ) { HWND hwndDesktop; /* hwnd Desktop */ HWND hwndWrk; /* hwnd Work */ USHORT uShort; /* Work uShort */ HAB hab; /* Anchor block handle */ hab = WinQueryAnchorBlock(hwnd); if ((!hab) || (!hwndHelp)) /* Verify hwnd, hab, and hwndHelp */ return; hwndWrk = hwnd; hwndDesktop = WinQueryDesktopWindow(hab,(HDC)NULL); while(hwndWrk != hwndDesktop) { uShort = WinQueryWindowUShort(hwndWrk,QWS_ID); if ( uShort != FID_CLIENT ) { if (!WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT(uShort), MPFROMSHORT(HM_RESOURCEID) )) return; } hwndWrk = WinQueryWindow( hwndWrk, QW_PARENT); } WinSendMsg( hwndHelp, HM_DISPLAY_HELP, MPFROMSHORT( ID_VTEST ), MPFROMSHORT( HM_RESOURCEID ) ); return; } /**********************************************************************/ /* End of GpfDisplayHelp Function */ /**********************************************************************/ /**********************************************************************/ /******************* Start of GpfSendMsg Function *********************/ /**********************************************************************/ /* */ /* FUNCTION : GpfSendMsg */ /* */ /* Display, and operate the message box window */ /**********************************************************************/ SHORT APIENTRY GpfSendMsg( SHORT Id ) { CHAR String[256]; HWND SysModalWindow; HWND hwndActive; /* Query Active Window (ApplModal) */ USHORT Reply = 0; SHORT Alarm = -1; SHORT Style = 0; PCHAR Token1 = 0; PCHAR Token2 = 0; /*********************************************************************/ /* Save Window (System Modal) */ /*********************************************************************/ SysModalWindow = WinQuerySysModalWindow( HWND_DESKTOP ); /*********************************************************************/ /* Load Msg strings */ /*********************************************************************/ WinLoadString( habMainTask, hmodVtest, Id, sizeof(String),(PSZ)String ); Token1 = strchr( String, ':' ); if (Token1) { *Token1 = 0; Alarm = atoi( String ); Token2 = strchr( ++Token1, ' ' ); if (Token2) { *(Token2++) = 0; Style = atoi( Token1 ); /*************************************************************/ /* Set Alarm */ /*************************************************************/ if (Alarm != -1) WinAlarm( HWND_DESKTOP, 
Alarm ); /*************************************************************/ /* Query HwndOwner */ /*************************************************************/ hwndActive = WinQueryActiveWindow( HWND_DESKTOP ); /*************************************************************/ /* Now, display the message, save the button press for a */ /* return code. */ /* Pop up a Message box. */ /*************************************************************/ Reply = WinMessageBox( HWND_DESKTOP, hwndActive, (PSZ) Token2, (PSZ)Swctl.szSwtitle, Id, Style ); /*************************************************************/ /* Restore Window (System Modal) */ /*************************************************************/ WinSetSysModalWindow( HWND_DESKTOP, SysModalWindow ); /*************************************************************/ /* Reset Information Message */ /*************************************************************/ hwndActive = WinWindowFromID( hwndActive, FID_CLIENT ); WinPostMsg( hwndActive, WM_MOUSEMOVE, (MPARAM) NULL, (MPARAM) NULL ); } } return(Reply); } /**********************************************************************/ /* End of GpfSendMsg Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* STATIC PRIVATE FUNCTION: Initialize Application */ /* */ /**********************************************************************/ static VOID InitializeApplication( VOID ) { GpfInitThread(habMainTask,hmqMainTask); GpfGetVunitHunit(&Vunits,&Hunits); return; } /**********************************************************************/ /* End of Private Function: Initialize Application */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* STATIC PRIVATE FUNCTION: End Application */ /* */ /**********************************************************************/ static VOID EndApplication( VOID ) { EndOfAppl = TRUE; return; } /**********************************************************************/ /* End of Private Function: End Application */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* FUNCTION: ShowMainWindow */ /* */ /* Show Window "MainWindow". */ /* */ /**********************************************************************/ BOOL APIENTRY ShowMainWindow( PVOID pCrtParms ) { GpfExecAsyncProc((PFNASYNCPROC)AsynShowMainWindow, pCrtParms,hmqMainTask); return(FALSE); } /**********************************************************************/ /* End of ShowMainWindow Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* FUNCTION: AsynShowMainWindow */ /* */ /* AsynShow Window "MainWindow". 
*/ /* */ /**********************************************************************/ VOID APIENTRY AsynShowMainWindow( PVOID pCrtParms ) { /* Make sure the window was not created. */ if (!hwndFrameMainWindow) CreateMainWindow( pCrtParms ); WinSetWindowPos( hwndFrameMainWindow, HWND_TOP, 0, 0, 0, 0, SWP_ZORDER | SWP_ACTIVATE | SWP_SHOW ); WinShowWindow( hwndMainWindow, TRUE ); return; } /**********************************************************************/ /* End of AsynShowMainWindow Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* FUNCTION: DismissMainWindow */ /* */ /* Dismiss Window Hide - Window "MainWindow". */ /* */ /**********************************************************************/ VOID APIENTRY DismissMainWindow( BOOL Rcode ) { Rcode = Rcode; /* Null , For Delete Unreferenced Formal Parameter */ /* Hide Window */ WinSetWindowPos( hwndFrameMainWindow, (HWND)NULL, 0, 0, 0, 0, SWP_DEACTIVATE | SWP_HIDE ); WinShowWindow( hwndMainWindow, FALSE ); return; } /**********************************************************************/ /* End of DismissMainWindow Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* FUNCTION: CreateMainWindow */ /* */ /* Create the window "MainWindow" And Controls. */ /* */ /**********************************************************************/ HWND APIENTRY CreateMainWindow( PVOID pCrtParms ) { /********************************************************************/ /* Register the window class "MainWindow". */ /********************************************************************/ WinRegisterClass( /* Register window class */ habMainTask, /* Anchor block handle */ (PSZ)"MainWindow", /* Window class name */ (PFNWP)fnwpMainWindow, /* Address window proc */ CS_CLIPSIBLINGS | CS_SAVEBITS | /* Class style */ CS_MOVENOTIFY | /* Class style */ CS_SIZEREDRAW | CS_SYNCPAINT, /* Class style */ EXTRAWORDS /* Extra window words */ ); /********************************************************************/ /* Create the window "MainWindow". */ /********************************************************************/ hwndFrameMainWindow = /* Save Hwnd */ GpfCreateTemplate( ID_MAINWINDOW, /* Ressource Id */ hmodVtest, /* Ressource Md */ HWND_DESKTOP, /* Hwnd Parent */ HWND_DESKTOP, /* Hwnd Owner */ pCrtParms, /* User Parms */ 0,0L); /* Reserved */ return (hwndFrameMainWindow); } /**********************************************************************/ /* End of CreateMainWindow Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* WinProc: fnwpMainWindow */ /* */ /* Controls the "MainWindow" Area. 
*/ /* */ /* */ /**********************************************************************/ MRESULT EXPENTRY fnwpMainWindow( HWND hwnd, ULONG msg, MPARAM mp1, MPARAM mp2 ) { PGPFPARMS pGpfParms; /* Far Pointer */ MPARAM SvMp2; /* 2nd (packed) parms */ MPARAM SvMp1; /* 1st (packed) parms */ ULONG SvMsg; /* message number */ MRESULT SvMresult; /* Mresult */ USHORT SvCommand; /* Command From mp1 (SHORT 1) */ USHORT SvNotify; /* Notify From mp1 (SHORT 2) */ HWND SvHwndControl; /* Control hwnd */ LONG SvVsComponentId; /* ValueSet Component ID */ MRESULT PmMresult; /*******************************************************************/ /* Query Gpf Parameters */ /*******************************************************************/ pGpfParms = (PGPFPARMS) WinQueryWindowULong( hwnd, QWL_USER ); if ((!pGpfParms) && (msg == WM_CREATE)) { pGpfParms = malloc( sizeof(GPFPARMS) ); /* Set Pointer*/ WinSetWindowULong( hwnd, QWL_USER, (ULONG)pGpfParms ); memset( pGpfParms, 0, sizeof(GPFPARMS) ); /* Clear Area */ pGpfParms->cb = sizeof(GPFPARMS); /* Set sizeof */ } if (!pGpfParms) return GpfDefWindowProc( hwnd, msg, mp1, mp2 ); /********************************************************************/ /* Save Gpf Parameters */ /********************************************************************/ SvMsg = pGpfParms->msg; SvMp1 = pGpfParms->mp1; SvMp2 = pGpfParms->mp2; SvMresult = pGpfParms->mresult; SvCommand = pGpfParms->Command; SvNotify = pGpfParms->Notify; SvHwndControl = pGpfParms->hwndControl; SvVsComponentId = pGpfParms->VsComponentId; /********************************************************************/ /* Initialize Gpf Parameters */ /********************************************************************/ pGpfParms->hwnd = hwnd; /* window handle */ pGpfParms->msg = msg; /* message number */ pGpfParms->mp1 = mp1; /* 1st (packed) parms */ pGpfParms->mp2 = mp2; /* 2nd (packed) parms */ pGpfParms->mresult = (MRESULT) FALSE; /* Mresult */ pGpfParms->Command = 0; /* Command From mp1 (SHORT 1) */ pGpfParms->Notify = 0; /* Notify From mp1 (SHORT 2) */ PmMresult = ProcessMainWindow( pGpfParms ); /*******************************************************************/ /* Query Gpf Parameters */ /*******************************************************************/ pGpfParms = (PGPFPARMS) WinQueryWindowULong( hwnd, QWL_USER ); if (pGpfParms) { /****************************************************************/ /* Restore Gpf Parameters */ /****************************************************************/ pGpfParms->msg = SvMsg; pGpfParms->mp1 = SvMp1; pGpfParms->mp2 = SvMp2; pGpfParms->mresult = SvMresult; pGpfParms->Command = SvCommand; pGpfParms->Notify = SvNotify; pGpfParms->hwndControl = SvHwndControl; pGpfParms->VsComponentId = SvVsComponentId; } return PmMresult; } /**********************************************************************/ /* End of window procedure - WinProc: fnwpMainWindow */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* FUNCTION : ProcessMainWindow */ /* */ /* Process "MainWindow". 
*/ /* */ /**********************************************************************/ static MRESULT ProcessMainWindow( PGPFPARMS pGpfParms ) { switch(pGpfParms->msg) { /**************************************************************/ /* Things to do when the Window is first initalized */ /* This message occurs when a dialog box is being created. */ /**************************************************************/ case WM_INITDLG: /**************************************************************/ /* Initialize Window 'MainWindow' */ /**************************************************************/ GpfDefWindowProc( pGpfParms->hwnd, pGpfParms->msg, pGpfParms->mp1, pGpfParms->mp2 ); /**************************************************************/ /* Return pGpfParms Mresult */ /**************************************************************/ return (pGpfParms->mresult); /**************************************************************/ /* The application has asked for the window to be created. */ /**************************************************************/ case WM_CREATE: pGpfParms->Options |= GPF_OP_MAINWINDOW; /* Set MainWindow */ pGpfParms->Options |= GPF_OP_ADJUSTSIZE; /* Set AdjustSize */ hwndMainWindow = pGpfParms->hwnd; /* Save Hwnd */ hwndFrameMainWindow = GpfGetHwndFrame( pGpfParms->hwnd ); pGpfParms->hmod = hmodVtest; pGpfParms->hab = habMainTask; /* Anchor block handle */ break; /**************************************************************/ /* The application has asked for the window to be destroyed. */ /* Always sent to the window being destroyed after the window */ /* has been hidden on the device, but before its children */ /* have been destroyed. The message is sent first to the */ /* window being destroyed, then to the children as they are */ /* destroyed. Therefore, during processing the WM_DESTROY it */ /* can be assumed that all the children still exist. */ /**************************************************************/ case WM_DESTROY: hwndFrameMainWindow = (HWND)NULL; /* Reset HwndFrm */ hwndMainWindow = (HWND)NULL; /* Reset Hwnd */ /* Destroy Template , Help Instance and GpfParms */ return GpfDefWindowProc( pGpfParms->hwnd, pGpfParms->msg, pGpfParms->mp1, pGpfParms->mp2 ); /**************************************************************/ /* The window needs painting. 
*/ /**************************************************************/ case WM_PAINT: /**************************************************************/ /* Obtain a cache PS */ /**************************************************************/ pGpfParms->hps = WinBeginPaint( pGpfParms->hwnd, pGpfParms->hpsBeginPaint, (PRECTL)&pGpfParms->rcl ); WinFillRect(pGpfParms->hps,&pGpfParms->rcl,pGpfParms->BgColor); WinEndPaint( pGpfParms->hps ); /* Release cache PS */ return GpfDefWindowProc( pGpfParms->hwnd, pGpfParms->msg, pGpfParms->mp1, pGpfParms->mp2 ); /**************************************************************/ /* Process ScrollBar received from the user */ /**************************************************************/ case WM_VSCROLL: case WM_HSCROLL: /* Set Command From mp1 */ pGpfParms->Command = SHORT1FROMMP( pGpfParms->mp1 ); /* Set Notify From mp2 */ pGpfParms->Notify = SHORT2FROMMP( pGpfParms->mp2 ); if ((pGpfParms->Command != FID_VERTSCROLL) && (pGpfParms->Command != FID_HORZSCROLL)) CommandMainWindow( pGpfParms ); /* Process the command */ return (pGpfParms->mresult); /**************************************************************/ /* Process Controls received from the user */ /**************************************************************/ case WM_CONTROL: /* Set Command From mp1 */ pGpfParms->Command = SHORT1FROMMP( pGpfParms->mp1 ); /* Set Notify From mp1 */ pGpfParms->Notify = SHORT2FROMMP( pGpfParms->mp1 ); CommandMainWindow( pGpfParms ); /* Process the command */ return (pGpfParms->mresult); /**************************************************************/ /* This message occurs when a control has a significant event */ /* to its owner or when a key stroke has been */ /* translated by an accelerator table into a WM_COMMAND. */ /**************************************************************/ case WM_SYSCOMMAND: case WM_HELP: case WM_COMMAND: /* Set Command From mp1 */ pGpfParms->Command = SHORT1FROMMP(pGpfParms->mp1); pGpfParms->Notify = COMMAND; /* Set Notify From mp1 */ CommandMainWindow( pGpfParms ); /* Process the command */ if (!(pGpfParms->Flag & GPF_FL_PROCESSED)) return GpfDefWindowProc( pGpfParms->hwnd, pGpfParms->msg, pGpfParms->mp1, pGpfParms->mp2 ); return (pGpfParms->mresult); /**************************************************************/ /* This message is sent to a frame window to indicate that */ /* the window is being closed by the user. */ /**************************************************************/ case WM_CLOSE: pGpfParms->Command = DID_CANCEL; /* simulate CANCEL */ pGpfParms->Notify = COMMAND; /* Set Notify */ CommandMainWindow( pGpfParms ); /* Process the command */ if (!(pGpfParms->Flag & GPF_FL_PROCESSED)) WinPostMsg( pGpfParms->hwnd, /* Cause termination */ WM_QUIT, (MPARAM) NULL, (MPARAM) NULL); return (pGpfParms->mresult); /**************************************************************/ /* All other messages are passed to the default procedure. 
*/ /**************************************************************/ default: /* Pass all other messages to the default window procedure */ return GpfDefWindowProc( pGpfParms->hwnd, pGpfParms->msg, pGpfParms->mp1, pGpfParms->mp2 ); } return ((MRESULT) FALSE); } /**********************************************************************/ /* End of ProcessMainWindow Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* STATIC PRIVATE FUNCTION: CommandMainWindow */ /* */ /* Takes appropriate action when a WM_COMMAND/WM_CONTROL message is */ /* received by the window procedure. */ /* Window "MainWindow". */ /* */ /**********************************************************************/ static VOID CommandMainWindow ( PGPFPARMS pGpfParms ) { pGpfParms->Flag |= GPF_FL_PROCESSED; /* Set Processed Flag */ pGpfParms->hwndControl = WinWindowFromID( pGpfParms->hwnd, pGpfParms->Command ); /* Switch according to Control chosen */ switch( pGpfParms->Command ) { /******************************************************************/ /* Name "PushButtonSend". */ /* Text "~Send". */ /******************************************************************/ case ID_PUSHBUTTONSEND: ActionPushButtonSend( pGpfParms ); return; /******************************************************************/ /* Name "PushButtonExit". */ /* Text "E~xit". */ /******************************************************************/ case ID_PUSHBUTTONEXIT: ActionPushButtonExit( pGpfParms ); return; } /* Take no action for any other selections */ pGpfParms->Flag &= ~GPF_FL_PROCESSED; /* Reset Processed Flag */ return; } /**********************************************************************/ /* End of Private Function: CommandMainWindow */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* STATIC PRIVATE FUNCTION: ActionPushButtonSend */ /* */ /* Takes appropriate action when a WM_COMMAND/WM_CONTROL message is */ /* received by the window procedure. */ /* Window "MainWindow". */ /* */ /**********************************************************************/ static VOID ActionPushButtonSend( PGPFPARMS pGpfParms ) { /* Switch according to Control chosen */ switch( pGpfParms->Notify ) { case COMMAND: case BN_CLICKED: case BN_DBLCLICKED: send_data( pGpfParms ); return; } /* Take no action for any other selections */ return; } /**********************************************************************/ /* End of Private Function */ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /**********************************************************************/ /* */ /* STATIC PRIVATE FUNCTION: ActionPushButtonExit */ /* */ /* Takes appropriate action when a WM_COMMAND/WM_CONTROL message is */ /* received by the window procedure. */ /* Window "MainWindow". 
*/ /* */ /**********************************************************************/ static VOID ActionPushButtonExit( PGPFPARMS pGpfParms ) { /* Switch according to Control chosen */ switch( pGpfParms->Notify ) { case COMMAND: case BN_CLICKED: case BN_DBLCLICKED: /**************************************************************/ /* Exit selected */ /**************************************************************/ WinPostMsg( hwndMainWindow, WM_QUIT, (MPARAM) NULL, (MPARAM) NULL ); return; } /* Take no action for any other selections */ return; } /**********************************************************************/ /* End of Private Function */ /**********************************************************************/ /**********************************************************************/ 
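/**********************************************************************/
/*                                                                    */
/* Illustrative sketch (not part of the original sample): how one     */
/* further pushbutton could be handled with the same pattern used by  */
/* ActionPushButtonSend/ActionPushButtonExit above. The control id    */
/* ID_PUSHBUTTONCLEAR and the entry-field id ID_ENTRYFIELDDATA are    */
/* hypothetical and exist only for this example, which is therefore   */
/* kept out of the build behind VTEST_EXAMPLE_ONLY.                   */
/*                                                                    */
/**********************************************************************/
#ifdef VTEST_EXAMPLE_ONLY
static VOID ActionPushButtonClear( PGPFPARMS pGpfParms )
{
  /* Switch according to notification received                        */
  switch( pGpfParms->Notify )
  {
    case COMMAND:
    case BN_CLICKED:
    case BN_DBLCLICKED:
      /* Clear the (hypothetical) entry field owned by "MainWindow"   */
      WinSetWindowText( WinWindowFromID( pGpfParms->hwnd,
                                         ID_ENTRYFIELDDATA ),
                        (PSZ)"" );
      return;
  }
  /* Take no action for any other notifications                       */
  return;
}
/* CommandMainWindow() would then gain one more case:                 */
/*   case ID_PUSHBUTTONCLEAR:                                         */
/*     ActionPushButtonClear( pGpfParms );                            */
/*     return;                                                        */
#endif /* VTEST_EXAMPLE_ONLY */
/**********************************************************************/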
579728.c
// Copyright (c) Microsoft. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include "iot_configs.h" #include "sample.h" #include "AzureIoTHub.h" /*String containing Hostname, Device Id & Device Key in the format: */ /* "HostName=<host_name>;DeviceId=<device_id>;SharedAccessKey=<device_key>" */ static const char* connectionString = IOT_CONFIG_CONNECTION_STRING; // Define the Model BEGIN_NAMESPACE(WeatherStation); DECLARE_MODEL(ContosoAnemometer, WITH_DATA(ascii_char_ptr, DeviceId), WITH_DATA(int, WindSpeed), WITH_DATA(float, Temperature), WITH_DATA(float, Humidity), WITH_ACTION(TurnFanOn), WITH_ACTION(TurnFanOff), WITH_ACTION(SetAirResistance, int, Position) ); END_NAMESPACE(WeatherStation); static char propText[1024]; EXECUTE_COMMAND_RESULT TurnFanOn(ContosoAnemometer* device) { (void)device; (void)printf("Turning fan on.\r\n"); return EXECUTE_COMMAND_SUCCESS; } EXECUTE_COMMAND_RESULT TurnFanOff(ContosoAnemometer* device) { (void)device; (void)printf("Turning fan off.\r\n"); return EXECUTE_COMMAND_SUCCESS; } EXECUTE_COMMAND_RESULT SetAirResistance(ContosoAnemometer* device, int Position) { (void)device; (void)printf("Setting Air Resistance Position to %d.\r\n", Position); return EXECUTE_COMMAND_SUCCESS; } void sendCallback(IOTHUB_CLIENT_CONFIRMATION_RESULT result, void* userContextCallback) { unsigned int messageTrackingId = (unsigned int)(uintptr_t)userContextCallback; (void)printf("Message Id: %u Received.\r\n", messageTrackingId); (void)printf("Result Call Back Called! Result is: %s \r\n", ENUM_TO_STRING(IOTHUB_CLIENT_CONFIRMATION_RESULT, result)); } static void sendMessage(IOTHUB_CLIENT_LL_HANDLE iotHubClientHandle, const unsigned char* buffer, size_t size) { static unsigned int messageTrackingId; IOTHUB_MESSAGE_HANDLE messageHandle = IoTHubMessage_CreateFromByteArray(buffer, size); if (messageHandle == NULL) { printf("unable to create a new IoTHubMessage\r\n"); } else { if (IoTHubClient_LL_SendEventAsync(iotHubClientHandle, messageHandle, sendCallback, (void*)(uintptr_t)messageTrackingId) != IOTHUB_CLIENT_OK) { printf("failed to hand over the message to IoTHubClient"); } else { printf("IoTHubClient accepted the message for delivery\r\n"); } IoTHubMessage_Destroy(messageHandle); } free((void*)buffer); messageTrackingId++; } /*this function "links" IoTHub to the serialization library*/ static IOTHUBMESSAGE_DISPOSITION_RESULT IoTHubMessage(IOTHUB_MESSAGE_HANDLE message, void* userContextCallback) { IOTHUBMESSAGE_DISPOSITION_RESULT result; const unsigned char* buffer; size_t size; if (IoTHubMessage_GetByteArray(message, &buffer, &size) != IOTHUB_MESSAGE_OK) { printf("unable to IoTHubMessage_GetByteArray\r\n"); result = IOTHUBMESSAGE_ABANDONED; } else { /*buffer is not zero terminated*/ char* temp = malloc(size + 1); if (temp == NULL) { printf("failed to malloc\r\n"); result = IOTHUBMESSAGE_ABANDONED; } else { EXECUTE_COMMAND_RESULT executeCommandResult; (void)memcpy(temp, buffer, size); temp[size] = '\0'; executeCommandResult = EXECUTE_COMMAND(userContextCallback, temp); result = (executeCommandResult == EXECUTE_COMMAND_ERROR) ? IOTHUBMESSAGE_ABANDONED : (executeCommandResult == EXECUTE_COMMAND_SUCCESS) ? 
IOTHUBMESSAGE_ACCEPTED : IOTHUBMESSAGE_REJECTED; free(temp); } } return result; } void simplesample_http_run(void) { if (platform_init() != 0) { printf("Failed to initialize the platform.\r\n"); } else { if (serializer_init(NULL) != SERIALIZER_OK) { (void)printf("Failed on serializer_init\r\n"); } else { IOTHUB_CLIENT_LL_HANDLE iotHubClientHandle = IoTHubClient_LL_CreateFromConnectionString(connectionString, HTTP_Protocol); int avgWindSpeed = 10; float minTemperature = 20.0; float minHumidity = 60.0; srand((unsigned int)time(NULL)); if (iotHubClientHandle == NULL) { (void)printf("Failed on IoTHubClient_LL_Create\r\n"); } else { // Because it can poll "after 9 seconds" polls will happen // effectively at ~10 seconds. // Note that for scalabilty, the default value of minimumPollingTime // is 25 minutes. For more information, see: // https://azure.microsoft.com/documentation/articles/iot-hub-devguide/#messaging unsigned int minimumPollingTime = 9; ContosoAnemometer* myWeather; if (IoTHubClient_LL_SetOption(iotHubClientHandle, "MinimumPollingTime", &minimumPollingTime) != IOTHUB_CLIENT_OK) { printf("failure to set option \"MinimumPollingTime\"\r\n"); } #ifdef SET_TRUSTED_CERT_IN_SAMPLES // For mbed add the certificate information if (IoTHubClient_LL_SetOption(iotHubClientHandle, "TrustedCerts", certificates) != IOTHUB_CLIENT_OK) { (void)printf("failure to set option \"TrustedCerts\"\r\n"); } #endif // SET_TRUSTED_CERT_IN_SAMPLES myWeather = CREATE_MODEL_INSTANCE(WeatherStation, ContosoAnemometer); if (myWeather == NULL) { (void)printf("Failed on CREATE_MODEL_INSTANCE\r\n"); } else { if (IoTHubClient_LL_SetMessageCallback(iotHubClientHandle, IoTHubMessage, myWeather) != IOTHUB_CLIENT_OK) { printf("unable to IoTHubClient_SetMessageCallback\r\n"); } else { myWeather->DeviceId = "myFirstDevice"; myWeather->WindSpeed = avgWindSpeed + (rand() % 4 + 2); myWeather->Temperature = minTemperature + (rand() % 10); myWeather->Humidity = minHumidity + (rand() % 20); { unsigned char* destination; size_t destinationSize; if (SERIALIZE(&destination, &destinationSize, myWeather->DeviceId, myWeather->WindSpeed, myWeather->Temperature, myWeather->Humidity) != CODEFIRST_OK) { (void)printf("Failed to serialize\r\n"); } else { IOTHUB_MESSAGE_HANDLE messageHandle = IoTHubMessage_CreateFromByteArray(destination, destinationSize); if (messageHandle == NULL) { printf("unable to create a new IoTHubMessage\r\n"); } else { MAP_HANDLE propMap = IoTHubMessage_Properties(messageHandle); (void)sprintf_s(propText, sizeof(propText), myWeather->Temperature > 28 ? "true" : "false"); if (Map_AddOrUpdate(propMap, "temperatureAlert", propText) != MAP_OK) { printf("ERROR: Map_AddOrUpdate Failed!\r\n"); } if (IoTHubClient_LL_SendEventAsync(iotHubClientHandle, messageHandle, sendCallback, (void*)1) != IOTHUB_CLIENT_OK) { printf("failed to hand over the message to IoTHubClient"); } else { printf("IoTHubClient accepted the message for delivery\r\n"); } IoTHubMessage_Destroy(messageHandle); } free(destination); } } /* wait for commands */ while (1) { IoTHubClient_LL_DoWork(iotHubClientHandle); ThreadAPI_Sleep(100); } } DESTROY_MODEL_INSTANCE(myWeather); } IoTHubClient_LL_Destroy(iotHubClientHandle); } serializer_deinit(); } platform_deinit(); } } void sample_run(void) { simplesample_http_run(); }
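// Illustrative sketch (not part of the original sample): the sample above
// serializes the model once and then only calls IoTHubClient_LL_DoWork() in
// its loop. The helper below shows how the existing SERIALIZE() macro and
// sendMessage() helper could be combined to publish a fresh reading on
// demand. The function name sendWeatherReading is an assumption made for
// this sketch; it is fenced off so it cannot affect the real build.
#ifdef SIMPLESAMPLE_EXAMPLE_ONLY
static void sendWeatherReading(IOTHUB_CLIENT_LL_HANDLE iotHubClientHandle, ContosoAnemometer* myWeather)
{
    unsigned char* destination;
    size_t destinationSize;

    myWeather->WindSpeed = 10 + (rand() % 4 + 2);   /* refresh simulated data */

    if (SERIALIZE(&destination, &destinationSize,
                  myWeather->DeviceId, myWeather->WindSpeed,
                  myWeather->Temperature, myWeather->Humidity) != CODEFIRST_OK)
    {
        (void)printf("Failed to serialize\r\n");
    }
    else
    {
        /* sendMessage() hands the buffer to the IoT Hub client and frees it. */
        sendMessage(iotHubClientHandle, destination, destinationSize);
    }
}
#endif /* SIMPLESAMPLE_EXAMPLE_ONLY */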
251765.c
/***************************************************************************//** * @file * @brief Kernel - Core Functions ******************************************************************************* * # License * <b>Copyright 2018 Silicon Laboratories Inc. www.silabs.com</b> ******************************************************************************* * * The licensor of this software is Silicon Laboratories Inc. Your use of this * software is governed by the terms of Silicon Labs Master Software License * Agreement (MSLA) available at * www.silabs.com/about-us/legal/master-software-license-agreement. This * software is distributed to you in Source Code format and is governed by the * sections of the MSLA applicable to Source Code. * ******************************************************************************/ /******************************************************************************************************** ******************************************************************************************************** * DEPENDENCIES & AVAIL CHECK(S) ******************************************************************************************************** *******************************************************************************************************/ #include <rtos_description.h> #if (defined(RTOS_MODULE_KERNEL_AVAIL)) /******************************************************************************************************** ******************************************************************************************************** * INCLUDE FILES ******************************************************************************************************** *******************************************************************************************************/ #define MICRIUM_SOURCE #include "../include/os.h" #include "os_priv.h" #include <common/include/rtos_prio.h> #ifdef VSC_INCLUDE_SOURCE_FILE_NAMES const CPU_CHAR *os_core__c = "$Id: $"; #endif /******************************************************************************************************** ******************************************************************************************************** * LOCAL DEFINES ******************************************************************************************************** *******************************************************************************************************/ /******************************************************************************************************** * DEFAULT RUNTIME CONFIGURATION *******************************************************************************************************/ // Default Stacks, Pool Size, Stack Limit and Tasks. 
#define OS_INIT_CFG_DFLT { \ .ISR = \ { \ .StkBasePtr = DEF_NULL, \ .StkSize = KERNEL_ISR_STACK_SIZE_DFLT \ }, \ .MsgPoolSize = 100u, \ .TaskStkLimit = 10u, \ .IdleTask = \ { \ .StkBasePtr = DEF_NULL, \ .StkSize = KERNEL_IDLE_TASK_STACK_SIZE_DFLT \ }, \ .StatTaskCfg = \ { \ .StkBasePtr = DEF_NULL, \ .StkSize = KERNEL_STAT_TASK_STACK_SIZE_DFLT, \ .Prio = KERNEL_STAT_TASK_PRIO_DFLT, \ .RateHz = 10u \ }, \ .TickTaskCfg = \ { \ .StkBasePtr = DEF_NULL, \ .StkSize = KERNEL_TICK_TASK_STACK_SIZE_DFLT, \ .Prio = KERNEL_TICK_TASK_PRIO_DFLT, \ .RateHz = 1000u \ }, \ .TmrTaskCfg = \ { \ .StkBasePtr = DEF_NULL, \ .StkSize = KERNEL_TMR_TASK_STACK_SIZE_DFLT, \ .Prio = KERNEL_TMR_TASK_PRIO_DFLT, \ .RateHz = 10u \ }, \ .MemSeg = DEF_NULL \ } /******************************************************************************************************** ******************************************************************************************************** * LOCAL GLOBAL VARIABLES ******************************************************************************************************** *******************************************************************************************************/ #ifndef OS_CFG_COMPAT_INIT #if (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED) // Kernel configuration. const OS_INIT_CFG OS_InitCfgDflt = OS_INIT_CFG_DFLT; static OS_INIT_CFG OS_InitCfg = OS_INIT_CFG_DFLT; #else // Kernel configuration. extern const OS_INIT_CFG OS_InitCfg; #endif #endif /******************************************************************************************************** ******************************************************************************************************** * GLOBAL FUNCTIONS ******************************************************************************************************** *******************************************************************************************************/ /****************************************************************************************************//** * OS_ConfigureISRStk() * * @brief Configure the stack used for ISRs, if available. * * @param p_stk_base_ptr Pointer to the base of the buffer used as the stack. * * @param stk_size Size of the stack, in CPU_STK elements. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED)) void OS_ConfigureISRStk(CPU_STK *p_stk_base_ptr, CPU_STK_SIZE stk_size) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_InitCfg.ISR.StkBasePtr = p_stk_base_ptr; OS_InitCfg.ISR.StkSize = stk_size; } #endif /****************************************************************************************************//** * OS_ConfigureMemSeg() * * @brief Configure the memory segment used by the Kernel. * * @param p_mem_seg Pointer to the memory segment in which the kernel data will be allocated. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. 
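 *
 *           @note (2) Minimal usage sketch (illustrative only; 'AppMemSeg' is a hypothetical
 *                     application-defined MEM_SEG, e.g. one created with Mem_SegCreate(), shown
 *                     here just to make the required call order explicit):
 *                     @verbatim
 *                     OS_ConfigureMemSeg(&AppMemSeg);   // Optional, MUST precede OSInit()
 *                     OSInit(&err);                     // Kernel data allocated from AppMemSeg
 *                     @endverbatim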
*******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED)) void OS_ConfigureMemSeg(MEM_SEG *p_mem_seg) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_InitCfg.MemSeg = p_mem_seg; } #endif /****************************************************************************************************//** * OS_ConfigureMsgPoolSize() * * @brief Configure the Kernel message pool size. * * @param msg_pool_size Number of messages the kernel will manage. Shared between task message * queues and regular message queues. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED)) void OS_ConfigureMsgPoolSize(OS_MSG_SIZE msg_pool_size) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_InitCfg.MsgPoolSize = msg_pool_size; } #endif /****************************************************************************************************//** * OS_ConfigureStkLimit() * * @brief Configure the application stack limit. * * @param task_stk_limit Stack limit in percentage to empty. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED)) void OS_ConfigureStkLimit(CPU_STK_SIZE task_stk_limit) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_InitCfg.TaskStkLimit = task_stk_limit; } #endif /****************************************************************************************************//** * OS_ConfigureIdleTaskStk() * * @brief Configure the stack used by the Idle Task. * * @param p_stk_base_ptr Pointer to the base of the buffer used as the stack. * * @param stk_size Size of the stack, in CPU_STK elements. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. * * @note (2) The idle task will be removed in an upcoming release as well as this function. In case * an Idle task is really needed, it is recommended to create a very low priority task * (with no other tasks at the same priority) that never performs any blocking calls. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED) \ && (OS_CFG_TASK_IDLE_EN == DEF_ENABLED)) void OS_ConfigureIdleTaskStk(CPU_STK *p_stk_base_ptr, CPU_STK_SIZE stk_size) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_InitCfg.IdleTask.StkBasePtr = p_stk_base_ptr; OS_InitCfg.IdleTask.StkSize = stk_size; } #endif /****************************************************************************************************//** * OS_ConfigureStatTask() * * @brief If enabled, configure the Statistics Task. * * @param p_stat_task_cfg Pointer to the Statistics Task configuration. * * @note (1) This function is optional. 
If it is called, it must be called before OSInit(). * If it is not called, default values will be used. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED) \ && (OS_CFG_STAT_TASK_EN == DEF_ENABLED)) void OS_ConfigureStatTask(OS_TASK_CFG *p_stat_task_cfg) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_ASSERT_DBG_NO_ERR((p_stat_task_cfg != DEF_NULL), RTOS_ERR_NULL_PTR,; ); OS_InitCfg.StatTaskCfg = *p_stat_task_cfg; } #endif /****************************************************************************************************//** * OS_ConfigureTickTask() * * @brief If enabled, configure the Tick Task. * * @param p_tick_task_cfg Pointer to the Tick Task configuration. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED) \ && (OS_CFG_TASK_TICK_EN == DEF_ENABLED)) void OS_ConfigureTickTask(OS_TASK_CFG *p_tick_task_cfg) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_ASSERT_DBG_NO_ERR((p_tick_task_cfg != DEF_NULL), RTOS_ERR_NULL_PTR,; ); OS_InitCfg.TickTaskCfg = *p_tick_task_cfg; } #endif /****************************************************************************************************//** * OS_ConfigureTmrTask() * * @brief If enabled, configure the Timer Management Task. * * @param p_tmr_task_cfg Pointer to the Timer Management Task configuration. * * @note (1) This function is optional. If it is called, it must be called before OSInit(). * If it is not called, default values will be used. *******************************************************************************************************/ #if (!defined(OS_CFG_COMPAT_INIT) \ && (RTOS_CFG_EXTERNALIZE_OPTIONAL_CFG_EN == DEF_DISABLED) \ && (OS_CFG_TMR_EN == DEF_ENABLED)) void OS_ConfigureTmrTask(OS_TASK_CFG *p_tmr_task_cfg) { RTOS_ASSERT_CRITICAL((OSInitialized == DEF_FALSE), RTOS_ERR_ALREADY_INIT,; ); OS_ASSERT_DBG_NO_ERR((p_tmr_task_cfg != DEF_NULL), RTOS_ERR_NULL_PTR,; ); OS_InitCfg.TmrTaskCfg = *p_tmr_task_cfg; } #endif /****************************************************************************************************//** * OSInit() * * @brief Initializes the internals of the Kernel and MUST be called before creating any Kernel * object and before calling OSStart(). * * @param p_err Pointer to the variable that will receive one of the following error * code(s) from this function: * - RTOS_ERR_NONE * - RTOS_ERR_OS_ILLEGAL_RUN_TIME * - RTOS_ERR_SEG_OVF * * @note (1) This function MUST be called AFTER Common's Mem_Init(). *******************************************************************************************************/ void OSInit(RTOS_ERR *p_err) { #ifdef OS_CFG_COMPAT_INIT #if (OS_CFG_ISR_STK_SIZE > 0u) CPU_STK *p_stk; CPU_STK_SIZE size; #endif #else CPU_STK *p_stk; CPU_STK_SIZE size; #endif // Validate 'p_err' OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err,; ); // Allocate Objects and Stacks. #ifndef OS_CFG_COMPAT_INIT // Allocate ISR Stack. 
if (OS_InitCfg.ISR.StkSize > 0u) { if (OS_InitCfg.ISR.StkBasePtr == DEF_NULL) { OSCfg_ISRStk = (CPU_STK *)Mem_SegAlloc("Kernel's ISR Stack", OS_InitCfg.MemSeg, OS_InitCfg.ISR.StkSize * sizeof(CPU_STK), p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } } else { OSCfg_ISRStk = OS_InitCfg.ISR.StkBasePtr; } OSCfg_ISRStkBasePtr = OSCfg_ISRStk; OSCfg_ISRStkSize = OS_InitCfg.ISR.StkSize; OSCfg_ISRStkSizeRAM = OS_InitCfg.ISR.StkSize * sizeof(CPU_STK); OSCfg_DataSizeRAM += OSCfg_ISRStkSizeRAM; } #if (OS_MSG_EN == DEF_ENABLED) // Message Queue. if (OS_InitCfg.MsgPoolSize > 0u) { OSCfg_MsgPool = (OS_MSG *)Mem_SegAlloc("Kernel's Msg Pool", OS_InitCfg.MemSeg, OS_InitCfg.MsgPoolSize * sizeof(OS_MSG), p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } OSCfg_MsgPoolBasePtr = OSCfg_MsgPool; OSCfg_MsgPoolSize = OS_InitCfg.MsgPoolSize; OSCfg_MsgPoolSizeRAM = OS_InitCfg.MsgPoolSize * sizeof(OS_MSG); OSCfg_DataSizeRAM += OSCfg_MsgPoolSizeRAM; } #endif #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) // Idle Task's Stack. if (OS_InitCfg.IdleTask.StkSize > 0u) { if (OS_InitCfg.IdleTask.StkBasePtr == DEF_NULL) { OSCfg_IdleTaskStk = (CPU_STK *)Mem_SegAlloc("Kernel's Idle Task Stack", OS_InitCfg.MemSeg, OS_InitCfg.IdleTask.StkSize * sizeof(CPU_STK), p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } } else { OSCfg_IdleTaskStk = OS_InitCfg.IdleTask.StkBasePtr; } OSCfg_IdleTaskStkBasePtr = OSCfg_IdleTaskStk; OSCfg_IdleTaskStkLimit = ((OS_InitCfg.IdleTask.StkSize * OS_InitCfg.TaskStkLimit) / 100u); OSCfg_IdleTaskStkSize = OS_InitCfg.IdleTask.StkSize; OSCfg_IdleTaskStkSizeRAM = OS_InitCfg.IdleTask.StkSize * sizeof(CPU_STK); OSCfg_DataSizeRAM += OSCfg_IdleTaskStkSizeRAM; } #endif #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) // Tick Task's Stack. if (OS_InitCfg.TickTaskCfg.StkSize > 0u) { if (OS_InitCfg.TickTaskCfg.StkBasePtr == DEF_NULL) { OSCfg_TickTaskStk = (CPU_STK *)Mem_SegAlloc("Kernel's Tick Task Stack", OS_InitCfg.MemSeg, OS_InitCfg.TickTaskCfg.StkSize * sizeof(CPU_STK), p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } } else { OSCfg_TickTaskStk = OS_InitCfg.TickTaskCfg.StkBasePtr; } OSCfg_TickRate_Hz = OS_InitCfg.TickTaskCfg.RateHz; OSCfg_TickTaskPrio = OS_InitCfg.TickTaskCfg.Prio; OSCfg_TickTaskStkBasePtr = OSCfg_TickTaskStk; OSCfg_TickTaskStkLimit = ((OS_InitCfg.TickTaskCfg.StkSize * OS_InitCfg.TaskStkLimit) / 100u); OSCfg_TickTaskStkSize = OS_InitCfg.TickTaskCfg.StkSize; OSCfg_TickTaskStkSizeRAM = OS_InitCfg.TickTaskCfg.StkSize * sizeof(CPU_STK); OSCfg_DataSizeRAM += OSCfg_TickTaskStkSizeRAM; } #endif #if (OS_CFG_STAT_TASK_EN == DEF_ENABLED) // Statistic Task's Stack. if (OS_InitCfg.StatTaskCfg.StkSize > 0u) { if (OS_InitCfg.StatTaskCfg.StkBasePtr == DEF_NULL) { OSCfg_StatTaskStk = (CPU_STK *)Mem_SegAlloc("Kernel's Stat Task Stack", OS_InitCfg.MemSeg, OS_InitCfg.StatTaskCfg.StkSize * sizeof(CPU_STK), p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } } else { OSCfg_StatTaskStk = OS_InitCfg.StatTaskCfg.StkBasePtr; } OSCfg_StatTaskPrio = OS_InitCfg.StatTaskCfg.Prio; OSCfg_StatTaskRate_Hz = OS_InitCfg.StatTaskCfg.RateHz; OSCfg_StatTaskStkBasePtr = OSCfg_StatTaskStk; OSCfg_StatTaskStkLimit = ((OS_InitCfg.StatTaskCfg.StkSize * OS_InitCfg.TaskStkLimit) / 100u); OSCfg_StatTaskStkSize = OS_InitCfg.StatTaskCfg.StkSize; OSCfg_StatTaskStkSizeRAM = OS_InitCfg.StatTaskCfg.StkSize * sizeof(CPU_STK); OSCfg_DataSizeRAM += OSCfg_StatTaskStkSizeRAM; } #endif #if (OS_CFG_TMR_EN == DEF_ENABLED) // Timer Manager Task's Stack. 
if (OS_InitCfg.TmrTaskCfg.StkSize > 0u) { if (OS_InitCfg.TmrTaskCfg.StkBasePtr == DEF_NULL) { OSCfg_TmrTaskStk = (CPU_STK *)Mem_SegAlloc("Kernel's Timer Task Stack", OS_InitCfg.MemSeg, OS_InitCfg.TmrTaskCfg.StkSize * sizeof(CPU_STK), p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } } else { OSCfg_TmrTaskStk = OS_InitCfg.TmrTaskCfg.StkBasePtr; } OSCfg_TmrTaskPrio = OS_InitCfg.TmrTaskCfg.Prio; OSCfg_TmrTaskRate_Hz = OS_InitCfg.TmrTaskCfg.RateHz; OSCfg_TmrTaskStkBasePtr = OSCfg_TmrTaskStk; OSCfg_TmrTaskStkLimit = ((OS_InitCfg.TmrTaskCfg.StkSize * OS_InitCfg.TaskStkLimit) / 100u); OSCfg_TmrTaskStkSize = OS_InitCfg.TmrTaskCfg.StkSize; OSCfg_TmrTaskStkSizeRAM = OS_InitCfg.TmrTaskCfg.StkSize * sizeof(CPU_STK); OSCfg_DataSizeRAM += OSCfg_TmrTaskStkSizeRAM; } #endif #endif OSInitHook(); // Call port specific initialization code OSIntNestingCtr = 0u; // Clear the interrupt nesting counter OSRunning = OS_STATE_OS_STOPPED; // Indicate that multitasking not started OSSchedLockNestingCtr = 0u; // Clear the scheduling lock counter OSTCBCurPtr = DEF_NULL; // Initialize OS_TCB pointers to a known state OSTCBHighRdyPtr = DEF_NULL; OSPrioCur = 0u; // Initialize priority variables to a known state OSPrioHighRdy = 0u; #if (OS_CFG_SCHED_LOCK_TIME_MEAS_EN == DEF_ENABLED) OSSchedLockTimeBegin = 0u; OSSchedLockTimeMax = 0u; OSSchedLockTimeMaxCur = 0u; #endif #ifdef OS_SAFETY_CRITICAL_IEC61508 OSSafetyCriticalStartFlag = DEF_FALSE; #endif #if (OS_CFG_SCHED_ROUND_ROBIN_EN == DEF_ENABLED) OSSchedRoundRobinEn = DEF_FALSE; OSSchedRoundRobinDfltTimeQuanta = OSCfg_TickRate_Hz / 10u; #endif // Clear exception stack for stack checking. #ifdef OS_CFG_COMPAT_INIT #if (OS_CFG_ISR_STK_SIZE > 0u) p_stk = OSCfg_ISRStkBasePtr; if (p_stk != DEF_NULL) { size = OSCfg_ISRStkSize; while (size > 0u) { size--; *p_stk = 0u; p_stk++; } } #if (OS_CFG_TASK_STK_REDZONE_EN == DEF_ENABLED) // Initialize Redzoned ISR stack OS_TaskStkRedzoneInit(OSCfg_ISRStkBasePtr, OSCfg_ISRStkSize); #endif #endif #else if (OSCfg_ISRStkSize > 0u) { p_stk = OSCfg_ISRStkBasePtr; if (p_stk != DEF_NULL) { size = OSCfg_ISRStkSize; while (size > 0u) { size--; *p_stk = 0u; p_stk++; } } #if (OS_CFG_TASK_STK_REDZONE_EN == DEF_ENABLED) // Initialize Redzoned ISR stack OS_TaskStkRedzoneInit(OSCfg_ISRStkBasePtr, OSCfg_ISRStkSize); #endif } #endif #if (OS_CFG_APP_HOOKS_EN == DEF_ENABLED) // Clear application hook pointers #if (OS_CFG_TASK_STK_REDZONE_EN == DEF_ENABLED) OS_AppRedzoneHitHookPtr = DEF_NULL; #endif OS_AppTaskCreateHookPtr = DEF_NULL; OS_AppTaskDelHookPtr = DEF_NULL; OS_AppTaskReturnHookPtr = DEF_NULL; OS_AppIdleTaskHookPtr = DEF_NULL; OS_AppStatTaskHookPtr = DEF_NULL; OS_AppTaskSwHookPtr = DEF_NULL; OS_AppTimeTickHookPtr = DEF_NULL; #endif #if (OS_CFG_TASK_REG_TBL_SIZE > 0u) OSTaskRegNextAvailID = 0u; #endif OS_PrioInit(); // Initialize the priority bitmap table OS_RdyListInit(); // Initialize the Ready List #if (OS_CFG_FLAG_EN == DEF_ENABLED) // Initialize the Event Flag module #if (OS_CFG_DBG_EN == DEF_ENABLED) OSFlagDbgListPtr = DEF_NULL; OSFlagQty = 0u; #endif #endif #if (OS_CFG_MEM_EN == DEF_ENABLED) // Initialize the Memory Manager module OS_MemInit(p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #endif #if (OS_MSG_EN == DEF_ENABLED) // Initialize the free list of OS_MSGs if (OSCfg_MsgPoolSize > 0u) { OS_MsgPoolInit(p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } } #endif #if (OS_CFG_MUTEX_EN == DEF_ENABLED) // Initialize the Mutex Manager module #if (OS_CFG_DBG_EN == DEF_ENABLED) 
OSMutexDbgListPtr = DEF_NULL; OSMutexQty = 0u; #endif #endif #if (OS_CFG_Q_EN == DEF_ENABLED) // Initialize the Message Queue Manager module #if (OS_CFG_DBG_EN == DEF_ENABLED) OSQDbgListPtr = DEF_NULL; OSQQty = 0u; #endif #endif #if (OS_CFG_SEM_EN == DEF_ENABLED) // Initialize the Semaphore Manager module #if (OS_CFG_DBG_EN == DEF_ENABLED) OSSemDbgListPtr = DEF_NULL; OSSemQty = 0u; #endif #endif #if defined(OS_CFG_TLS_TBL_SIZE) && (OS_CFG_TLS_TBL_SIZE > 0u) OS_TLS_Init(p_err); // Initialize Task Local Storage, before creating tasks if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #endif OS_TaskInit(p_err); // Initialize the task manager if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) // Initialize the Idle Task OS_IdleTaskInit(p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #endif #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) // Initialize the Tick Task OS_TickTaskInit(p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #endif #if (OS_CFG_STAT_TASK_EN == DEF_ENABLED) // Initialize the Statistic Task OS_StatTaskInit(p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #endif #if (OS_CFG_TMR_EN == DEF_ENABLED) // Initialize the Timer Manager module OS_TmrInit(p_err); if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) { return; } #endif #if (OS_CFG_DBG_EN == DEF_ENABLED) OS_Dbg_Init(); #endif OSCfg_Init(); OSInitialized = DEF_TRUE; // Kernel is initialized } /****************************************************************************************************//** * OSIntEnter() * * @brief Used in an interrupt service routine (ISR) to notify the Kernel that you are about to * service an interrupt. This allows the Kernel to keep track of interrupt nesting and * only performs rescheduling at the last nested ISR. * * @note (1) Your ISR can directly increment 'OSIntNestingCtr' without calling this function because * OSIntNestingCtr has been declared 'global'. The port is actually considered part of the * OS and is allowed to access the Kernel's variables. In that case you must handle the * access protection to this variable. * * @note (2) You MUST still call OSIntExit() even though you can increment 'OSIntNestingCtr' directly. * * @note (3) You MUST invoke OSIntEnter() and OSIntExit() in pairs. In other words, for every call * to OSIntEnter() (or direct increment to OSIntNestingCtr) at the beginning of the ISR * you MUST have a call to OSIntExit() at the end of the ISR. * * @note (4) You are allowed to nest interrupts up to 250 levels deep. *******************************************************************************************************/ void OSIntEnter(void) { CPU_SR_ALLOC(); OS_TRACE_ISR_ENTER(); if (OSRunning != OS_STATE_OS_RUNNING) { // Is OS running? return; // No } CPU_INT_DIS(); if (OSIntNestingCtr < 250u) { // Have we nested less than 250 levels? OSIntNestingCtr++; // Increment ISR nesting level } CPU_INT_EN(); } /****************************************************************************************************//** * OSIntExit() * * @brief Notifies the Kernel that you have completed servicing an ISR. When the last nested ISR * has completed, the Kernel will call the scheduler to determine whether a new, high-priority * task is ready to run. * * @note (1) You MUST invoke OSIntEnter() and OSIntExit() in pairs. 
In other words, for every call * to OSIntEnter() (or direct increment to OSIntNestingCtr) at the beginning of the ISR, * you MUST have a call to OSIntExit() at the end of the ISR. * * @note (2) Rescheduling is prevented when the scheduler is locked (see OSSchedLock()). *******************************************************************************************************/ void OSIntExit(void) { #if (OS_CFG_TASK_STK_REDZONE_EN == DEF_ENABLED) CPU_BOOLEAN stk_status; #endif CPU_SR_ALLOC(); if (OSRunning != OS_STATE_OS_RUNNING) { // Has the OS started? OS_TRACE_ISR_EXIT(); return; // No } CPU_INT_DIS(); if (OSIntNestingCtr == 0u) { // Prevent OSIntNestingCtr from wrapping OS_TRACE_ISR_EXIT(); CPU_INT_EN(); return; } OSIntNestingCtr--; if (OSIntNestingCtr > 0u) { // ISRs still nested? OS_TRACE_ISR_EXIT(); CPU_INT_EN(); // Yes return; } if (OSSchedLockNestingCtr > 0u) { // Scheduler still locked? OS_TRACE_ISR_EXIT(); CPU_INT_EN(); // Yes return; } // Verify ISR Stack #if (OS_CFG_TASK_STK_REDZONE_EN == DEF_ENABLED) #ifdef OS_CFG_COMPAT_INIT #if (OS_CFG_ISR_STK_SIZE > 0u) stk_status = OS_TaskStkRedzoneChk(OSCfg_ISRStkBasePtr, OSCfg_ISRStkSize); if (stk_status != DEF_OK) { OSRedzoneHitHook(DEF_NULL); } #endif #else if (OSCfg_ISRStkSize > 0u) { stk_status = OS_TaskStkRedzoneChk(OSCfg_ISRStkBasePtr, OSCfg_ISRStkSize); if (stk_status != DEF_OK) { OSRedzoneHitHook(DEF_NULL); } } #endif #endif OSPrioHighRdy = OS_PrioGetHighest(); // Find highest priority #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) OSTCBHighRdyPtr = OSRdyList[OSPrioHighRdy].HeadPtr; // Get highest priority task ready-to-run if (OSTCBHighRdyPtr == OSTCBCurPtr) { // Current task still the highest priority? #if (OS_CFG_TASK_STK_REDZONE_EN == DEF_ENABLED) stk_status = OSTaskStkRedzoneChk(DEF_NULL); if (stk_status != DEF_OK) { OSRedzoneHitHook(OSTCBCurPtr); } #endif OS_TRACE_ISR_EXIT(); CPU_INT_EN(); // Yes return; } #else if (OSPrioHighRdy != (OS_CFG_PRIO_MAX - 1u)) { // Are we returning to idle? OSTCBHighRdyPtr = OSRdyList[OSPrioHighRdy].HeadPtr; // No ... get highest priority task ready-to-run if (OSTCBHighRdyPtr == OSTCBCurPtr) { // Current task still the highest priority? // Yes OS_TRACE_ISR_EXIT(); CPU_INT_EN(); return; } } #endif #if (OS_CFG_TASK_PROFILE_EN == DEF_ENABLED) OSTCBHighRdyPtr->CtxSwCtr++; // Inc. # of context switches for this new task #endif #if ((OS_CFG_TASK_PROFILE_EN == DEF_ENABLED) || (OS_CFG_DBG_EN == DEF_ENABLED)) OSTaskCtxSwCtr++; // Keep track of the total number of ctx switches #endif #if defined(OS_CFG_TLS_TBL_SIZE) && (OS_CFG_TLS_TBL_SIZE > 0u) OS_TLS_TaskSw(); #endif OS_TRACE_ISR_EXIT_TO_SCHEDULER(); OSIntCtxSw(); // Perform interrupt level ctx switch CPU_INT_EN(); } /****************************************************************************************************//** * OSSched() * * @brief This function is called by other Kernel services to determine whether a new, high * priority task has been made ready to run. This function is invoked by TASK level code and * is not used to reschedule tasks from ISRs (see OSIntExit() for ISR rescheduling). * * @note (1) Rescheduling is prevented when the scheduler is locked (see OSSchedLock()). *******************************************************************************************************/ void OSSched(void) { CPU_SR_ALLOC(); // Can't schedule when the kernel is stopped. OS_ASSERT_DBG_NO_ERR((OSRunning == OS_STATE_OS_RUNNING), RTOS_ERR_NOT_READY,; ); if (OSIntNestingCtr > 0u) { // ISRs still nested? return; // Yes ... 
only schedule when no nested ISRs } if (OSSchedLockNestingCtr > 0u) { // Scheduler locked? return; // Yes } CPU_INT_DIS(); OSPrioHighRdy = OS_PrioGetHighest(); // Find the highest priority ready #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) OSTCBHighRdyPtr = OSRdyList[OSPrioHighRdy].HeadPtr; // Get highest priority task ready-to-run if (OSTCBHighRdyPtr == OSTCBCurPtr) { // Current task still the highest priority? CPU_INT_EN(); // Yes return; } #else if (OSPrioHighRdy != (OS_CFG_PRIO_MAX - 1u)) { // Are we returning to idle? OSTCBHighRdyPtr = OSRdyList[OSPrioHighRdy].HeadPtr; // No ... get highest priority task ready-to-run if (OSTCBHighRdyPtr == OSTCBCurPtr) { // Current task still the highest priority? CPU_INT_EN(); // Yes return; } } #endif OS_TRACE_TASK_PREEMPT(OSTCBCurPtr); #if (OS_CFG_TASK_PROFILE_EN == DEF_ENABLED) OSTCBHighRdyPtr->CtxSwCtr++; // Inc. # of context switches to this task #endif #if ((OS_CFG_TASK_PROFILE_EN == DEF_ENABLED) || (OS_CFG_DBG_EN == DEF_ENABLED)) OSTaskCtxSwCtr++; // Increment context switch counter #endif #if defined(OS_CFG_TLS_TBL_SIZE) && (OS_CFG_TLS_TBL_SIZE > 0u) OS_TLS_TaskSw(); #endif #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) OS_TASK_SW(); // Perform a task level context switch CPU_INT_EN(); #else if ((OSPrioHighRdy != (OS_CFG_PRIO_MAX - 1u))) { OS_TASK_SW(); // Perform a task level context switch CPU_INT_EN(); } else { OSTCBHighRdyPtr = OSTCBCurPtr; CPU_INT_EN(); while (DEF_ON) { #if ((OS_CFG_DBG_EN == DEF_ENABLED) || (OS_CFG_STAT_TASK_EN == DEF_ENABLED)) CPU_CRITICAL_ENTER(); #if (OS_CFG_DBG_EN == DEF_ENABLED) OSIdleTaskCtr++; #endif #if (OS_CFG_STAT_TASK_EN == DEF_ENABLED) OSStatTaskCtr++; #endif CPU_CRITICAL_EXIT(); #endif #if (OS_CFG_APP_HOOKS_EN == DEF_ENABLED) OSIdleTaskHook(); // Call user definable HOOK #endif if ((*((volatile OS_PRIO *)&OSPrioHighRdy) != (OS_CFG_PRIO_MAX - 1u))) { break; } } } #endif #ifdef OS_TASK_SW_SYNC OS_TASK_SW_SYNC(); #endif } /****************************************************************************************************//** * OSSchedLock() * * @brief Prevents rescheduling from taking place, allowing your application to prevent context * switches until you are ready to permit context switching. * * @param p_err Pointer to the variable that will receive one of the following error code(s) * from this function: * - RTOS_ERR_NONE * - RTOS_ERR_WOULD_OVF * * @note (1) You MUST invoke OSSchedLock() and OSSchedUnlock() in pairs. In other words, for every * call to OSSchedLock(), you MUST have a call to OSSchedUnlock(). *******************************************************************************************************/ void OSSchedLock(RTOS_ERR *p_err) { CPU_SR_ALLOC(); OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err,; ); // Not allowed to call from an ISR OS_ASSERT_DBG_ERR_SET((OSIntNestingCtr == 0u), *p_err, RTOS_ERR_ISR,; ); // Make sure kernel is running. OS_ASSERT_DBG_ERR_SET((OSRunning == OS_STATE_OS_RUNNING), *p_err, RTOS_ERR_NOT_READY,; ); if (OSSchedLockNestingCtr >= 250u) { // Prevent OSSchedLockNestingCtr overflowing RTOS_ERR_SET(*p_err, RTOS_ERR_WOULD_OVF); return; } CPU_CRITICAL_ENTER(); OSSchedLockNestingCtr++; // Increment lock nesting level #if (OS_CFG_SCHED_LOCK_TIME_MEAS_EN == DEF_ENABLED) OS_SchedLockTimeMeasStart(); #endif CPU_CRITICAL_EXIT(); RTOS_ERR_SET(*p_err, RTOS_ERR_NONE); } /****************************************************************************************************//** * OSSchedUnlock() * * @brief Re-allows rescheduling. 
* * @param p_err Pointer to the variable that will receive one of the following error code(s) * from this function: * - RTOS_ERR_NONE * - RTOS_ERR_INVALID_STATE * - RTOS_ERR_OS_SCHED_LOCKED * * @note (1) You MUST invoke OSSchedLock() and OSSchedUnlock() in pairs. In other words, for every * call to OSSchedLock(), you MUST have a call to OSSchedUnlock(). *******************************************************************************************************/ void OSSchedUnlock(RTOS_ERR *p_err) { CPU_SR_ALLOC(); OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err,; ); // Not allowed to call from an ISR OS_ASSERT_DBG_ERR_SET((OSIntNestingCtr == 0u), *p_err, RTOS_ERR_ISR,; ); // Make sure kernel is running. OS_ASSERT_DBG_ERR_SET((OSRunning == OS_STATE_OS_RUNNING), *p_err, RTOS_ERR_NOT_READY,; ); if (OSSchedLockNestingCtr == 0u) { // See if the scheduler is locked RTOS_ERR_SET(*p_err, RTOS_ERR_INVALID_STATE); return; } CPU_CRITICAL_ENTER(); OSSchedLockNestingCtr--; // Decrement lock nesting level if (OSSchedLockNestingCtr > 0u) { CPU_CRITICAL_EXIT(); // Scheduler is still locked RTOS_ERR_SET(*p_err, RTOS_ERR_OS_SCHED_LOCKED); return; } #if (OS_CFG_SCHED_LOCK_TIME_MEAS_EN == DEF_ENABLED) OS_SchedLockTimeMeasStop(); #endif CPU_CRITICAL_EXIT(); // Scheduler should be re-enabled OSSched(); // Run the scheduler RTOS_ERR_SET(*p_err, RTOS_ERR_NONE); } /****************************************************************************************************//** * OSSchedRoundRobinCfg() * * @brief Changes the round-robin scheduling parameters. * * @param en Determines if the round-robin will be used: * - DEF_ENABLED Round-robin scheduling is enabled. * - DEF_DISABLED Round-robin scheduling is disabled. * * @param dflt_time_quanta Default number of ticks between time slices. * A value of 0 assumes OSCfg_TickRate_Hz / 10. * * @param p_err Pointer to the variable that will receive one of the following * error code(s) from this function: * - RTOS_ERR_NONE *******************************************************************************************************/ #if (OS_CFG_SCHED_ROUND_ROBIN_EN == DEF_ENABLED) void OSSchedRoundRobinCfg(CPU_BOOLEAN en, OS_TICK dflt_time_quanta, RTOS_ERR *p_err) { CPU_SR_ALLOC(); OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err,; ); CPU_CRITICAL_ENTER(); if (en != DEF_ENABLED) { OSSchedRoundRobinEn = DEF_FALSE; } else { OSSchedRoundRobinEn = DEF_TRUE; } if (dflt_time_quanta > 0u) { OSSchedRoundRobinDfltTimeQuanta = dflt_time_quanta; } else { OSSchedRoundRobinDfltTimeQuanta = (OS_TICK)(OSCfg_TickRate_Hz / 10u); } CPU_CRITICAL_EXIT(); RTOS_ERR_SET(*p_err, RTOS_ERR_NONE); } #endif /****************************************************************************************************//** * OSSchedRoundRobinYield() * * @brief Gives up the CPU when a task is finished its execution before its time slice expires. * * @param p_err Pointer to the variable that will receive one of the following error code(s) * from this function: * - RTOS_ERR_NONE * - RTOS_ERR_NOT_AVAIL * - RTOS_ERR_NONE_WAITING * - RTOS_ERR_OS_SCHED_LOCKED * * @note (1) This function MUST be called from a task. 
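 *
 *           @note (2) Illustrative usage sketch (hypothetical application task; assumes round-robin
 *                     scheduling was previously enabled with OSSchedRoundRobinCfg()):
 *                     @verbatim
 *                     while (DEF_ON) {
 *                         App_DoOneUnitOfWork();        // Hypothetical application work
 *                         OSSchedRoundRobinYield(&err); // Hand the CPU to the next ready
 *                     }                                 // task at the same priority
 *                     @endverbatim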
*******************************************************************************************************/ #if (OS_CFG_SCHED_ROUND_ROBIN_EN == DEF_ENABLED) void OSSchedRoundRobinYield(RTOS_ERR *p_err) { OS_RDY_LIST *p_rdy_list; OS_TCB *p_tcb; CPU_SR_ALLOC(); OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err,; ); // Not allowed to call from an ISR OS_ASSERT_DBG_ERR_SET((OSIntNestingCtr == 0u), *p_err, RTOS_ERR_ISR,; ); if (OSSchedLockNestingCtr > 0u) { // Can't yield if the scheduler is locked RTOS_ERR_SET(*p_err, RTOS_ERR_OS_SCHED_LOCKED); return; } if (OSSchedRoundRobinEn != DEF_TRUE) { // Make sure round-robin has been enabled RTOS_ERR_SET(*p_err, RTOS_ERR_NOT_AVAIL); return; } CPU_CRITICAL_ENTER(); p_rdy_list = &OSRdyList[OSPrioCur]; // Can't yield if it's the only task at that priority if (p_rdy_list->HeadPtr == p_rdy_list->TailPtr) { CPU_CRITICAL_EXIT(); RTOS_ERR_SET(*p_err, RTOS_ERR_NONE_WAITING); return; } OS_RdyListMoveHeadToTail(p_rdy_list); // Move current OS_TCB to the end of the list p_tcb = p_rdy_list->HeadPtr; // Point to new OS_TCB at head of the list if (p_tcb->TimeQuanta == 0u) { // See if we need to use the default time slice p_tcb->TimeQuantaCtr = OSSchedRoundRobinDfltTimeQuanta; } else { p_tcb->TimeQuantaCtr = p_tcb->TimeQuanta; // Load time slice counter with new time } CPU_CRITICAL_EXIT(); OSSched(); // Run new task RTOS_ERR_SET(*p_err, RTOS_ERR_NONE); } #endif /****************************************************************************************************//** * OSStart() * * @brief Starts the multitasking process which lets the Kernel manage the tasks that you created. * Before you can call OSStart(), you MUST have called OSInit() and you MUST have created * at least one application task. * * @param p_err Pointer to the variable that will receive one of the following error code(s) * from this function: * - RTOS_ERR_NONE * * @note (1) OSStartHighRdy() MUST: * - (a) Call OSTaskSwHook(). * - (b) Load the context of the task pointed to by OSTCBHighRdyPtr. * - (c) Execute the task. * * @note (2) OSStart() is not supposed to return. If it does, that would be considered a fatal error. 
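 *
 *           @note (3) Typical startup sequence, shown as an illustrative sketch (the task name,
 *                     stack, priority and option values below are hypothetical):
 *                     @verbatim
 *                     OSInit(&err);                                  // Initialize the kernel
 *                     OSTaskCreate(&AppTaskTCB, "App Task", AppTask, DEF_NULL,
 *                                  APP_TASK_PRIO,
 *                                  &AppTaskStk[0], APP_TASK_STK_SIZE / 10u, APP_TASK_STK_SIZE,
 *                                  0u, 0u, DEF_NULL,
 *                                  (OS_OPT_TASK_STK_CHK | OS_OPT_TASK_STK_CLR),
 *                                  &err);                            // At least one application task
 *                     OSStart(&err);                                 // Does not return normally
 *                     @endverbatim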
*******************************************************************************************************/ void OSStart(RTOS_ERR *p_err) { OS_OBJ_QTY kernel_task_cnt; OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err,; ); OS_ASSERT_DBG_ERR_SET((OSInitialized == DEF_TRUE), *p_err, RTOS_ERR_NOT_INIT,; ); kernel_task_cnt = 0u; // Calculate the number of kernel tasks #if (OS_CFG_STAT_TASK_EN == DEF_ENABLED) kernel_task_cnt++; #endif #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) kernel_task_cnt++; #endif #if (OS_CFG_TMR_EN == DEF_ENABLED) kernel_task_cnt++; #endif #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) kernel_task_cnt++; #endif // Make sure at least one application task is created OS_ASSERT_DBG_ERR_SET((OSTaskQty > kernel_task_cnt), *p_err, RTOS_ERR_INVALID_CFG,; ); // Make sure kernel is not already running OS_ASSERT_DBG_ERR_SET((OSRunning == OS_STATE_OS_STOPPED), *p_err, RTOS_ERR_INVALID_STATE,; ); OSPrioHighRdy = OS_PrioGetHighest(); // Find the highest priority OSPrioCur = OSPrioHighRdy; OSTCBHighRdyPtr = OSRdyList[OSPrioHighRdy].HeadPtr; OSTCBCurPtr = OSTCBHighRdyPtr; #ifdef OS_SAFETY_CRITICAL_IEC61508 OSSafetyCriticalStartFlag = DEF_TRUE; // Prevent creation of additional kernel objects #endif OSRunning = OS_STATE_OS_RUNNING; OSStartHighRdy(); // Execute target specific code to start task RTOS_CRITICAL_FAIL_EXEC(RTOS_ERR_OS,; ); // OSStart() is not supposed to return } /******************************************************************************************************** ******************************************************************************************************** * DEPRECATED GLOBAL FUNCTIONS ******************************************************************************************************** *******************************************************************************************************/ /****************************************************************************************************//** * OSVersion() * * @brief Returns the version number of the Kernel. The returned value is the Kernel's version * number multiplied by 10000. In other words, version 3.01.02 would be returned as 30102. * * @param p_err Pointer to the variable that will receive one of the following error code(s) * from this function: * - RTOS_ERR_NONE * * @return The version number of the Kernel multiplied by 10000. * * @note (1) This function is DEPRECATED and will be removed in a future version of this product. * Instead, use RTOS_Version() or RTOS_VERSION. * @deprecated *******************************************************************************************************/ CPU_INT16U OSVersion(RTOS_ERR *p_err) { OS_ASSERT_DBG_ERR_PTR_VALIDATE(p_err, 0u); RTOS_ERR_SET(*p_err, RTOS_ERR_NONE); return (OS_VERSION); } /******************************************************************************************************** ******************************************************************************************************** * INTERNAL FUNCTIONS ******************************************************************************************************** *******************************************************************************************************/ /****************************************************************************************************//** * OS_IdleTask() * * @brief This task is internal to the Kernel and executes whenever no other higher priority tasks * execute because they are ALL waiting for event(s) to occur. * * @param p_arg Argument passed to the task when the task is created. 
* * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. * * @note (2) OSIdleTaskHook() is called after the critical section to ensure that interrupts will * be enabled for at least a few instructions. On some processors (ex. Philips XA), * enabling and then disabling interrupts doesn't allow the processor enough time to have * interrupts enabled before they were disabled again. The Kernel would thus never * recognize interrupts. * * @note (3) This hook has been added to allow you to do such things as STOP the CPU to reduce * power usage. *******************************************************************************************************/ #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) void OS_IdleTask(void *p_arg) { #if ((OS_CFG_DBG_EN == DEF_ENABLED) || (OS_CFG_STAT_TASK_EN == DEF_ENABLED)) CPU_SR_ALLOC(); #endif (void)p_arg; // Prevent compiler warning for not using 'p_arg' while (DEF_ON) { #if ((OS_CFG_DBG_EN == DEF_ENABLED) || (OS_CFG_STAT_TASK_EN == DEF_ENABLED)) CPU_CRITICAL_ENTER(); #if (OS_CFG_DBG_EN == DEF_ENABLED) OSIdleTaskCtr++; #endif #if (OS_CFG_STAT_TASK_EN == DEF_ENABLED) OSStatTaskCtr++; #endif CPU_CRITICAL_EXIT(); #endif #if (OS_CFG_APP_HOOKS_EN == DEF_ENABLED) OSIdleTaskHook(); // Call user definable HOOK #endif } } #endif /****************************************************************************************************//** * OS_IdleTaskInit() * * @brief This function initializes the idle task. * * @param p_err Pointer to the variable that will receive one of the following error code(s) from this function: * - RTOS_ERR_NONE * - RTOS_ERR_OS_ILLEGAL_RUN_TIME * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) void OS_IdleTaskInit(RTOS_ERR *p_err) { #if (OS_CFG_DBG_EN == DEF_ENABLED) OSIdleTaskCtr = 0u; #endif // --------------- CREATE THE IDLE TASK --------------- OSTaskCreate(&OSIdleTaskTCB, (CPU_CHAR *)((void *)"Kernel's Idle Task"), OS_IdleTask, DEF_NULL, (OS_CFG_PRIO_MAX - 1u), OSCfg_IdleTaskStkBasePtr, OSCfg_IdleTaskStkLimit, OSCfg_IdleTaskStkSize, 0u, 0u, DEF_NULL, (OS_OPT_TASK_STK_CHK | OS_OPT_TASK_STK_CLR | OS_OPT_TASK_NO_TLS), p_err); } #endif /****************************************************************************************************//** * OS_Pend() * * @brief This function is called to place a task in the blocked state waiting for an event to occur. * This function exists because it is common to a number of OSxxxPend() services. * * @param p_obj Pointer to the object to pend on. If there are no object used to pend * on then the caller must pass a NULL pointer. * * @param pending_on Specifies what the task will be pending on: * - OS_TASK_PEND_ON_FLAG * - OS_TASK_PEND_ON_TASK_Q <- No object (pending for a message sent to * the task) * - OS_TASK_PEND_ON_MUTEX * - OS_TASK_PEND_ON_Q * - OS_TASK_PEND_ON_SEM * - OS_TASK_PEND_ON_TASK_SEM <- No object (pending on a signal sent to * the task) * * @param timeout Amount of time the task will wait for the event to occur. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. 
*******************************************************************************************************/ void OS_Pend(OS_PEND_OBJ *p_obj, OS_STATE pending_on, OS_TICK timeout) { OS_PEND_LIST *p_pend_list; OSTCBCurPtr->PendOn = pending_on; // Resource not available, wait until it is OSTCBCurPtr->PendStatus = OS_STATUS_PEND_OK; OS_TaskBlock(OSTCBCurPtr, // Block the task and add it to the tick list if needed timeout); if (p_obj != DEF_NULL) { // Add the current task to the pend list ... p_pend_list = &p_obj->PendList; // ... if there is an object to pend on OSTCBCurPtr->PendObjPtr = p_obj; // Save the pointer to the object pending on OS_PendListInsertPrio(p_pend_list, // Insert in the pend list in priority order OSTCBCurPtr); } else { OSTCBCurPtr->PendObjPtr = DEF_NULL; // If no object being pended on, clear the pend object } #if (OS_CFG_DBG_EN == DEF_ENABLED) OS_PendDbgNameAdd(p_obj, OSTCBCurPtr); #endif } /****************************************************************************************************//** * OS_PendAbort() * * @brief This function is called by the OSxxxPendAbort() and OSxxxDel() functions to cancel pending * on an event. * * @param p_tcb Pointer to the OS_TCB of the task that we'll abort the pend for. * * @param ts Timestamp as to when the pend was cancelled. * * @param reason Indicates how the task was readied: * - OS_STATUS_PEND_DEL Object pended on was deleted. * - OS_STATUS_PEND_ABORT Pend was aborted. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_PendAbort(OS_TCB *p_tcb, CPU_TS ts, OS_STATUS reason) { #if (OS_CFG_TS_EN == DEF_DISABLED) (void)ts; // Prevent compiler warning for not using 'ts' #endif switch (p_tcb->TaskState) { case OS_TASK_STATE_PEND: case OS_TASK_STATE_PEND_TIMEOUT: #if (OS_MSG_EN == DEF_ENABLED) p_tcb->MsgPtr = DEF_NULL; p_tcb->MsgSize = 0u; #endif #if (OS_CFG_TS_EN == DEF_ENABLED) p_tcb->TS = ts; #endif OS_PendListRemove(p_tcb); // Remove task from the pend list #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) if (p_tcb->TaskState == OS_TASK_STATE_PEND_TIMEOUT) { OS_TickListRemove(p_tcb); // Cancel the timeout } #endif OS_RdyListInsert(p_tcb); // Insert the task in the ready list p_tcb->TaskState = OS_TASK_STATE_RDY; // Task will be ready p_tcb->PendStatus = reason; // Indicate how the task became ready p_tcb->PendOn = OS_TASK_PEND_ON_NOTHING; // Indicate no longer pending break; case OS_TASK_STATE_PEND_SUSPENDED: case OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED: #if (OS_MSG_EN == DEF_ENABLED) p_tcb->MsgPtr = DEF_NULL; p_tcb->MsgSize = 0u; #endif #if (OS_CFG_TS_EN == DEF_ENABLED) p_tcb->TS = ts; #endif OS_PendListRemove(p_tcb); // Remove task from the pend list #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) if (p_tcb->TaskState == OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED) { OS_TickListRemove(p_tcb); // Cancel the timeout } #endif p_tcb->TaskState = OS_TASK_STATE_SUSPENDED; // Task needs to remain suspended p_tcb->PendStatus = reason; // Indicate how the task became ready p_tcb->PendOn = OS_TASK_PEND_ON_NOTHING; // Indicate no longer pending break; case OS_TASK_STATE_RDY: // Cannot cancel a pend when a task is in these states. 
case OS_TASK_STATE_DLY: case OS_TASK_STATE_SUSPENDED: case OS_TASK_STATE_DLY_SUSPENDED: break; case OS_TASK_STATE_DEL: default: RTOS_CRITICAL_FAIL_EXEC(RTOS_ERR_OS,; ); } } /****************************************************************************************************//** * OS_PendDbgNameAdd() * * @brief Add pointers to ASCII 'names' of objects so they can easily be displayed using a Kernel * aware tool. * * @param p_obj Pointer to the object being pended on. * * @param p_tcb Pointer to the OS_TCB of the task pending on the object. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ #if (OS_CFG_DBG_EN == DEF_ENABLED) void OS_PendDbgNameAdd(OS_PEND_OBJ *p_obj, OS_TCB *p_tcb) { OS_PEND_LIST *p_pend_list; OS_TCB *p_tcb1; if (p_obj != DEF_NULL) { p_tcb->DbgNamePtr = p_obj->NamePtr; // Task pending on this object ... save name in TCB p_pend_list = &p_obj->PendList; // Find name of HP task pending on this object ... p_tcb1 = p_pend_list->HeadPtr; p_obj->DbgNamePtr = p_tcb1->NamePtr; // ... Save in object } else { switch (p_tcb->PendOn) { case OS_TASK_PEND_ON_TASK_Q: p_tcb->DbgNamePtr = (CPU_CHAR *)((void *)"Task Q"); break; case OS_TASK_PEND_ON_TASK_SEM: p_tcb->DbgNamePtr = (CPU_CHAR *)((void *)"Task Sem"); break; default: p_tcb->DbgNamePtr = (CPU_CHAR *)((void *)" "); break; } } } /****************************************************************************************************//** * OS_PendDbgNameRemove() * * @brief Remove pointers to ASCII 'names' of objects so they can easily be displayed using a Kernel * aware tool. * * @param p_obj Pointer to the object being pended on. * * @param p_tcb Pointer to the OS_TCB of the task pending on the object. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_PendDbgNameRemove(OS_PEND_OBJ *p_obj, OS_TCB *p_tcb) { OS_PEND_LIST *p_pend_list; OS_TCB *p_tcb1; p_tcb->DbgNamePtr = (CPU_CHAR *)((void *)" "); // Remove name of object pended on for readied task if (p_obj != DEF_NULL) { p_pend_list = &p_obj->PendList; p_tcb1 = p_pend_list->HeadPtr; if (p_tcb1 != DEF_NULL) { // Find name of HP task pending on this object ... p_obj->DbgNamePtr = p_tcb1->NamePtr; // ... Save in object } else { p_obj->DbgNamePtr = (CPU_CHAR *)((void *)" "); // Or no other task is pending on object } } } #endif /****************************************************************************************************//** * OS_PendListChangePrio() * * @brief This function is called to change the position of a task waiting in a pend list. The * strategy used is to remove the task from the pend list and add it again using its changed * priority. * * @param p_tcb Pointer to the TCB of the task to move. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. * * @note (2) It's assumed that the TCB contains the NEW priority in its .Prio field. 
*******************************************************************************************************/ void OS_PendListChangePrio(OS_TCB *p_tcb) { OS_PEND_LIST *p_pend_list; OS_PEND_OBJ *p_obj; p_obj = p_tcb->PendObjPtr; // Get pointer to pend list p_pend_list = &p_obj->PendList; if (p_pend_list->HeadPtr->PendNextPtr != DEF_NULL) { // Only move if multiple entries in the list OS_PendListRemove(p_tcb); // Remove entry from current position p_tcb->PendObjPtr = p_obj; OS_PendListInsertPrio(p_pend_list, // INSERT it back in the list p_tcb); } } /****************************************************************************************************//** * OS_PendListInit() * * @brief This function is called to initialize the fields of an OS_PEND_LIST. * * @param p_pend_list Pointer to an OS_PEND_LIST. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_PendListInit(OS_PEND_LIST *p_pend_list) { p_pend_list->HeadPtr = DEF_NULL; p_pend_list->TailPtr = DEF_NULL; #if (OS_CFG_DBG_EN == DEF_ENABLED) p_pend_list->NbrEntries = 0u; #endif } /****************************************************************************************************//** * OS_PendListInsertPrio() * * @brief This function is called to place an OS_TCB entry in a linked list based on its priority. * The highest priority being placed at the head of the list. The TCB is assumed to contain * the priority of the task in its .Prio field. * @verbatim * CASE 0: Insert in an empty list. * * OS_PEND_LIST * +---------------+ * | TailPtr |-> 0 * +---------------+ * | HeadPtr |-> 0 * +---------------+ * | NbrEntries=0 | * +---------------+ * * CASE 1: Insert BEFORE or AFTER an OS_TCB * * OS_PEND_LIST * +--------------+ OS_TCB * | TailPtr |-+--> +--------------+ * +--------------+ | | PendNextPtr |->0 * | HeadPtr |-/ +--------------+ * +--------------+ 0<-| PendPrevPtr | * | NbrEntries=1 | +--------------+ * +--------------+ | | * +--------------+ * | | * +--------------+ * * OS_PEND_LIST * +--------------+ * | TailPtr |---------------------------------------------+ * +--------------+ OS_TCB OS_TCB | OS_TCB * | HeadPtr |----> +--------------+ +--------------+ +-> +--------------+ * +--------------+ | PendNextPtr |<----| PendNextPtr | .... | PendNextPtr |->0 * | NbrEntries=N | +--------------+ +--------------+ +--------------+ * +--------------+ 0<-| PendPrevPtr |<----| PendPrevPtr | .... | PendPrevPtr | * +--------------+ +--------------+ +--------------+ * | | | | | | * +--------------+ +--------------+ +--------------+ * | | | | | | * +--------------+ +--------------+ +--------------+ * @endverbatim * * @param p_pend_list Pointer to the OS_PEND_LIST where the OS_TCB entry will be inserted. * * @param p_tcb The OS_TCB to insert in the list. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. 
*******************************************************************************************************/ void OS_PendListInsertPrio(OS_PEND_LIST *p_pend_list, OS_TCB *p_tcb) { OS_PRIO prio; OS_TCB *p_tcb_next; prio = p_tcb->Prio; // Obtain the priority of the task to insert if (p_pend_list->HeadPtr == DEF_NULL) { // CASE 0: Insert when there are no entries #if (OS_CFG_DBG_EN == DEF_ENABLED) p_pend_list->NbrEntries = 1u; // This is the first entry #endif p_tcb->PendNextPtr = DEF_NULL; // No other OS_TCBs in the list p_tcb->PendPrevPtr = DEF_NULL; p_pend_list->HeadPtr = p_tcb; p_pend_list->TailPtr = p_tcb; } else { #if (OS_CFG_DBG_EN == DEF_ENABLED) p_pend_list->NbrEntries++; // CASE 1: One more OS_TCBs in the list #endif p_tcb_next = p_pend_list->HeadPtr; while (p_tcb_next != DEF_NULL) { // Find the position where to insert if (prio < p_tcb_next->Prio) { break; // Found! ... insert BEFORE current } else { p_tcb_next = p_tcb_next->PendNextPtr; // Not Found, follow the list } } if (p_tcb_next == DEF_NULL) { // TCB to insert is lowest in priority p_tcb->PendNextPtr = DEF_NULL; // ... insert at the tail. p_tcb->PendPrevPtr = p_pend_list->TailPtr; p_tcb->PendPrevPtr->PendNextPtr = p_tcb; p_pend_list->TailPtr = p_tcb; } else { if (p_tcb_next->PendPrevPtr == DEF_NULL) { // Is new TCB highest priority? p_tcb->PendNextPtr = p_tcb_next; // Yes, insert as new Head of list p_tcb->PendPrevPtr = DEF_NULL; p_tcb_next->PendPrevPtr = p_tcb; p_pend_list->HeadPtr = p_tcb; } else { // No, insert in between two entries p_tcb->PendNextPtr = p_tcb_next; p_tcb->PendPrevPtr = p_tcb_next->PendPrevPtr; p_tcb->PendPrevPtr->PendNextPtr = p_tcb; p_tcb_next->PendPrevPtr = p_tcb; } } } } /****************************************************************************************************//** * OS_PendListRemove() * * @brief This function is called to remove a task from a pend list knowing its TCB. * @verbatim * CASE 0: OS_PEND_LIST list is empty, nothing to do. * * CASE 1: Only 1 OS_TCB in the list. * * OS_PEND_LIST * +--------------+ OS_TCB * | TailPtr |-+--> +--------------+ * +--------------+ | | PendNextPtr |->0 * | HeadPtr |-/ +--------------+ * +--------------+ 0<-| PendPrevPtr | * | NbrEntries=1 | +--------------+ * +--------------+ | | * +--------------+ * | | * +--------------+ * * CASE N: Two or more OS_TCBs in the list. * * OS_PEND_LIST * +--------------+ * | TailPtr |---------------------------------------------+ * +--------------+ OS_TCB OS_TCB | OS_TCB * | HeadPtr |----> +--------------+ +--------------+ +-> +--------------+ * +--------------+ | PendNextPtr |<----| PendNextPtr | .... | PendNextPtr |->0 * | NbrEntries=N | +--------------+ +--------------+ +--------------+ * +--------------+ 0<-| PendPrevPtr |<----| PendPrevPtr | .... | PendPrevPtr | * +--------------+ +--------------+ +--------------+ * | | | | | | * +--------------+ +--------------+ +--------------+ * | | | | | | * +--------------+ +--------------+ +--------------+ * @endverbatim * @param p_tcb Pointer to the TCB of the task to remove from the pend list. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_PendListRemove(OS_TCB *p_tcb) { OS_PEND_LIST *p_pend_list; OS_TCB *p_next; OS_TCB *p_prev; if (p_tcb->PendObjPtr != DEF_NULL) { // Only remove if object has a pend list. p_pend_list = &p_tcb->PendObjPtr->PendList; // Get pointer to pend list // Remove TCB from the pend list. 
if (p_pend_list->HeadPtr->PendNextPtr == DEF_NULL) { p_pend_list->HeadPtr = DEF_NULL; // Only one entry in the pend list p_pend_list->TailPtr = DEF_NULL; } else if (p_tcb->PendPrevPtr == DEF_NULL) { // See if entry is at the head of the list p_next = p_tcb->PendNextPtr; // Yes p_next->PendPrevPtr = DEF_NULL; p_pend_list->HeadPtr = p_next; } else if (p_tcb->PendNextPtr == DEF_NULL) { // See if entry is at the tail of the list p_prev = p_tcb->PendPrevPtr; // Yes p_prev->PendNextPtr = DEF_NULL; p_pend_list->TailPtr = p_prev; } else { p_prev = p_tcb->PendPrevPtr; // Remove from inside the list p_next = p_tcb->PendNextPtr; p_prev->PendNextPtr = p_next; p_next->PendPrevPtr = p_prev; } #if (OS_CFG_DBG_EN == DEF_ENABLED) p_pend_list->NbrEntries--; // One less entry in the list #endif p_tcb->PendNextPtr = DEF_NULL; p_tcb->PendPrevPtr = DEF_NULL; p_tcb->PendObjPtr = DEF_NULL; } } /****************************************************************************************************//** * OS_Post() * * @brief This function is called to post to a task. This function exist because it is common to a * number of OSxxxPost() services. * * @param p_obj Pointer to the object being posted to. If there are no object posted to * then the caller must pass a NULL pointer. * * @param p_tcb Pointer to the OS_TCB that will receive the 'post'. * * @param p_void If we are posting a message to a task, this is the message that the task * will receive. * * @param msg_size If we are posting a message to a task, this is the size of the message. * * @param ts The timestamp as to when the post occurred. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_Post(OS_PEND_OBJ *p_obj, OS_TCB *p_tcb, void *p_void, OS_MSG_SIZE msg_size, CPU_TS ts) { #if (OS_CFG_TS_EN == DEF_DISABLED) (void)ts; // Prevent compiler warning for not using 'ts' #endif #if (OS_MSG_EN == DEF_DISABLED) (void)msg_size; // Prevent compiler warning for not using 'msg_size' #endif #if (OS_MSG_EN == DEF_DISABLED) (void)p_void; // Prevent compiler warning for not using 'p_void' #endif switch (p_tcb->TaskState) { case OS_TASK_STATE_RDY: // Cannot Post a task that is ready case OS_TASK_STATE_DLY: // Cannot Post a task that is delayed case OS_TASK_STATE_SUSPENDED: // Cannot Post a suspended task case OS_TASK_STATE_DLY_SUSPENDED: // Cannot Post a suspended task that was also dly'd break; case OS_TASK_STATE_PEND: case OS_TASK_STATE_PEND_TIMEOUT: #if (OS_MSG_EN == DEF_ENABLED) p_tcb->MsgPtr = p_void; // Deposit message in OS_TCB of task waiting p_tcb->MsgSize = msg_size; // ... assuming posting a message #endif #if (OS_CFG_TS_EN == DEF_ENABLED) p_tcb->TS = ts; #endif if (p_obj != DEF_NULL) { OS_PendListRemove(p_tcb); // Remove task from pend list } #if (OS_CFG_DBG_EN == DEF_ENABLED) OS_PendDbgNameRemove(p_obj, p_tcb); #endif #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) if (p_tcb->TaskState == OS_TASK_STATE_PEND_TIMEOUT) { OS_TickListRemove(p_tcb); // Remove from tick list } #endif OS_RdyListInsert(p_tcb); // Insert the task in the ready list p_tcb->TaskState = OS_TASK_STATE_RDY; p_tcb->PendStatus = OS_STATUS_PEND_OK; // Clear pend status p_tcb->PendOn = OS_TASK_PEND_ON_NOTHING; // Indicate no longer pending break; case OS_TASK_STATE_PEND_SUSPENDED: case OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED: #if (OS_MSG_EN == DEF_ENABLED) p_tcb->MsgPtr = p_void; // Deposit message in OS_TCB of task waiting p_tcb->MsgSize = msg_size; // ... 
assuming posting a message #endif #if (OS_CFG_TS_EN == DEF_ENABLED) p_tcb->TS = ts; #endif if (p_obj != DEF_NULL) { OS_PendListRemove(p_tcb); // Remove from pend list } #if (OS_CFG_DBG_EN == DEF_ENABLED) OS_PendDbgNameRemove(p_obj, p_tcb); #endif #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) if (p_tcb->TaskState == OS_TASK_STATE_PEND_TIMEOUT_SUSPENDED) { OS_TickListRemove(p_tcb); // Cancel any timeout } #endif p_tcb->TaskState = OS_TASK_STATE_SUSPENDED; p_tcb->PendStatus = OS_STATUS_PEND_OK; // Clear pend status p_tcb->PendOn = OS_TASK_PEND_ON_NOTHING; // Indicate no longer pending break; case OS_TASK_STATE_DEL: #if (OS_CFG_TASK_DEL_EN == DEF_ENABLED) break; #endif default: RTOS_CRITICAL_FAIL_EXEC(RTOS_ERR_OS,; ); } } /****************************************************************************************************//** * OS_RdyListInit() * * @brief This function is called by OSInit() to initialize the ready list. The ready list contains * a list of all the tasks that are ready to run. The list is actually an array of OS_RDY_LIST. * An OS_RDY_LIST contains three fields. The number of OS_TCBs in the list (i.e. .NbrEntries), * a pointer to the first OS_TCB in the OS_RDY_LIST (i.e. .HeadPtr) and a pointer to the last * OS_TCB in the OS_RDY_LIST (i.e. .TailPtr). * @n * OS_TCBs are doubly linked in the OS_RDY_LIST and each OS_TCB points back to the OS_RDY_LIST * it belongs to. * @n * 'OS_RDY_LIST OSRdyTbl[OS_CFG_PRIO_MAX]' looks like this once initialized: * @verbatim * +---------------+--------------+ * | | TailPtr |-----> 0 * [0] | NbrEntries=0 +--------------+ * | | HeadPtr |-----> 0 * +---------------+--------------+ * | | TailPtr |-----> 0 * [1] | NbrEntries=0 +--------------+ * | | HeadPtr |-----> 0 * +---------------+--------------+ * : : * : : * : : * +---------------+--------------+ * | | TailPtr |-----> 0 * [OS_CFG_PRIO_MAX-1] | NbrEntries=0 +--------------+ * | | HeadPtr |-----> 0 * +---------------+--------------+ * @endverbatim * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_RdyListInit(void) { CPU_INT32U i; OS_RDY_LIST *p_rdy_list; for (i = 0u; i < OS_CFG_PRIO_MAX; i++) { // Initialize the array of OS_RDY_LIST at each priority p_rdy_list = &OSRdyList[i]; #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries = 0u; #endif p_rdy_list->HeadPtr = DEF_NULL; p_rdy_list->TailPtr = DEF_NULL; } } /****************************************************************************************************//** * OS_RdyListInsert() * * @brief This function is called to insert a TCB in the ready list. * * The TCB is inserted at the tail of the list if the priority of the TCB is the same as the * priority of the current task. The TCB is inserted at the head of the list if not. * * @param p_tcb Pointer to the TCB to insert into the ready list. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_RdyListInsert(OS_TCB *p_tcb) { OS_PrioInsert(p_tcb->Prio); if (p_tcb->Prio == OSPrioCur) { // Are we readying a task at the same prio? 
OS_RdyListInsertTail(p_tcb); // Yes, insert readied task at the end of the list } else { OS_RdyListInsertHead(p_tcb); // No, insert readied task at the beginning of the list } OS_TRACE_TASK_READY(p_tcb); } /****************************************************************************************************//** * OS_RdyListInsertHead() * * @brief This function is called to place an OS_TCB at the beginning of a linked list as follows: * * CASE 0: Insert in an empty list. * * OS_RDY_LIST * +--------------+ * | TailPtr |-> 0 * +--------------+ * | HeadPtr |-> 0 * +--------------+ * | NbrEntries=0 | * +--------------+ * * CASE 1: Insert BEFORE the current head of list * * OS_RDY_LIST * +--------------+ OS_TCB * | TailPtr |-+--> +------------+ * +--------------+ | | NextPtr |->0 * | HeadPtr |-/ +------------+ * +--------------+ 0<-| PrevPtr | * | NbrEntries=1 | +------------+ * +--------------+ : : * : : * +------------+ * * OS_RDY_LIST * +--------------+ * | TailPtr |-----------------------------------------+ * +--------------+ OS_TCB OS_TCB | OS_TCB * | HeadPtr |----> +------------+ +------------+ +-> +------------+ * +--------------+ | NextPtr |---->| NextPtr | .... | NextPtr |->0 * | NbrEntries=N | +------------+ +------------+ +------------+ * +--------------+ 0<-| PrevPtr |<----| PrevPtr | .... | PrevPtr | * +------------+ +------------+ +------------+ * : : : : : : * : : : : : : * +------------+ +------------+ +------------+ * * @param p_tcb Pointer to the TCB to insert into the ready list. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_RdyListInsertHead(OS_TCB *p_tcb) { OS_RDY_LIST *p_rdy_list; OS_TCB *p_tcb2; p_rdy_list = &OSRdyList[p_tcb->Prio]; if (p_rdy_list->HeadPtr == DEF_NULL) { // CASE 0: Insert when there are no entries #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries = 1u; // This is the first entry #endif p_tcb->NextPtr = DEF_NULL; // No other OS_TCBs in the list p_tcb->PrevPtr = DEF_NULL; p_rdy_list->HeadPtr = p_tcb; // Both list pointers point to this OS_TCB p_rdy_list->TailPtr = p_tcb; } else { // CASE 1: Insert BEFORE the current head of list #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries++; // One more OS_TCB in the list #endif p_tcb->NextPtr = p_rdy_list->HeadPtr; // Adjust new OS_TCBs links p_tcb->PrevPtr = DEF_NULL; p_tcb2 = p_rdy_list->HeadPtr; // Adjust old head of list's links p_tcb2->PrevPtr = p_tcb; p_rdy_list->HeadPtr = p_tcb; } } /****************************************************************************************************//** * OS_RdyListInsertTail() * * @brief This function is called to place an OS_TCB at the end of a linked list as follows: * * CASE 0: Insert in an empty list. * * OS_RDY_LIST * +--------------+ * | TailPtr |-> 0 * +--------------+ * | HeadPtr |-> 0 * +--------------+ * | NbrEntries=0 | * +--------------+ * * CASE 1: Insert AFTER the current tail of list * * OS_RDY_LIST * +--------------+ OS_TCB * | TailPtr |-+--> +------------+ * +--------------+ | | NextPtr |->0 * | HeadPtr |-/ +------------+ * +--------------+ 0<-| PrevPtr | * | NbrEntries=1 | +------------+ * +--------------+ : : * : : * +------------+ * * OS_RDY_LIST * +--------------+ * | TailPtr |-----------------------------------------+ * +--------------+ OS_TCB OS_TCB | OS_TCB * | HeadPtr |----> +------------+ +------------+ +-> +------------+ * +--------------+ | NextPtr |---->| NextPtr | .... 
| NextPtr |->0 * | NbrEntries=N | +------------+ +------------+ +------------+ * +--------------+ 0<-| PrevPtr |<----| PrevPtr | .... | PrevPtr | * +------------+ +------------+ +------------+ * : : : : : : * : : : : : : * +------------+ +------------+ +------------+ * * @param p_tcb Pointer to the TCB to insert into the ready list. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_RdyListInsertTail(OS_TCB *p_tcb) { OS_RDY_LIST *p_rdy_list; OS_TCB *p_tcb2; p_rdy_list = &OSRdyList[p_tcb->Prio]; if (p_rdy_list->HeadPtr == DEF_NULL) { // CASE 0: Insert when there are no entries #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries = 1u; // This is the first entry #endif p_tcb->NextPtr = DEF_NULL; // No other OS_TCBs in the list p_tcb->PrevPtr = DEF_NULL; p_rdy_list->HeadPtr = p_tcb; // Both list pointers point to this OS_TCB p_rdy_list->TailPtr = p_tcb; } else { // CASE 1: Insert AFTER the current tail of list #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries++; // One more OS_TCB in the list #endif p_tcb->NextPtr = DEF_NULL; // Adjust new OS_TCBs links p_tcb2 = p_rdy_list->TailPtr; p_tcb->PrevPtr = p_tcb2; p_tcb2->NextPtr = p_tcb; // Adjust old tail of list's links p_rdy_list->TailPtr = p_tcb; } } /****************************************************************************************************//** * OS_RdyListMoveHeadToTail() * * @brief This function is called to move the current head of a list to the tail of the list. * * CASE 0: TCB list is empty, nothing to do. * * CASE 1: Only 1 OS_TCB in the list, nothing to do. * * CASE 2: Only 2 OS_TCBs in the list. * * OS_RDY_LIST * +--------------+ * | TailPtr |----------------------+ * +--------------+ OS_TCB | OS_TCB * | HeadPtr |----> +------------+ +-> +------------+ * +--------------+ | NextPtr |----> | NextPtr |->0 * | NbrEntries=2 | +------------+ +------------+ * +--------------+ 0<-| PrevPtr | <----| PrevPtr | * +------------+ +------------+ * : : : : * : : : : * +------------+ +------------+ * * CASE N: More than 2 OS_TCBs in the list. * * OS_RDY_LIST * +--------------+ * | TailPtr |-----------------------------------------+ * +--------------+ OS_TCB OS_TCB | OS_TCB * | HeadPtr |----> +------------+ +------------+ +-> +------------+ * +--------------+ | NextPtr |---->| NextPtr | .... | NextPtr |->0 * | NbrEntries=N | +------------+ +------------+ +------------+ * +--------------+ 0<-| PrevPtr |<----| PrevPtr | .... | PrevPtr | * +------------+ +------------+ +------------+ * : : : : : : * : : : : : : * +------------+ +------------+ +------------+ * * @param p_list Pointer to the OS_RDY_LIST where the OS_TCB will be moved. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. 
*******************************************************************************************************/ void OS_RdyListMoveHeadToTail(OS_RDY_LIST *p_rdy_list) { OS_TCB *p_tcb1; OS_TCB *p_tcb2; OS_TCB *p_tcb3; if (p_rdy_list->HeadPtr != p_rdy_list->TailPtr) { if (p_rdy_list->HeadPtr->NextPtr == p_rdy_list->TailPtr) { // SWAP the TCBs p_tcb1 = p_rdy_list->HeadPtr; // Point to current head p_tcb2 = p_rdy_list->TailPtr; // Point to current tail p_tcb1->PrevPtr = p_tcb2; p_tcb1->NextPtr = DEF_NULL; p_tcb2->PrevPtr = DEF_NULL; p_tcb2->NextPtr = p_tcb1; p_rdy_list->HeadPtr = p_tcb2; p_rdy_list->TailPtr = p_tcb1; } else { p_tcb1 = p_rdy_list->HeadPtr; // Point to current head p_tcb2 = p_rdy_list->TailPtr; // Point to current tail p_tcb3 = p_tcb1->NextPtr; // Point to new list head p_tcb3->PrevPtr = DEF_NULL; // Adjust back link of new list head p_tcb1->NextPtr = DEF_NULL; // Adjust forward link of new list tail p_tcb1->PrevPtr = p_tcb2; // Adjust back link of new list tail p_tcb2->NextPtr = p_tcb1; // Adjust forward link of old list tail p_rdy_list->HeadPtr = p_tcb3; // Adjust new list head and tail pointers p_rdy_list->TailPtr = p_tcb1; } } } /****************************************************************************************************//** * OS_RdyListRemove() * * @brief This function is called to remove an OS_TCB from an OS_RDY_LIST knowing the address of the * OS_TCB to remove. * * CASE 0: TCB list is empty, nothing to do. * * CASE 1: Only 1 OS_TCBs in the list. * * OS_RDY_LIST * +--------------+ OS_TCB * | TailPtr |-+--> +------------+ * +--------------+ | | NextPtr |->0 * | HeadPtr |-/ +------------+ * +--------------+ 0<-| PrevPtr | * | NbrEntries=1 | +------------+ * +--------------+ : : * : : * +------------+ * * CASE N: Two or more OS_TCBs in the list. * * OS_RDY_LIST * +--------------+ * | TailPtr |-----------------------------------------+ * +--------------+ OS_TCB OS_TCB | OS_TCB * | HeadPtr |----> +------------+ +------------+ +-> +------------+ * +--------------+ | NextPtr |---->| NextPtr | .... | NextPtr |->0 * | NbrEntries=N | +------------+ +------------+ +------------+ * +--------------+ 0<-| PrevPtr |<----| PrevPtr | .... | PrevPtr | * +------------+ +------------+ +------------+ * : : : : : : * : : : : : : * +------------+ +------------+ +------------+ * * @param p_tcb Pointer to the OS_TCB to remove. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_RdyListRemove(OS_TCB *p_tcb) { OS_RDY_LIST *p_rdy_list; OS_TCB *p_tcb1; OS_TCB *p_tcb2; p_rdy_list = &OSRdyList[p_tcb->Prio]; p_tcb1 = p_tcb->PrevPtr; // Point to next and previous OS_TCB in the list p_tcb2 = p_tcb->NextPtr; if (p_tcb1 == DEF_NULL) { // Was the OS_TCB to remove at the head? if (p_tcb2 == DEF_NULL) { // Yes, was it the only OS_TCB? 
#if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries = 0u; // Yes, no more entries #endif p_rdy_list->HeadPtr = DEF_NULL; p_rdy_list->TailPtr = DEF_NULL; OS_PrioRemove(p_tcb->Prio); } else { #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries--; // No, one less entry #endif p_tcb2->PrevPtr = DEF_NULL; // adjust back link of new list head p_rdy_list->HeadPtr = p_tcb2; // adjust OS_RDY_LIST's new head } } else { #if (OS_CFG_DBG_EN == DEF_ENABLED) p_rdy_list->NbrEntries--; // No, one less entry #endif p_tcb1->NextPtr = p_tcb2; if (p_tcb2 == DEF_NULL) { p_rdy_list->TailPtr = p_tcb1; // Removing the TCB at the tail, adj the tail ptr } else { p_tcb2->PrevPtr = p_tcb1; } } p_tcb->PrevPtr = DEF_NULL; p_tcb->NextPtr = DEF_NULL; OS_TRACE_TASK_SUSPENDED(p_tcb); } /****************************************************************************************************//** * OS_SchedLockTimeMeasStart() * * @brief Start measuring the time the scheduler is locked. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. * * @note (2) It's assumed that this function is called when interrupts are disabled. * * @note (3) We are reading the CPU_TS_TmrRd() directly even if this is a 16-bit timer. The reason * is that we don't expect to have the scheduler locked for 65536 counts even at the rate * the TS timer is updated. In other words, locking the scheduler for longer than 65536 * counts would not be a good thing for a real-time system. *******************************************************************************************************/ #if (OS_CFG_SCHED_LOCK_TIME_MEAS_EN == DEF_ENABLED) void OS_SchedLockTimeMeasStart(void) { if (OSSchedLockNestingCtr == 1u) { OSSchedLockTimeBegin = CPU_TS_TmrRd(); } } /****************************************************************************************************//** * OS_SchedLockTimeMeasStop() * * @brief Stop measuring the time the scheduler is locked and update the current and max locked * times. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. * * @note (2) It's assumed that this function is called when interrupts are disabled. * * @note (3) We are reading the CPU_TS_TmrRd() directly even if this is a 16-bit timer. The reason * is that we don't expect to have the scheduler locked for 65536 counts even at the rate * the TS timer is updated. In other words, locking the scheduler for longer than 65536 * counts would not be a good thing for a real-time system. *******************************************************************************************************/ void OS_SchedLockTimeMeasStop(void) { CPU_TS_TMR delta; if (OSSchedLockNestingCtr == 0u) { // Make sure we fully un-nested scheduler lock delta = CPU_TS_TmrRd() // Compute the delta time between begin and end - OSSchedLockTimeBegin; if (OSSchedLockTimeMax < delta) { // Detect peak value OSSchedLockTimeMax = delta; } if (OSSchedLockTimeMaxCur < delta) { // Detect peak value (for resettable value) OSSchedLockTimeMaxCur = delta; } } } #endif /****************************************************************************************************//** * OS_SchedRoundRobin() * * @brief This function is called on every tick to determine if a new task at the same priority * needs to execute. * * @param p_rdy_list Pointer to the OS_RDY_LIST entry of the ready list at the current * priority. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. 
*******************************************************************************************************/ #if (OS_CFG_SCHED_ROUND_ROBIN_EN == DEF_ENABLED) void OS_SchedRoundRobin(OS_RDY_LIST *p_rdy_list) { OS_TCB *p_tcb; CPU_SR_ALLOC(); if (OSSchedRoundRobinEn != DEF_TRUE) { // Make sure round-robin has been enabled return; } CPU_CRITICAL_ENTER(); p_tcb = p_rdy_list->HeadPtr; // Decrement time quanta counter if (p_tcb == DEF_NULL) { CPU_CRITICAL_EXIT(); return; } #if (OS_CFG_TASK_IDLE_EN == DEF_ENABLED) if (p_tcb == &OSIdleTaskTCB) { CPU_CRITICAL_EXIT(); return; } #endif if (p_tcb->TimeQuantaCtr > 0u) { p_tcb->TimeQuantaCtr--; } if (p_tcb->TimeQuantaCtr > 0u) { // Task not done with its time quanta CPU_CRITICAL_EXIT(); return; } if (p_rdy_list->HeadPtr == p_rdy_list->TailPtr) { // See if it's time to time slice current task CPU_CRITICAL_EXIT(); // ... only if multiple tasks at same priority return; } if (OSSchedLockNestingCtr > 0u) { // Can't round-robin if the scheduler is locked CPU_CRITICAL_EXIT(); return; } OS_RdyListMoveHeadToTail(p_rdy_list); // Move current OS_TCB to the end of the list p_tcb = p_rdy_list->HeadPtr; // Point to new OS_TCB at head of the list if (p_tcb->TimeQuanta == 0u) { // See if we need to use the default time slice p_tcb->TimeQuantaCtr = OSSchedRoundRobinDfltTimeQuanta; } else { p_tcb->TimeQuantaCtr = p_tcb->TimeQuanta; // Load time slice counter with new time } CPU_CRITICAL_EXIT(); } #endif /****************************************************************************************************//** * OS_TaskBlock() * * @brief This function is called to remove a task from the ready list and also insert it in the * timer tick list if the specified timeout is non-zero. * * @param p_tcb Pointer to the OS_TCB of the task block. * * @param timeout The desired timeout. * * @note (1) This function is INTERNAL to the Kernel and your application MUST NOT call it. *******************************************************************************************************/ void OS_TaskBlock(OS_TCB *p_tcb, OS_TICK timeout) { #if (OS_CFG_TASK_TICK_EN == DEF_ENABLED) #if (OS_CFG_DYN_TICK_EN == DEF_ENABLED) OS_TICK tick_ctr; #endif if (timeout > 0u) { // Add task to tick list if timeout non zero #if (OS_CFG_DYN_TICK_EN == DEF_ENABLED) tick_ctr = BSP_OS_TickGet(); OS_TickListInsert(&OSTickListTimeout, p_tcb, timeout + (tick_ctr - OSTickCtr)); #else OS_TickListInsert(&OSTickListTimeout, p_tcb, timeout); #endif p_tcb->TaskState = OS_TASK_STATE_PEND_TIMEOUT; } else { p_tcb->TaskState = OS_TASK_STATE_PEND; } #else (void)timeout; // Prevent compiler warning for not using 'timeout' p_tcb->TaskState = OS_TASK_STATE_PEND; #endif OS_RdyListRemove(p_tcb); } /******************************************************************************************************** ******************************************************************************************************** * DEPENDENCIES & AVAIL CHECK(S) END ******************************************************************************************************** *******************************************************************************************************/ #endif // (defined(RTOS_MODULE_KERNEL_AVAIL))
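/* Illustrative only -- not part of the kernel sources above. A minimal sketch of the
 * application-side startup sequence implied by the checks in OSStart() (kernel
 * initialized, at least one application task created beyond the kernel's own tasks,
 * kernel not already running). The OSTaskCreate() argument order mirrors the
 * OS_IdleTaskInit() call above; OSInit(), RTOS_ERR_CODE_GET(), CPU_STK and the include
 * path are assumed from the Micrium OS API, and the task name, priority and stack
 * sizes are made up for the example.
 */
#include  <kernel/include/os.h>

#define  APP_TASK_PRIO      10u                               /* Assumed free priority  */
#define  APP_TASK_STK_SIZE  256u

static  OS_TCB   AppTaskTCB;
static  CPU_STK  AppTaskStk[APP_TASK_STK_SIZE];

static  void  AppTask (void *p_arg)
{
    (void)p_arg;
    while (DEF_ON) {
        /* Application work goes here.                                                  */
    }
}

int  main (void)
{
    RTOS_ERR  err;

    OSInit(&err);                                             /* Initialize the kernel  */
    if (RTOS_ERR_CODE_GET(err) != RTOS_ERR_NONE) {
        return (-1);
    }

    OSTaskCreate(&AppTaskTCB,                                 /* At least one app task  */
                 (CPU_CHAR *)"App Task",                      /* ... before OSStart()   */
                 AppTask,
                 DEF_NULL,
                 APP_TASK_PRIO,
                 &AppTaskStk[0],
                 (APP_TASK_STK_SIZE / 10u),
                 APP_TASK_STK_SIZE,
                 0u,
                 0u,
                 DEF_NULL,
                 (OS_OPT_TASK_STK_CHK | OS_OPT_TASK_STK_CLR),
                 &err);
    if (RTOS_ERR_CODE_GET(err) != RTOS_ERR_NONE) {
        return (-1);
    }

    OSStart(&err);                                            /* Hand over to scheduler */
    return (0);                                               /* Not reached on success */
}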
456393.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE606_Unchecked_Loop_Condition__wchar_t_environment_09.c Label Definition File: CWE606_Unchecked_Loop_Condition.label.xml Template File: sources-sinks-09.tmpl.c */ /* * @description * CWE: 606 Unchecked Input For Loop Condition * BadSource: environment Read input from an environment variable * GoodSource: Input a number less than MAX_LOOP * Sinks: * GoodSink: Use data as the for loop variant after checking to see if it is less than MAX_LOOP * BadSink : Use data as the for loop variant without checking its size * Flow Variant: 09 Control flow: if(GLOBAL_CONST_TRUE) and if(GLOBAL_CONST_FALSE) * * */ #include "std_testcase.h" #define MAX_LOOP 10000 #ifndef _WIN32 #include <wchar.h> #endif #define ENV_VARIABLE L"ADD" #ifdef _WIN32 #define GETENV _wgetenv #else #define GETENV getenv #endif #ifndef OMITBAD void CWE606_Unchecked_Loop_Condition__wchar_t_environment_09_bad() { wchar_t * data; wchar_t dataBuffer[100] = L""; data = dataBuffer; if(GLOBAL_CONST_TRUE) { { /* Append input from an environment variable to data */ size_t dataLen = wcslen(data); wchar_t * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { /* POTENTIAL FLAW: Read data from an environment variable */ wcsncat(data+dataLen, environment, 100-dataLen-1); } } } if(GLOBAL_CONST_TRUE) { { int i, n, intVariable; if (swscanf(data, L"%d", &n) == 1) { /* POTENTIAL FLAW: user-supplied value 'n' could lead to very large loop iteration */ intVariable = 0; for (i = 0; i < n; i++) { /* INCIDENTAL: CWE 561: Dead Code - non-avoidable if n <= 0 */ intVariable++; /* avoid a dead/empty code block issue */ } printIntLine(intVariable); } } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodB2G1() - use badsource and goodsink by changing the second GLOBAL_CONST_TRUE to GLOBAL_CONST_FALSE */ static void goodB2G1() { wchar_t * data; wchar_t dataBuffer[100] = L""; data = dataBuffer; if(GLOBAL_CONST_TRUE) { { /* Append input from an environment variable to data */ size_t dataLen = wcslen(data); wchar_t * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { /* POTENTIAL FLAW: Read data from an environment variable */ wcsncat(data+dataLen, environment, 100-dataLen-1); } } } if(GLOBAL_CONST_FALSE) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { { int i, n, intVariable; if (swscanf(data, L"%d", &n) == 1) { /* FIX: limit loop iteration counts */ if (n < MAX_LOOP) { intVariable = 0; for (i = 0; i < n; i++) { /* INCIDENTAL: CWE 561: Dead Code - non-avoidable if n <= 0 */ intVariable++; /* avoid a dead/empty code block issue */ } printIntLine(intVariable); } } } } } /* goodB2G2() - use badsource and goodsink by reversing the blocks in the second if */ static void goodB2G2() { wchar_t * data; wchar_t dataBuffer[100] = L""; data = dataBuffer; if(GLOBAL_CONST_TRUE) { { /* Append input from an environment variable to data */ size_t dataLen = wcslen(data); wchar_t * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { /* POTENTIAL FLAW: Read data from an environment variable */ wcsncat(data+dataLen, environment, 100-dataLen-1); } } } if(GLOBAL_CONST_TRUE) { { int i, n, intVariable; if (swscanf(data, L"%d", &n) == 1) { /* FIX: limit loop iteration counts */ if (n < MAX_LOOP) { intVariable = 0; for (i = 0; i < n; i++) { /* INCIDENTAL: CWE 561: Dead Code - non-avoidable if n <= 0 
*/ intVariable++; /* avoid a dead/empty code block issue */ } printIntLine(intVariable); } } } } } /* goodG2B1() - use goodsource and badsink by changing the first GLOBAL_CONST_TRUE to GLOBAL_CONST_FALSE */ static void goodG2B1() { wchar_t * data; wchar_t dataBuffer[100] = L""; data = dataBuffer; if(GLOBAL_CONST_FALSE) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Set data to a number less than MAX_LOOP */ wcscpy(data, L"15"); } if(GLOBAL_CONST_TRUE) { { int i, n, intVariable; if (swscanf(data, L"%d", &n) == 1) { /* POTENTIAL FLAW: user-supplied value 'n' could lead to very large loop iteration */ intVariable = 0; for (i = 0; i < n; i++) { /* INCIDENTAL: CWE 561: Dead Code - non-avoidable if n <= 0 */ intVariable++; /* avoid a dead/empty code block issue */ } printIntLine(intVariable); } } } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the first if */ static void goodG2B2() { wchar_t * data; wchar_t dataBuffer[100] = L""; data = dataBuffer; if(GLOBAL_CONST_TRUE) { /* FIX: Set data to a number less than MAX_LOOP */ wcscpy(data, L"15"); } if(GLOBAL_CONST_TRUE) { { int i, n, intVariable; if (swscanf(data, L"%d", &n) == 1) { /* POTENTIAL FLAW: user-supplied value 'n' could lead to very large loop iteration */ intVariable = 0; for (i = 0; i < n; i++) { /* INCIDENTAL: CWE 561: Dead Code - non-avoidable if n <= 0 */ intVariable++; /* avoid a dead/empty code block issue */ } printIntLine(intVariable); } } } } void CWE606_Unchecked_Loop_Condition__wchar_t_environment_09_good() { goodB2G1(); goodB2G2(); goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE606_Unchecked_Loop_Condition__wchar_t_environment_09_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE606_Unchecked_Loop_Condition__wchar_t_environment_09_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
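/* Illustrative only -- not part of the Juliet testcase above. A hedged sketch of the
 * same mitigation idea (bound a user-controlled loop count) using wcstol() with
 * explicit range checking instead of swscanf(); the helper name parse_loop_count()
 * is made up for the example.
 */
#include <wchar.h>
#include <errno.h>

static int parse_loop_count(const wchar_t *text, long max_loop, long *out)
{
    wchar_t *end = NULL;
    long value;

    errno = 0;
    value = wcstol(text, &end, 10);
    if (end == text || errno == ERANGE) {
        return 0;                       /* not a number, or out of range for long */
    }
    if (value < 0 || value >= max_loop) {
        return 0;                       /* reject negative or oversized counts    */
    }
    *out = value;
    return 1;
}

/* Usage: a bounded loop driven by validated input.
 *
 *   long n;
 *   if (parse_loop_count(data, MAX_LOOP, &n)) {
 *       for (long i = 0; i < n; i++) { ... }
 *   }
 */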
359498.c
/* ********************************************************** * Copyright (c) 2003-2008 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* * test of execve recursively */ #include <sys/types.h> #include <unistd.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include "common.h" #define NULL_ENV 0 const int N = 10; int main(int argc, char **argv) { int n, sum = 0; int result; char *arg[5]; char **env = NULL; char carg[10], csum[10]; if (find_dynamo_library()) printf("under DynamoRIO\n"); else printf("natively\n"); //printf("%d %s %s %s\n", argc, argv[0], argv[1], argv[2]); if (1 == argc) { // no args n = N; sum = 0; printf("Sum(%d)\n", n); } else if (2 == argc) { // execve-rec 10 n = atoi(argv[1]); sum = 0; printf("Sum(%d)\n", n); } else { assert(argc == 3); // execve-rec 9 10 n = atoi(argv[1]); sum = atoi(argv[2]); } if (0 == n) { printf(" = %d\n", sum); exit(0); } sum+=n; n--; snprintf(carg, 10, "%d", n); snprintf(csum, 10, "%d", sum); arg[0] = argv[0]; arg[1] = carg; arg[2] = csum; arg[3] = NULL; #if 0 printf("%d,%d\n", n, sum); printf("execing %d %s %s=%s %s\n", 3, arg[0], carg, arg[1], arg[2]); #endif fflush(stdout); result = execve(arg[0], arg, env); if (result < 0) perror("ERROR in execve"); }
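/* Illustrative note -- not part of the test above. Invoked with no arguments, the test
 * re-execs itself ten times, threading the pair (n, sum) through argv: (9, 10) ->
 * (8, 19) -> ... -> (0, 55), so the final process prints " = 55", i.e. the closed form
 * N * (N + 1) / 2 for N = 10. A minimal cross-check of that arithmetic:
 */
#include <assert.h>

static int sum_1_to_n(int n)
{
    return n * (n + 1) / 2;             /* closed form for 1 + 2 + ... + n */
}

/* Example: assert(sum_1_to_n(10) == 55); matches the " = 55" printed by the last exec. */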
157846.c
/* crypto/des/des_old.c -*- mode:C; c-file-style: "eay" -*- */ /* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING * * The function names in here are deprecated and are only present to * provide an interface compatible with OpenSSL 0.9.6c. OpenSSL now * provides functions where "des_" has been replaced with "DES_" in * the names, to make it possible to make incompatible changes that * are needed for C type security and other stuff. * * Please consider starting to use the DES_ functions rather than the * des_ ones. The des_ functions will dissapear completely before * OpenSSL 1.0! * * WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING */ /* Written by Richard Levitte ([email protected]) for the OpenSSL * project 2001. */ /* ==================================================================== * Copyright (c) 1998-2001 The OpenSSL Project. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. All advertising materials mentioning features or use of this * software must display the following acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" * * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to * endorse or promote products derived from this software without * prior written permission. For written permission, please contact * [email protected]. * * 5. Products derived from this software may not be called "OpenSSL" * nor may "OpenSSL" appear in their names without prior written * permission of the OpenSSL Project. * * 6. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by the OpenSSL Project * for use in the OpenSSL Toolkit (http://www.openssl.org/)" * * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. * ==================================================================== * * This product includes cryptographic software written by Eric Young * ([email protected]). This product includes software written by Tim * Hudson ([email protected]). * */ #undef OPENSSL_DES_LIBDES_COMPATIBILITY #include <openssl/des.h> #include <openssl/rand.h> void _ossl_096_des_random_seed(DES_cblock *key) { RAND_seed(key, sizeof(DES_cblock)); }
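/* Illustrative only -- not part of des_old.c. Per the deprecation warning above, new
 * code should use the DES_-prefixed API rather than the des_ names. A hedged sketch,
 * assuming the classic DES_random_key()/DES_set_key_checked() signatures from
 * <openssl/des.h>; the helper name make_des_key() is made up for the example.
 */
#include <openssl/des.h>

static int make_des_key(DES_cblock *key, DES_key_schedule *sched)
{
    if (DES_random_key(key) != 1) {                 /* parity-adjusted, non-weak key */
        return 0;
    }
    return (DES_set_key_checked(key, sched) == 0);  /* 0 means the key was accepted  */
}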
631687.c
/* { dg-do run } */ /* { dg-require-effective-target avx2 } */ /* { dg-options "-mavx2 -O3 -fopenmp-simd -fdump-tree-vect-details" } */ #include "avx2-check.h" #define N 64 float a[N]; int c[N]; __attribute__ ((noinline)) int foo () { int i, res = 0; #pragma omp simd safelen(8) for (i=0; i<N; i++) { float t = a[i]; if (t > 0.0f & t < 1.0e+2f) if (c[i] != 0) res += 1; } return res; } __attribute__ ((noinline)) float hundred () { return 100.0f; } static void avx2_test (void) { int i, res; for (i=0; i<N; i++) { c[i] = i % 4; if (i < N / 2) a[i] = (float) (i + 1); else a[i] = (float) i + hundred (); } if (foo () != 24) abort (); } /* { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" } } */
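/* Illustrative note -- not part of the testcase above. Outside the DejaGnu harness, the
 * same expectation can be reproduced by compiling with the dg-options and grepping the
 * vectorizer dump (the exact dump-file number in the name varies between GCC versions):
 *
 *   gcc -mavx2 -O3 -fopenmp-simd -fdump-tree-vect-details thistest.c
 *   grep -c "vectorized 1 loops" thistest.c.*.vect      # expected: 1
 *
 * The runtime part (avx2_test) additionally needs an AVX2-capable host, which is what
 * the dg-require-effective-target line encodes; file and dump names here are assumed.
 */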
905663.c
/********************************************************************** TI TMS9927 and compatible CRT controller emulation Copyright Nicola Salmoria and the MAME Team. Visit http://mamedev.org for licensing and usage restrictions. **********************************************************************/ #include "emu.h" #include "tms9927.h" static const UINT8 chars_per_row_value[8] = { 20, 32, 40, 64, 72, 80, 96, 132 }; static const UINT8 skew_bits_value[4] = { 0, 1, 2, 2 }; #define HCOUNT(t) ((t)->reg[0] + 1) #define INTERLACED(t) (((t)->reg[1] >> 7) & 0x01) #define HSYNC_WIDTH(t) (((t)->reg[1] >> 4) & 0x0f) #define HSYNC_DELAY(t) (((t)->reg[1] >> 0) & 0x07) #define SCANS_PER_DATA_ROW(t) ((((t)->reg[2] >> 3) & 0x0f) + 1) #define CHARS_PER_DATA_ROW(t) (chars_per_row_value[((t)->reg[2] >> 0) & 0x07]) #define SKEW_BITS(t) (skew_bits_value[((t)->reg[3] >> 6) & 0x03]) #define DATA_ROWS_PER_FRAME(t) ((((t)->reg[3] >> 0) & 0x3f) + 1) #define SCAN_LINES_PER_FRAME(t) (((t)->reg[4] * 2) + 256) #define VERTICAL_DATA_START(t) ((t)->reg[5]) #define LAST_DISP_DATA_ROW(t) ((t)->reg[6] & 0x3f) #define CURSOR_CHAR_ADDRESS(t) ((t)->reg[7]) #define CURSOR_ROW_ADDRESS(t) ((t)->reg[8] & 0x3f) typedef struct _tms9927_state tms9927_state; struct _tms9927_state { /* driver-controlled state */ const tms9927_interface *intf; screen_device *screen; const UINT8 *selfload; /* live state */ UINT32 clock; UINT8 reg[9]; UINT8 start_datarow; UINT8 reset; UINT8 hpixels_per_column; /* derived state; no need to save */ UINT8 valid_config; UINT16 total_hpix, total_vpix; UINT16 visible_hpix, visible_vpix; }; static void tms9927_state_save_postload(tms9927_state *state); static void recompute_parameters(tms9927_state *tms, int postload); const tms9927_interface tms9927_null_interface = { 0 }; /* makes sure that the passed in device is the right type */ INLINE tms9927_state *get_safe_token(device_t *device) { assert(device != NULL); assert(device->type() == TMS9927); return (tms9927_state *)downcast<legacy_device_base *>(device)->token(); } static void tms9927_state_save_postload(tms9927_state *state) { recompute_parameters(state, TRUE); } static void generic_access(device_t *device, offs_t offset) { tms9927_state *tms = get_safe_token(device); switch (offset) { case 0x07: /* Processor Self Load */ case 0x0f: /* Non-processor self-load */ if (tms->selfload != NULL) { int cur; for (cur = 0; cur < 7; cur++) tms9927_w(device, cur, tms->selfload[cur]); for (cur = 0; cur < 1; cur++) tms9927_w(device, cur + 0xc, tms->selfload[cur + 7]); } else popmessage("tms9927: self-load initiated with no PROM!"); /* processor self-load waits with reset enabled; non-processor just goes ahead */ tms->reset = (offset == 0x07); break; case 0x0a: /* Reset */ if (!tms->reset) { tms->screen->update_now(); tms->reset = TRUE; } break; case 0x0b: /* Up scroll */ mame_printf_debug("Up scroll\n"); tms->screen->update_now(); tms->start_datarow = (tms->start_datarow + 1) % DATA_ROWS_PER_FRAME(tms); break; case 0x0e: /* Start timing chain */ if (tms->reset) { tms->screen->update_now(); tms->reset = FALSE; recompute_parameters(tms, FALSE); } break; } } WRITE8_DEVICE_HANDLER( tms9927_w ) { tms9927_state *tms = get_safe_token(device); switch (offset) { case 0x00: /* HORIZONTAL CHARACTER COUNT */ case 0x01: /* INTERLACED / HSYNC WIDTH / HSYNC DELAY */ case 0x02: /* SCANS PER DATA ROW / CHARACTERS PER DATA ROW */ case 0x03: /* SKEW BITS / DATA ROWS PER FRAME */ case 0x04: /* SCAN LINES / FRAME */ case 0x05: /* VERTICAL DATA START */ case 0x06: /* LAST DISPLAYED DATA 
ROW */ tms->reg[offset] = data; recompute_parameters(tms, FALSE); break; case 0x0c: /* LOAD CURSOR CHARACTER ADDRESS */ case 0x0d: /* LOAD CURSOR ROW ADDRESS */ mame_printf_debug("Cursor address changed\n"); tms->reg[offset - 0x0c + 7] = data; recompute_parameters(tms, FALSE); break; default: generic_access(device, offset); break; } } READ8_DEVICE_HANDLER( tms9927_r ) { tms9927_state *tms = get_safe_token(device); switch (offset) { case 0x08: /* READ CURSOR CHARACTER ADDRESS */ case 0x09: /* READ CURSOR ROW ADDRESS */ return tms->reg[offset - 0x08 + 7]; default: generic_access(device, offset); break; } return 0xff; } int tms9927_screen_reset(device_t *device) { tms9927_state *tms = get_safe_token(device); return tms->reset; } int tms9927_upscroll_offset(device_t *device) { tms9927_state *tms = get_safe_token(device); return tms->start_datarow; } int tms9927_cursor_bounds(device_t *device, rectangle *bounds) { tms9927_state *tms = get_safe_token(device); int cursorx = CURSOR_CHAR_ADDRESS(tms); int cursory = CURSOR_ROW_ADDRESS(tms); bounds->min_x = cursorx * tms->hpixels_per_column; bounds->max_x = bounds->min_x + tms->hpixels_per_column - 1; bounds->min_y = cursory * SCANS_PER_DATA_ROW(tms); bounds->max_y = bounds->min_y + SCANS_PER_DATA_ROW(tms) - 1; return (cursorx < HCOUNT(tms) && cursory <= LAST_DISP_DATA_ROW(tms)); } static void recompute_parameters(tms9927_state *tms, int postload) { UINT16 offset_hpix, offset_vpix; attoseconds_t refresh; rectangle visarea; if (tms->intf == NULL || tms->reset) return; /* compute the screen sizes */ tms->total_hpix = HCOUNT(tms) * tms->hpixels_per_column; tms->total_vpix = SCAN_LINES_PER_FRAME(tms); /* determine the visible area, avoid division by 0 */ tms->visible_hpix = CHARS_PER_DATA_ROW(tms) * tms->hpixels_per_column; tms->visible_vpix = (LAST_DISP_DATA_ROW(tms) + 1) * SCANS_PER_DATA_ROW(tms); /* determine the horizontal/vertical offsets */ offset_hpix = HSYNC_DELAY(tms) * tms->hpixels_per_column; offset_vpix = VERTICAL_DATA_START(tms); mame_printf_debug("TMS9937: Total = %dx%d, Visible = %dx%d, Offset=%dx%d, Skew=%d\n", tms->total_hpix, tms->total_vpix, tms->visible_hpix, tms->visible_vpix, offset_hpix, offset_vpix, SKEW_BITS(tms)); /* see if it all makes sense */ tms->valid_config = TRUE; if (tms->visible_hpix > tms->total_hpix || tms->visible_vpix > tms->total_vpix) { tms->valid_config = FALSE; logerror("tms9927: invalid visible size (%dx%d) versus total size (%dx%d)\n", tms->visible_hpix, tms->visible_vpix, tms->total_hpix, tms->total_vpix); } /* update */ if (!tms->valid_config) return; /* create a visible area */ /* fix me: how do the offsets fit in here? 
*/ visarea.min_x = 0; visarea.max_x = tms->visible_hpix - 1; visarea.min_y = 0; visarea.max_y = tms->visible_vpix - 1; refresh = HZ_TO_ATTOSECONDS(tms->clock) * tms->total_hpix * tms->total_vpix; tms->screen->configure(tms->total_hpix, tms->total_vpix, visarea, refresh); } /* device interface */ static DEVICE_START( tms9927 ) { tms9927_state *tms = get_safe_token(device); /* validate arguments */ assert(device != NULL); tms->intf = (const tms9927_interface *)device->static_config(); if (tms->intf != NULL) { assert(device->clock() > 0); assert(tms->intf->hpixels_per_column > 0); /* copy the initial parameters */ tms->clock = device->clock(); tms->hpixels_per_column = tms->intf->hpixels_per_column; /* get the screen device */ tms->screen = downcast<screen_device *>(device->machine().device(tms->intf->screen_tag)); assert(tms->screen != NULL); /* get the self-load PROM */ if (tms->intf->selfload_region != NULL) { tms->selfload = device->machine().region(tms->intf->selfload_region)->base(); assert(tms->selfload != NULL); } } /* register for state saving */ device->machine().save().register_postload(save_prepost_delegate(FUNC(tms9927_state_save_postload), tms)); device->save_item(NAME(tms->clock)); device->save_item(NAME(tms->reg)); device->save_item(NAME(tms->start_datarow)); device->save_item(NAME(tms->reset)); device->save_item(NAME(tms->hpixels_per_column)); } static DEVICE_STOP( tms9927 ) { tms9927_state *tms = get_safe_token(device); mame_printf_debug("TMS9937: Final params: (%d, %d, %d, %d, %d, %d, %d)\n", tms->clock, tms->total_hpix, 0, tms->visible_hpix, tms->total_vpix, 0, tms->visible_vpix); } static DEVICE_RESET( tms9927 ) { } DEVICE_GET_INFO( tms9927 ) { switch (state) { /* --- the following bits of info are returned as 64-bit signed integers --- */ case DEVINFO_INT_TOKEN_BYTES: info->i = sizeof(tms9927_state); break; case DEVINFO_INT_INLINE_CONFIG_BYTES: info->i = 0; break; /* --- the following bits of info are returned as pointers to functions --- */ case DEVINFO_FCT_START: info->start = DEVICE_START_NAME(tms9927); break; case DEVINFO_FCT_STOP: info->stop = DEVICE_STOP_NAME(tms9927); break; case DEVINFO_FCT_RESET: info->reset = DEVICE_RESET_NAME(tms9927); break; /* --- the following bits of info are returned as NULL-terminated strings --- */ case DEVINFO_STR_NAME: strcpy(info->s, "TMS9927"); break; case DEVINFO_STR_FAMILY: strcpy(info->s, "TMS9927 CRTC"); break; case DEVINFO_STR_VERSION: strcpy(info->s, "1.0"); break; case DEVINFO_STR_SOURCE_FILE: strcpy(info->s, __FILE__); break; case DEVINFO_STR_CREDITS: strcpy(info->s, "Copyright Nicola Salmoria and the MAME Team"); break; } } DEVICE_GET_INFO( crt5027 ) { switch (state) { case DEVINFO_STR_NAME: strcpy(info->s, "CRT5027"); break; default: DEVICE_GET_INFO_CALL(tms9927); break; } } DEVICE_GET_INFO( crt5037 ) { switch (state) { case DEVINFO_STR_NAME: strcpy(info->s, "CRT5037"); break; default: DEVICE_GET_INFO_CALL(tms9927); break; } } DEVICE_GET_INFO( crt5057 ) { switch (state) { case DEVINFO_STR_NAME: strcpy(info->s, "CRT5057"); break; default: DEVICE_GET_INFO_CALL(tms9927); break; } } DEFINE_LEGACY_DEVICE(TMS9927, tms9927); DEFINE_LEGACY_DEVICE(CRT5027, crt5027); DEFINE_LEGACY_DEVICE(CRT5037, crt5037); DEFINE_LEGACY_DEVICE(CRT5057, crt5057);
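/* Illustrative only -- not part of the device implementation above. A standalone sketch
 * of the screen-timing arithmetic performed by recompute_parameters(), using the same
 * register encodings as the macros above; the register values and the 8-pixel character
 * cell are made up for the example and describe an 80x24 character screen with a
 * 10-scanline character cell.
 */
#include <stdio.h>

int main(void)
{
    static const unsigned char chars_per_row[8] = { 20, 32, 40, 64, 72, 80, 96, 132 };
    unsigned char reg[9] = { 99, 0, 0x4D, 0, 0, 0, 23, 0, 0 };   /* example values      */
    unsigned hpixels_per_column = 8;                             /* assumed cell width  */

    unsigned total_hpix    = (reg[0] + 1) * hpixels_per_column;          /* HCOUNT      */
    unsigned total_vpix    = (reg[4] * 2) + 256;            /* SCAN_LINES_PER_FRAME     */
    unsigned visible_hpix  = chars_per_row[reg[2] & 0x07] * hpixels_per_column;
    unsigned scans_per_row = ((reg[2] >> 3) & 0x0f) + 1;    /* SCANS_PER_DATA_ROW       */
    unsigned visible_vpix  = ((reg[6] & 0x3f) + 1) * scans_per_row;

    printf("total %ux%u, visible %ux%u\n",                  /* prints 800x256, 640x240  */
           total_hpix, total_vpix, visible_hpix, visible_vpix);
    return 0;
}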
922125.c
/* * Copyright (c) 2021 Sung Ho Park and CSOS * * SPDX-License-Identifier: Apache-2.0 */ #include <ubinos.h> #if (INCLUDE__ARDUINOCORE_API == 1) #if (UBINOS__BSP__BOARD_MODEL == UBINOS__BSP__BOARD_MODEL__STM3221GEVAL) #if (UBINOS__BSP__BOARD_VARIATION__STM3221GEVAL == 1) #include <Arduino.h> #include <ubinos/bsp/arch.h> #include "_variant.h" arduino_d_pin_t const _g_d_pin_map[NUM_DIGITAL_PINS] = { {GPIOG, GPIO_PIN_6 , 0, 0 , 0 , 0 , 0 , 1}, // D0 {GPIOG, GPIO_PIN_8 , 0, 0 , 0 , 0 , 0 , 1}, // D1 }; arduino_a_pin_t const _g_a_pin_map[NUM_ANALOG_INPUTS] = { }; arduino_tone_t _arduino_tone; void initVariant(void) { // for digital io __HAL_RCC_GPIOG_CLK_ENABLE(); // for analog in // for analog out (pwm) // for tone __HAL_RCC_TIM3_CLK_ENABLE(); _arduino_tone.timer_initiated = 0; _arduino_tone.pin_initiated = 0; } #endif /* (UBINOS__BSP__BOARD_VARIATION__STM3221GEVAL == 1) */ #endif /* (UBINOS__BSP__BOARD_MODEL == UBINOS__BSP__BOARD_MODEL__STM3221GEVAL) */ #endif /* (INCLUDE__ARDUINOCORE_API == 1) */
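/* Illustrative usage sketch, not part of the variant file above: with the
   _g_d_pin_map table, Arduino digital pin 0 resolves to GPIOG pin 6 and
   digital pin 1 to GPIOG pin 8, so a user sketch can drive them through the
   standard Arduino digital I/O API.  It is kept under "#if 0" because
   setup()/loop() belong to the application, not to the variant file. */
#if 0
#include <Arduino.h>

void setup(void)
{
    pinMode(0, OUTPUT);       /* D0 -> GPIOG, GPIO_PIN_6 */
    pinMode(1, INPUT_PULLUP); /* D1 -> GPIOG, GPIO_PIN_8 */
}

void loop(void)
{
    /* mirror the state of D1 onto D0 */
    digitalWrite(0, digitalRead(1));
}
#endif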
296565.c
/* * Copyright (c) 2012-2017 The Khronos Group Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if defined(EXPERIMENTAL_USE_DOT) #include "vx_internal.h" #include "vx_type_pairs.h" VX_API_ENTRY vx_status VX_API_CALL vxExportGraphToDot(vx_graph graph, vx_char dotfile[], vx_bool showData) { vx_status status = VX_ERROR_INVALID_PARAMETERS; if (ownIsValidSpecificReference(&graph->base, VX_TYPE_GRAPH) == vx_true_e) { FILE *fp = fopen(dotfile, "w+"); if (fp) { vx_uint32 n, p, n2, d; vx_uint32 num_next, next_nodes[VX_INT_MAX_REF]; vx_uint32 num_last, last_nodes[VX_INT_MAX_REF]; vx_uint32 num_left, left_nodes[VX_INT_MAX_REF]; vx_uint32 dep_nodes[VX_INT_MAX_REF]; vx_reference_t *data[VX_INT_MAX_REF]; vx_uint32 num_data = 0u; status = VX_SUCCESS; fprintf(fp, "digraph {\n"); fprintf(fp, "\tsize=4;\n"); fprintf(fp, "\trank=LR;\n"); fprintf(fp, "\tnode [shape=oval style=filled fillcolor=red fontsize=27];\n"); for (n = 0; n < graph->numNodes; n++) { vx_node_t *node = graph->nodes[n]; fprintf(fp, "\tN%u [label=\"N%u\\n%s\"];\n", n, n, node->kernel->name); if (showData) { for (p = 0u; p < node->kernel->signature.num_parameters; p++) { if (node->parameters[p] == NULL) continue; for (d = 0u; d < num_data; d++) { if (data[d] == node->parameters[p]) break; } if (d == num_data) { // new reference added to data list data[num_data++] = node->parameters[p]; } } } } if (showData) { for (d = 0u; d < num_data; d++) { vx_int32 i = ownStringFromType(data[d]->type); if (data[d] == NULL) continue; if (data[d]->type == VX_TYPE_IMAGE) { vx_image_t *image = (vx_image_t *)data[d]; vx_char fcc[5]; strncpy(fcc, (char *)&image->format, 4); fcc[4] = '\0'; fprintf(fp, "\tD%u [shape=box label=\"%ux%u\\n%4s\"];\n", d, image->width, image->height, fcc); } else if (data[d]->type == VX_TYPE_ARRAY) { vx_array_t *arr = (vx_array_t *)data[d]; if (arr->item_type == VX_TYPE_CHAR || arr->item_size == sizeof(char)) fprintf(fp, "\tD%u [shape=box label=\"\\\"%s\\\"\"];\n", d, arr->memory.ptrs[0]); else fprintf(fp, "\tD%u [shape=box label=\"%s\"];\n", d, type_pairs[i].name); } else if (data[d]->type == VX_TYPE_PYRAMID) { vx_pyramid_t *pyr = (vx_pyramid_t *)data[d]; fprintf(fp, "\tD%u [shape=triangle label=\"%lfx"VX_FMT_REF"\\nPyramid\"];\n", d, pyr->scale, pyr->levels); } else { fprintf(fp, "\tD%u [shape=box label=\"%s\"];\n", d, type_pairs[i].name); } } } ownClearVisitation(graph); ownClearExecution(graph); memcpy(next_nodes, graph->heads, graph->numHeads * sizeof(graph->heads[0])); num_next = graph->numHeads; num_last = 0; num_left = 0; do { for (n = 0; n < num_next; n++) { /* for each head, start tracing the graph */ vx_node_t *node = graph->nodes[next_nodes[n]]; if (graph->nodes[next_nodes[n]]->executed == vx_true_e) continue; for (p = 0; p < node->kernel->signature.num_parameters; p++) { vx_uint32 count = dimof(next_nodes); if (showData && node->kernel->signature.directions[p] == VX_INPUT) { ownFindNodesWithReference(graph,node->parameters[p], NULL, &count,VX_OUTPUT); if (count > 0) continue; for (d = 0u; d < num_data; d++) if (data[d] == 
node->parameters[p]) break; if (d == num_data) continue; // ref not found fprintf(fp, "\tD%u -> N%u;\n", d, next_nodes[n]); } else if (node->kernel->signature.directions[p] == VX_OUTPUT) { status = ownFindNodesWithReference(graph, node->parameters[p], dep_nodes, &count, VX_INPUT); //printf("N%u has %u dep nodes on parameter[%u], %d\n", next_nodes[n], count, p, status); for (n2 = 0; status == VX_SUCCESS && n2 < count; n2++) { if (showData) { for (d = 0u; d < num_data; d++) if (data[d] == node->parameters[p]) break; fprintf(fp, "\tN%u -> D%u -> N%u;\n", next_nodes[n], d, dep_nodes[n2]); } else { fprintf(fp, "\tN%u -> N%u;\n", next_nodes[n], dep_nodes[n2]); } } } } } memcpy(last_nodes, next_nodes, num_next * sizeof(next_nodes[0])); num_last = num_next; num_next = 0; ownFindNextNodes(graph, last_nodes, num_last, next_nodes, &num_next, left_nodes, &num_left); } while (num_next > 0); ownClearVisitation(graph); ownClearExecution(graph); fprintf(fp, "}\n"); fclose(fp); } else { VX_PRINT(VX_ZONE_ERROR, "Failed to open file for writing!\n"); } } else { VX_PRINT(VX_ZONE_ERROR, "Not a graph!\n"); } return status; } #endif
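/* Usage sketch (illustrative, not part of the original source): exporting a
   graph once it has been built.  The guard mirrors the one above, since the
   API only exists when EXPERIMENTAL_USE_DOT is defined; the helper name and
   the "graph.dot" file name are arbitrary choices for the example. */
#if defined(EXPERIMENTAL_USE_DOT)
static vx_status sketch_dump_graph_to_dot(vx_graph graph)
{
    vx_char dotfile[] = "graph.dot";

    /* showData = vx_true_e also emits the D<n> boxes for images, arrays,
       pyramids, etc., and the N<n> -> D<n> -> N<m> edges between them;
       vx_false_e keeps only the node-to-node edges. */
    vx_status status = vxExportGraphToDot(graph, dotfile, vx_true_e);

    /* The resulting file can then be rendered with Graphviz,
       e.g. "dot -Tpng graph.dot -o graph.png". */
    return status;
}
#endif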
884912.c
/*============================================================ ** ** Source : test.c ** ** Purpose: Test for FormatMessageW() function ** ** ** Copyright (c) 2006 Microsoft Corporation. All rights reserved. ** ** The use and distribution terms for this software are contained in the file ** named license.txt, which can be found in the root of this distribution. ** By using this software in any fashion, you are agreeing to be bound by the ** terms of this license. ** ** You must not remove this notice, or any other, from this software. ** ** **=========================================================*/ #define UNICODE #include <palsuite.h> int __cdecl main(int argc, char *argv[]) { WCHAR * TheString; LPWSTR OutBuffer; WCHAR* TheArray[3]; int ReturnResult; /* * Initialize the PAL and return FAILURE if this fails */ if(0 != (PAL_Initialize(argc, argv))) { return FAIL; } TheString = convert("Pal %1 %2 %3 Testing"); TheArray[0] = convert("Foo"); TheArray[1] = convert("Bar"); TheArray[2] = convert("FooBar"); /* OutBuffer will be allocated in the function, if the flag is working properly. */ ReturnResult = FormatMessage( FORMAT_MESSAGE_FROM_STRING | FORMAT_MESSAGE_ARGUMENT_ARRAY | FORMAT_MESSAGE_ALLOCATE_BUFFER, /* source and processing options */ TheString, /* message source */ 0, /* message identifier */ 0, /* language identifier */ (LPWSTR)&OutBuffer, /* message buffer */ 0, /* maximum size of message buffer */ (va_list *) TheArray /* array of message inserts */ ); if(ReturnResult == 0) { Fail("ERROR: The return value was 0, which indicates failure. " "The function failed when trying to Format a simple string, " "using the ALLOCATE_BUFFER flag."); } if(memcmp(OutBuffer, convert("Pal Foo Bar FooBar Testing"), wcslen(OutBuffer)*2+2) != 0) { Fail("ERROR: Since the FORMAT_MESSAGE_ALLOCATE_BUFFER flag was set, " "the result should have been 'Pal Foo Bar FooBar Testing' but " "was really '%s'.",convertC(OutBuffer)); } PAL_Terminate(); return PASS; }
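/* Illustrative sketch, not part of the original test: the same formatting
   done into a caller-provided buffer instead of relying on
   FORMAT_MESSAGE_ALLOCATE_BUFFER (under Win32 semantics an ALLOCATE_BUFFER
   result would normally be released with LocalFree; the short-lived test
   above simply lets it go).  The 128-WCHAR capacity and the helper name are
   arbitrary choices for the example. */
int SketchFormatIntoFixedBuffer(WCHAR *TheString, WCHAR **TheArray)
{
    WCHAR FixedBuffer[128];

    /* Without FORMAT_MESSAGE_ALLOCATE_BUFFER the message buffer argument is
       the buffer itself and the size argument is its capacity in WCHARs. */
    return FormatMessage(
               FORMAT_MESSAGE_FROM_STRING |
               FORMAT_MESSAGE_ARGUMENT_ARRAY, /* source and processing options */
               TheString,                     /* message source */
               0,                             /* message identifier */
               0,                             /* language identifier */
               FixedBuffer,                   /* message buffer */
               128,                           /* maximum size of message buffer */
               (va_list *) TheArray);         /* array of message inserts */
}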
28396.c
/** ****************************************************************************** * @file stm32f4xx_hal_uart.c * @author MCD Application Team * @version V1.6.0 * @date 04-November-2016 * @brief UART HAL module driver. * This file provides firmware functions to manage the following * functionalities of the Universal Asynchronous Receiver Transmitter (UART) peripheral: * + Initialization and de-initialization functions * + IO operation functions * + Peripheral Control functions * + Peripheral State and Errors functions * @verbatim ============================================================================== ##### How to use this driver ##### ============================================================================== [..] The UART HAL driver can be used as follows: (#) Declare a UART_HandleTypeDef handle structure. (#) Initialize the UART low level resources by implementing the HAL_UART_MspInit() API: (##) Enable the USARTx interface clock. (##) UART pins configuration: (+++) Enable the clock for the UART GPIOs. (+++) Configure these UART pins as alternate function pull-up. (##) NVIC configuration if you need to use interrupt process (HAL_UART_Transmit_IT() and HAL_UART_Receive_IT() APIs): (+++) Configure the USARTx interrupt priority. (+++) Enable the NVIC USART IRQ handle. (##) DMA Configuration if you need to use DMA process (HAL_UART_Transmit_DMA() and HAL_UART_Receive_DMA() APIs): (+++) Declare a DMA handle structure for the Tx/Rx stream. (+++) Enable the DMAx interface clock. (+++) Configure the declared DMA handle structure with the required Tx/Rx parameters. (+++) Configure the DMA Tx/Rx Stream. (+++) Associate the initialized DMA handle to the UART DMA Tx/Rx handle. (+++) Configure the priority and enable the NVIC for the transfer complete interrupt on the DMA Tx/Rx Stream. (#) Program the Baud Rate, Word Length, Stop Bit, Parity, Hardware flow control and Mode(Receiver/Transmitter) in the Init structure. (#) For the UART asynchronous mode, initialize the UART registers by calling the HAL_UART_Init() API. (#) For the UART Half duplex mode, initialize the UART registers by calling the HAL_HalfDuplex_Init() API. (#) For the LIN mode, initialize the UART registers by calling the HAL_LIN_Init() API. (#) For the Multi-Processor mode, initialize the UART registers by calling the HAL_MultiProcessor_Init() API. [..] (@) The specific UART interrupts (Transmission complete interrupt, RXNE interrupt and Error Interrupts) will be managed using the macros __HAL_UART_ENABLE_IT() and __HAL_UART_DISABLE_IT() inside the transmit and receive process. [..] (@) These APIs (HAL_UART_Init() and HAL_HalfDuplex_Init()) configure also the low level Hardware GPIO, CLOCK, CORTEX...etc) by calling the customized HAL_UART_MspInit() API. [..] Three operation modes are available within this driver : *** Polling mode IO operation *** ================================= [..] (+) Send an amount of data in blocking mode using HAL_UART_Transmit() (+) Receive an amount of data in blocking mode using HAL_UART_Receive() *** Interrupt mode IO operation *** =================================== [..] 
(+) Send an amount of data in non blocking mode using HAL_UART_Transmit_IT() (+) At transmission end of transfer HAL_UART_TxCpltCallback is executed and user can add his own code by customization of function pointer HAL_UART_TxCpltCallback (+) Receive an amount of data in non blocking mode using HAL_UART_Receive_IT() (+) At reception end of transfer HAL_UART_RxCpltCallback is executed and user can add his own code by customization of function pointer HAL_UART_RxCpltCallback (+) In case of transfer Error, HAL_UART_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_UART_ErrorCallback *** DMA mode IO operation *** ============================== [..] (+) Send an amount of data in non blocking mode (DMA) using HAL_UART_Transmit_DMA() (+) At transmission end of half transfer HAL_UART_TxHalfCpltCallback is executed and user can add his own code by customization of function pointer HAL_UART_TxHalfCpltCallback (+) At transmission end of transfer HAL_UART_TxCpltCallback is executed and user can add his own code by customization of function pointer HAL_UART_TxCpltCallback (+) Receive an amount of data in non blocking mode (DMA) using HAL_UART_Receive_DMA() (+) At reception end of half transfer HAL_UART_RxHalfCpltCallback is executed and user can add his own code by customization of function pointer HAL_UART_RxHalfCpltCallback (+) At reception end of transfer HAL_UART_RxCpltCallback is executed and user can add his own code by customization of function pointer HAL_UART_RxCpltCallback (+) In case of transfer Error, HAL_UART_ErrorCallback() function is executed and user can add his own code by customization of function pointer HAL_UART_ErrorCallback (+) Pause the DMA Transfer using HAL_UART_DMAPause() (+) Resume the DMA Transfer using HAL_UART_DMAResume() (+) Stop the DMA Transfer using HAL_UART_DMAStop() *** UART HAL driver macros list *** ============================================= [..] Below the list of most used macros in UART HAL driver. (+) __HAL_UART_ENABLE: Enable the UART peripheral (+) __HAL_UART_DISABLE: Disable the UART peripheral (+) __HAL_UART_GET_FLAG : Check whether the specified UART flag is set or not (+) __HAL_UART_CLEAR_FLAG : Clear the specified UART pending flag (+) __HAL_UART_ENABLE_IT: Enable the specified UART interrupt (+) __HAL_UART_DISABLE_IT: Disable the specified UART interrupt (+) __HAL_UART_GET_IT_SOURCE: Check whether the specified UART interrupt has occurred or not [..] (@) You can refer to the UART HAL driver header file for more useful macros @endverbatim ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_hal.h" /** @addtogroup STM32F4xx_HAL_Driver * @{ */ /** @defgroup UART UART * @brief HAL UART module driver * @{ */ #ifdef HAL_UART_MODULE_ENABLED /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /** @addtogroup UART_Private_Constants * @{ */ /** * @} */ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /** @addtogroup UART_Private_Functions UART Private Functions * @{ */ static void UART_EndTxTransfer(UART_HandleTypeDef *huart); static void UART_EndRxTransfer(UART_HandleTypeDef *huart); static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma); static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma); static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma); static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma); static void UART_DMAError(DMA_HandleTypeDef *hdma); static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma); static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma); static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma); static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma); static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma); static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart); static HAL_StatusTypeDef UART_EndTransmit_IT(UART_HandleTypeDef *huart); static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart); static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, uint32_t Tickstart, uint32_t Timeout); static void UART_SetConfig (UART_HandleTypeDef *huart); /** * @} */ /* Exported functions ---------------------------------------------------------*/ /** @defgroup UART_Exported_Functions UART Exported Functions * @{ */ /** @defgroup UART_Exported_Functions_Group1 Initialization and de-initialization functions * @brief Initialization and Configuration functions * @verbatim =============================================================================== ##### Initialization and Configuration functions ##### =============================================================================== [..] This subsection provides a set of functions allowing to initialize the USARTx or the UARTy in asynchronous mode. 
(+) For the asynchronous mode only these parameters can be configured: (++) Baud Rate (++) Word Length (++) Stop Bit (++) Parity: If the parity is enabled, then the MSB bit of the data written in the data register is transmitted but is changed by the parity bit. Depending on the frame length defined by the M bit (8-bits or 9-bits), please refer to Reference manual for possible UART frame formats. (++) Hardware flow control (++) Receiver/transmitter modes (++) Over Sampling Method [..] The HAL_UART_Init(), HAL_HalfDuplex_Init(), HAL_LIN_Init() and HAL_MultiProcessor_Init() APIs follow respectively the UART asynchronous, UART Half duplex, LIN and Multi-Processor configuration procedures (details for the procedures are available in reference manual (RM0329)). @endverbatim * @{ */ /** * @brief Initializes the UART mode according to the specified parameters in * the UART_InitTypeDef and create the associated handle. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Init(UART_HandleTypeDef *huart) { /* Check the UART handle allocation */ if(huart == NULL) { return HAL_ERROR; } /* Check the parameters */ if(huart->Init.HwFlowCtl != UART_HWCONTROL_NONE) { /* The hardware flow control is available only for USART1, USART2, USART3 and USART6 */ assert_param(IS_UART_HWFLOW_INSTANCE(huart->Instance)); assert_param(IS_UART_HARDWARE_FLOW_CONTROL(huart->Init.HwFlowCtl)); } else { assert_param(IS_UART_INSTANCE(huart->Instance)); } assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); if(huart->gState == HAL_UART_STATE_RESET) { /* Allocate lock resource and initialize it */ huart->Lock = HAL_UNLOCKED; /* Init the low level hardware */ HAL_UART_MspInit(huart); } huart->gState = HAL_UART_STATE_BUSY; /* Disable the peripheral */ __HAL_UART_DISABLE(huart); /* Set the UART Communication parameters */ UART_SetConfig(huart); /* In asynchronous mode, the following bits must be kept cleared: - LINEN and CLKEN bits in the USART_CR2 register, - SCEN, HDSEL and IREN bits in the USART_CR3 register.*/ CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); /* Enable the peripheral */ __HAL_UART_ENABLE(huart); /* Initialize the UART state */ huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState= HAL_UART_STATE_READY; huart->RxState= HAL_UART_STATE_READY; return HAL_OK; } /** * @brief Initializes the half-duplex mode according to the specified * parameters in the UART_InitTypeDef and create the associated handle. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @retval HAL status */ HAL_StatusTypeDef HAL_HalfDuplex_Init(UART_HandleTypeDef *huart) { /* Check the UART handle allocation */ if(huart == NULL) { return HAL_ERROR; } /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); if(huart->gState == HAL_UART_STATE_RESET) { /* Allocate lock resource and initialize it */ huart->Lock = HAL_UNLOCKED; /* Init the low level hardware */ HAL_UART_MspInit(huart); } huart->gState = HAL_UART_STATE_BUSY; /* Disable the peripheral */ __HAL_UART_DISABLE(huart); /* Set the UART Communication parameters */ UART_SetConfig(huart); /* In half-duplex mode, the following bits must be kept cleared: - LINEN and CLKEN bits in the USART_CR2 register, - SCEN and IREN bits in the USART_CR3 register.*/ CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); CLEAR_BIT(huart->Instance->CR3, (USART_CR3_IREN | USART_CR3_SCEN)); /* Enable the Half-Duplex mode by setting the HDSEL bit in the CR3 register */ SET_BIT(huart->Instance->CR3, USART_CR3_HDSEL); /* Enable the peripheral */ __HAL_UART_ENABLE(huart); /* Initialize the UART state*/ huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState= HAL_UART_STATE_READY; huart->RxState= HAL_UART_STATE_READY; return HAL_OK; } /** * @brief Initializes the LIN mode according to the specified * parameters in the UART_InitTypeDef and create the associated handle. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param BreakDetectLength: Specifies the LIN break detection length. * This parameter can be one of the following values: * @arg UART_LINBREAKDETECTLENGTH_10B: 10-bit break detection * @arg UART_LINBREAKDETECTLENGTH_11B: 11-bit break detection * @retval HAL status */ HAL_StatusTypeDef HAL_LIN_Init(UART_HandleTypeDef *huart, uint32_t BreakDetectLength) { /* Check the UART handle allocation */ if(huart == NULL) { return HAL_ERROR; } /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); assert_param(IS_UART_LIN_BREAK_DETECT_LENGTH(BreakDetectLength)); assert_param(IS_UART_LIN_WORD_LENGTH(huart->Init.WordLength)); assert_param(IS_UART_LIN_OVERSAMPLING(huart->Init.OverSampling)); if(huart->gState == HAL_UART_STATE_RESET) { /* Allocate lock resource and initialize it */ huart->Lock = HAL_UNLOCKED; /* Init the low level hardware */ HAL_UART_MspInit(huart); } huart->gState = HAL_UART_STATE_BUSY; /* Disable the peripheral */ __HAL_UART_DISABLE(huart); /* Set the UART Communication parameters */ UART_SetConfig(huart); /* In LIN mode, the following bits must be kept cleared: - LINEN and CLKEN bits in the USART_CR2 register, - SCEN and IREN bits in the USART_CR3 register.*/ CLEAR_BIT(huart->Instance->CR2, USART_CR2_CLKEN); CLEAR_BIT(huart->Instance->CR3, (USART_CR3_HDSEL | USART_CR3_IREN | USART_CR3_SCEN)); /* Enable the LIN mode by setting the LINEN bit in the CR2 register */ SET_BIT(huart->Instance->CR2, USART_CR2_LINEN); /* Set the USART LIN Break detection length. 
*/ CLEAR_BIT(huart->Instance->CR2, USART_CR2_LBDL); SET_BIT(huart->Instance->CR2, BreakDetectLength); /* Enable the peripheral */ __HAL_UART_ENABLE(huart); /* Initialize the UART state*/ huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState= HAL_UART_STATE_READY; huart->RxState= HAL_UART_STATE_READY; return HAL_OK; } /** * @brief Initializes the Multi-Processor mode according to the specified * parameters in the UART_InitTypeDef and create the associated handle. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param Address: USART address * @param WakeUpMethod: specifies the USART wake-up method. * This parameter can be one of the following values: * @arg UART_WAKEUPMETHOD_IDLELINE: Wake-up by an idle line detection * @arg UART_WAKEUPMETHOD_ADDRESSMARK: Wake-up by an address mark * @retval HAL status */ HAL_StatusTypeDef HAL_MultiProcessor_Init(UART_HandleTypeDef *huart, uint8_t Address, uint32_t WakeUpMethod) { /* Check the UART handle allocation */ if(huart == NULL) { return HAL_ERROR; } /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); assert_param(IS_UART_WAKEUPMETHOD(WakeUpMethod)); assert_param(IS_UART_ADDRESS(Address)); assert_param(IS_UART_WORD_LENGTH(huart->Init.WordLength)); assert_param(IS_UART_OVERSAMPLING(huart->Init.OverSampling)); if(huart->gState == HAL_UART_STATE_RESET) { /* Allocate lock resource and initialize it */ huart->Lock = HAL_UNLOCKED; /* Init the low level hardware */ HAL_UART_MspInit(huart); } huart->gState = HAL_UART_STATE_BUSY; /* Disable the peripheral */ __HAL_UART_DISABLE(huart); /* Set the UART Communication parameters */ UART_SetConfig(huart); /* In Multi-Processor mode, the following bits must be kept cleared: - LINEN and CLKEN bits in the USART_CR2 register, - SCEN, HDSEL and IREN bits in the USART_CR3 register */ CLEAR_BIT(huart->Instance->CR2, (USART_CR2_LINEN | USART_CR2_CLKEN)); CLEAR_BIT(huart->Instance->CR3, (USART_CR3_SCEN | USART_CR3_HDSEL | USART_CR3_IREN)); /* Clear the USART address */ CLEAR_BIT(huart->Instance->CR2, USART_CR2_ADD); /* Set the USART address node */ SET_BIT(huart->Instance->CR2, Address); /* Set the wake up method by setting the WAKE bit in the CR1 register */ CLEAR_BIT(huart->Instance->CR1, USART_CR1_WAKE); SET_BIT(huart->Instance->CR1, WakeUpMethod); /* Enable the peripheral */ __HAL_UART_ENABLE(huart); /* Initialize the UART state */ huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState= HAL_UART_STATE_READY; huart->RxState= HAL_UART_STATE_READY; return HAL_OK; } /** * @brief DeInitializes the UART peripheral. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_DeInit(UART_HandleTypeDef *huart) { /* Check the UART handle allocation */ if(huart == NULL) { return HAL_ERROR; } /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); huart->gState = HAL_UART_STATE_BUSY; /* DeInit the low level hardware */ HAL_UART_MspDeInit(huart); huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_RESET; huart->RxState = HAL_UART_STATE_RESET; /* Process Lock */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief UART MSP Init. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @retval None */ __weak void HAL_UART_MspInit(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_MspInit could be implemented in the user file */ } /** * @brief UART MSP DeInit. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval None */ __weak void HAL_UART_MspDeInit(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_MspDeInit could be implemented in the user file */ } /** * @} */ /** @defgroup UART_Exported_Functions_Group2 IO operation functions * @brief UART Transmit and Receive functions * @verbatim ============================================================================== ##### IO operation functions ##### ============================================================================== [..] This subsection provides a set of functions allowing to manage the UART asynchronous and Half duplex data transfers. (#) There are two modes of transfer: (++) Blocking mode: The communication is performed in polling mode. The HAL status of all data processing is returned by the same function after finishing transfer. (++) Non blocking mode: The communication is performed using Interrupts or DMA, these APIs return the HAL status. The end of the data processing will be indicated through the dedicated UART IRQ when using Interrupt mode or the DMA IRQ when using DMA mode. The HAL_UART_TxCpltCallback(), HAL_UART_RxCpltCallback() user callbacks will be executed respectively at the end of the transmit or receive process. The HAL_UART_ErrorCallback() user callback will be executed when a communication error is detected. (#) Blocking mode APIs are: (++) HAL_UART_Transmit() (++) HAL_UART_Receive() (#) Non Blocking mode APIs with Interrupt are: (++) HAL_UART_Transmit_IT() (++) HAL_UART_Receive_IT() (++) HAL_UART_IRQHandler() (#) Non Blocking mode functions with DMA are: (++) HAL_UART_Transmit_DMA() (++) HAL_UART_Receive_DMA() (#) A set of Transfer Complete Callbacks are provided in non blocking mode: (++) HAL_UART_TxCpltCallback() (++) HAL_UART_RxCpltCallback() (++) HAL_UART_ErrorCallback() [..] (@) In the Half duplex communication, it is forbidden to run the transmit and receive process in parallel, the UART state HAL_UART_STATE_BUSY_TX_RX can't be useful. @endverbatim * @{ */ /** * @brief Sends an amount of data in blocking mode. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @param pData: Pointer to data buffer * @param Size: Amount of data to be sent * @param Timeout: Timeout duration * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Transmit(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) { uint16_t* tmp; uint32_t tickstart = 0U; /* Check that a Tx process is not already ongoing */ if(huart->gState == HAL_UART_STATE_READY) { if((pData == NULL ) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(huart); huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_BUSY_TX; /* Init tickstart for timeout managment */ tickstart = HAL_GetTick(); huart->TxXferSize = Size; huart->TxXferCount = Size; while(huart->TxXferCount > 0U) { huart->TxXferCount--; if(huart->Init.WordLength == UART_WORDLENGTH_9B) { if(UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } tmp = (uint16_t*) pData; huart->Instance->DR = (*tmp & (uint16_t)0x01FFU); if(huart->Init.Parity == UART_PARITY_NONE) { pData +=2U; } else { pData +=1U; } } else { if(UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TXE, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } huart->Instance->DR = (*pData++ & (uint8_t)0xFFU); } } if(UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_TC, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } /* At end of Tx process, restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Receives an amount of data in blocking mode. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param pData: Pointer to data buffer * @param Size: Amount of data to be received * @param Timeout: Timeout duration * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Receive(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size, uint32_t Timeout) { uint16_t* tmp; uint32_t tickstart = 0U; /* Check that a Rx process is not already ongoing */ if(huart->RxState == HAL_UART_STATE_READY) { if((pData == NULL ) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(huart); huart->ErrorCode = HAL_UART_ERROR_NONE; huart->RxState = HAL_UART_STATE_BUSY_RX; /* Init tickstart for timeout managment */ tickstart = HAL_GetTick(); huart->RxXferSize = Size; huart->RxXferCount = Size; /* Check the remain data to be received */ while(huart->RxXferCount > 0U) { huart->RxXferCount--; if(huart->Init.WordLength == UART_WORDLENGTH_9B) { if(UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } tmp = (uint16_t*) pData; if(huart->Init.Parity == UART_PARITY_NONE) { *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FFU); pData +=2U; } else { *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x00FFU); pData +=1U; } } else { if(UART_WaitOnFlagUntilTimeout(huart, UART_FLAG_RXNE, RESET, tickstart, Timeout) != HAL_OK) { return HAL_TIMEOUT; } if(huart->Init.Parity == UART_PARITY_NONE) { *pData++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FFU); } else { *pData++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x007FU); } } } /* At end of Rx process, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Sends an amount of data in non blocking mode. 
* @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param pData: Pointer to data buffer * @param Size: Amount of data to be sent * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Transmit_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) { /* Check that a Tx process is not already ongoing */ if(huart->gState == HAL_UART_STATE_READY) { if((pData == NULL ) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(huart); huart->pTxBuffPtr = pData; huart->TxXferSize = Size; huart->TxXferCount = Size; huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_BUSY_TX; /* Process Unlocked */ __HAL_UNLOCK(huart); /* Enable the UART Transmit data register empty Interrupt */ SET_BIT(huart->Instance->CR1, USART_CR1_TXEIE); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Receives an amount of data in non blocking mode * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param pData: Pointer to data buffer * @param Size: Amount of data to be received * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Receive_IT(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) { /* Check that a Rx process is not already ongoing */ if(huart->RxState == HAL_UART_STATE_READY) { if((pData == NULL ) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(huart); huart->pRxBuffPtr = pData; huart->RxXferSize = Size; huart->RxXferCount = Size; huart->ErrorCode = HAL_UART_ERROR_NONE; huart->RxState = HAL_UART_STATE_BUSY_RX; /* Process Unlocked */ __HAL_UNLOCK(huart); /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ SET_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Enable the UART Parity Error and Data Register not empty Interrupts */ SET_BIT(huart->Instance->CR1, USART_CR1_PEIE | USART_CR1_RXNEIE); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Sends an amount of data in non blocking mode. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @param pData: Pointer to data buffer * @param Size: Amount of data to be sent * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Transmit_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) { uint32_t *tmp; /* Check that a Tx process is not already ongoing */ if(huart->gState == HAL_UART_STATE_READY) { if((pData == NULL ) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(huart); huart->pTxBuffPtr = pData; huart->TxXferSize = Size; huart->TxXferCount = Size; huart->ErrorCode = HAL_UART_ERROR_NONE; huart->gState = HAL_UART_STATE_BUSY_TX; /* Set the UART DMA transfer complete callback */ huart->hdmatx->XferCpltCallback = UART_DMATransmitCplt; /* Set the UART DMA Half transfer complete callback */ huart->hdmatx->XferHalfCpltCallback = UART_DMATxHalfCplt; /* Set the DMA error callback */ huart->hdmatx->XferErrorCallback = UART_DMAError; /* Set the DMA abort callback */ huart->hdmatx->XferAbortCallback = NULL; /* Enable the UART transmit DMA Stream */ tmp = (uint32_t*)&pData; HAL_DMA_Start_IT(huart->hdmatx, *(uint32_t*)tmp, (uint32_t)&huart->Instance->DR, Size); /* Clear the TC flag in the SR register by writing 0 to it */ __HAL_UART_CLEAR_FLAG(huart, UART_FLAG_TC); /* Process Unlocked */ __HAL_UNLOCK(huart); /* Enable the DMA transfer for transmit request by setting the DMAT bit in the UART CR3 register */ SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Receives an amount of data in non blocking mode. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param pData: Pointer to data buffer * @param Size: Amount of data to be received * @note When the UART parity is enabled (PCE = 1) the data received contain the parity bit. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Receive_DMA(UART_HandleTypeDef *huart, uint8_t *pData, uint16_t Size) { uint32_t *tmp; /* Check that a Rx process is not already ongoing */ if(huart->RxState == HAL_UART_STATE_READY) { if((pData == NULL ) || (Size == 0U)) { return HAL_ERROR; } /* Process Locked */ __HAL_LOCK(huart); huart->pRxBuffPtr = pData; huart->RxXferSize = Size; huart->ErrorCode = HAL_UART_ERROR_NONE; huart->RxState = HAL_UART_STATE_BUSY_RX; /* Set the UART DMA transfer complete callback */ huart->hdmarx->XferCpltCallback = UART_DMAReceiveCplt; /* Set the UART DMA Half transfer complete callback */ huart->hdmarx->XferHalfCpltCallback = UART_DMARxHalfCplt; /* Set the DMA error callback */ huart->hdmarx->XferErrorCallback = UART_DMAError; /* Set the DMA abort callback */ huart->hdmarx->XferAbortCallback = NULL; /* Enable the DMA Stream */ tmp = (uint32_t*)&pData; HAL_DMA_Start_IT(huart->hdmarx, (uint32_t)&huart->Instance->DR, *(uint32_t*)tmp, Size); /* Enable the UART Parity Error Interrupt */ SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); /* Enable the UART Error Interrupt: (Frame error, noise error, overrun error) */ SET_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Enable the DMA transfer for the receiver request by setting the DMAR bit in the UART CR3 register */ SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Pauses the DMA Transfer. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @retval HAL status */ HAL_StatusTypeDef HAL_UART_DMAPause(UART_HandleTypeDef *huart) { uint32_t dmarequest = 0x00U; /* Process Locked */ __HAL_LOCK(huart); dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); if((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) { /* Disable the UART DMA Tx request */ CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); } dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); if((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the UART DMA Rx request */ CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); } /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief Resumes the DMA Transfer. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_DMAResume(UART_HandleTypeDef *huart) { /* Process Locked */ __HAL_LOCK(huart); if(huart->gState == HAL_UART_STATE_BUSY_TX) { /* Enable the UART DMA Tx request */ SET_BIT(huart->Instance->CR3, USART_CR3_DMAT); } if(huart->RxState == HAL_UART_STATE_BUSY_RX) { /* Clear the Overrun flag before resuming the Rx transfer*/ __HAL_UART_CLEAR_OREFLAG(huart); /* Reenable PE and ERR (Frame error, noise error, overrun error) interrupts */ SET_BIT(huart->Instance->CR1, USART_CR1_PEIE); SET_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Enable the UART DMA Rx request */ SET_BIT(huart->Instance->CR3, USART_CR3_DMAR); } /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief Stops the DMA Transfer. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_DMAStop(UART_HandleTypeDef *huart) { uint32_t dmarequest = 0x00U; /* The Lock is not implemented on this API to allow the user application to call the HAL UART API under callbacks HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback(): when calling HAL_DMA_Abort() API the DMA TX/RX Transfer complete interrupt is generated and the correspond call back is executed HAL_UART_TxCpltCallback() / HAL_UART_RxCpltCallback() */ /* Stop UART DMA Tx request if ongoing */ dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); if((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx channel */ if(huart->hdmatx != NULL) { HAL_DMA_Abort(huart->hdmatx); } UART_EndTxTransfer(huart); } /* Stop UART DMA Rx request if ongoing */ dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); if((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx channel */ if(huart->hdmarx != NULL) { HAL_DMA_Abort(huart->hdmarx); } UART_EndRxTransfer(huart); } return HAL_OK; } /** * @brief Abort ongoing transfers (blocking mode). * @param huart UART handle. * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. 
* This procedure performs following operations : * - Disable PPP Interrupts * - Disable the DMA transfer in the peripheral register (if enabled) * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) * - Set handle State to READY * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Abort(UART_HandleTypeDef *huart) { /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the UART DMA Tx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx channel: use blocking DMA Abort API (no callback) */ if(huart->hdmatx != NULL) { /* Set the UART DMA Abort callback to Null. No call back execution at end of DMA abort procedure */ huart->hdmatx->XferAbortCallback = NULL; HAL_DMA_Abort(huart->hdmatx); } } /* Disable the UART DMA Rx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx channel: use blocking DMA Abort API (no callback) */ if(huart->hdmarx != NULL) { /* Set the UART DMA Abort callback to Null. No call back execution at end of DMA abort procedure */ huart->hdmarx->XferAbortCallback = NULL; HAL_DMA_Abort(huart->hdmarx); } } /* Reset Tx and Rx transfer counters */ huart->TxXferCount = 0x00U; huart->RxXferCount = 0x00U; /* Reset ErrorCode */ huart->ErrorCode = HAL_UART_ERROR_NONE; /* Restore huart->RxState and huart->gState to Ready */ huart->RxState = HAL_UART_STATE_READY; huart->gState = HAL_UART_STATE_READY; return HAL_OK; } /** * @brief Abort ongoing Transmit transfer (blocking mode). * @param huart UART handle. * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. * This procedure performs following operations : * - Disable PPP Interrupts * - Disable the DMA transfer in the peripheral register (if enabled) * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) * - Set handle State to READY * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_AbortTransmit(UART_HandleTypeDef *huart) { /* Disable TXEIE and TCIE interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); /* Disable the UART DMA Tx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx channel : use blocking DMA Abort API (no callback) */ if(huart->hdmatx != NULL) { /* Set the UART DMA Abort callback to Null. No call back execution at end of DMA abort procedure */ huart->hdmatx->XferAbortCallback = NULL; HAL_DMA_Abort(huart->hdmatx); } } /* Reset Tx transfer counter */ huart->TxXferCount = 0x00U; /* Restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; return HAL_OK; } /** * @brief Abort ongoing Receive transfer (blocking mode). * @param huart UART handle. * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. 
* This procedure performs following operations : * - Disable PPP Interrupts * - Disable the DMA transfer in the peripheral register (if enabled) * - Abort DMA transfer by calling HAL_DMA_Abort (in case of transfer in DMA mode) * - Set handle State to READY * @note This procedure is executed in blocking mode : when exiting function, Abort is considered as completed. * @retval HAL status */ HAL_StatusTypeDef HAL_UART_AbortReceive(UART_HandleTypeDef *huart) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the UART DMA Rx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx channel : use blocking DMA Abort API (no callback) */ if(huart->hdmarx != NULL) { /* Set the UART DMA Abort callback to Null. No call back execution at end of DMA abort procedure */ huart->hdmarx->XferAbortCallback = NULL; HAL_DMA_Abort(huart->hdmarx); } } /* Reset Rx transfer counter */ huart->RxXferCount = 0x00U; /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; return HAL_OK; } /** * @brief Abort ongoing transfers (Interrupt mode). * @param huart UART handle. * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. * This procedure performs following operations : * - Disable PPP Interrupts * - Disable the DMA transfer in the peripheral register (if enabled) * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) * - Set handle State to READY * - At abort completion, call user abort complete callback * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be * considered as completed only when user abort complete callback is executed (not when exiting function). * @retval HAL status */ HAL_StatusTypeDef HAL_UART_Abort_IT(UART_HandleTypeDef *huart) { uint32_t AbortCplt = 0x01U; /* Disable TXEIE, TCIE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE | USART_CR1_TCIE)); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* If DMA Tx and/or DMA Rx Handles are associated to UART Handle, DMA Abort complete callbacks should be initialised before any call to DMA Abort functions */ /* DMA Tx Handle is valid */ if(huart->hdmatx != NULL) { /* Set DMA Abort Complete callback if UART DMA Tx request if enabled. Otherwise, set it to NULL */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { huart->hdmatx->XferAbortCallback = UART_DMATxAbortCallback; } else { huart->hdmatx->XferAbortCallback = NULL; } } /* DMA Rx Handle is valid */ if(huart->hdmarx != NULL) { /* Set DMA Abort Complete callback if UART DMA Rx request if enabled. 
Otherwise, set it to NULL */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { huart->hdmarx->XferAbortCallback = UART_DMARxAbortCallback; } else { huart->hdmarx->XferAbortCallback = NULL; } } /* Disable the UART DMA Tx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { /* Disable DMA Tx at UART level */ CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx channel : use non blocking DMA Abort API (callback) */ if(huart->hdmatx != NULL) { /* UART Tx DMA Abort callback has already been initialised : will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ /* Abort DMA TX */ if(HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) { huart->hdmatx->XferAbortCallback = NULL; } else { AbortCplt = 0x00U; } } } /* Disable the UART DMA Rx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx channel : use non blocking DMA Abort API (callback) */ if(huart->hdmarx != NULL) { /* UART Rx DMA Abort callback has already been initialised : will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ /* Abort DMA RX */ if(HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) { huart->hdmarx->XferAbortCallback = NULL; AbortCplt = 0x01U; } else { AbortCplt = 0x00U; } } } /* if no DMA abort complete callback execution is required => call user Abort Complete callback */ if(AbortCplt == 0x01U) { /* Reset Tx and Rx transfer counters */ huart->TxXferCount = 0x00U; huart->RxXferCount = 0x00U; /* Reset ErrorCode */ huart->ErrorCode = HAL_UART_ERROR_NONE; /* Restore huart->gState and huart->RxState to Ready */ huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; /* As no DMA to be aborted, call directly user Abort complete callback */ HAL_UART_AbortCpltCallback(huart); } return HAL_OK; } /** * @brief Abort ongoing Transmit transfer (Interrupt mode). * @param huart UART handle. * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. * This procedure performs following operations : * - Disable PPP Interrupts * - Disable the DMA transfer in the peripheral register (if enabled) * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) * - Set handle State to READY * - At abort completion, call user abort complete callback * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be * considered as completed only when user abort complete callback is executed (not when exiting function). 
* @retval HAL status */ HAL_StatusTypeDef HAL_UART_AbortTransmit_IT(UART_HandleTypeDef *huart) { /* Disable TXEIE and TCIE interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); /* Disable the UART DMA Tx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Abort the UART DMA Tx channel : use blocking DMA Abort API (no callback) */ if(huart->hdmatx != NULL) { /* Set the UART DMA Abort callback : will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ huart->hdmatx->XferAbortCallback = UART_DMATxOnlyAbortCallback; /* Abort DMA TX */ if(HAL_DMA_Abort_IT(huart->hdmatx) != HAL_OK) { /* Call Directly huart->hdmatx->XferAbortCallback function in case of error */ huart->hdmatx->XferAbortCallback(huart->hdmatx); } } else { /* Reset Tx transfer counter */ huart->TxXferCount = 0x00U; /* Restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; /* As no DMA to be aborted, call directly user Abort complete callback */ HAL_UART_AbortTransmitCpltCallback(huart); } } else { /* Reset Tx transfer counter */ huart->TxXferCount = 0x00U; /* Restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; /* As no DMA to be aborted, call directly user Abort complete callback */ HAL_UART_AbortTransmitCpltCallback(huart); } return HAL_OK; } /** * @brief Abort ongoing Receive transfer (Interrupt mode). * @param huart UART handle. * @note This procedure could be used for aborting any ongoing transfer started in Interrupt or DMA mode. * This procedure performs following operations : * - Disable PPP Interrupts * - Disable the DMA transfer in the peripheral register (if enabled) * - Abort DMA transfer by calling HAL_DMA_Abort_IT (in case of transfer in DMA mode) * - Set handle State to READY * - At abort completion, call user abort complete callback * @note This procedure is executed in Interrupt mode, meaning that abort procedure could be * considered as completed only when user abort complete callback is executed (not when exiting function). 
* @retval HAL status */ HAL_StatusTypeDef HAL_UART_AbortReceive_IT(UART_HandleTypeDef *huart) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the UART DMA Rx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx channel : use blocking DMA Abort API (no callback) */ if(huart->hdmarx != NULL) { /* Set the UART DMA Abort callback : will lead to call HAL_UART_AbortCpltCallback() at end of DMA abort procedure */ huart->hdmarx->XferAbortCallback = UART_DMARxOnlyAbortCallback; /* Abort DMA RX */ if(HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) { /* Call Directly huart->hdmarx->XferAbortCallback function in case of error */ huart->hdmarx->XferAbortCallback(huart->hdmarx); } } else { /* Reset Rx transfer counter */ huart->RxXferCount = 0x00U; /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; /* As no DMA to be aborted, call directly user Abort complete callback */ HAL_UART_AbortReceiveCpltCallback(huart); } } else { /* Reset Rx transfer counter */ huart->RxXferCount = 0x00U; /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; /* As no DMA to be aborted, call directly user Abort complete callback */ HAL_UART_AbortReceiveCpltCallback(huart); } return HAL_OK; } /** * @brief This function handles UART interrupt request. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval None */ void HAL_UART_IRQHandler(UART_HandleTypeDef *huart) { uint32_t isrflags = READ_REG(huart->Instance->SR); uint32_t cr1its = READ_REG(huart->Instance->CR1); uint32_t cr3its = READ_REG(huart->Instance->CR3); uint32_t errorflags = 0x00U; uint32_t dmarequest = 0x00U; /* If no error occurs */ errorflags = (isrflags & (uint32_t)(USART_SR_PE | USART_SR_FE | USART_SR_ORE | USART_SR_NE)); if(errorflags == RESET) { /* UART in mode Receiver -------------------------------------------------*/ if(((isrflags & USART_SR_RXNE) != RESET) && ((cr1its & USART_CR1_RXNEIE) != RESET)) { UART_Receive_IT(huart); return; } } /* If some errors occur */ if((errorflags != RESET) && (((cr3its & USART_CR3_EIE) != RESET) || ((cr1its & (USART_CR1_RXNEIE | USART_CR1_PEIE)) != RESET))) { /* UART parity error interrupt occurred ----------------------------------*/ if(((isrflags & USART_SR_PE) != RESET) && ((cr1its & USART_CR1_PEIE) != RESET)) { huart->ErrorCode |= HAL_UART_ERROR_PE; } /* UART noise error interrupt occurred -----------------------------------*/ if(((isrflags & USART_SR_NE) != RESET) && ((cr3its & USART_CR3_EIE) != RESET)) { huart->ErrorCode |= HAL_UART_ERROR_NE; } /* UART frame error interrupt occurred -----------------------------------*/ if(((isrflags & USART_SR_FE) != RESET) && ((cr3its & USART_CR3_EIE) != RESET)) { huart->ErrorCode |= HAL_UART_ERROR_FE; } /* UART Over-Run interrupt occurred --------------------------------------*/ if(((isrflags & USART_SR_ORE) != RESET) && ((cr3its & USART_CR3_EIE) != RESET)) { huart->ErrorCode |= HAL_UART_ERROR_ORE; } /* Call UART Error Call back function if need be --------------------------*/ if(huart->ErrorCode != HAL_UART_ERROR_NONE) { /* UART in mode Receiver -----------------------------------------------*/ if(((isrflags & USART_SR_RXNE) != RESET) && ((cr1its & USART_CR1_RXNEIE) != RESET)) { 
UART_Receive_IT(huart); } /* If Overrun error occurs, or if any error occurs in DMA mode reception, consider error as blocking */ dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); if(((huart->ErrorCode & HAL_UART_ERROR_ORE) != RESET) || dmarequest) { /* Blocking error : transfer is aborted Set the UART state ready to be able to start again the process, Disable Rx Interrupts, and disable Rx DMA request, if ongoing */ UART_EndRxTransfer(huart); /* Disable the UART DMA Rx request if enabled */ if(HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR)) { CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* Abort the UART DMA Rx channel */ if(huart->hdmarx != NULL) { /* Set the UART DMA Abort callback : will lead to call HAL_UART_ErrorCallback() at end of DMA abort procedure */ huart->hdmarx->XferAbortCallback = UART_DMAAbortOnError; if(HAL_DMA_Abort_IT(huart->hdmarx) != HAL_OK) { /* Call Directly XferAbortCallback function in case of error */ huart->hdmarx->XferAbortCallback(huart->hdmarx); } } else { /* Call user error callback */ HAL_UART_ErrorCallback(huart); } } else { /* Call user error callback */ HAL_UART_ErrorCallback(huart); } } else { /* Non Blocking error : transfer could go on. Error is notified to user through user error callback */ HAL_UART_ErrorCallback(huart); huart->ErrorCode = HAL_UART_ERROR_NONE; } } return; } /* End if some error occurs */ /* UART in mode Transmitter ------------------------------------------------*/ if(((isrflags & USART_SR_TXE) != RESET) && ((cr1its & USART_CR1_TXEIE) != RESET)) { UART_Transmit_IT(huart); return; } /* UART in mode Transmitter end --------------------------------------------*/ if(((isrflags & USART_SR_TC) != RESET) && ((cr1its & USART_CR1_TCIE) != RESET)) { UART_EndTransmit_IT(huart); return; } } /** * @brief Tx Transfer completed callbacks. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval None */ __weak void HAL_UART_TxCpltCallback(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_TxCpltCallback could be implemented in the user file */ } /** * @brief Tx Half Transfer completed callbacks. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval None */ __weak void HAL_UART_TxHalfCpltCallback(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_TxCpltCallback could be implemented in the user file */ } /** * @brief Rx Transfer completed callbacks. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval None */ __weak void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_TxCpltCallback could be implemented in the user file */ } /** * @brief Rx Half Transfer completed callbacks. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @retval None */ __weak void HAL_UART_RxHalfCpltCallback(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_TxCpltCallback could be implemented in the user file */ } /** * @brief UART error callbacks. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval None */ __weak void HAL_UART_ErrorCallback(UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE: This function Should not be modified, when the callback is needed, the HAL_UART_ErrorCallback could be implemented in the user file */ } /** * @brief UART Abort Complete callback. * @param huart UART handle. * @retval None */ __weak void HAL_UART_AbortCpltCallback (UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE : This function should not be modified, when the callback is needed, the HAL_UART_AbortCpltCallback can be implemented in the user file. */ } /** * @brief UART Abort Complete callback. * @param huart UART handle. * @retval None */ __weak void HAL_UART_AbortTransmitCpltCallback (UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE : This function should not be modified, when the callback is needed, the HAL_UART_AbortTransmitCpltCallback can be implemented in the user file. */ } /** * @brief UART Abort Receive Complete callback. * @param huart UART handle. * @retval None */ __weak void HAL_UART_AbortReceiveCpltCallback (UART_HandleTypeDef *huart) { /* Prevent unused argument(s) compilation warning */ UNUSED(huart); /* NOTE : This function should not be modified, when the callback is needed, the HAL_UART_AbortReceiveCpltCallback can be implemented in the user file. */ } /** * @} */ /** @defgroup UART_Exported_Functions_Group3 Peripheral Control functions * @brief UART control functions * @verbatim ============================================================================== ##### Peripheral Control functions ##### ============================================================================== [..] This subsection provides a set of functions allowing to control the UART: (+) HAL_LIN_SendBreak() API can be helpful to transmit the break character. (+) HAL_MultiProcessor_EnterMuteMode() API can be helpful to enter the UART in mute mode. (+) HAL_MultiProcessor_ExitMuteMode() API can be helpful to exit the UART mute mode by software. @endverbatim * @{ */ /** * @brief Transmits break characters. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_LIN_SendBreak(UART_HandleTypeDef *huart) { /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); /* Process Locked */ __HAL_LOCK(huart); huart->gState = HAL_UART_STATE_BUSY; /* Send break characters */ SET_BIT(huart->Instance->CR1, USART_CR1_SBK); huart->gState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief Enters the UART in mute mode. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @retval HAL status */ HAL_StatusTypeDef HAL_MultiProcessor_EnterMuteMode(UART_HandleTypeDef *huart) { /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); /* Process Locked */ __HAL_LOCK(huart); huart->gState = HAL_UART_STATE_BUSY; /* Enable the USART mute mode by setting the RWU bit in the CR1 register */ SET_BIT(huart->Instance->CR1, USART_CR1_RWU); huart->gState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief Exits the UART mute mode: wake up software. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_MultiProcessor_ExitMuteMode(UART_HandleTypeDef *huart) { /* Check the parameters */ assert_param(IS_UART_INSTANCE(huart->Instance)); /* Process Locked */ __HAL_LOCK(huart); huart->gState = HAL_UART_STATE_BUSY; /* Disable the USART mute mode by clearing the RWU bit in the CR1 register */ CLEAR_BIT(huart->Instance->CR1, USART_CR1_RWU); huart->gState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief Enables the UART transmitter and disables the UART receiver. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_HalfDuplex_EnableTransmitter(UART_HandleTypeDef *huart) { uint32_t tmpreg = 0x00U; /* Process Locked */ __HAL_LOCK(huart); huart->gState = HAL_UART_STATE_BUSY; /*-------------------------- USART CR1 Configuration -----------------------*/ tmpreg = huart->Instance->CR1; /* Clear TE and RE bits */ tmpreg &= (uint32_t)~((uint32_t)(USART_CR1_TE | USART_CR1_RE)); /* Enable the USART's transmit interface by setting the TE bit in the USART CR1 register */ tmpreg |= (uint32_t)USART_CR1_TE; /* Write to USART CR1 */ WRITE_REG(huart->Instance->CR1, (uint32_t)tmpreg); huart->gState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @brief Enables the UART receiver and disables the UART transmitter. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ HAL_StatusTypeDef HAL_HalfDuplex_EnableReceiver(UART_HandleTypeDef *huart) { uint32_t tmpreg = 0x00U; /* Process Locked */ __HAL_LOCK(huart); huart->gState = HAL_UART_STATE_BUSY; /*-------------------------- USART CR1 Configuration -----------------------*/ tmpreg = huart->Instance->CR1; /* Clear TE and RE bits */ tmpreg &= (uint32_t)~((uint32_t)(USART_CR1_TE | USART_CR1_RE)); /* Enable the USART's receive interface by setting the RE bit in the USART CR1 register */ tmpreg |= (uint32_t)USART_CR1_RE; /* Write to USART CR1 */ WRITE_REG(huart->Instance->CR1, (uint32_t)tmpreg); huart->gState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_OK; } /** * @} */ /** @defgroup UART_Exported_Functions_Group4 Peripheral State and Errors functions * @brief UART State and Errors functions * @verbatim ============================================================================== ##### Peripheral State and Errors functions ##### ============================================================================== [..] 
This subsection provides a set of functions allowing to return the State of UART communication process, return Peripheral Errors occurred during communication process (+) HAL_UART_GetState() API can be helpful to check in run-time the state of the UART peripheral. (+) HAL_UART_GetError() check in run-time errors that could be occurred during communication. @endverbatim * @{ */ /** * @brief Returns the UART state. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL state */ HAL_UART_StateTypeDef HAL_UART_GetState(UART_HandleTypeDef *huart) { uint32_t temp1= 0x00U, temp2 = 0x00U; temp1 = huart->gState; temp2 = huart->RxState; return (HAL_UART_StateTypeDef)(temp1 | temp2); } /** * @brief Return the UART error code * @param huart : pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART. * @retval UART Error Code */ uint32_t HAL_UART_GetError(UART_HandleTypeDef *huart) { return huart->ErrorCode; } /** * @} */ /** * @brief DMA UART transmit process complete callback. * @param hdma: DMA handle * @retval None */ static void UART_DMATransmitCplt(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; /* DMA Normal mode*/ if((hdma->Instance->CR & DMA_SxCR_CIRC) == 0U) { huart->TxXferCount = 0U; /* Disable the DMA transfer for transmit request by setting the DMAT bit in the UART CR3 register */ CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAT); /* Enable the UART Transmit Complete Interrupt */ SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); } /* DMA Circular mode */ else { HAL_UART_TxCpltCallback(huart); } } /** * @brief DMA UART transmit process half complete callback * @param hdma: pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ static void UART_DMATxHalfCplt(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = (UART_HandleTypeDef*)((DMA_HandleTypeDef*)hdma)->Parent; HAL_UART_TxHalfCpltCallback(huart); } /** * @brief DMA UART receive process complete callback. * @param hdma: DMA handle * @retval None */ static void UART_DMAReceiveCplt(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; /* DMA Normal mode*/ if((hdma->Instance->CR & DMA_SxCR_CIRC) == 0U) { huart->RxXferCount = 0U; /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, USART_CR1_PEIE); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Disable the DMA transfer for the receiver request by setting the DMAR bit in the UART CR3 register */ CLEAR_BIT(huart->Instance->CR3, USART_CR3_DMAR); /* At end of Rx process, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; } HAL_UART_RxCpltCallback(huart); } /** * @brief DMA UART receive process half complete callback * @param hdma: pointer to a DMA_HandleTypeDef structure that contains * the configuration information for the specified DMA module. * @retval None */ static void UART_DMARxHalfCplt(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = (UART_HandleTypeDef*)((DMA_HandleTypeDef*)hdma)->Parent; HAL_UART_RxHalfCpltCallback(huart); } /** * @brief DMA UART communication error callback. 
* @param hdma: DMA handle * @retval None */ static void UART_DMAError(DMA_HandleTypeDef *hdma) { uint32_t dmarequest = 0x00U; UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; /* Stop UART DMA Tx request if ongoing */ dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAT); if((huart->gState == HAL_UART_STATE_BUSY_TX) && dmarequest) { huart->TxXferCount = 0U; UART_EndTxTransfer(huart); } /* Stop UART DMA Rx request if ongoing */ dmarequest = HAL_IS_BIT_SET(huart->Instance->CR3, USART_CR3_DMAR); if((huart->RxState == HAL_UART_STATE_BUSY_RX) && dmarequest) { huart->RxXferCount = 0U; UART_EndRxTransfer(huart); } huart->ErrorCode |= HAL_UART_ERROR_DMA; HAL_UART_ErrorCallback(huart); } /** * @brief This function handles UART Communication Timeout. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @param Flag: specifies the UART flag to check. * @param Status: The new Flag status (SET or RESET). * @param Tickstart Tick start value * @param Timeout: Timeout duration * @retval HAL status */ static HAL_StatusTypeDef UART_WaitOnFlagUntilTimeout(UART_HandleTypeDef *huart, uint32_t Flag, FlagStatus Status, uint32_t Tickstart, uint32_t Timeout) { /* Wait until flag is set */ while((__HAL_UART_GET_FLAG(huart, Flag) ? SET : RESET) == Status) { /* Check for the Timeout */ if(Timeout != HAL_MAX_DELAY) { if((Timeout == 0U)||((HAL_GetTick() - Tickstart ) > Timeout)) { /* Disable TXE, RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts for the interrupt process */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE | USART_CR1_TXEIE)); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; /* Process Unlocked */ __HAL_UNLOCK(huart); return HAL_TIMEOUT; } } } return HAL_OK; } /** * @brief End ongoing Tx transfer on UART peripheral (following error detection or Transmit completion). * @param huart: UART handle. * @retval None */ static void UART_EndTxTransfer(UART_HandleTypeDef *huart) { /* Disable TXEIE and TCIE interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_TXEIE | USART_CR1_TCIE)); /* At end of Tx process, restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; } /** * @brief End ongoing Rx transfer on UART peripheral (following error detection or Reception completion). * @param huart: UART handle. * @retval None */ static void UART_EndRxTransfer(UART_HandleTypeDef *huart) { /* Disable RXNE, PE and ERR (Frame error, noise error, overrun error) interrupts */ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* At end of Rx process, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; } /** * @brief DMA UART communication abort callback, when initiated by HAL services on Error * (To be called at end of DMA Abort procedure following error occurrence). * @param hdma DMA handle. * @retval None */ static void UART_DMAAbortOnError(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; huart->RxXferCount = 0; huart->TxXferCount = 0; HAL_UART_ErrorCallback(huart); } /** * @brief DMA UART Tx communication abort callback, when initiated by user * (To be called at end of DMA Tx Abort procedure following user abort request). 
* @note When this callback is executed, User Abort complete call back is called only if no * Abort still ongoing for Rx DMA Handle. * @param hdma DMA handle. * @retval None */ static void UART_DMATxAbortCallback(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; huart->hdmatx->XferAbortCallback = NULL; /* Check if an Abort process is still ongoing */ if(huart->hdmarx != NULL) { if(huart->hdmarx->XferAbortCallback != NULL) { return; } } /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ huart->TxXferCount = 0x00U; huart->RxXferCount = 0x00U; /* Reset ErrorCode */ huart->ErrorCode = HAL_UART_ERROR_NONE; /* Restore huart->gState and huart->RxState to Ready */ huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; /* Call user Abort complete callback */ HAL_UART_AbortCpltCallback(huart); } /** * @brief DMA UART Rx communication abort callback, when initiated by user * (To be called at end of DMA Rx Abort procedure following user abort request). * @note When this callback is executed, User Abort complete call back is called only if no * Abort still ongoing for Tx DMA Handle. * @param hdma DMA handle. * @retval None */ static void UART_DMARxAbortCallback(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; huart->hdmarx->XferAbortCallback = NULL; /* Check if an Abort process is still ongoing */ if(huart->hdmatx != NULL) { if(huart->hdmatx->XferAbortCallback != NULL) { return; } } /* No Abort process still ongoing : All DMA channels are aborted, call user Abort Complete callback */ huart->TxXferCount = 0x00U; huart->RxXferCount = 0x00U; /* Reset ErrorCode */ huart->ErrorCode = HAL_UART_ERROR_NONE; /* Restore huart->gState and huart->RxState to Ready */ huart->gState = HAL_UART_STATE_READY; huart->RxState = HAL_UART_STATE_READY; /* Call user Abort complete callback */ HAL_UART_AbortCpltCallback(huart); } /** * @brief DMA UART Tx communication abort callback, when initiated by user by a call to * HAL_UART_AbortTransmit_IT API (Abort only Tx transfer) * (This callback is executed at end of DMA Tx Abort procedure following user abort request, * and leads to user Tx Abort Complete callback execution). * @param hdma DMA handle. * @retval None */ static void UART_DMATxOnlyAbortCallback(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; huart->TxXferCount = 0x00U; /* Restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; /* Call user Abort complete callback */ HAL_UART_AbortTransmitCpltCallback(huart); } /** * @brief DMA UART Rx communication abort callback, when initiated by user by a call to * HAL_UART_AbortReceive_IT API (Abort only Rx transfer) * (This callback is executed at end of DMA Rx Abort procedure following user abort request, * and leads to user Rx Abort Complete callback execution). * @param hdma DMA handle. * @retval None */ static void UART_DMARxOnlyAbortCallback(DMA_HandleTypeDef *hdma) { UART_HandleTypeDef* huart = ( UART_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent; huart->RxXferCount = 0x00U; /* Restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; /* Call user Abort complete callback */ HAL_UART_AbortReceiveCpltCallback(huart); } /** * @brief Sends an amount of data in non blocking mode. 
* @param huart: Pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ static HAL_StatusTypeDef UART_Transmit_IT(UART_HandleTypeDef *huart) { uint16_t* tmp; /* Check that a Tx process is ongoing */ if(huart->gState == HAL_UART_STATE_BUSY_TX) { if(huart->Init.WordLength == UART_WORDLENGTH_9B) { tmp = (uint16_t*) huart->pTxBuffPtr; huart->Instance->DR = (uint16_t)(*tmp & (uint16_t)0x01FFU); if(huart->Init.Parity == UART_PARITY_NONE) { huart->pTxBuffPtr += 2U; } else { huart->pTxBuffPtr += 1U; } } else { huart->Instance->DR = (uint8_t)(*huart->pTxBuffPtr++ & (uint8_t)0x00FFU); } if(--huart->TxXferCount == 0U) { /* Disable the UART Transmit Complete Interrupt */ CLEAR_BIT(huart->Instance->CR1, USART_CR1_TXEIE); /* Enable the UART Transmit Complete Interrupt */ SET_BIT(huart->Instance->CR1, USART_CR1_TCIE); } return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Wraps up transmission in non blocking mode. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ static HAL_StatusTypeDef UART_EndTransmit_IT(UART_HandleTypeDef *huart) { /* Disable the UART Transmit Complete Interrupt */ CLEAR_BIT(huart->Instance->CR1, USART_CR1_TCIE); /* Tx process is ended, restore huart->gState to Ready */ huart->gState = HAL_UART_STATE_READY; HAL_UART_TxCpltCallback(huart); return HAL_OK; } /** * @brief Receives an amount of data in non blocking mode * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. * @retval HAL status */ static HAL_StatusTypeDef UART_Receive_IT(UART_HandleTypeDef *huart) { uint16_t* tmp; /* Check that a Rx process is ongoing */ if(huart->RxState == HAL_UART_STATE_BUSY_RX) { if(huart->Init.WordLength == UART_WORDLENGTH_9B) { tmp = (uint16_t*) huart->pRxBuffPtr; if(huart->Init.Parity == UART_PARITY_NONE) { *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x01FFU); huart->pRxBuffPtr += 2U; } else { *tmp = (uint16_t)(huart->Instance->DR & (uint16_t)0x00FFU); huart->pRxBuffPtr += 1U; } } else { if(huart->Init.Parity == UART_PARITY_NONE) { *huart->pRxBuffPtr++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x00FFU); } else { *huart->pRxBuffPtr++ = (uint8_t)(huart->Instance->DR & (uint8_t)0x007FU); } } if(--huart->RxXferCount == 0U) { /* Disable the UART Parity Error Interrupt and RXNE interrupt*/ CLEAR_BIT(huart->Instance->CR1, (USART_CR1_RXNEIE | USART_CR1_PEIE)); /* Disable the UART Error Interrupt: (Frame error, noise error, overrun error) */ CLEAR_BIT(huart->Instance->CR3, USART_CR3_EIE); /* Rx process is completed, restore huart->RxState to Ready */ huart->RxState = HAL_UART_STATE_READY; HAL_UART_RxCpltCallback(huart); return HAL_OK; } return HAL_OK; } else { return HAL_BUSY; } } /** * @brief Configures the UART peripheral. * @param huart: pointer to a UART_HandleTypeDef structure that contains * the configuration information for the specified UART module. 
* @retval None */ static void UART_SetConfig(UART_HandleTypeDef *huart) { uint32_t tmpreg = 0x00U; /* Check the parameters */ assert_param(IS_UART_BAUDRATE(huart->Init.BaudRate)); assert_param(IS_UART_STOPBITS(huart->Init.StopBits)); assert_param(IS_UART_PARITY(huart->Init.Parity)); assert_param(IS_UART_MODE(huart->Init.Mode)); /*-------------------------- USART CR2 Configuration -----------------------*/ tmpreg = huart->Instance->CR2; /* Clear STOP[13:12] bits */ tmpreg &= (uint32_t)~((uint32_t)USART_CR2_STOP); /* Configure the UART Stop Bits: Set STOP[13:12] bits according to huart->Init.StopBits value */ tmpreg |= (uint32_t)huart->Init.StopBits; /* Write to USART CR2 */ WRITE_REG(huart->Instance->CR2, (uint32_t)tmpreg); /*-------------------------- USART CR1 Configuration -----------------------*/ tmpreg = huart->Instance->CR1; /* Clear M, PCE, PS, TE and RE bits */ tmpreg &= (uint32_t)~((uint32_t)(USART_CR1_M | USART_CR1_PCE | USART_CR1_PS | USART_CR1_TE | \ USART_CR1_RE | USART_CR1_OVER8)); /* Configure the UART Word Length, Parity and mode: Set the M bits according to huart->Init.WordLength value Set PCE and PS bits according to huart->Init.Parity value Set TE and RE bits according to huart->Init.Mode value Set OVER8 bit according to huart->Init.OverSampling value */ tmpreg |= (uint32_t)huart->Init.WordLength | huart->Init.Parity | huart->Init.Mode | huart->Init.OverSampling; /* Write to USART CR1 */ WRITE_REG(huart->Instance->CR1, (uint32_t)tmpreg); /*-------------------------- USART CR3 Configuration -----------------------*/ tmpreg = huart->Instance->CR3; /* Clear CTSE and RTSE bits */ tmpreg &= (uint32_t)~((uint32_t)(USART_CR3_RTSE | USART_CR3_CTSE)); /* Configure the UART HFC: Set CTSE and RTSE bits according to huart->Init.HwFlowCtl value */ tmpreg |= huart->Init.HwFlowCtl; /* Write to USART CR3 */ WRITE_REG(huart->Instance->CR3, (uint32_t)tmpreg); /* Check the Over Sampling */ if(huart->Init.OverSampling == UART_OVERSAMPLING_8) { /*-------------------------- USART BRR Configuration ---------------------*/ if((huart->Instance == USART1) || (huart->Instance == USART6)) { huart->Instance->BRR = UART_BRR_SAMPLING8(HAL_RCC_GetPCLK2Freq(), huart->Init.BaudRate); } else { huart->Instance->BRR = UART_BRR_SAMPLING8(HAL_RCC_GetPCLK1Freq(), huart->Init.BaudRate); } } else { /*-------------------------- USART BRR Configuration ---------------------*/ if((huart->Instance == USART1) || (huart->Instance == USART6)) { huart->Instance->BRR = UART_BRR_SAMPLING16(HAL_RCC_GetPCLK2Freq(), huart->Init.BaudRate); } else { huart->Instance->BRR = UART_BRR_SAMPLING16(HAL_RCC_GetPCLK1Freq(), huart->Init.BaudRate); } } } /** * @} */ #endif /* HAL_UART_MODULE_ENABLED */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
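/*
 * Illustrative usage sketch, not part of the ST driver above: it shows how an
 * application typically wires the interrupt-mode APIs defined in this file
 * together (arm a reception, route the vector to HAL_UART_IRQHandler(), abort
 * via HAL_UART_AbortReceive_IT(), and treat the abort as finished only in the
 * abort callback).  This code would live in a user application file, not here;
 * `huart2`, the USART2 vector name and HAL_UART_Receive_IT() (provided
 * elsewhere in the HAL) are assumptions of the example.
 */
#include "stm32f4xx_hal.h"          /* usual application-side HAL include */

extern UART_HandleTypeDef huart2;   /* hypothetical handle, set up by user init code */
static uint8_t app_rx_byte;

void app_start_reception(void)
{
  /* Arm a one-byte interrupt-mode reception; completion lands in the callback below. */
  (void)HAL_UART_Receive_IT(&huart2, &app_rx_byte, 1U);
}

void app_cancel_reception(void)
{
  /* Abort only the Rx transfer; wait for HAL_UART_AbortReceiveCpltCallback(). */
  (void)HAL_UART_AbortReceive_IT(&huart2);
}

void USART2_IRQHandler(void)        /* hypothetical vector name from the startup file */
{
  HAL_UART_IRQHandler(&huart2);     /* dispatches RXNE/TXE/TC and error conditions */
}

void HAL_UART_RxCpltCallback(UART_HandleTypeDef *huart)
{
  /* One byte received: consume app_rx_byte here, then re-arm if desired. */
  (void)HAL_UART_Receive_IT(huart, &app_rx_byte, 1U);
}

void HAL_UART_AbortReceiveCpltCallback(UART_HandleTypeDef *huart)
{
  /* Per the note above, the abort is complete only once this callback runs. */
  (void)huart;
}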
1002759.c
/* Searching in a string. Copyright (C) 2005-2008 Free Software Foundation, Inc. Written by Bruno Haible <[email protected]>, 2005. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> /* Specification. */ #include <string.h> #include <stdbool.h> #include <stddef.h> /* for NULL, in case a nonstandard string.h lacks it */ #include "malloca.h" #if HAVE_MBRTOWC # include "mbuiter.h" #endif /* Knuth-Morris-Pratt algorithm. */ #define CANON_ELEMENT(c) c #include "str-kmp.h" #if HAVE_MBRTOWC /* Knuth-Morris-Pratt algorithm. See http://en.wikipedia.org/wiki/Knuth-Morris-Pratt_algorithm Return a boolean indicating success: Return true and set *RESULTP if the search was completed. Return false if it was aborted because not enough memory was available. */ static bool knuth_morris_pratt_multibyte (const char *haystack, const char *needle, const char **resultp) { size_t m = mbslen (needle); mbchar_t *needle_mbchars; size_t *table; /* Allocate room for needle_mbchars and the table. */ char *memory = (char *) nmalloca (m, sizeof (mbchar_t) + sizeof (size_t)); if (memory == NULL) return false; needle_mbchars = (mbchar_t *) memory; table = (size_t *) (memory + m * sizeof (mbchar_t)); /* Fill needle_mbchars. */ { mbui_iterator_t iter; size_t j; j = 0; for (mbui_init (iter, needle); mbui_avail (iter); mbui_advance (iter), j++) mb_copy (&needle_mbchars[j], &mbui_cur (iter)); } /* Fill the table. For 0 < i < m: 0 < table[i] <= i is defined such that forall 0 < x < table[i]: needle[x..i-1] != needle[0..i-1-x], and table[i] is as large as possible with this property. This implies: 1) For 0 < i < m: If table[i] < i, needle[table[i]..i-1] = needle[0..i-1-table[i]]. 2) For 0 < i < m: rhaystack[0..i-1] == needle[0..i-1] and exists h, i <= h < m: rhaystack[h] != needle[h] implies forall 0 <= x < table[i]: rhaystack[x..x+m-1] != needle[0..m-1]. table[0] remains uninitialized. */ { size_t i, j; /* i = 1: Nothing to verify for x = 0. */ table[1] = 1; j = 0; for (i = 2; i < m; i++) { /* Here: j = i-1 - table[i-1]. The inequality needle[x..i-1] != needle[0..i-1-x] is known to hold for x < table[i-1], by induction. Furthermore, if j>0: needle[i-1-j..i-2] = needle[0..j-1]. */ mbchar_t *b = &needle_mbchars[i - 1]; for (;;) { /* Invariants: The inequality needle[x..i-1] != needle[0..i-1-x] is known to hold for x < i-1-j. Furthermore, if j>0: needle[i-1-j..i-2] = needle[0..j-1]. */ if (mb_equal (*b, needle_mbchars[j])) { /* Set table[i] := i-1-j. */ table[i] = i - ++j; break; } /* The inequality needle[x..i-1] != needle[0..i-1-x] also holds for x = i-1-j, because needle[i-1] != needle[j] = needle[i-1-x]. */ if (j == 0) { /* The inequality holds for all possible x. 
*/ table[i] = i; break; } /* The inequality needle[x..i-1] != needle[0..i-1-x] also holds for i-1-j < x < i-1-j+table[j], because for these x: needle[x..i-2] = needle[x-(i-1-j)..j-1] != needle[0..j-1-(x-(i-1-j))] (by definition of table[j]) = needle[0..i-2-x], hence needle[x..i-1] != needle[0..i-1-x]. Furthermore needle[i-1-j+table[j]..i-2] = needle[table[j]..j-1] = needle[0..j-1-table[j]] (by definition of table[j]). */ j = j - table[j]; } /* Here: j = i - table[i]. */ } } /* Search, using the table to accelerate the processing. */ { size_t j; mbui_iterator_t rhaystack; mbui_iterator_t phaystack; *resultp = NULL; j = 0; mbui_init (rhaystack, haystack); mbui_init (phaystack, haystack); /* Invariant: phaystack = rhaystack + j. */ while (mbui_avail (phaystack)) if (mb_equal (needle_mbchars[j], mbui_cur (phaystack))) { j++; mbui_advance (phaystack); if (j == m) { /* The entire needle has been found. */ *resultp = mbui_cur_ptr (rhaystack); break; } } else if (j > 0) { /* Found a match of needle[0..j-1], mismatch at needle[j]. */ size_t count = table[j]; j -= count; for (; count > 0; count--) { if (!mbui_avail (rhaystack)) abort (); mbui_advance (rhaystack); } } else { /* Found a mismatch at needle[0] already. */ if (!mbui_avail (rhaystack)) abort (); mbui_advance (rhaystack); mbui_advance (phaystack); } } freea (memory); return true; } #endif /* Find the first occurrence of the character string NEEDLE in the character string HAYSTACK. Return NULL if NEEDLE is not found in HAYSTACK. */ char * mbsstr (const char *haystack, const char *needle) { /* Be careful not to look at the entire extent of haystack or needle until needed. This is useful because of these two cases: - haystack may be very long, and a match of needle found early, - needle may be very long, and not even a short initial segment of needle may be found in haystack. */ #if HAVE_MBRTOWC if (MB_CUR_MAX > 1) { mbui_iterator_t iter_needle; mbui_init (iter_needle, needle); if (mbui_avail (iter_needle)) { /* Minimizing the worst-case complexity: Let n = mbslen(haystack), m = mbslen(needle). The naïve algorithm is O(n*m) worst-case. The Knuth-Morris-Pratt algorithm is O(n) worst-case but it needs a memory allocation. To achieve linear complexity and yet amortize the cost of the memory allocation, we activate the Knuth-Morris-Pratt algorithm only once the naïve algorithm has already run for some time; more precisely, when - the outer loop count is >= 10, - the average number of comparisons per outer loop is >= 5, - the total number of comparisons is >= m. But we try it only once. If the memory allocation attempt failed, we don't retry it. */ bool try_kmp = true; size_t outer_loop_count = 0; size_t comparison_count = 0; size_t last_ccount = 0; /* last comparison count */ mbui_iterator_t iter_needle_last_ccount; /* = needle + last_ccount */ mbui_iterator_t iter_haystack; mbui_init (iter_needle_last_ccount, needle); mbui_init (iter_haystack, haystack); for (;; mbui_advance (iter_haystack)) { if (!mbui_avail (iter_haystack)) /* No match. */ return NULL; /* See whether it's advisable to use an asymptotically faster algorithm. */ if (try_kmp && outer_loop_count >= 10 && comparison_count >= 5 * outer_loop_count) { /* See if needle + comparison_count now reaches the end of needle. 
*/ size_t count = comparison_count - last_ccount; for (; count > 0 && mbui_avail (iter_needle_last_ccount); count--) mbui_advance (iter_needle_last_ccount); last_ccount = comparison_count; if (!mbui_avail (iter_needle_last_ccount)) { /* Try the Knuth-Morris-Pratt algorithm. */ const char *result; bool success = knuth_morris_pratt_multibyte (haystack, needle, &result); if (success) return (char *) result; try_kmp = false; } } outer_loop_count++; comparison_count++; if (mb_equal (mbui_cur (iter_haystack), mbui_cur (iter_needle))) /* The first character matches. */ { mbui_iterator_t rhaystack; mbui_iterator_t rneedle; memcpy (&rhaystack, &iter_haystack, sizeof (mbui_iterator_t)); mbui_advance (rhaystack); mbui_init (rneedle, needle); if (!mbui_avail (rneedle)) abort (); mbui_advance (rneedle); for (;; mbui_advance (rhaystack), mbui_advance (rneedle)) { if (!mbui_avail (rneedle)) /* Found a match. */ return (char *) mbui_cur_ptr (iter_haystack); if (!mbui_avail (rhaystack)) /* No match. */ return NULL; comparison_count++; if (!mb_equal (mbui_cur (rhaystack), mbui_cur (rneedle))) /* Nothing in this round. */ break; } } } } else return (char *) haystack; } else #endif { if (*needle != '\0') { /* Minimizing the worst-case complexity: Let n = strlen(haystack), m = strlen(needle). The naïve algorithm is O(n*m) worst-case. The Knuth-Morris-Pratt algorithm is O(n) worst-case but it needs a memory allocation. To achieve linear complexity and yet amortize the cost of the memory allocation, we activate the Knuth-Morris-Pratt algorithm only once the naïve algorithm has already run for some time; more precisely, when - the outer loop count is >= 10, - the average number of comparisons per outer loop is >= 5, - the total number of comparisons is >= m. But we try it only once. If the memory allocation attempt failed, we don't retry it. */ bool try_kmp = true; size_t outer_loop_count = 0; size_t comparison_count = 0; size_t last_ccount = 0; /* last comparison count */ const char *needle_last_ccount = needle; /* = needle + last_ccount */ /* Speed up the following searches of needle by caching its first character. */ char b = *needle++; for (;; haystack++) { if (*haystack == '\0') /* No match. */ return NULL; /* See whether it's advisable to use an asymptotically faster algorithm. */ if (try_kmp && outer_loop_count >= 10 && comparison_count >= 5 * outer_loop_count) { /* See if needle + comparison_count now reaches the end of needle. */ if (needle_last_ccount != NULL) { needle_last_ccount += strnlen (needle_last_ccount, comparison_count - last_ccount); if (*needle_last_ccount == '\0') needle_last_ccount = NULL; last_ccount = comparison_count; } if (needle_last_ccount == NULL) { /* Try the Knuth-Morris-Pratt algorithm. */ const char *result; bool success = knuth_morris_pratt_unibyte (haystack, needle - 1, &result); if (success) return (char *) result; try_kmp = false; } } outer_loop_count++; comparison_count++; if (*haystack == b) /* The first character matches. */ { const char *rhaystack = haystack + 1; const char *rneedle = needle; for (;; rhaystack++, rneedle++) { if (*rneedle == '\0') /* Found a match. */ return (char *) haystack; if (*rhaystack == '\0') /* No match. */ return NULL; comparison_count++; if (*rhaystack != *rneedle) /* Nothing in this round. */ break; } } } } else return (char *) haystack; } }
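/*
 * Illustrative usage sketch, not part of the gnulib source above, shown as a
 * small standalone routine: mbsstr() behaves like strstr() but compares
 * multibyte characters, so it cannot match in the middle of a multibyte
 * sequence.  The sample strings are made up, and building this requires the
 * gnulib module that declares mbsstr in <string.h>.
 */
#include <locale.h>
#include <stdio.h>
#include <string.h>

static void
mbsstr_example (void)
{
  const char *haystack = "café liégeois";
  const char *needle = "liége";
  char *p;

  setlocale (LC_ALL, "");   /* pick up the user's (possibly multibyte) locale */
  p = mbsstr (haystack, needle);
  if (p != NULL)
    printf ("found at byte offset %td\n", p - haystack);
  else
    printf ("not found\n");
}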
608305.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_ncpy_09.c Label Definition File: CWE121_Stack_Based_Buffer_Overflow__CWE806.label.xml Template File: sources-sink-09.tmpl.c */ /* * @description * CWE: 121 Stack Based Buffer Overflow * BadSource: Initialize data as a large string * GoodSource: Initialize data as a small string * Sink: ncpy * BadSink : Copy data to string using strncpy * Flow Variant: 09 Control flow: if(GLOBAL_CONST_TRUE) and if(GLOBAL_CONST_FALSE) * * */ #include "std_testcase.h" #include <wchar.h> #ifndef OMITBAD void CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_ncpy_09_bad() { char * data; char dataBuffer[100]; data = dataBuffer; if(GLOBAL_CONST_TRUE) { /* FLAW: Initialize data as a large buffer that is larger than the small buffer used in the sink */ memset(data, 'A', 100-1); /* fill with 'A's */ data[100-1] = '\0'; /* null terminate */ } { char dest[50] = ""; /* POTENTIAL FLAW: Possible buffer overflow if data is larger than dest */ strncpy(dest, data, strlen(data)); dest[50-1] = '\0'; /* Ensure the destination buffer is null terminated */ printLine(data); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B1() - use goodsource and badsink by changing the GLOBAL_CONST_TRUE to GLOBAL_CONST_FALSE */ static void goodG2B1() { char * data; char dataBuffer[100]; data = dataBuffer; if(GLOBAL_CONST_FALSE) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Initialize data as a small buffer that as small or smaller than the small buffer used in the sink */ memset(data, 'A', 50-1); /* fill with 'A's */ data[50-1] = '\0'; /* null terminate */ } { char dest[50] = ""; /* POTENTIAL FLAW: Possible buffer overflow if data is larger than dest */ strncpy(dest, data, strlen(data)); dest[50-1] = '\0'; /* Ensure the destination buffer is null terminated */ printLine(data); } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */ static void goodG2B2() { char * data; char dataBuffer[100]; data = dataBuffer; if(GLOBAL_CONST_TRUE) { /* FIX: Initialize data as a small buffer that as small or smaller than the small buffer used in the sink */ memset(data, 'A', 50-1); /* fill with 'A's */ data[50-1] = '\0'; /* null terminate */ } { char dest[50] = ""; /* POTENTIAL FLAW: Possible buffer overflow if data is larger than dest */ strncpy(dest, data, strlen(data)); dest[50-1] = '\0'; /* Ensure the destination buffer is null terminated */ printLine(data); } } void CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_ncpy_09_good() { goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_ncpy_09_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE121_Stack_Based_Buffer_Overflow__CWE806_char_declare_ncpy_09_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
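/*
 * Illustrative sketch, not part of the generated Juliet testcase above: the
 * "POTENTIAL FLAW" sinks bound strncpy() by strlen(data), i.e. by the source
 * length, so a 99-byte source still overflows the 50-byte dest.  A defensive
 * variant bounds the copy by the destination size instead.  The helper name
 * below is made up for the example.
 */
static void copyBoundedByDest(char *dest, size_t destSize, const char *src)
{
    if (destSize == 0)
    {
        return;
    }
    /* Copy at most destSize-1 bytes and always null terminate. */
    strncpy(dest, src, destSize - 1);
    dest[destSize - 1] = '\0';
}

/* Hypothetical use at the sink:
 *     char dest[50] = "";
 *     copyBoundedByDest(dest, sizeof(dest), data);
 *     printLine(dest);
 */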
120348.c
/* * lib/netfilter/ct.c Conntrack * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation version 2.1 * of the License. * * Copyright (c) 2003-2008 Thomas Graf <[email protected]> * Copyright (c) 2007 Philip Craig <[email protected]> * Copyright (c) 2007 Secure Computing Corporation * Copyright (c= 2008 Patrick McHardy <[email protected]> */ /** * @ingroup nfnl * @defgroup ct Conntrack * @brief * @{ */ #include <byteswap.h> #include <linux/netfilter/nfnetlink_conntrack.h> #include <netlink-private/netlink.h> #include <netlink-private/utils.h> #include <netlink/attr.h> #include <netlink/netfilter/ct.h> #include <netlink/netfilter/nfnl.h> #include <sys/types.h> static struct nl_cache_ops nfnl_ct_ops; static struct nla_policy ct_policy[CTA_MAX + 1] = { [CTA_TUPLE_ORIG] = {.type = NLA_NESTED}, [CTA_TUPLE_REPLY] = {.type = NLA_NESTED}, [CTA_STATUS] = {.type = NLA_U32}, [CTA_PROTOINFO] = {.type = NLA_NESTED}, //[CTA_HELP] //[CTA_NAT_SRC] [CTA_TIMEOUT] = {.type = NLA_U32}, [CTA_MARK] = {.type = NLA_U32}, [CTA_COUNTERS_ORIG] = {.type = NLA_NESTED}, [CTA_COUNTERS_REPLY] = {.type = NLA_NESTED}, [CTA_USE] = {.type = NLA_U32}, [CTA_ID] = {.type = NLA_U32}, [CTA_ZONE] = {.type = NLA_U16}, //[CTA_NAT_DST] }; static struct nla_policy ct_tuple_policy[CTA_TUPLE_MAX + 1] = { [CTA_TUPLE_IP] = {.type = NLA_NESTED}, [CTA_TUPLE_PROTO] = {.type = NLA_NESTED}, }; static struct nla_policy ct_ip_policy[CTA_IP_MAX + 1] = { [CTA_IP_V4_SRC] = {.type = NLA_U32}, [CTA_IP_V4_DST] = {.type = NLA_U32}, [CTA_IP_V6_SRC] = {.minlen = 16}, [CTA_IP_V6_DST] = {.minlen = 16}, }; static struct nla_policy ct_proto_policy[CTA_PROTO_MAX + 1] = { [CTA_PROTO_NUM] = {.type = NLA_U8}, [CTA_PROTO_SRC_PORT] = {.type = NLA_U16}, [CTA_PROTO_DST_PORT] = {.type = NLA_U16}, [CTA_PROTO_ICMP_ID] = {.type = NLA_U16}, [CTA_PROTO_ICMP_TYPE] = {.type = NLA_U8}, [CTA_PROTO_ICMP_CODE] = {.type = NLA_U8}, [CTA_PROTO_ICMPV6_ID] = {.type = NLA_U16}, [CTA_PROTO_ICMPV6_TYPE] = {.type = NLA_U8}, [CTA_PROTO_ICMPV6_CODE] = {.type = NLA_U8}, }; static struct nla_policy ct_protoinfo_policy[CTA_PROTOINFO_MAX + 1] = { [CTA_PROTOINFO_TCP] = {.type = NLA_NESTED}, }; static struct nla_policy ct_protoinfo_tcp_policy[CTA_PROTOINFO_TCP_MAX + 1] = { [CTA_PROTOINFO_TCP_STATE] = {.type = NLA_U8}, [CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = {.type = NLA_U8}, [CTA_PROTOINFO_TCP_WSCALE_REPLY] = {.type = NLA_U8}, [CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = {.minlen = 2}, [CTA_PROTOINFO_TCP_FLAGS_REPLY] = {.minlen = 2}, }; static struct nla_policy ct_counters_policy[CTA_COUNTERS_MAX + 1] = { [CTA_COUNTERS_PACKETS] = {.type = NLA_U64}, [CTA_COUNTERS_BYTES] = {.type = NLA_U64}, [CTA_COUNTERS32_PACKETS] = {.type = NLA_U32}, [CTA_COUNTERS32_BYTES] = {.type = NLA_U32}, }; static struct nla_policy ct_timestamp_policy[CTA_TIMESTAMP_MAX + 1] = { [CTA_TIMESTAMP_START] = {.type = NLA_U64}, [CTA_TIMESTAMP_STOP] = {.type = NLA_U64}, }; static int ct_parse_ip(struct nfnl_ct* ct, int repl, struct nlattr* attr) { struct nlattr* tb[CTA_IP_MAX + 1]; struct nl_addr* addr; int err; err = nla_parse_nested(tb, CTA_IP_MAX, attr, ct_ip_policy); if (err < 0) goto errout; if (tb[CTA_IP_V4_SRC]) { addr = nl_addr_alloc_attr(tb[CTA_IP_V4_SRC], AF_INET); if (addr == NULL) goto errout_enomem; err = nfnl_ct_set_src(ct, repl, addr); nl_addr_put(addr); if (err < 0) goto errout; } if (tb[CTA_IP_V4_DST]) { addr = nl_addr_alloc_attr(tb[CTA_IP_V4_DST], AF_INET); if (addr == NULL) goto errout_enomem; err 
= nfnl_ct_set_dst(ct, repl, addr); nl_addr_put(addr); if (err < 0) goto errout; } if (tb[CTA_IP_V6_SRC]) { addr = nl_addr_alloc_attr(tb[CTA_IP_V6_SRC], AF_INET6); if (addr == NULL) goto errout_enomem; err = nfnl_ct_set_src(ct, repl, addr); nl_addr_put(addr); if (err < 0) goto errout; } if (tb[CTA_IP_V6_DST]) { addr = nl_addr_alloc_attr(tb[CTA_IP_V6_DST], AF_INET6); if (addr == NULL) goto errout_enomem; err = nfnl_ct_set_dst(ct, repl, addr); nl_addr_put(addr); if (err < 0) goto errout; } return 0; errout_enomem: err = -NLE_NOMEM; errout: return err; } static int ct_parse_proto(struct nfnl_ct* ct, int repl, struct nlattr* attr) { struct nlattr* tb[CTA_PROTO_MAX + 1]; int err; err = nla_parse_nested(tb, CTA_PROTO_MAX, attr, ct_proto_policy); if (err < 0) return err; if (!repl && tb[CTA_PROTO_NUM]) nfnl_ct_set_proto(ct, nla_get_u8(tb[CTA_PROTO_NUM])); if (tb[CTA_PROTO_SRC_PORT]) nfnl_ct_set_src_port(ct, repl, ntohs(nla_get_u16(tb[CTA_PROTO_SRC_PORT]))); if (tb[CTA_PROTO_DST_PORT]) nfnl_ct_set_dst_port(ct, repl, ntohs(nla_get_u16(tb[CTA_PROTO_DST_PORT]))); if (ct->ct_family == AF_INET) { if (tb[CTA_PROTO_ICMP_ID]) nfnl_ct_set_icmp_id(ct, repl, ntohs(nla_get_u16(tb[CTA_PROTO_ICMP_ID]))); if (tb[CTA_PROTO_ICMP_TYPE]) nfnl_ct_set_icmp_type(ct, repl, nla_get_u8(tb[CTA_PROTO_ICMP_TYPE])); if (tb[CTA_PROTO_ICMP_CODE]) nfnl_ct_set_icmp_code(ct, repl, nla_get_u8(tb[CTA_PROTO_ICMP_CODE])); } else if (ct->ct_family == AF_INET6) { if (tb[CTA_PROTO_ICMPV6_ID]) nfnl_ct_set_icmp_id(ct, repl, ntohs(nla_get_u16(tb[CTA_PROTO_ICMPV6_ID]))); if (tb[CTA_PROTO_ICMPV6_TYPE]) nfnl_ct_set_icmp_type(ct, repl, nla_get_u8(tb[CTA_PROTO_ICMPV6_TYPE])); if (tb[CTA_PROTO_ICMPV6_CODE]) nfnl_ct_set_icmp_code(ct, repl, nla_get_u8(tb[CTA_PROTO_ICMPV6_CODE])); } return 0; } static int ct_parse_tuple(struct nfnl_ct* ct, int repl, struct nlattr* attr) { struct nlattr* tb[CTA_TUPLE_MAX + 1]; int err; err = nla_parse_nested(tb, CTA_TUPLE_MAX, attr, ct_tuple_policy); if (err < 0) return err; if (tb[CTA_TUPLE_IP]) { err = ct_parse_ip(ct, repl, tb[CTA_TUPLE_IP]); if (err < 0) return err; } if (tb[CTA_TUPLE_PROTO]) { err = ct_parse_proto(ct, repl, tb[CTA_TUPLE_PROTO]); if (err < 0) return err; } return 0; } static int ct_parse_protoinfo_tcp(struct nfnl_ct* ct, struct nlattr* attr) { struct nlattr* tb[CTA_PROTOINFO_TCP_MAX + 1]; int err; err = nla_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr, ct_protoinfo_tcp_policy); if (err < 0) return err; if (tb[CTA_PROTOINFO_TCP_STATE]) nfnl_ct_set_tcp_state(ct, nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE])); return 0; } static int ct_parse_protoinfo(struct nfnl_ct* ct, struct nlattr* attr) { struct nlattr* tb[CTA_PROTOINFO_MAX + 1]; int err; err = nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, ct_protoinfo_policy); if (err < 0) return err; if (tb[CTA_PROTOINFO_TCP]) { err = ct_parse_protoinfo_tcp(ct, tb[CTA_PROTOINFO_TCP]); if (err < 0) return err; } return 0; } static int ct_parse_counters(struct nfnl_ct* ct, int repl, struct nlattr* attr) { struct nlattr* tb[CTA_COUNTERS_MAX + 1]; int err; err = nla_parse_nested(tb, CTA_COUNTERS_MAX, attr, ct_counters_policy); if (err < 0) return err; if (tb[CTA_COUNTERS_PACKETS]) nfnl_ct_set_packets(ct, repl, ntohll(nla_get_u64(tb[CTA_COUNTERS_PACKETS]))); if (tb[CTA_COUNTERS32_PACKETS]) nfnl_ct_set_packets(ct, repl, ntohl(nla_get_u32(tb[CTA_COUNTERS32_PACKETS]))); if (tb[CTA_COUNTERS_BYTES]) nfnl_ct_set_bytes(ct, repl, ntohll(nla_get_u64(tb[CTA_COUNTERS_BYTES]))); if (tb[CTA_COUNTERS32_BYTES]) nfnl_ct_set_bytes(ct, repl, ntohl(nla_get_u32(tb[CTA_COUNTERS32_BYTES]))); 
return 0; } int nfnlmsg_ct_group(struct nlmsghdr* nlh) { switch (nfnlmsg_subtype(nlh)) { case IPCTNL_MSG_CT_NEW: if (nlh->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL)) return NFNLGRP_CONNTRACK_NEW; else return NFNLGRP_CONNTRACK_UPDATE; case IPCTNL_MSG_CT_DELETE: return NFNLGRP_CONNTRACK_DESTROY; default: return NFNLGRP_NONE; } } static int ct_parse_timestamp(struct nfnl_ct* ct, struct nlattr* attr) { struct nlattr* tb[CTA_TIMESTAMP_MAX + 1]; int err; err = nla_parse_nested(tb, CTA_TIMESTAMP_MAX, attr, ct_timestamp_policy); if (err < 0) return err; if (tb[CTA_TIMESTAMP_START] && tb[CTA_TIMESTAMP_STOP]) nfnl_ct_set_timestamp(ct, ntohll(nla_get_u64(tb[CTA_TIMESTAMP_START])), ntohll(nla_get_u64(tb[CTA_TIMESTAMP_STOP]))); return 0; } int nfnlmsg_ct_parse(struct nlmsghdr* nlh, struct nfnl_ct** result) { struct nfnl_ct* ct; struct nlattr* tb[CTA_MAX + 1]; int err; ct = nfnl_ct_alloc(); if (!ct) return -NLE_NOMEM; ct->ce_msgtype = nlh->nlmsg_type; err = nlmsg_parse(nlh, sizeof(struct nfgenmsg), tb, CTA_MAX, ct_policy); if (err < 0) goto errout; nfnl_ct_set_family(ct, nfnlmsg_family(nlh)); if (tb[CTA_TUPLE_ORIG]) { err = ct_parse_tuple(ct, 0, tb[CTA_TUPLE_ORIG]); if (err < 0) goto errout; } if (tb[CTA_TUPLE_REPLY]) { err = ct_parse_tuple(ct, 1, tb[CTA_TUPLE_REPLY]); if (err < 0) goto errout; } if (tb[CTA_PROTOINFO]) { err = ct_parse_protoinfo(ct, tb[CTA_PROTOINFO]); if (err < 0) goto errout; } if (tb[CTA_STATUS]) nfnl_ct_set_status(ct, ntohl(nla_get_u32(tb[CTA_STATUS]))); if (tb[CTA_TIMEOUT]) nfnl_ct_set_timeout(ct, ntohl(nla_get_u32(tb[CTA_TIMEOUT]))); if (tb[CTA_MARK]) nfnl_ct_set_mark(ct, ntohl(nla_get_u32(tb[CTA_MARK]))); if (tb[CTA_USE]) nfnl_ct_set_use(ct, ntohl(nla_get_u32(tb[CTA_USE]))); if (tb[CTA_ID]) nfnl_ct_set_id(ct, ntohl(nla_get_u32(tb[CTA_ID]))); if (tb[CTA_ZONE]) nfnl_ct_set_zone(ct, ntohs(nla_get_u16(tb[CTA_ZONE]))); if (tb[CTA_COUNTERS_ORIG]) { err = ct_parse_counters(ct, 0, tb[CTA_COUNTERS_ORIG]); if (err < 0) goto errout; } if (tb[CTA_COUNTERS_REPLY]) { err = ct_parse_counters(ct, 1, tb[CTA_COUNTERS_REPLY]); if (err < 0) goto errout; } if (tb[CTA_TIMESTAMP]) { err = ct_parse_timestamp(ct, tb[CTA_TIMESTAMP]); if (err < 0) goto errout; } *result = ct; return 0; errout: nfnl_ct_put(ct); return err; } static int ct_msg_parser(struct nl_cache_ops* ops, struct sockaddr_nl* who, struct nlmsghdr* nlh, struct nl_parser_param* pp) { struct nfnl_ct* ct; int err; if ((err = nfnlmsg_ct_parse(nlh, &ct)) < 0) return err; err = pp->pp_cb((struct nl_object*)ct, pp); nfnl_ct_put(ct); return err; } /** * Send nfnl ct dump request * @arg sk Netlink socket. * * @return 0 on success or a negative error code. Due to a bug, this function * returns the number of bytes sent. Treat any non-negative number as success. */ int nfnl_ct_dump_request(struct nl_sock* sk) { return nfnl_send_simple(sk, NFNL_SUBSYS_CTNETLINK, IPCTNL_MSG_CT_GET, NLM_F_DUMP, AF_UNSPEC, 0); } static int ct_request_update(struct nl_cache* cache, struct nl_sock* sk) { return nfnl_ct_dump_request(sk); } static int nfnl_ct_build_tuple(struct nl_msg* msg, const struct nfnl_ct* ct, int repl) { struct nlattr *tuple, *ip, *proto; struct nl_addr* addr; int family; family = nfnl_ct_get_family(ct); tuple = nla_nest_start(msg, repl ? CTA_TUPLE_REPLY : CTA_TUPLE_ORIG); if (!tuple) goto nla_put_failure; ip = nla_nest_start(msg, CTA_TUPLE_IP); if (!ip) goto nla_put_failure; addr = nfnl_ct_get_src(ct, repl); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? 
CTA_IP_V4_SRC : CTA_IP_V6_SRC, addr); addr = nfnl_ct_get_dst(ct, repl); if (addr) NLA_PUT_ADDR(msg, family == AF_INET ? CTA_IP_V4_DST : CTA_IP_V6_DST, addr); nla_nest_end(msg, ip); proto = nla_nest_start(msg, CTA_TUPLE_PROTO); if (!proto) goto nla_put_failure; if (nfnl_ct_test_proto(ct)) NLA_PUT_U8(msg, CTA_PROTO_NUM, nfnl_ct_get_proto(ct)); if (nfnl_ct_test_src_port(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_SRC_PORT, htons(nfnl_ct_get_src_port(ct, repl))); if (nfnl_ct_test_dst_port(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_DST_PORT, htons(nfnl_ct_get_dst_port(ct, repl))); if (family == AF_INET) { if (nfnl_ct_test_icmp_id(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_ICMP_ID, htons(nfnl_ct_get_icmp_id(ct, repl))); if (nfnl_ct_test_icmp_type(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMP_TYPE, nfnl_ct_get_icmp_type(ct, repl)); if (nfnl_ct_test_icmp_code(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMP_CODE, nfnl_ct_get_icmp_code(ct, repl)); } else if (family == AF_INET6) { if (nfnl_ct_test_icmp_id(ct, repl)) NLA_PUT_U16(msg, CTA_PROTO_ICMPV6_ID, htons(nfnl_ct_get_icmp_id(ct, repl))); if (nfnl_ct_test_icmp_type(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMPV6_TYPE, nfnl_ct_get_icmp_type(ct, repl)); if (nfnl_ct_test_icmp_code(ct, repl)) NLA_PUT_U8(msg, CTA_PROTO_ICMPV6_CODE, nfnl_ct_get_icmp_code(ct, repl)); } nla_nest_end(msg, proto); nla_nest_end(msg, tuple); return 0; nla_put_failure: return -NLE_MSGSIZE; } static int nfnl_ct_build_message(const struct nfnl_ct* ct, int cmd, int flags, struct nl_msg** result) { struct nl_msg* msg; int err; msg = nfnlmsg_alloc_simple(NFNL_SUBSYS_CTNETLINK, cmd, flags, nfnl_ct_get_family(ct), 0); if (msg == NULL) return -NLE_NOMEM; if ((err = nfnl_ct_build_tuple(msg, ct, 0)) < 0) goto err_out; /* REPLY tuple is optional, dont add unless at least src/dst specified */ if (nfnl_ct_get_src(ct, 1) && nfnl_ct_get_dst(ct, 1)) if ((err = nfnl_ct_build_tuple(msg, ct, 1)) < 0) goto err_out; if (nfnl_ct_test_status(ct)) NLA_PUT_U32(msg, CTA_STATUS, htonl(nfnl_ct_get_status(ct))); if (nfnl_ct_test_timeout(ct)) NLA_PUT_U32(msg, CTA_TIMEOUT, htonl(nfnl_ct_get_timeout(ct))); if (nfnl_ct_test_mark(ct)) NLA_PUT_U32(msg, CTA_MARK, htonl(nfnl_ct_get_mark(ct))); if (nfnl_ct_test_id(ct)) NLA_PUT_U32(msg, CTA_ID, htonl(nfnl_ct_get_id(ct))); if (nfnl_ct_test_zone(ct)) NLA_PUT_U16(msg, CTA_ZONE, htons(nfnl_ct_get_zone(ct))); *result = msg; return 0; nla_put_failure: err_out: nlmsg_free(msg); return err; } int nfnl_ct_build_add_request(const struct nfnl_ct* ct, int flags, struct nl_msg** result) { return nfnl_ct_build_message(ct, IPCTNL_MSG_CT_NEW, flags, result); } int nfnl_ct_add(struct nl_sock* sk, const struct nfnl_ct* ct, int flags) { struct nl_msg* msg; int err; if ((err = nfnl_ct_build_add_request(ct, flags, &msg)) < 0) return err; err = nl_send_auto_complete(sk, msg); nlmsg_free(msg); if (err < 0) return err; return wait_for_ack(sk); } int nfnl_ct_build_delete_request(const struct nfnl_ct* ct, int flags, struct nl_msg** result) { return nfnl_ct_build_message(ct, IPCTNL_MSG_CT_DELETE, flags, result); } int nfnl_ct_del(struct nl_sock* sk, const struct nfnl_ct* ct, int flags) { struct nl_msg* msg; int err; if ((err = nfnl_ct_build_delete_request(ct, flags, &msg)) < 0) return err; err = nl_send_auto_complete(sk, msg); nlmsg_free(msg); if (err < 0) return err; return wait_for_ack(sk); } int nfnl_ct_build_query_request(const struct nfnl_ct* ct, int flags, struct nl_msg** result) { return nfnl_ct_build_message(ct, IPCTNL_MSG_CT_GET, flags, result); } int nfnl_ct_query(struct nl_sock* sk, const struct nfnl_ct* ct, 
int flags) { struct nl_msg* msg; int err; if ((err = nfnl_ct_build_query_request(ct, flags, &msg)) < 0) return err; err = nl_send_auto_complete(sk, msg); nlmsg_free(msg); if (err < 0) return err; return wait_for_ack(sk); } /** * @name Cache Management * @{ */ /** * Build a conntrack cache holding all conntrack currently in the kernel * @arg sk Netlink socket. * @arg result Pointer to store resulting cache. * * Allocates a new cache, initializes it properly and updates it to * contain all conntracks currently in the kernel. * * @return 0 on success or a negative error code. */ int nfnl_ct_alloc_cache(struct nl_sock* sk, struct nl_cache** result) { return nl_cache_alloc_and_fill(&nfnl_ct_ops, sk, result); } /** @} */ /** * @name Conntrack Addition * @{ */ /** @} */ static struct nl_af_group ct_groups[] = { {AF_UNSPEC, NFNLGRP_CONNTRACK_NEW}, {AF_UNSPEC, NFNLGRP_CONNTRACK_UPDATE}, {AF_UNSPEC, NFNLGRP_CONNTRACK_DESTROY}, {END_OF_GROUP_LIST}, }; #define NFNLMSG_CT_TYPE(type) NFNLMSG_TYPE(NFNL_SUBSYS_CTNETLINK, (type)) static struct nl_cache_ops nfnl_ct_ops = { .co_name = "netfilter/ct", .co_hdrsize = NFNL_HDRLEN, .co_msgtypes = { {NFNLMSG_CT_TYPE(IPCTNL_MSG_CT_NEW), NL_ACT_NEW, "new"}, {NFNLMSG_CT_TYPE(IPCTNL_MSG_CT_GET), NL_ACT_GET, "get"}, {NFNLMSG_CT_TYPE(IPCTNL_MSG_CT_DELETE), NL_ACT_DEL, "del"}, END_OF_MSGTYPES_LIST, }, .co_protocol = NETLINK_NETFILTER, .co_groups = ct_groups, .co_request_update = ct_request_update, .co_msg_parser = ct_msg_parser, .co_obj_ops = &ct_obj_ops, }; static void __init ct_init(void) { nl_cache_mngt_register(&nfnl_ct_ops); } static void __exit ct_exit(void) { nl_cache_mngt_unregister(&nfnl_ct_ops); } /** @} */
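/*
 * Illustrative usage sketch, not part of the libnl source above: dump the
 * kernel conntrack table through nfnl_ct_alloc_cache() defined in this file.
 * nl_socket_alloc(), nl_connect(), nl_cache_dump() and struct nl_dump_params
 * come from the core libnl library; error handling is abbreviated and the
 * function name is made up for the example.
 */
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/cache.h>
#include <netlink/errno.h>
#include <netlink/netfilter/ct.h>

static int dump_conntrack_example(void)
{
	struct nl_sock *sk;
	struct nl_cache *cache;
	struct nl_dump_params params = {
		.dp_type = NL_DUMP_LINE,	/* one line per conntrack entry */
		.dp_fd = stdout,
	};
	int err;

	sk = nl_socket_alloc();
	if (!sk)
		return -NLE_NOMEM;

	err = nl_connect(sk, NETLINK_NETFILTER);
	if (err < 0)
		goto out;

	err = nfnl_ct_alloc_cache(sk, &cache);	/* defined above */
	if (err < 0)
		goto out;

	nl_cache_dump(cache, &params);
	nl_cache_free(cache);
out:
	nl_socket_free(sk);
	return err;
}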
448157.c
// to read formatted input from a file
#include <stdio.h>
#include <stdlib.h>

int main()
{
   char str1[10], str2[10], str3[10];
   int year = 0;
   FILE *fp;

   fp = fopen("file.txt", "w+");
   if(fp == NULL)
   {
      perror("fopen");
      return EXIT_FAILURE;
   }

   /* Write a line that matches the format read back below:
      three words followed by an integer. */
   fputs("Hello from year 2024", fp);
   rewind(fp);

   /* Field widths keep each word within the 10-byte buffers. */
   if(fscanf(fp, "%9s %9s %9s %d", str1, str2, str3, &year) != 4)
   {
      fprintf(stderr, "unexpected file contents\n");
      fclose(fp);
      return EXIT_FAILURE;
   }

   printf("Read String1 |%s|\n", str1);
   printf("Read String2 |%s|\n", str2);
   printf("Read String3 |%s|\n", str3);
   printf("Read Integer |%d|\n", year);

   fclose(fp);
   fp = NULL;

   return 0;
}
566664.c
/* conf.c (GENERATED FILE; DO NOT EDIT) */ #include <xinu.h> extern devcall ioerr(void); extern devcall ionull(void); /* Device independent I/O switch */ struct dentry devtab[NDEVS] = { /** * Format of entries is: * dev-number, minor-number, dev-name, * init, open, close, * read, write, seek, * getc, putc, control, * dev-csr-address, intr-handler, irq */ /* CONSOLE is tty */ { 0, 0, "CONSOLE", (void *)ttyinit, (void *)ionull, (void *)ionull, (void *)ttyread, (void *)ttywrite, (void *)ioerr, (void *)ttygetc, (void *)ttyputc, (void *)ttycontrol, (void *)0x44e09000, (void *)ttyhandler, 72 }, /* NULLDEV is null */ { 1, 0, "NULLDEV", (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ioerr, (void *)ionull, (void *)ionull, (void *)ioerr, (void *)0x0, (void *)ioerr, 0 }, /* ETHER0 is eth */ { 2, 0, "ETHER0", (void *)ethinit, (void *)ioerr, (void *)ioerr, (void *)ethread, (void *)ethwrite, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ethcontrol, (void *)0x0, (void *)ethhandler, 0 }, /* NAMESPACE is nam */ { 3, 0, "NAMESPACE", (void *)naminit, (void *)namopen, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)0x0, (void *)ioerr, 0 }, /* RDISK is rds */ { 4, 0, "RDISK", (void *)rdsinit, (void *)rdsopen, (void *)rdsclose, (void *)rdsread, (void *)rdswrite, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)rdscontrol, (void *)0x0, (void *)ionull, 0 }, /* RAM0 is ram */ { 5, 0, "RAM0", (void *)raminit, (void *)ramopen, (void *)ramclose, (void *)ramread, (void *)ramwrite, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILESYS is rfs */ { 6, 0, "RFILESYS", (void *)rfsinit, (void *)rfsopen, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)rfscontrol, (void *)0x0, (void *)ionull, 0 }, /* RFILE0 is rfl */ { 7, 0, "RFILE0", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE1 is rfl */ { 8, 1, "RFILE1", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE2 is rfl */ { 9, 2, "RFILE2", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE3 is rfl */ { 10, 3, "RFILE3", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE4 is rfl */ { 11, 4, "RFILE4", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE5 is rfl */ { 12, 5, "RFILE5", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE6 is rfl */ { 13, 6, "RFILE6", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE7 is rfl */ { 14, 7, "RFILE7", (void *)rflinit, (void *)ioerr, (void 
*)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE8 is rfl */ { 15, 8, "RFILE8", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* RFILE9 is rfl */ { 16, 9, "RFILE9", (void *)rflinit, (void *)ioerr, (void *)rflclose, (void *)rflread, (void *)rflwrite, (void *)rflseek, (void *)rflgetc, (void *)rflputc, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* LFILESYS is lfs */ { 17, 0, "LFILESYS", (void *)lfsinit, (void *)lfsopen, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)ioerr, (void *)0x0, (void *)ionull, 0 }, /* LFILE0 is lfl */ { 18, 0, "LFILE0", (void *)lflinit, (void *)ioerr, (void *)lflclose, (void *)lflread, (void *)lflwrite, (void *)lflseek, (void *)lflgetc, (void *)lflputc, (void *)lflcontrol, (void *)0x0, (void *)ionull, 0 }, /* LFILE1 is lfl */ { 19, 1, "LFILE1", (void *)lflinit, (void *)ioerr, (void *)lflclose, (void *)lflread, (void *)lflwrite, (void *)lflseek, (void *)lflgetc, (void *)lflputc, (void *)lflcontrol, (void *)0x0, (void *)ionull, 0 }, /* LFILE2 is lfl */ { 20, 2, "LFILE2", (void *)lflinit, (void *)ioerr, (void *)lflclose, (void *)lflread, (void *)lflwrite, (void *)lflseek, (void *)lflgetc, (void *)lflputc, (void *)lflcontrol, (void *)0x0, (void *)ionull, 0 }, /* LFILE3 is lfl */ { 21, 3, "LFILE3", (void *)lflinit, (void *)ioerr, (void *)lflclose, (void *)lflread, (void *)lflwrite, (void *)lflseek, (void *)lflgetc, (void *)lflputc, (void *)lflcontrol, (void *)0x0, (void *)ionull, 0 }, /* LFILE4 is lfl */ { 22, 4, "LFILE4", (void *)lflinit, (void *)ioerr, (void *)lflclose, (void *)lflread, (void *)lflwrite, (void *)lflseek, (void *)lflgetc, (void *)lflputc, (void *)lflcontrol, (void *)0x0, (void *)ionull, 0 }, /* LFILE5 is lfl */ { 23, 5, "LFILE5", (void *)lflinit, (void *)ioerr, (void *)lflclose, (void *)lflread, (void *)lflwrite, (void *)lflseek, (void *)lflgetc, (void *)lflputc, (void *)lflcontrol, (void *)0x0, (void *)ionull, 0 }, /* SPI0 is spi */ { 24, 0, "SPI0", (void *)spiinit, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)spicontrol, (void *)0x48030000, (void *)ionull, 0 }, /* SPI1 is spi */ { 25, 1, "SPI1", (void *)spiinit, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)spicontrol, (void *)0x481a0000, (void *)ionull, 0 }, /* ADC is adc */ { 26, 0, "ADC", (void *)adcinit, (void *)ionull, (void *)ionull, (void *)adcread, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)0x44e0d000, (void *)ionull, 0 }, /* GPIO is gpio */ { 27, 0, "GPIO", (void *)gpioinit, (void *)ionull, (void *)ionull, (void *)gpioread, (void *)gpiowrite, (void *)ionull, (void *)ionull, (void *)ionull, (void *)gpioctl, (void *)0x44e07000, (void *)ionull, 0 }, /* ACCEL is accel */ { 28, 0, "ACCEL", (void *)accel_init, (void *)ionull, (void *)ionull, (void *)accel_read, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)ionull, (void *)0x0, (void *)ionull, 0 } };
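
The generated table above wires each device number to its driver entry points; Xinu's device-independent I/O layer resolves a call such as read(CONSOLE, ...) by indexing devtab and jumping through the stored function pointer. The sketch below is illustrative only and is not part of the generated conf.c: the struct and helper names are simplified stand-ins for Xinu's real dentry/devcall declarations in xinu.h, shown just to make the "dev-number ... read, write ..." layout of the entries concrete.

/* Illustrative sketch; not part of conf.c above. Types and names are
 * simplified stand-ins for Xinu's dentry/devcall declarations. */
#include <stdint.h>

typedef int32_t devcall;                        /* assumed stand-in for Xinu's devcall */

struct dentry_sketch {                          /* mirrors the entry format documented above */
    int32_t dvnum;                              /* dev-number   */
    int32_t dvminor;                            /* minor-number */
    const char *dvname;                         /* dev-name     */
    devcall (*dvread)(struct dentry_sketch *, void *, uint32_t);
    devcall (*dvwrite)(struct dentry_sketch *, void *, uint32_t);
    /* init/open/close/seek/getc/putc/control/csr/intr-handler/irq omitted */
};

static devcall sketch_ionull(struct dentry_sketch *d, void *b, uint32_t n)
{
    (void)d; (void)b; (void)n;                  /* accept and ignore, like ionull() */
    return 0;
}

static struct dentry_sketch devtab_sketch[] = {
    { 0, 0, "CONSOLE", sketch_ionull, sketch_ionull },
    { 1, 0, "NULLDEV", sketch_ionull, sketch_ionull },
};

#define NDEVS_SKETCH (sizeof devtab_sketch / sizeof devtab_sketch[0])

/* Device-independent read: validate the descriptor, then dispatch through the table. */
static devcall sketch_read(int32_t descrp, void *buf, uint32_t count)
{
    if (descrp < 0 || (uint32_t)descrp >= NDEVS_SKETCH)
        return (devcall)-1;                     /* SYSERR in real Xinu */
    struct dentry_sketch *devptr = &devtab_sketch[descrp];
    return devptr->dvread(devptr, buf, count);
}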
11614.c
/*------------------------------------------------------------------------- * * win32_latch.c * Routines for inter-process latches * * See unix_latch.c for header comments for the exported functions; * the API presented here is supposed to be the same as there. * * The Windows implementation uses Windows events that are inherited by * all postmaster child processes. * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION * src/backend/port/win32_latch.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include <fcntl.h> #include <signal.h> #include <unistd.h> #include "miscadmin.h" #include "postmaster/postmaster.h" #include "storage/latch.h" #include "storage/pmsignal.h" #include "storage/shmem.h" void InitLatch(volatile Latch *latch) { latch->is_set = false; latch->owner_pid = MyProcPid; latch->is_shared = false; latch->event = CreateEvent(NULL, TRUE, FALSE, NULL); if (latch->event == NULL) elog(ERROR, "CreateEvent failed: error code %lu", GetLastError()); } void InitSharedLatch(volatile Latch *latch) { SECURITY_ATTRIBUTES sa; latch->is_set = false; latch->owner_pid = 0; latch->is_shared = true; /* * Set up security attributes to specify that the events are inherited. */ ZeroMemory(&sa, sizeof(sa)); sa.nLength = sizeof(sa); sa.bInheritHandle = TRUE; latch->event = CreateEvent(&sa, TRUE, FALSE, NULL); if (latch->event == NULL) elog(ERROR, "CreateEvent failed: error code %lu", GetLastError()); } void OwnLatch(volatile Latch *latch) { /* Sanity checks */ Assert(latch->is_shared); if (latch->owner_pid != 0) elog(ERROR, "latch already owned"); latch->owner_pid = MyProcPid; } void DisownLatch(volatile Latch *latch) { Assert(latch->is_shared); Assert(latch->owner_pid == MyProcPid); latch->owner_pid = 0; } int WaitLatch(volatile Latch *latch, int wakeEvents, long timeout) { return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout); } int WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock, long timeout) { DWORD rc; HANDLE events[4]; HANDLE latchevent; HANDLE sockevent = WSA_INVALID_EVENT; int numevents; int result = 0; int pmdeath_eventno = 0; /* Ignore WL_SOCKET_* events if no valid socket is given */ if (sock == PGINVALID_SOCKET) wakeEvents &= ~(WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE); Assert(wakeEvents != 0); /* must have at least one wake event */ /* Cannot specify WL_SOCKET_WRITEABLE without WL_SOCKET_READABLE */ Assert((wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) != WL_SOCKET_WRITEABLE); if ((wakeEvents & WL_LATCH_SET) && latch->owner_pid != MyProcPid) elog(ERROR, "cannot wait on a latch owned by another process"); /* Convert timeout to form used by WaitForMultipleObjects() */ if (wakeEvents & WL_TIMEOUT) Assert(timeout >= 0); else timeout = INFINITE; /* * Construct an array of event handles for WaitforMultipleObjects(). * * Note: pgwin32_signal_event should be first to ensure that it will be * reported when multiple events are set. We want to guarantee that * pending signals are serviced. 
*/ latchevent = latch->event; events[0] = pgwin32_signal_event; events[1] = latchevent; numevents = 2; if (wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) { /* Need an event object to represent events on the socket */ int flags = 0; if (wakeEvents & WL_SOCKET_READABLE) flags |= (FD_READ | FD_CLOSE); if (wakeEvents & WL_SOCKET_WRITEABLE) flags |= FD_WRITE; sockevent = WSACreateEvent(); if (sockevent == WSA_INVALID_EVENT) elog(ERROR, "failed to create event for socket: error code %u", WSAGetLastError()); if (WSAEventSelect(sock, sockevent, flags) != 0) elog(ERROR, "failed to set up event for socket: error code %u", WSAGetLastError()); events[numevents++] = sockevent; } if (wakeEvents & WL_POSTMASTER_DEATH) { pmdeath_eventno = numevents; events[numevents++] = PostmasterHandle; } /* Ensure that signals are serviced even if latch is already set */ pgwin32_dispatch_queued_signals(); do { /* * Reset the event, and check if the latch is set already. If someone * sets the latch between this and the WaitForMultipleObjects() call * below, the setter will set the event and WaitForMultipleObjects() * will return immediately. */ if (!ResetEvent(latchevent)) elog(ERROR, "ResetEvent failed: error code %lu", GetLastError()); if ((wakeEvents & WL_LATCH_SET) && latch->is_set) { result |= WL_LATCH_SET; /* * Leave loop immediately, avoid blocking again. We don't attempt * to report any other events that might also be satisfied. */ break; } rc = WaitForMultipleObjects(numevents, events, FALSE, timeout); if (rc == WAIT_FAILED) elog(ERROR, "WaitForMultipleObjects() failed: error code %lu", GetLastError()); else if (rc == WAIT_TIMEOUT) { result |= WL_TIMEOUT; } else if (rc == WAIT_OBJECT_0) { /* Service newly-arrived signals */ pgwin32_dispatch_queued_signals(); } else if (rc == WAIT_OBJECT_0 + 1) { /* Latch is set, we'll handle that on next iteration of loop */ } else if ((wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) && rc == WAIT_OBJECT_0 + 2) /* socket is at event slot 2 */ { WSANETWORKEVENTS resEvents; ZeroMemory(&resEvents, sizeof(resEvents)); if (WSAEnumNetworkEvents(sock, sockevent, &resEvents) != 0) elog(ERROR, "failed to enumerate network events: error code %u", WSAGetLastError()); if ((wakeEvents & WL_SOCKET_READABLE) && (resEvents.lNetworkEvents & (FD_READ | FD_CLOSE))) { result |= WL_SOCKET_READABLE; } if ((wakeEvents & WL_SOCKET_WRITEABLE) && (resEvents.lNetworkEvents & FD_WRITE)) { result |= WL_SOCKET_WRITEABLE; } } else if ((wakeEvents & WL_POSTMASTER_DEATH) && rc == WAIT_OBJECT_0 + pmdeath_eventno) { /* * Postmaster apparently died. Since the consequences of falsely * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we * take the trouble to positively verify this with * PostmasterIsAlive(), even though there is no known reason to * think that the event could be falsely set on Windows. */ if (!PostmasterIsAlive()) result |= WL_POSTMASTER_DEATH; } else elog(ERROR, "unexpected return code from WaitForMultipleObjects(): %lu", rc); } while (result == 0); /* Clean up the event object we created for the socket */ if (sockevent != WSA_INVALID_EVENT) { WSAEventSelect(sock, NULL, 0); WSACloseEvent(sockevent); } return result; } void SetLatch(volatile Latch *latch) { HANDLE handle; /* Quick exit if already set */ if (latch->is_set) return; latch->is_set = true; /* * See if anyone's waiting for the latch. It can be the current process if * we're in a signal handler. 
* * Use a local variable here just in case somebody changes the event field * concurrently (which really should not happen). */ handle = latch->event; if (handle) { SetEvent(handle); /* * Note that we silently ignore any errors. We might be in a signal * handler or other critical path where it's not safe to call elog(). */ } } void ResetLatch(volatile Latch *latch) { /* Only the owner should reset the latch */ Assert(latch->owner_pid == MyProcPid); latch->is_set = false; }
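
The entry points above (InitLatch/OwnLatch, SetLatch, WaitLatch/WaitLatchOrSocket, ResetLatch) exist to make the usual sleep/wakeup loop race-free: SetLatch() flips is_set before signalling the event, and WaitLatch() rechecks is_set after ResetEvent(), so a wakeup that arrives between a caller's work check and its sleep is never lost. A minimal sketch of that canonical caller loop follows; it assumes the same WL_* flags and millisecond timeout used by the implementation above, and have_work()/do_work() are hypothetical placeholders.

/* Sketch of a caller of the latch API above; have_work()/do_work() are
 * hypothetical placeholders standing in for real backend work. */
#include "postgres.h"
#include "storage/latch.h"

static bool have_work(void) { return false; }
static void do_work(void)   { }

/*
 * Canonical wait loop: reset first, then look for work, then sleep.
 * If work (and a SetLatch) arrives after the check, is_set is already
 * set again, so the WaitLatch() call returns immediately instead of
 * blocking until the timeout.
 */
static void
worker_loop(volatile Latch *latch)
{
	for (;;)
	{
		ResetLatch(latch);

		while (have_work())
			do_work();

		/* Sleep until the latch is set or ten seconds elapse. */
		(void) WaitLatch(latch, WL_LATCH_SET | WL_TIMEOUT, 10 * 1000L);
	}
}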
36152.c
#include "event.h" #define MAX(a, b) ((a) > (b) ? (a) : (b)) Point prev_mouse_xy; Time click1_time; Time click2_time; void on_key_press(const XKeyEvent *ev) { if (ev->subwindow != None) { XRaiseWindow(dpy, ev->subwindow); } } void on_button_press(const XButtonEvent *ev) { XWindowAttributes menu_attr; XGetWindowAttributes(dpy, root_menu, &menu_attr); if (ev->window == root && ev->subwindow == None) { if (ev->button == Button3) { if (menu_attr.map_state == IsUnmapped) { XMoveWindow(dpy, root_menu, ev->x_root, ev->y_root); XRaiseWindow(dpy, root_menu); XMapWindow(dpy, root_menu); } else { XUnmapWindow(dpy, root_menu); } } else { XUnmapWindow(dpy, root_menu); } } else if (menu_attr.map_state != IsUnmapped && ev->button == Button1) { YNode *curr = ylist_head(&menu_items); while (curr != NULL) { MenuItem *menuItem = (MenuItem *)curr->data; if (menuItem->window == ev->window) { flash_menu(menuItem); fork_exec(menuItem->command); XUnmapWindow(dpy, root_menu); break; } curr = curr->next; } } else { XAllowEvents(dpy, ReplayPointer, CurrentTime); Window returned_root; int x, y; unsigned width, height, border_width, depth; XGetGeometry( dpy, ev->window, &returned_root, &x, &y, &width, &height, &border_width, &depth); cursor_start_point = (Point){ .x = ev->x_root, .y = ev->y_root }; cursor_start_win_point = (Point){ .x = ev->x, .y = ev->y }; prev_mouse_xy = (Point){ .x = ev->x_root, .y = ev->y_root }; start_window_geom = (Rect){ .x = x, .y = y, .width = width, .height = height }; current_window_geom = (Rect){ .x = x, .y = y, .width = width, .height = height }; if (is_title_bar(cursor_start_win_point)) { if (click1_time == 0) { click1_time = ev->time; click2_time = 0; } else if (click2_time == 0) { click2_time = ev->time; long diff = click2_time - click1_time; if (diff <= DBL_CLICK_SPEED) { Client *client = find_client(ev->window); if (client != NULL) { handle_shading(client); } } click1_time = 0; click2_time = 0; } } else { click1_time = 0; click2_time = 0; } XGrabPointer(dpy, ev->window, True, PointerMotionMask | ButtonReleaseMask, GrabModeAsync, GrabModeAsync, None, None, CurrentTime); XRaiseWindow(dpy, ev->window); focused_client = find_client(ev->window); YNode *curr = ylist_head(&clients); while (curr != NULL) { Client *client = (Client *)ylist_data(curr); redraw(client); curr = curr->next; } } } // TODO: implement callbacks void on_button_release(const XButtonEvent *ev) { Client *c = find_client(ev->window); if (c != NULL) { if (ev->window == c->close_button) { send_wm_delete(c->client); remove_client(c); } else if (ev->window == c->shade_button) { handle_shading(c); } } XUngrabPointer(dpy, CurrentTime); } void on_motion_notify(const XMotionEvent *ev) { Client *c = find_client(ev->window); int x = current_window_geom.x; int y = current_window_geom.y; int width = current_window_geom.width; int height = current_window_geom.height; if (ev->state & Button1Mask && c->close_button != ev->window && c->shade_button != ev->window && !is_resize_frame(cursor_start_win_point)) { int xdiff = ev->x_root - cursor_start_point.x; int ydiff = ev->y_root - cursor_start_point.y; x = start_window_geom.x + xdiff; y = start_window_geom.y + ydiff; if (snap_window_right(x)) { x = screen_w - start_window_geom.width; } else if (snap_window_left(x)) { x = 0; } if (snap_window_bottom(y)) { y = screen_h - start_window_geom.height; } else if (snap_window_top(y)) { y = 0; } XMoveWindow(dpy, ev->window, x, y); } else if (!c->shaded) { // resize motion if (is_lower_right_corner(cursor_start_win_point)) { int xdiff = ev->x_root - 
cursor_start_point.x; int ydiff = ev->y_root - cursor_start_point.y; width = start_window_geom.width + xdiff; height = start_window_geom.height + ydiff; if (width > MIN_WIDTH && height > MIN_HEIGHT) { XResizeWindow(dpy, c->frame, width + TOTAL_FRAME_WIDTH, height + TOTAL_FRAME_HEIGHT); XResizeWindow(dpy, c->client, width, height); } } else { int xdiff = ev->x_root - prev_mouse_xy.x; int ydiff = ev->y_root - prev_mouse_xy.y; if (is_lower_left_corner(cursor_start_win_point)) { x = current_window_geom.x + xdiff; width = current_window_geom.width + (xdiff * -1); height = current_window_geom.height + ydiff; } else if (is_left_frame(cursor_start_win_point.x)) { x = current_window_geom.x + xdiff; width = current_window_geom.width + (xdiff * -1); } else if (is_right_frame(cursor_start_win_point.x)) { width = current_window_geom.width + xdiff; } else if (is_bottom_frame(cursor_start_win_point.y)) { height = current_window_geom.height + ydiff; } if (width > MIN_WIDTH && height > MIN_HEIGHT) { XMoveResizeWindow(dpy, c->frame, x, y, width, height); XMoveResizeWindow(dpy, c->client, FRAME_BORDER_WIDTH, FRAME_TITLEBAR_HEIGHT, width - TOTAL_FRAME_WIDTH, height - TOTAL_FRAME_HEIGHT); } } } current_window_geom = (Rect){ .x = x, .y = y, .width = width, .height = height }; prev_mouse_xy = (Point){ .x = ev->x_root, .y = ev->y_root }; click1_time = 0; click2_time = 0; } void on_expose(const XExposeEvent *ev) { if (ev->count == 0) { Client* c = find_client(ev->window); if (c != NULL) { redraw(c); } XWindowAttributes menu_attr; XGetWindowAttributes(dpy, root_menu, &menu_attr); if (menu_attr.map_state != IsUnmapped) { draw_menu(); } } } void on_reparent_notify(const XReparentEvent *ev) { } void on_create_notify(const XCreateWindowEvent *ev) { } void on_destroy_notify(const XDestroyWindowEvent *ev) { Client *c = find_client(ev->window); if (c != NULL) { remove_client(c); } } void on_configure_request(const XConfigureRequestEvent *ev) { XWindowChanges changes; changes.x = ev->x; changes.y = ev->y; changes.width = ev->width; changes.height = ev->height; changes.border_width = ev->border_width; changes.sibling = ev->above; changes.stack_mode = ev->detail; XConfigureWindow(dpy, ev->window, ev->value_mask, &changes); } void on_configure_notify(const XConfigureEvent* ev) { Client *c = find_client(ev->window); if (c != NULL) { c->x = ev->x; c->y = ev->y; c->width = ev->width; c->height = ev->height; } } void on_map_request(Window root, const XMapRequestEvent* ev) { Client *c = find_client(ev->window); if (c == NULL) { frame(root, ev->window); XMapWindow(dpy, ev->window); } } void on_unmap_notify(const XUnmapEvent* ev) { } void on_enter_notify(const XCrossingEvent* ev) { focused_client = find_client(ev->window); YNode *curr = ylist_head(&clients); while (curr != NULL) { Client *client = (Client *)ylist_data(curr); redraw(client); curr = curr->next; } } void on_leave_notify(const XCrossingEvent* ev) { }
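
The file above only defines per-event-type handlers; the XNextEvent() loop that feeds them lives elsewhere. The sketch below shows the kind of dispatch loop such handlers are normally driven by, assuming event.h declares the dpy/root globals and the on_* handlers exactly as they are used above; it is an illustration, not this project's actual main loop.

/* Illustrative dispatch loop (assumed, not taken from this project): pull
 * events off the X queue and forward the matching member of the XEvent
 * union to the handler defined above. */
#include <X11/Xlib.h>
#include "event.h"   /* assumed to declare dpy, root and the on_* handlers */

static void run_event_loop(void)
{
    XEvent ev;

    for (;;) {
        XNextEvent(dpy, &ev);                 /* blocks until an event arrives */
        switch (ev.type) {
        case KeyPress:         on_key_press(&ev.xkey);                      break;
        case ButtonPress:      on_button_press(&ev.xbutton);                break;
        case ButtonRelease:    on_button_release(&ev.xbutton);              break;
        case MotionNotify:     on_motion_notify(&ev.xmotion);               break;
        case Expose:           on_expose(&ev.xexpose);                      break;
        case MapRequest:       on_map_request(root, &ev.xmaprequest);       break;
        case ConfigureRequest: on_configure_request(&ev.xconfigurerequest); break;
        case ConfigureNotify:  on_configure_notify(&ev.xconfigure);         break;
        case ReparentNotify:   on_reparent_notify(&ev.xreparent);           break;
        case CreateNotify:     on_create_notify(&ev.xcreatewindow);         break;
        case DestroyNotify:    on_destroy_notify(&ev.xdestroywindow);       break;
        case UnmapNotify:      on_unmap_notify(&ev.xunmap);                 break;
        case EnterNotify:      on_enter_notify(&ev.xcrossing);              break;
        case LeaveNotify:      on_leave_notify(&ev.xcrossing);              break;
        default: break;
        }
    }
}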
99910.c
/* * Copyright (c) by Jaroslav Kysela <[email protected]> * Routines for control of YMF724/740/744/754 chips * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/firmware.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/mutex.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/tlv.h> #include <sound/ymfpci.h> #include <sound/asoundef.h> #include <sound/mpu401.h> #include <asm/io.h> #include <asm/byteorder.h> /* * common I/O routines */ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip); static inline u8 snd_ymfpci_readb(struct snd_ymfpci *chip, u32 offset) { return readb(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writeb(struct snd_ymfpci *chip, u32 offset, u8 val) { writeb(val, chip->reg_area_virt + offset); } static inline u16 snd_ymfpci_readw(struct snd_ymfpci *chip, u32 offset) { return readw(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writew(struct snd_ymfpci *chip, u32 offset, u16 val) { writew(val, chip->reg_area_virt + offset); } static inline u32 snd_ymfpci_readl(struct snd_ymfpci *chip, u32 offset) { return readl(chip->reg_area_virt + offset); } static inline void snd_ymfpci_writel(struct snd_ymfpci *chip, u32 offset, u32 val) { writel(val, chip->reg_area_virt + offset); } static int snd_ymfpci_codec_ready(struct snd_ymfpci *chip, int secondary) { unsigned long end_time; u32 reg = secondary ? 
YDSXGR_SECSTATUSADR : YDSXGR_PRISTATUSADR; end_time = jiffies + msecs_to_jiffies(750); do { if ((snd_ymfpci_readw(chip, reg) & 0x8000) == 0) return 0; schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n", secondary, snd_ymfpci_readw(chip, reg)); return -EBUSY; } static void snd_ymfpci_codec_write(struct snd_ac97 *ac97, u16 reg, u16 val) { struct snd_ymfpci *chip = ac97->private_data; u32 cmd; snd_ymfpci_codec_ready(chip, 0); cmd = ((YDSXG_AC97WRITECMD | reg) << 16) | val; snd_ymfpci_writel(chip, YDSXGR_AC97CMDDATA, cmd); } static u16 snd_ymfpci_codec_read(struct snd_ac97 *ac97, u16 reg) { struct snd_ymfpci *chip = ac97->private_data; if (snd_ymfpci_codec_ready(chip, 0)) return ~0; snd_ymfpci_writew(chip, YDSXGR_AC97CMDADR, YDSXG_AC97READCMD | reg); if (snd_ymfpci_codec_ready(chip, 0)) return ~0; if (chip->device_id == PCI_DEVICE_ID_YAMAHA_744 && chip->rev < 2) { int i; for (i = 0; i < 600; i++) snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA); } return snd_ymfpci_readw(chip, YDSXGR_PRISTATUSDATA); } /* * Misc routines */ static u32 snd_ymfpci_calc_delta(u32 rate) { switch (rate) { case 8000: return 0x02aaab00; case 11025: return 0x03accd00; case 16000: return 0x05555500; case 22050: return 0x07599a00; case 32000: return 0x0aaaab00; case 44100: return 0x0eb33300; default: return ((rate << 16) / 375) << 5; } } static u32 def_rate[8] = { 100, 2000, 8000, 11025, 16000, 22050, 32000, 48000 }; static u32 snd_ymfpci_calc_lpfK(u32 rate) { u32 i; static u32 val[8] = { 0x00570000, 0x06AA0000, 0x18B20000, 0x20930000, 0x2B9A0000, 0x35A10000, 0x3EAA0000, 0x40000000 }; if (rate == 44100) return 0x40000000; /* FIXME: What's the right value? */ for (i = 0; i < 8; i++) if (rate <= def_rate[i]) return val[i]; return val[0]; } static u32 snd_ymfpci_calc_lpfQ(u32 rate) { u32 i; static u32 val[8] = { 0x35280000, 0x34A70000, 0x32020000, 0x31770000, 0x31390000, 0x31C90000, 0x33D00000, 0x40000000 }; if (rate == 44100) return 0x370A0000; for (i = 0; i < 8; i++) if (rate <= def_rate[i]) return val[i]; return val[0]; } /* * Hardware start management */ static void snd_ymfpci_hw_start(struct snd_ymfpci *chip) { unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); if (chip->start_count++ > 0) goto __end; snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) | 3); chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1; __end: spin_unlock_irqrestore(&chip->reg_lock, flags); } static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip) { unsigned long flags; long timeout = 1000; spin_lock_irqsave(&chip->reg_lock, flags); if (--chip->start_count > 0) goto __end; snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) & ~3); while (timeout-- > 0) { if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0) break; } if (atomic_read(&chip->interrupt_sleep_count)) { atomic_set(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } __end: spin_unlock_irqrestore(&chip->reg_lock, flags); } /* * Playback voice management */ static int voice_alloc(struct snd_ymfpci *chip, enum snd_ymfpci_voice_type type, int pair, struct snd_ymfpci_voice **rvoice) { struct snd_ymfpci_voice *voice, *voice2; int idx; *rvoice = NULL; for (idx = 0; idx < YDSXG_PLAYBACK_VOICES; idx += pair ? 2 : 1) { voice = &chip->voices[idx]; voice2 = pair ? 
&chip->voices[idx+1] : NULL; if (voice->use || (voice2 && voice2->use)) continue; voice->use = 1; if (voice2) voice2->use = 1; switch (type) { case YMFPCI_PCM: voice->pcm = 1; if (voice2) voice2->pcm = 1; break; case YMFPCI_SYNTH: voice->synth = 1; break; case YMFPCI_MIDI: voice->midi = 1; break; } snd_ymfpci_hw_start(chip); if (voice2) snd_ymfpci_hw_start(chip); *rvoice = voice; return 0; } return -ENOMEM; } static int snd_ymfpci_voice_alloc(struct snd_ymfpci *chip, enum snd_ymfpci_voice_type type, int pair, struct snd_ymfpci_voice **rvoice) { unsigned long flags; int result; if (snd_BUG_ON(!rvoice)) return -EINVAL; if (snd_BUG_ON(pair && type != YMFPCI_PCM)) return -EINVAL; spin_lock_irqsave(&chip->voice_lock, flags); for (;;) { result = voice_alloc(chip, type, pair, rvoice); if (result == 0 || type != YMFPCI_PCM) break; /* TODO: synth/midi voice deallocation */ break; } spin_unlock_irqrestore(&chip->voice_lock, flags); return result; } static int snd_ymfpci_voice_free(struct snd_ymfpci *chip, struct snd_ymfpci_voice *pvoice) { unsigned long flags; if (snd_BUG_ON(!pvoice)) return -EINVAL; snd_ymfpci_hw_stop(chip); spin_lock_irqsave(&chip->voice_lock, flags); if (pvoice->number == chip->src441_used) { chip->src441_used = -1; pvoice->ypcm->use_441_slot = 0; } pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = 0; pvoice->ypcm = NULL; pvoice->interrupt = NULL; spin_unlock_irqrestore(&chip->voice_lock, flags); return 0; } /* * PCM part */ static void snd_ymfpci_pcm_interrupt(struct snd_ymfpci *chip, struct snd_ymfpci_voice *voice) { struct snd_ymfpci_pcm *ypcm; u32 pos, delta; if ((ypcm = voice->ypcm) == NULL) return; if (ypcm->substream == NULL) return; spin_lock(&chip->reg_lock); if (ypcm->running) { pos = le32_to_cpu(voice->bank[chip->active_bank].start); if (pos < ypcm->last_pos) delta = pos + (ypcm->buffer_size - ypcm->last_pos); else delta = pos - ypcm->last_pos; ypcm->period_pos += delta; ypcm->last_pos = pos; if (ypcm->period_pos >= ypcm->period_size) { /* printk(KERN_DEBUG "done - active_bank = 0x%x, start = 0x%x\n", chip->active_bank, voice->bank[chip->active_bank].start); */ ypcm->period_pos %= ypcm->period_size; spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(ypcm->substream); spin_lock(&chip->reg_lock); } if (unlikely(ypcm->update_pcm_vol)) { unsigned int subs = ypcm->substream->number; unsigned int next_bank = 1 - chip->active_bank; struct snd_ymfpci_playback_bank *bank; u32 volume; bank = &voice->bank[next_bank]; volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15); bank->left_gain_end = volume; if (ypcm->output_rear) bank->eff2_gain_end = volume; if (ypcm->voices[1]) bank = &ypcm->voices[1]->bank[next_bank]; volume = cpu_to_le32(chip->pcm_mixer[subs].right << 15); bank->right_gain_end = volume; if (ypcm->output_rear) bank->eff3_gain_end = volume; ypcm->update_pcm_vol--; } } spin_unlock(&chip->reg_lock); } static void snd_ymfpci_pcm_capture_interrupt(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci *chip = ypcm->chip; u32 pos, delta; spin_lock(&chip->reg_lock); if (ypcm->running) { pos = le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; if (pos < ypcm->last_pos) delta = pos + (ypcm->buffer_size - ypcm->last_pos); else delta = pos - ypcm->last_pos; ypcm->period_pos += delta; ypcm->last_pos = pos; if (ypcm->period_pos >= ypcm->period_size) { ypcm->period_pos %= ypcm->period_size; /* printk(KERN_DEBUG 
"done - active_bank = 0x%x, start = 0x%x\n", chip->active_bank, voice->bank[chip->active_bank].start); */ spin_unlock(&chip->reg_lock); snd_pcm_period_elapsed(substream); spin_lock(&chip->reg_lock); } } spin_unlock(&chip->reg_lock); } static int snd_ymfpci_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data; struct snd_kcontrol *kctl = NULL; int result = 0; spin_lock(&chip->reg_lock); if (ypcm->voices[0] == NULL) { result = -EINVAL; goto __unlock; } switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: chip->ctrl_playback[ypcm->voices[0]->number + 1] = cpu_to_le32(ypcm->voices[0]->bank_addr); if (ypcm->voices[1] != NULL && !ypcm->use_441_slot) chip->ctrl_playback[ypcm->voices[1]->number + 1] = cpu_to_le32(ypcm->voices[1]->bank_addr); ypcm->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: if (substream->pcm == chip->pcm && !ypcm->use_441_slot) { kctl = chip->pcm_mixer[substream->number].ctl; kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; } /* fall through */ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: chip->ctrl_playback[ypcm->voices[0]->number + 1] = 0; if (ypcm->voices[1] != NULL && !ypcm->use_441_slot) chip->ctrl_playback[ypcm->voices[1]->number + 1] = 0; ypcm->running = 0; break; default: result = -EINVAL; break; } __unlock: spin_unlock(&chip->reg_lock); if (kctl) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id); return result; } static int snd_ymfpci_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data; int result = 0; u32 tmp; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) | (1 << ypcm->capture_bank_number); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp); ypcm->running = 1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: case SNDRV_PCM_TRIGGER_SUSPEND: tmp = snd_ymfpci_readl(chip, YDSXGR_MAPOFREC) & ~(1 << ypcm->capture_bank_number); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, tmp); ypcm->running = 0; break; default: result = -EINVAL; break; } spin_unlock(&chip->reg_lock); return result; } static int snd_ymfpci_pcm_voice_alloc(struct snd_ymfpci_pcm *ypcm, int voices) { int err; if (ypcm->voices[1] != NULL && voices < 2) { snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[1]); ypcm->voices[1] = NULL; } if (voices == 1 && ypcm->voices[0] != NULL) return 0; /* already allocated */ if (voices == 2 && ypcm->voices[0] != NULL && ypcm->voices[1] != NULL) return 0; /* already allocated */ if (voices > 1) { if (ypcm->voices[0] != NULL && ypcm->voices[1] == NULL) { snd_ymfpci_voice_free(ypcm->chip, ypcm->voices[0]); ypcm->voices[0] = NULL; } } err = snd_ymfpci_voice_alloc(ypcm->chip, YMFPCI_PCM, voices > 1, &ypcm->voices[0]); if (err < 0) return err; ypcm->voices[0]->ypcm = ypcm; ypcm->voices[0]->interrupt = snd_ymfpci_pcm_interrupt; if (voices > 1) { ypcm->voices[1] = &ypcm->chip->voices[ypcm->voices[0]->number + 1]; ypcm->voices[1]->ypcm = ypcm; } return 0; } static void snd_ymfpci_pcm_init_voice(struct snd_ymfpci_pcm *ypcm, unsigned int voiceidx, struct snd_pcm_runtime *runtime, int has_pcm_volume) { struct snd_ymfpci_voice *voice = ypcm->voices[voiceidx]; u32 
format; u32 delta = snd_ymfpci_calc_delta(runtime->rate); u32 lpfQ = snd_ymfpci_calc_lpfQ(runtime->rate); u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate); struct snd_ymfpci_playback_bank *bank; unsigned int nbank; u32 vol_left, vol_right; u8 use_left, use_right; unsigned long flags; if (snd_BUG_ON(!voice)) return; if (runtime->channels == 1) { use_left = 1; use_right = 1; } else { use_left = (voiceidx & 1) == 0; use_right = !use_left; } if (has_pcm_volume) { vol_left = cpu_to_le32(ypcm->chip->pcm_mixer [ypcm->substream->number].left << 15); vol_right = cpu_to_le32(ypcm->chip->pcm_mixer [ypcm->substream->number].right << 15); } else { vol_left = cpu_to_le32(0x40000000); vol_right = cpu_to_le32(0x40000000); } spin_lock_irqsave(&ypcm->chip->voice_lock, flags); format = runtime->channels == 2 ? 0x00010000 : 0; if (snd_pcm_format_width(runtime->format) == 8) format |= 0x80000000; else if (ypcm->chip->device_id == PCI_DEVICE_ID_YAMAHA_754 && runtime->rate == 44100 && runtime->channels == 2 && voiceidx == 0 && (ypcm->chip->src441_used == -1 || ypcm->chip->src441_used == voice->number)) { ypcm->chip->src441_used = voice->number; ypcm->use_441_slot = 1; format |= 0x10000000; } if (ypcm->chip->src441_used == voice->number && (format & 0x10000000) == 0) { ypcm->chip->src441_used = -1; ypcm->use_441_slot = 0; } if (runtime->channels == 2 && (voiceidx & 1) != 0) format |= 1; spin_unlock_irqrestore(&ypcm->chip->voice_lock, flags); for (nbank = 0; nbank < 2; nbank++) { bank = &voice->bank[nbank]; memset(bank, 0, sizeof(*bank)); bank->format = cpu_to_le32(format); bank->base = cpu_to_le32(runtime->dma_addr); bank->loop_end = cpu_to_le32(ypcm->buffer_size); bank->lpfQ = cpu_to_le32(lpfQ); bank->delta = bank->delta_end = cpu_to_le32(delta); bank->lpfK = bank->lpfK_end = cpu_to_le32(lpfK); bank->eg_gain = bank->eg_gain_end = cpu_to_le32(0x40000000); if (ypcm->output_front) { if (use_left) { bank->left_gain = bank->left_gain_end = vol_left; } if (use_right) { bank->right_gain = bank->right_gain_end = vol_right; } } if (ypcm->output_rear) { if (!ypcm->swap_rear) { if (use_left) { bank->eff2_gain = bank->eff2_gain_end = vol_left; } if (use_right) { bank->eff3_gain = bank->eff3_gain_end = vol_right; } } else { /* The SPDIF out channels seem to be swapped, so we have * to swap them here, too. The rear analog out channels * will be wrong, but otherwise AC3 would not work. 
*/ if (use_left) { bank->eff3_gain = bank->eff3_gain_end = vol_left; } if (use_right) { bank->eff2_gain = bank->eff2_gain_end = vol_right; } } } } } static int __devinit snd_ymfpci_ac3_init(struct snd_ymfpci *chip) { if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 4096, &chip->ac3_tmp_base) < 0) return -ENOMEM; chip->bank_effect[3][0]->base = chip->bank_effect[3][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr); chip->bank_effect[3][0]->loop_end = chip->bank_effect[3][1]->loop_end = cpu_to_le32(1024); chip->bank_effect[4][0]->base = chip->bank_effect[4][1]->base = cpu_to_le32(chip->ac3_tmp_base.addr + 2048); chip->bank_effect[4][0]->loop_end = chip->bank_effect[4][1]->loop_end = cpu_to_le32(1024); spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) | 3 << 3); spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_ac3_done(struct snd_ymfpci *chip) { spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, snd_ymfpci_readl(chip, YDSXGR_MAPOFEFFECT) & ~(3 << 3)); spin_unlock_irq(&chip->reg_lock); // snd_ymfpci_irq_wait(chip); if (chip->ac3_tmp_base.area) { snd_dma_free_pages(&chip->ac3_tmp_base); chip->ac3_tmp_base.area = NULL; } return 0; } static int snd_ymfpci_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; if ((err = snd_ymfpci_pcm_voice_alloc(ypcm, params_channels(hw_params))) < 0) return err; return 0; } static int snd_ymfpci_playback_hw_free(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; if (runtime->private_data == NULL) return 0; ypcm = runtime->private_data; /* wait, until the PCI operations are not finished */ snd_ymfpci_irq_wait(chip); snd_pcm_lib_free_pages(substream); if (ypcm->voices[1]) { snd_ymfpci_voice_free(chip, ypcm->voices[1]); ypcm->voices[1] = NULL; } if (ypcm->voices[0]) { snd_ymfpci_voice_free(chip, ypcm->voices[0]); ypcm->voices[0] = NULL; } return 0; } static int snd_ymfpci_playback_prepare(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_kcontrol *kctl; unsigned int nvoice; ypcm->period_size = runtime->period_size; ypcm->buffer_size = runtime->buffer_size; ypcm->period_pos = 0; ypcm->last_pos = 0; for (nvoice = 0; nvoice < runtime->channels; nvoice++) snd_ymfpci_pcm_init_voice(ypcm, nvoice, runtime, substream->pcm == chip->pcm); if (substream->pcm == chip->pcm && !ypcm->use_441_slot) { kctl = chip->pcm_mixer[substream->number].ctl; kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id); } return 0; } static int snd_ymfpci_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_ymfpci_capture_hw_free(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); /* wait, until the PCI operations are not finished */ snd_ymfpci_irq_wait(chip); return 
snd_pcm_lib_free_pages(substream); } static int snd_ymfpci_capture_prepare(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci_capture_bank * bank; int nbank; u32 rate, format; ypcm->period_size = runtime->period_size; ypcm->buffer_size = runtime->buffer_size; ypcm->period_pos = 0; ypcm->last_pos = 0; ypcm->shift = 0; rate = ((48000 * 4096) / runtime->rate) - 1; format = 0; if (runtime->channels == 2) { format |= 2; ypcm->shift++; } if (snd_pcm_format_width(runtime->format) == 8) format |= 1; else ypcm->shift++; switch (ypcm->capture_bank_number) { case 0: snd_ymfpci_writel(chip, YDSXGR_RECFORMAT, format); snd_ymfpci_writel(chip, YDSXGR_RECSLOTSR, rate); break; case 1: snd_ymfpci_writel(chip, YDSXGR_ADCFORMAT, format); snd_ymfpci_writel(chip, YDSXGR_ADCSLOTSR, rate); break; } for (nbank = 0; nbank < 2; nbank++) { bank = chip->bank_capture[ypcm->capture_bank_number][nbank]; bank->base = cpu_to_le32(runtime->dma_addr); bank->loop_end = cpu_to_le32(ypcm->buffer_size << ypcm->shift); bank->start = 0; bank->num_of_loops = 0; } return 0; } static snd_pcm_uframes_t snd_ymfpci_playback_pointer(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; struct snd_ymfpci_voice *voice = ypcm->voices[0]; if (!(ypcm->running && voice)) return 0; return le32_to_cpu(voice->bank[chip->active_bank].start); } static snd_pcm_uframes_t snd_ymfpci_capture_pointer(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; if (!ypcm->running) return 0; return le32_to_cpu(chip->bank_capture[ypcm->capture_bank_number][chip->active_bank]->start) >> ypcm->shift; } static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) { wait_queue_t wait; int loops = 4; while (loops-- > 0) { if ((snd_ymfpci_readl(chip, YDSXGR_MODE) & 3) == 0) continue; init_waitqueue_entry(&wait, current); add_wait_queue(&chip->interrupt_sleep, &wait); atomic_inc(&chip->interrupt_sleep_count); schedule_timeout_uninterruptible(msecs_to_jiffies(50)); remove_wait_queue(&chip->interrupt_sleep, &wait); } } static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id) { struct snd_ymfpci *chip = dev_id; u32 status, nvoice, mode; struct snd_ymfpci_voice *voice; status = snd_ymfpci_readl(chip, YDSXGR_STATUS); if (status & 0x80000000) { chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT) & 1; spin_lock(&chip->voice_lock); for (nvoice = 0; nvoice < YDSXG_PLAYBACK_VOICES; nvoice++) { voice = &chip->voices[nvoice]; if (voice->interrupt) voice->interrupt(chip, voice); } for (nvoice = 0; nvoice < YDSXG_CAPTURE_VOICES; nvoice++) { if (chip->capture_substream[nvoice]) snd_ymfpci_pcm_capture_interrupt(chip->capture_substream[nvoice]); } #if 0 for (nvoice = 0; nvoice < YDSXG_EFFECT_VOICES; nvoice++) { if (chip->effect_substream[nvoice]) snd_ymfpci_pcm_effect_interrupt(chip->effect_substream[nvoice]); } #endif spin_unlock(&chip->voice_lock); spin_lock(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_STATUS, 0x80000000); mode = snd_ymfpci_readl(chip, YDSXGR_MODE) | 2; snd_ymfpci_writel(chip, YDSXGR_MODE, mode); spin_unlock(&chip->reg_lock); if (atomic_read(&chip->interrupt_sleep_count)) { 
atomic_set(&chip->interrupt_sleep_count, 0); wake_up(&chip->interrupt_sleep); } } status = snd_ymfpci_readw(chip, YDSXGR_INTFLAG); if (status & 1) { if (chip->timer) snd_timer_interrupt(chip->timer, chip->timer_ticks); } snd_ymfpci_writew(chip, YDSXGR_INTFLAG, status); if (chip->rawmidi) snd_mpu401_uart_interrupt(irq, chip->rawmidi->private_data); return IRQ_HANDLED; } static struct snd_pcm_hardware snd_ymfpci_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 256 * 1024, /* FIXME: enough? */ .period_bytes_min = 64, .period_bytes_max = 256 * 1024, /* FIXME: enough? */ .periods_min = 3, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_ymfpci_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 256 * 1024, /* FIXME: enough? */ .period_bytes_min = 64, .period_bytes_max = 256 * 1024, /* FIXME: enough? */ .periods_min = 3, .periods_max = 1024, .fifo_size = 0, }; static void snd_ymfpci_pcm_free_substream(struct snd_pcm_runtime *runtime) { kfree(runtime->private_data); } static int snd_ymfpci_playback_open_1(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL); if (ypcm == NULL) return -ENOMEM; ypcm->chip = chip; ypcm->type = PLAYBACK_VOICE; ypcm->substream = substream; runtime->hw = snd_ymfpci_playback; runtime->private_data = ypcm; runtime->private_free = snd_ymfpci_pcm_free_substream; /* FIXME? True value is 256/48 = 5.33333 ms */ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 5333, UINT_MAX); return 0; } /* call with spinlock held */ static void ymfpci_open_extension(struct snd_ymfpci *chip) { if (! chip->rear_opened) { if (! chip->spdif_opened) /* set AC3 */ snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) | (1 << 30)); /* enable second codec (4CHEN) */ snd_ymfpci_writew(chip, YDSXGR_SECCONFIG, (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) | 0x0010); } } /* call with spinlock held */ static void ymfpci_close_extension(struct snd_ymfpci *chip) { if (! chip->rear_opened) { if (! chip->spdif_opened) snd_ymfpci_writel(chip, YDSXGR_MODE, snd_ymfpci_readl(chip, YDSXGR_MODE) & ~(1 << 30)); snd_ymfpci_writew(chip, YDSXGR_SECCONFIG, (snd_ymfpci_readw(chip, YDSXGR_SECCONFIG) & ~0x0330) & ~0x0010); } } static int snd_ymfpci_playback_open(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; int err; if ((err = snd_ymfpci_playback_open_1(substream)) < 0) return err; ypcm = runtime->private_data; ypcm->output_front = 1; ypcm->output_rear = chip->mode_dup4ch ? 
1 : 0; ypcm->swap_rear = 0; spin_lock_irq(&chip->reg_lock); if (ypcm->output_rear) { ymfpci_open_extension(chip); chip->rear_opened++; } spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_playback_spdif_open(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; int err; if ((err = snd_ymfpci_playback_open_1(substream)) < 0) return err; ypcm = runtime->private_data; ypcm->output_front = 0; ypcm->output_rear = 1; ypcm->swap_rear = 1; spin_lock_irq(&chip->reg_lock); snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) | 2); ymfpci_open_extension(chip); chip->spdif_pcm_bits = chip->spdif_bits; snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits); chip->spdif_opened++; spin_unlock_irq(&chip->reg_lock); chip->spdif_pcm_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id); return 0; } static int snd_ymfpci_playback_4ch_open(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; int err; if ((err = snd_ymfpci_playback_open_1(substream)) < 0) return err; ypcm = runtime->private_data; ypcm->output_front = 0; ypcm->output_rear = 1; ypcm->swap_rear = 0; spin_lock_irq(&chip->reg_lock); ymfpci_open_extension(chip); chip->rear_opened++; spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_capture_open(struct snd_pcm_substream *substream, u32 capture_bank_number) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm; ypcm = kzalloc(sizeof(*ypcm), GFP_KERNEL); if (ypcm == NULL) return -ENOMEM; ypcm->chip = chip; ypcm->type = capture_bank_number + CAPTURE_REC; ypcm->substream = substream; ypcm->capture_bank_number = capture_bank_number; chip->capture_substream[capture_bank_number] = substream; runtime->hw = snd_ymfpci_capture; /* FIXME? 
True value is 256/48 = 5.33333 ms */ snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_TIME, 5333, UINT_MAX); runtime->private_data = ypcm; runtime->private_free = snd_ymfpci_pcm_free_substream; snd_ymfpci_hw_start(chip); return 0; } static int snd_ymfpci_capture_rec_open(struct snd_pcm_substream *substream) { return snd_ymfpci_capture_open(substream, 0); } static int snd_ymfpci_capture_ac97_open(struct snd_pcm_substream *substream) { return snd_ymfpci_capture_open(substream, 1); } static int snd_ymfpci_playback_close_1(struct snd_pcm_substream *substream) { return 0; } static int snd_ymfpci_playback_close(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data; spin_lock_irq(&chip->reg_lock); if (ypcm->output_rear && chip->rear_opened > 0) { chip->rear_opened--; ymfpci_close_extension(chip); } spin_unlock_irq(&chip->reg_lock); return snd_ymfpci_playback_close_1(substream); } static int snd_ymfpci_playback_spdif_close(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); spin_lock_irq(&chip->reg_lock); chip->spdif_opened = 0; ymfpci_close_extension(chip); snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & ~2); snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits); spin_unlock_irq(&chip->reg_lock); chip->spdif_pcm_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO, &chip->spdif_pcm_ctl->id); return snd_ymfpci_playback_close_1(substream); } static int snd_ymfpci_playback_4ch_close(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); spin_lock_irq(&chip->reg_lock); if (chip->rear_opened > 0) { chip->rear_opened--; ymfpci_close_extension(chip); } spin_unlock_irq(&chip->reg_lock); return snd_ymfpci_playback_close_1(substream); } static int snd_ymfpci_capture_close(struct snd_pcm_substream *substream) { struct snd_ymfpci *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_ymfpci_pcm *ypcm = runtime->private_data; if (ypcm != NULL) { chip->capture_substream[ypcm->capture_bank_number] = NULL; snd_ymfpci_hw_stop(chip); } return 0; } static struct snd_pcm_ops snd_ymfpci_playback_ops = { .open = snd_ymfpci_playback_open, .close = snd_ymfpci_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ymfpci_playback_hw_params, .hw_free = snd_ymfpci_playback_hw_free, .prepare = snd_ymfpci_playback_prepare, .trigger = snd_ymfpci_playback_trigger, .pointer = snd_ymfpci_playback_pointer, }; static struct snd_pcm_ops snd_ymfpci_capture_rec_ops = { .open = snd_ymfpci_capture_rec_open, .close = snd_ymfpci_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ymfpci_capture_hw_params, .hw_free = snd_ymfpci_capture_hw_free, .prepare = snd_ymfpci_capture_prepare, .trigger = snd_ymfpci_capture_trigger, .pointer = snd_ymfpci_capture_pointer, }; int __devinit snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "YMFPCI", device, 32, 1, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_rec_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, 
"YMFPCI"); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } static struct snd_pcm_ops snd_ymfpci_capture_ac97_ops = { .open = snd_ymfpci_capture_ac97_open, .close = snd_ymfpci_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ymfpci_capture_hw_params, .hw_free = snd_ymfpci_capture_hw_free, .prepare = snd_ymfpci_capture_prepare, .trigger = snd_ymfpci_capture_trigger, .pointer = snd_ymfpci_capture_pointer, }; int __devinit snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "YMFPCI - PCM2", device, 0, 1, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_ymfpci_capture_ac97_ops); /* global setup */ pcm->info_flags = 0; sprintf(pcm->name, "YMFPCI - %s", chip->device_id == PCI_DEVICE_ID_YAMAHA_754 ? "Direct Recording" : "AC'97"); chip->pcm2 = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } static struct snd_pcm_ops snd_ymfpci_playback_spdif_ops = { .open = snd_ymfpci_playback_spdif_open, .close = snd_ymfpci_playback_spdif_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ymfpci_playback_hw_params, .hw_free = snd_ymfpci_playback_hw_free, .prepare = snd_ymfpci_playback_prepare, .trigger = snd_ymfpci_playback_trigger, .pointer = snd_ymfpci_playback_pointer, }; int __devinit snd_ymfpci_pcm_spdif(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "YMFPCI - IEC958", device, 1, 0, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_spdif_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "YMFPCI - IEC958"); chip->pcm_spdif = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } static struct snd_pcm_ops snd_ymfpci_playback_4ch_ops = { .open = snd_ymfpci_playback_4ch_open, .close = snd_ymfpci_playback_4ch_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_ymfpci_playback_hw_params, .hw_free = snd_ymfpci_playback_hw_free, .prepare = snd_ymfpci_playback_prepare, .trigger = snd_ymfpci_playback_trigger, .pointer = snd_ymfpci_playback_pointer, }; int __devinit snd_ymfpci_pcm_4ch(struct snd_ymfpci *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "YMFPCI - Rear", device, 1, 0, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_ymfpci_playback_4ch_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "YMFPCI - Rear PCM"); chip->pcm_4ch = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } static int snd_ymfpci_spdif_default_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_ymfpci_spdif_default_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); spin_lock_irq(&chip->reg_lock); ucontrol->value.iec958.status[0] = 
(chip->spdif_bits >> 0) & 0xff; ucontrol->value.iec958.status[1] = (chip->spdif_bits >> 8) & 0xff; ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000; spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_spdif_default_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int val; int change; val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) | (ucontrol->value.iec958.status[1] << 8); spin_lock_irq(&chip->reg_lock); change = chip->spdif_bits != val; chip->spdif_bits = val; if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 1) && chip->pcm_spdif == NULL) snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits); spin_unlock_irq(&chip->reg_lock); return change; } static struct snd_kcontrol_new snd_ymfpci_spdif_default __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), .info = snd_ymfpci_spdif_default_info, .get = snd_ymfpci_spdif_default_get, .put = snd_ymfpci_spdif_default_put }; static int snd_ymfpci_spdif_mask_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_ymfpci_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); spin_lock_irq(&chip->reg_lock); ucontrol->value.iec958.status[0] = 0x3e; ucontrol->value.iec958.status[1] = 0xff; spin_unlock_irq(&chip->reg_lock); return 0; } static struct snd_kcontrol_new snd_ymfpci_spdif_mask __devinitdata = { .access = SNDRV_CTL_ELEM_ACCESS_READ, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK), .info = snd_ymfpci_spdif_mask_info, .get = snd_ymfpci_spdif_mask_get, }; static int snd_ymfpci_spdif_stream_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_ymfpci_spdif_stream_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); spin_lock_irq(&chip->reg_lock); ucontrol->value.iec958.status[0] = (chip->spdif_pcm_bits >> 0) & 0xff; ucontrol->value.iec958.status[1] = (chip->spdif_pcm_bits >> 8) & 0xff; ucontrol->value.iec958.status[3] = IEC958_AES3_CON_FS_48000; spin_unlock_irq(&chip->reg_lock); return 0; } static int snd_ymfpci_spdif_stream_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int val; int change; val = ((ucontrol->value.iec958.status[0] & 0x3e) << 0) | (ucontrol->value.iec958.status[1] << 8); spin_lock_irq(&chip->reg_lock); change = chip->spdif_pcm_bits != val; chip->spdif_pcm_bits = val; if ((snd_ymfpci_readw(chip, YDSXGR_SPDIFOUTCTRL) & 2)) snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_pcm_bits); spin_unlock_irq(&chip->reg_lock); return change; } static struct snd_kcontrol_new snd_ymfpci_spdif_stream __devinitdata = { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM), .info = snd_ymfpci_spdif_stream_info, .get = snd_ymfpci_spdif_stream_get, .put = snd_ymfpci_spdif_stream_put }; static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info) { static char *texts[3] = {"AC'97", "IEC958", "ZV Port"}; info->type = 
SNDRV_CTL_ELEM_TYPE_ENUMERATED; info->count = 1; info->value.enumerated.items = 3; if (info->value.enumerated.item > 2) info->value.enumerated.item = 2; strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]); return 0; } static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); u16 reg; spin_lock_irq(&chip->reg_lock); reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); spin_unlock_irq(&chip->reg_lock); if (!(reg & 0x100)) value->value.enumerated.item[0] = 0; else value->value.enumerated.item[0] = 1 + ((reg & 0x200) != 0); return 0; } static int snd_ymfpci_drec_source_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); u16 reg, old_reg; spin_lock_irq(&chip->reg_lock); old_reg = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); if (value->value.enumerated.item[0] == 0) reg = old_reg & ~0x100; else reg = (old_reg & ~0x300) | 0x100 | ((value->value.enumerated.item[0] == 2) << 9); snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, reg); spin_unlock_irq(&chip->reg_lock); return reg != old_reg; } static struct snd_kcontrol_new snd_ymfpci_drec_source __devinitdata = { .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Direct Recording Source", .info = snd_ymfpci_drec_source_info, .get = snd_ymfpci_drec_source_get, .put = snd_ymfpci_drec_source_put }; /* * Mixer controls */ #define YMFPCI_SINGLE(xname, xindex, reg, shift) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_ymfpci_info_single, \ .get = snd_ymfpci_get_single, .put = snd_ymfpci_put_single, \ .private_value = ((reg) | ((shift) << 16)) } #define snd_ymfpci_info_single snd_ctl_boolean_mono_info static int snd_ymfpci_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xffff; unsigned int shift = (kcontrol->private_value >> 16) & 0xff; unsigned int mask = 1; switch (reg) { case YDSXGR_SPDIFOUTCTRL: break; case YDSXGR_SPDIFINCTRL: break; default: return -EINVAL; } ucontrol->value.integer.value[0] = (snd_ymfpci_readl(chip, reg) >> shift) & mask; return 0; } static int snd_ymfpci_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xffff; unsigned int shift = (kcontrol->private_value >> 16) & 0xff; unsigned int mask = 1; int change; unsigned int val, oval; switch (reg) { case YDSXGR_SPDIFOUTCTRL: break; case YDSXGR_SPDIFINCTRL: break; default: return -EINVAL; } val = (ucontrol->value.integer.value[0] & mask); val <<= shift; spin_lock_irq(&chip->reg_lock); oval = snd_ymfpci_readl(chip, reg); val = (oval & ~(mask << shift)) | val; change = val != oval; snd_ymfpci_writel(chip, reg, val); spin_unlock_irq(&chip->reg_lock); return change; } static const DECLARE_TLV_DB_LINEAR(db_scale_native, TLV_DB_GAIN_MUTE, 0); #define YMFPCI_DOUBLE(xname, xindex, reg) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .info = snd_ymfpci_info_double, \ .get = snd_ymfpci_get_double, .put = snd_ymfpci_put_double, \ .private_value = reg, \ .tlv = { .p = db_scale_native } } static int snd_ymfpci_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { unsigned int 
reg = kcontrol->private_value; if (reg < 0x80 || reg >= 0xc0) return -EINVAL; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 16383; return 0; } static int snd_ymfpci_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int reg = kcontrol->private_value; unsigned int shift_left = 0, shift_right = 16, mask = 16383; unsigned int val; if (reg < 0x80 || reg >= 0xc0) return -EINVAL; spin_lock_irq(&chip->reg_lock); val = snd_ymfpci_readl(chip, reg); spin_unlock_irq(&chip->reg_lock); ucontrol->value.integer.value[0] = (val >> shift_left) & mask; ucontrol->value.integer.value[1] = (val >> shift_right) & mask; return 0; } static int snd_ymfpci_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int reg = kcontrol->private_value; unsigned int shift_left = 0, shift_right = 16, mask = 16383; int change; unsigned int val1, val2, oval; if (reg < 0x80 || reg >= 0xc0) return -EINVAL; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; val1 <<= shift_left; val2 <<= shift_right; spin_lock_irq(&chip->reg_lock); oval = snd_ymfpci_readl(chip, reg); val1 = (oval & ~((mask << shift_left) | (mask << shift_right))) | val1 | val2; change = val1 != oval; snd_ymfpci_writel(chip, reg, val1); spin_unlock_irq(&chip->reg_lock); return change; } static int snd_ymfpci_put_nativedacvol(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int reg = YDSXGR_NATIVEDACOUTVOL; unsigned int reg2 = YDSXGR_BUF441OUTVOL; int change; unsigned int value, oval; value = ucontrol->value.integer.value[0] & 0x3fff; value |= (ucontrol->value.integer.value[1] & 0x3fff) << 16; spin_lock_irq(&chip->reg_lock); oval = snd_ymfpci_readl(chip, reg); change = value != oval; snd_ymfpci_writel(chip, reg, value); snd_ymfpci_writel(chip, reg2, value); spin_unlock_irq(&chip->reg_lock); return change; } /* * 4ch duplication */ #define snd_ymfpci_info_dup4ch snd_ctl_boolean_mono_info static int snd_ymfpci_get_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->mode_dup4ch; return 0; } static int snd_ymfpci_put_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int change; change = (ucontrol->value.integer.value[0] != chip->mode_dup4ch); if (change) chip->mode_dup4ch = !!ucontrol->value.integer.value[0]; return change; } static struct snd_kcontrol_new snd_ymfpci_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Wave Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = snd_ymfpci_info_double, .get = snd_ymfpci_get_double, .put = snd_ymfpci_put_nativedacvol, .private_value = YDSXGR_NATIVEDACOUTVOL, .tlv = { .p = db_scale_native }, }, YMFPCI_DOUBLE("Wave Capture Volume", 0, YDSXGR_NATIVEDACLOOPVOL), YMFPCI_DOUBLE("Digital Capture Volume", 0, YDSXGR_NATIVEDACINVOL), YMFPCI_DOUBLE("Digital Capture Volume", 1, YDSXGR_NATIVEADCINVOL), YMFPCI_DOUBLE("ADC Playback Volume", 0, YDSXGR_PRIADCOUTVOL), YMFPCI_DOUBLE("ADC Capture Volume", 0, YDSXGR_PRIADCLOOPVOL), YMFPCI_DOUBLE("ADC Playback Volume", 1, YDSXGR_SECADCOUTVOL), 
YMFPCI_DOUBLE("ADC Capture Volume", 1, YDSXGR_SECADCLOOPVOL), YMFPCI_DOUBLE("FM Legacy Volume", 0, YDSXGR_LEGACYOUTVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ", PLAYBACK,VOLUME), 0, YDSXGR_ZVOUTVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("", CAPTURE,VOLUME), 0, YDSXGR_ZVLOOPVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("AC97 ",PLAYBACK,VOLUME), 1, YDSXGR_SPDIFOUTVOL), YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,VOLUME), 1, YDSXGR_SPDIFLOOPVOL), YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), 0, YDSXGR_SPDIFOUTCTRL, 0), YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, YDSXGR_SPDIFINCTRL, 0), YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("Loop",NONE,NONE), 0, YDSXGR_SPDIFINCTRL, 4), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "4ch Duplication", .info = snd_ymfpci_info_dup4ch, .get = snd_ymfpci_get_dup4ch, .put = snd_ymfpci_put_dup4ch, }, }; /* * GPIO */ static int snd_ymfpci_get_gpio_out(struct snd_ymfpci *chip, int pin) { u16 reg, mode; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE); reg &= ~(1 << (pin + 8)); reg |= (1 << pin); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg); /* set the level mode for input line */ mode = snd_ymfpci_readw(chip, YDSXGR_GPIOTYPECONFIG); mode &= ~(3 << (pin * 2)); snd_ymfpci_writew(chip, YDSXGR_GPIOTYPECONFIG, mode); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8))); mode = snd_ymfpci_readw(chip, YDSXGR_GPIOINSTATUS); spin_unlock_irqrestore(&chip->reg_lock, flags); return (mode >> pin) & 1; } static int snd_ymfpci_set_gpio_out(struct snd_ymfpci *chip, int pin, int enable) { u16 reg; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); reg = snd_ymfpci_readw(chip, YDSXGR_GPIOFUNCENABLE); reg &= ~(1 << pin); reg &= ~(1 << (pin + 8)); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg); snd_ymfpci_writew(chip, YDSXGR_GPIOOUTCTRL, enable << pin); snd_ymfpci_writew(chip, YDSXGR_GPIOFUNCENABLE, reg | (1 << (pin + 8))); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } #define snd_ymfpci_gpio_sw_info snd_ctl_boolean_mono_info static int snd_ymfpci_gpio_sw_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int pin = (int)kcontrol->private_value; ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin); return 0; } static int snd_ymfpci_gpio_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); int pin = (int)kcontrol->private_value; if (snd_ymfpci_get_gpio_out(chip, pin) != ucontrol->value.integer.value[0]) { snd_ymfpci_set_gpio_out(chip, pin, !!ucontrol->value.integer.value[0]); ucontrol->value.integer.value[0] = snd_ymfpci_get_gpio_out(chip, pin); return 1; } return 0; } static struct snd_kcontrol_new snd_ymfpci_rear_shared __devinitdata = { .name = "Shared Rear/Line-In Switch", .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .info = snd_ymfpci_gpio_sw_info, .get = snd_ymfpci_gpio_sw_get, .put = snd_ymfpci_gpio_sw_put, .private_value = 2, }; /* * PCM voice volume */ static int snd_ymfpci_pcm_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 0x8000; return 0; } static int snd_ymfpci_pcm_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int subs = 
kcontrol->id.subdevice; ucontrol->value.integer.value[0] = chip->pcm_mixer[subs].left; ucontrol->value.integer.value[1] = chip->pcm_mixer[subs].right; return 0; } static int snd_ymfpci_pcm_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_ymfpci *chip = snd_kcontrol_chip(kcontrol); unsigned int subs = kcontrol->id.subdevice; struct snd_pcm_substream *substream; unsigned long flags; if (ucontrol->value.integer.value[0] != chip->pcm_mixer[subs].left || ucontrol->value.integer.value[1] != chip->pcm_mixer[subs].right) { chip->pcm_mixer[subs].left = ucontrol->value.integer.value[0]; chip->pcm_mixer[subs].right = ucontrol->value.integer.value[1]; if (chip->pcm_mixer[subs].left > 0x8000) chip->pcm_mixer[subs].left = 0x8000; if (chip->pcm_mixer[subs].right > 0x8000) chip->pcm_mixer[subs].right = 0x8000; substream = (struct snd_pcm_substream *)kcontrol->private_value; spin_lock_irqsave(&chip->voice_lock, flags); if (substream->runtime && substream->runtime->private_data) { struct snd_ymfpci_pcm *ypcm = substream->runtime->private_data; if (!ypcm->use_441_slot) ypcm->update_pcm_vol = 2; } spin_unlock_irqrestore(&chip->voice_lock, flags); return 1; } return 0; } static struct snd_kcontrol_new snd_ymfpci_pcm_volume __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = "PCM Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE, .info = snd_ymfpci_pcm_vol_info, .get = snd_ymfpci_pcm_vol_get, .put = snd_ymfpci_pcm_vol_put, }; /* * Mixer routines */ static void snd_ymfpci_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct snd_ymfpci *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_ymfpci_mixer_free_ac97(struct snd_ac97 *ac97) { struct snd_ymfpci *chip = ac97->private_data; chip->ac97 = NULL; } int __devinit snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch) { struct snd_ac97_template ac97; struct snd_kcontrol *kctl; struct snd_pcm_substream *substream; unsigned int idx; int err; static struct snd_ac97_bus_ops ops = { .write = snd_ymfpci_codec_write, .read = snd_ymfpci_codec_read, }; if ((err = snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0) return err; chip->ac97_bus->private_free = snd_ymfpci_mixer_free_ac97_bus; chip->ac97_bus->no_vra = 1; /* YMFPCI doesn't need VRA */ memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_ymfpci_mixer_free_ac97; if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0) return err; /* to be sure */ snd_ac97_update_bits(chip->ac97, AC97_EXTENDED_STATUS, AC97_EA_VRA|AC97_EA_VRM, 0); for (idx = 0; idx < ARRAY_SIZE(snd_ymfpci_controls); idx++) { if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_controls[idx], chip))) < 0) return err; } /* add S/PDIF control */ if (snd_BUG_ON(!chip->pcm_spdif)) return -ENXIO; if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip))) < 0) return err; kctl->id.device = chip->pcm_spdif->device; if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip))) < 0) return err; kctl->id.device = chip->pcm_spdif->device; if ((err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip))) < 0) return err; kctl->id.device = chip->pcm_spdif->device; chip->spdif_pcm_ctl = kctl; /* direct recording source */ if (chip->device_id == PCI_DEVICE_ID_YAMAHA_754 && (err = snd_ctl_add(chip->card, kctl = snd_ctl_new1(&snd_ymfpci_drec_source, chip))) < 0) return err; /* * shared rear/line-in */ if (rear_switch) { 
if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_rear_shared, chip))) < 0) return err; } /* per-voice volume */ substream = chip->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; for (idx = 0; idx < 32; ++idx) { kctl = snd_ctl_new1(&snd_ymfpci_pcm_volume, chip); if (!kctl) return -ENOMEM; kctl->id.device = chip->pcm->device; kctl->id.subdevice = idx; kctl->private_value = (unsigned long)substream; if ((err = snd_ctl_add(chip->card, kctl)) < 0) return err; chip->pcm_mixer[idx].left = 0x8000; chip->pcm_mixer[idx].right = 0x8000; chip->pcm_mixer[idx].ctl = kctl; substream = substream->next; } return 0; } /* * timer */ static int snd_ymfpci_timer_start(struct snd_timer *timer) { struct snd_ymfpci *chip; unsigned long flags; unsigned int count; chip = snd_timer_chip(timer); spin_lock_irqsave(&chip->reg_lock, flags); if (timer->sticks > 1) { chip->timer_ticks = timer->sticks; count = timer->sticks - 1; } else { /* * Divisor 1 is not allowed; fake it by using divisor 2 and * counting two ticks for each interrupt. */ chip->timer_ticks = 2; count = 2 - 1; } snd_ymfpci_writew(chip, YDSXGR_TIMERCOUNT, count); snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x03); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_ymfpci_timer_stop(struct snd_timer *timer) { struct snd_ymfpci *chip; unsigned long flags; chip = snd_timer_chip(timer); spin_lock_irqsave(&chip->reg_lock, flags); snd_ymfpci_writeb(chip, YDSXGR_TIMERCTRL, 0x00); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_ymfpci_timer_precise_resolution(struct snd_timer *timer, unsigned long *num, unsigned long *den) { *num = 1; *den = 96000; return 0; } static struct snd_timer_hardware snd_ymfpci_timer_hw = { .flags = SNDRV_TIMER_HW_AUTO, .resolution = 10417, /* 1 / 96 kHz = 10.41666...us */ .ticks = 0x10000, .start = snd_ymfpci_timer_start, .stop = snd_ymfpci_timer_stop, .precise_resolution = snd_ymfpci_timer_precise_resolution, }; int __devinit snd_ymfpci_timer(struct snd_ymfpci *chip, int device) { struct snd_timer *timer = NULL; struct snd_timer_id tid; int err; tid.dev_class = SNDRV_TIMER_CLASS_CARD; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = chip->card->number; tid.device = device; tid.subdevice = 0; if ((err = snd_timer_new(chip->card, "YMFPCI", &tid, &timer)) >= 0) { strcpy(timer->name, "YMFPCI timer"); timer->private_data = chip; timer->hw = snd_ymfpci_timer_hw; } chip->timer = timer; return err; } /* * proc interface */ static void snd_ymfpci_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ymfpci *chip = entry->private_data; int i; snd_iprintf(buffer, "YMFPCI\n\n"); for (i = 0; i <= YDSXGR_WORKBASE; i += 4) snd_iprintf(buffer, "%04x: %04x\n", i, snd_ymfpci_readl(chip, i)); } static int __devinit snd_ymfpci_proc_init(struct snd_card *card, struct snd_ymfpci *chip) { struct snd_info_entry *entry; if (! 
snd_card_proc_new(card, "ymfpci", &entry)) snd_info_set_text_ops(entry, chip, snd_ymfpci_proc_read); return 0; } /* * initialization routines */ static void snd_ymfpci_aclink_reset(struct pci_dev * pci) { u8 cmd; pci_read_config_byte(pci, PCIR_DSXG_CTRL, &cmd); #if 0 // force to reset if (cmd & 0x03) { #endif pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc); pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd | 0x03); pci_write_config_byte(pci, PCIR_DSXG_CTRL, cmd & 0xfc); pci_write_config_word(pci, PCIR_DSXG_PWRCTRL1, 0); pci_write_config_word(pci, PCIR_DSXG_PWRCTRL2, 0); #if 0 } #endif } static void snd_ymfpci_enable_dsp(struct snd_ymfpci *chip) { snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000001); } static void snd_ymfpci_disable_dsp(struct snd_ymfpci *chip) { u32 val; int timeout = 1000; val = snd_ymfpci_readl(chip, YDSXGR_CONFIG); if (val) snd_ymfpci_writel(chip, YDSXGR_CONFIG, 0x00000000); while (timeout-- > 0) { val = snd_ymfpci_readl(chip, YDSXGR_STATUS); if ((val & 0x00000002) == 0) break; } } static int snd_ymfpci_request_firmware(struct snd_ymfpci *chip) { int err, is_1e; const char *name; err = request_firmware(&chip->dsp_microcode, "yamaha/ds1_dsp.fw", &chip->pci->dev); if (err >= 0) { if (chip->dsp_microcode->size != YDSXG_DSPLENGTH) { snd_printk(KERN_ERR "DSP microcode has wrong size\n"); err = -EINVAL; } } if (err < 0) return err; is_1e = chip->device_id == PCI_DEVICE_ID_YAMAHA_724F || chip->device_id == PCI_DEVICE_ID_YAMAHA_740C || chip->device_id == PCI_DEVICE_ID_YAMAHA_744 || chip->device_id == PCI_DEVICE_ID_YAMAHA_754; name = is_1e ? "yamaha/ds1e_ctrl.fw" : "yamaha/ds1_ctrl.fw"; err = request_firmware(&chip->controller_microcode, name, &chip->pci->dev); if (err >= 0) { if (chip->controller_microcode->size != YDSXG_CTRLLENGTH) { snd_printk(KERN_ERR "controller microcode" " has wrong size\n"); err = -EINVAL; } } if (err < 0) return err; return 0; } MODULE_FIRMWARE("yamaha/ds1_dsp.fw"); MODULE_FIRMWARE("yamaha/ds1_ctrl.fw"); MODULE_FIRMWARE("yamaha/ds1e_ctrl.fw"); static void snd_ymfpci_download_image(struct snd_ymfpci *chip) { int i; u16 ctrl; const __le32 *inst; snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x00000000); snd_ymfpci_disable_dsp(chip); snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00010000); snd_ymfpci_writel(chip, YDSXGR_MODE, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_MAPOFREC, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_MAPOFEFFECT, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0x00000000); snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0x00000000); ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007); /* setup DSP instruction code */ inst = (const __le32 *)chip->dsp_microcode->data; for (i = 0; i < YDSXG_DSPLENGTH / 4; i++) snd_ymfpci_writel(chip, YDSXGR_DSPINSTRAM + (i << 2), le32_to_cpu(inst[i])); /* setup control instruction code */ inst = (const __le32 *)chip->controller_microcode->data; for (i = 0; i < YDSXG_CTRLLENGTH / 4; i++) snd_ymfpci_writel(chip, YDSXGR_CTRLINSTRAM + (i << 2), le32_to_cpu(inst[i])); snd_ymfpci_enable_dsp(chip); } static int __devinit snd_ymfpci_memalloc(struct snd_ymfpci *chip) { long size, playback_ctrl_size; int voice, bank, reg; u8 *ptr; dma_addr_t ptr_addr; playback_ctrl_size = 4 + 4 * YDSXG_PLAYBACK_VOICES; chip->bank_size_playback = snd_ymfpci_readl(chip, YDSXGR_PLAYCTRLSIZE) << 2; chip->bank_size_capture = snd_ymfpci_readl(chip, YDSXGR_RECCTRLSIZE) << 2; chip->bank_size_effect = snd_ymfpci_readl(chip, 
YDSXGR_EFFCTRLSIZE) << 2; chip->work_size = YDSXG_DEFAULT_WORK_SIZE; size = ALIGN(playback_ctrl_size, 0x100) + ALIGN(chip->bank_size_playback * 2 * YDSXG_PLAYBACK_VOICES, 0x100) + ALIGN(chip->bank_size_capture * 2 * YDSXG_CAPTURE_VOICES, 0x100) + ALIGN(chip->bank_size_effect * 2 * YDSXG_EFFECT_VOICES, 0x100) + chip->work_size; /* work_ptr must be aligned to 256 bytes, but it's already covered with the kernel page allocation mechanism */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), size, &chip->work_ptr) < 0) return -ENOMEM; ptr = chip->work_ptr.area; ptr_addr = chip->work_ptr.addr; memset(ptr, 0, size); /* for sure */ chip->bank_base_playback = ptr; chip->bank_base_playback_addr = ptr_addr; chip->ctrl_playback = (u32 *)ptr; chip->ctrl_playback[0] = cpu_to_le32(YDSXG_PLAYBACK_VOICES); ptr += ALIGN(playback_ctrl_size, 0x100); ptr_addr += ALIGN(playback_ctrl_size, 0x100); for (voice = 0; voice < YDSXG_PLAYBACK_VOICES; voice++) { chip->voices[voice].number = voice; chip->voices[voice].bank = (struct snd_ymfpci_playback_bank *)ptr; chip->voices[voice].bank_addr = ptr_addr; for (bank = 0; bank < 2; bank++) { chip->bank_playback[voice][bank] = (struct snd_ymfpci_playback_bank *)ptr; ptr += chip->bank_size_playback; ptr_addr += chip->bank_size_playback; } } ptr = (char *)ALIGN((unsigned long)ptr, 0x100); ptr_addr = ALIGN(ptr_addr, 0x100); chip->bank_base_capture = ptr; chip->bank_base_capture_addr = ptr_addr; for (voice = 0; voice < YDSXG_CAPTURE_VOICES; voice++) for (bank = 0; bank < 2; bank++) { chip->bank_capture[voice][bank] = (struct snd_ymfpci_capture_bank *)ptr; ptr += chip->bank_size_capture; ptr_addr += chip->bank_size_capture; } ptr = (char *)ALIGN((unsigned long)ptr, 0x100); ptr_addr = ALIGN(ptr_addr, 0x100); chip->bank_base_effect = ptr; chip->bank_base_effect_addr = ptr_addr; for (voice = 0; voice < YDSXG_EFFECT_VOICES; voice++) for (bank = 0; bank < 2; bank++) { chip->bank_effect[voice][bank] = (struct snd_ymfpci_effect_bank *)ptr; ptr += chip->bank_size_effect; ptr_addr += chip->bank_size_effect; } ptr = (char *)ALIGN((unsigned long)ptr, 0x100); ptr_addr = ALIGN(ptr_addr, 0x100); chip->work_base = ptr; chip->work_base_addr = ptr_addr; snd_BUG_ON(ptr + chip->work_size != chip->work_ptr.area + chip->work_ptr.bytes); snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, chip->bank_base_playback_addr); snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, chip->bank_base_capture_addr); snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, chip->bank_base_effect_addr); snd_ymfpci_writel(chip, YDSXGR_WORKBASE, chip->work_base_addr); snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, chip->work_size >> 2); /* S/PDIF output initialization */ chip->spdif_bits = chip->spdif_pcm_bits = SNDRV_PCM_DEFAULT_CON_SPDIF & 0xffff; snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTCTRL, 0); snd_ymfpci_writew(chip, YDSXGR_SPDIFOUTSTATUS, chip->spdif_bits); /* S/PDIF input initialization */ snd_ymfpci_writew(chip, YDSXGR_SPDIFINCTRL, 0); /* digital mixer setup */ for (reg = 0x80; reg < 0xc0; reg += 4) snd_ymfpci_writel(chip, reg, 0); snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_ZVOUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_SPDIFOUTVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_NATIVEADCINVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_NATIVEDACINVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_PRIADCLOOPVOL, 0x3fff3fff); snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0x3fff3fff); return 0; } static int 
snd_ymfpci_free(struct snd_ymfpci *chip) { u16 ctrl; if (snd_BUG_ON(!chip)) return -EINVAL; if (chip->res_reg_area) { /* don't touch busy hardware */ snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_LEGACYOUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_STATUS, ~0); snd_ymfpci_disable_dsp(chip); snd_ymfpci_writel(chip, YDSXGR_PLAYCTRLBASE, 0); snd_ymfpci_writel(chip, YDSXGR_RECCTRLBASE, 0); snd_ymfpci_writel(chip, YDSXGR_EFFCTRLBASE, 0); snd_ymfpci_writel(chip, YDSXGR_WORKBASE, 0); snd_ymfpci_writel(chip, YDSXGR_WORKSIZE, 0); ctrl = snd_ymfpci_readw(chip, YDSXGR_GLOBALCTRL); snd_ymfpci_writew(chip, YDSXGR_GLOBALCTRL, ctrl & ~0x0007); } snd_ymfpci_ac3_done(chip); /* Set PCI device to D3 state */ #if 0 /* FIXME: temporarily disabled, otherwise we cannot fire up * the chip again unless reboot. ACPI bug? */ pci_set_power_state(chip->pci, 3); #endif #ifdef CONFIG_PM vfree(chip->saved_regs); #endif if (chip->irq >= 0) free_irq(chip->irq, chip); release_and_free_resource(chip->mpu_res); release_and_free_resource(chip->fm_res); snd_ymfpci_free_gameport(chip); if (chip->reg_area_virt) iounmap(chip->reg_area_virt); if (chip->work_ptr.area) snd_dma_free_pages(&chip->work_ptr); release_and_free_resource(chip->res_reg_area); pci_write_config_word(chip->pci, 0x40, chip->old_legacy_ctrl); pci_disable_device(chip->pci); release_firmware(chip->dsp_microcode); release_firmware(chip->controller_microcode); kfree(chip); return 0; } static int snd_ymfpci_dev_free(struct snd_device *device) { struct snd_ymfpci *chip = device->device_data; return snd_ymfpci_free(chip); } #ifdef CONFIG_PM static int saved_regs_index[] = { /* spdif */ YDSXGR_SPDIFOUTCTRL, YDSXGR_SPDIFOUTSTATUS, YDSXGR_SPDIFINCTRL, /* volumes */ YDSXGR_PRIADCLOOPVOL, YDSXGR_NATIVEDACINVOL, YDSXGR_NATIVEDACOUTVOL, YDSXGR_BUF441OUTVOL, YDSXGR_NATIVEADCINVOL, YDSXGR_SPDIFLOOPVOL, YDSXGR_SPDIFOUTVOL, YDSXGR_ZVOUTVOL, YDSXGR_LEGACYOUTVOL, /* address bases */ YDSXGR_PLAYCTRLBASE, YDSXGR_RECCTRLBASE, YDSXGR_EFFCTRLBASE, YDSXGR_WORKBASE, /* capture set up */ YDSXGR_MAPOFREC, YDSXGR_RECFORMAT, YDSXGR_RECSLOTSR, YDSXGR_ADCFORMAT, YDSXGR_ADCSLOTSR, }; #define YDSXGR_NUM_SAVED_REGS ARRAY_SIZE(saved_regs_index) int snd_ymfpci_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_ymfpci *chip = card->private_data; unsigned int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_pcm_suspend_all(chip->pcm2); snd_pcm_suspend_all(chip->pcm_spdif); snd_pcm_suspend_all(chip->pcm_4ch); snd_ac97_suspend(chip->ac97); for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++) chip->saved_regs[i] = snd_ymfpci_readl(chip, saved_regs_index[i]); chip->saved_ydsxgr_mode = snd_ymfpci_readl(chip, YDSXGR_MODE); snd_ymfpci_writel(chip, YDSXGR_NATIVEDACOUTVOL, 0); snd_ymfpci_writel(chip, YDSXGR_BUF441OUTVOL, 0); snd_ymfpci_disable_dsp(chip); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } int snd_ymfpci_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_ymfpci *chip = card->private_data; unsigned int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "ymfpci: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_ymfpci_aclink_reset(pci); snd_ymfpci_codec_ready(chip, 0); snd_ymfpci_download_image(chip); 
udelay(100); for (i = 0; i < YDSXGR_NUM_SAVED_REGS; i++) snd_ymfpci_writel(chip, saved_regs_index[i], chip->saved_regs[i]); snd_ac97_resume(chip->ac97); /* start hw again */ if (chip->start_count > 0) { spin_lock_irq(&chip->reg_lock); snd_ymfpci_writel(chip, YDSXGR_MODE, chip->saved_ydsxgr_mode); chip->active_bank = snd_ymfpci_readl(chip, YDSXGR_CTRLSELECT); spin_unlock_irq(&chip->reg_lock); } snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ int __devinit snd_ymfpci_create(struct snd_card *card, struct pci_dev * pci, unsigned short old_legacy_ctrl, struct snd_ymfpci ** rchip) { struct snd_ymfpci *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_ymfpci_dev_free, }; *rchip = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->old_legacy_ctrl = old_legacy_ctrl; spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->voice_lock); init_waitqueue_head(&chip->interrupt_sleep); atomic_set(&chip->interrupt_sleep_count, 0); chip->card = card; chip->pci = pci; chip->irq = -1; chip->device_id = pci->device; chip->rev = pci->revision; chip->reg_area_phys = pci_resource_start(pci, 0); chip->reg_area_virt = ioremap_nocache(chip->reg_area_phys, 0x8000); pci_set_master(pci); chip->src441_used = -1; if ((chip->res_reg_area = request_mem_region(chip->reg_area_phys, 0x8000, "YMFPCI")) == NULL) { snd_printk(KERN_ERR "unable to grab memory region 0x%lx-0x%lx\n", chip->reg_area_phys, chip->reg_area_phys + 0x8000 - 1); snd_ymfpci_free(chip); return -EBUSY; } if (request_irq(pci->irq, snd_ymfpci_interrupt, IRQF_SHARED, "YMFPCI", chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_ymfpci_free(chip); return -EBUSY; } chip->irq = pci->irq; snd_ymfpci_aclink_reset(pci); if (snd_ymfpci_codec_ready(chip, 0) < 0) { snd_ymfpci_free(chip); return -EIO; } err = snd_ymfpci_request_firmware(chip); if (err < 0) { snd_printk(KERN_ERR "firmware request failed: %d\n", err); snd_ymfpci_free(chip); return err; } snd_ymfpci_download_image(chip); udelay(100); /* seems we need a delay after downloading image.. */ if (snd_ymfpci_memalloc(chip) < 0) { snd_ymfpci_free(chip); return -EIO; } if ((err = snd_ymfpci_ac3_init(chip)) < 0) { snd_ymfpci_free(chip); return err; } #ifdef CONFIG_PM chip->saved_regs = vmalloc(YDSXGR_NUM_SAVED_REGS * sizeof(u32)); if (chip->saved_regs == NULL) { snd_ymfpci_free(chip); return -ENOMEM; } #endif if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_ymfpci_free(chip); return err; } snd_ymfpci_proc_init(card, chip); snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; }
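/*
 * The YMFPCI mixer handlers in this file pack two 14-bit channel volumes into
 * one 32-bit hardware register (left in bits 0..13, right in bits 16..29), and
 * the YMFPCI_SINGLE switches pack "register | (shift << 16)" into
 * kcontrol->private_value.  The stand-alone host-side sketch below only
 * illustrates that arithmetic; it is not part of the driver and has no
 * ALSA/kernel dependencies.
 */
#include <stdio.h>
#include <stdint.h>

#define YMF_VOL_MASK  0x3fffu   /* 14-bit volume field, as in snd_ymfpci_get/put_double() */
#define YMF_SHIFT_L   0
#define YMF_SHIFT_R   16

static uint32_t ymf_pack_volume(unsigned left, unsigned right)
{
	return ((left & YMF_VOL_MASK) << YMF_SHIFT_L) |
	       ((right & YMF_VOL_MASK) << YMF_SHIFT_R);
}

static void ymf_unpack_volume(uint32_t reg, unsigned *left, unsigned *right)
{
	*left  = (reg >> YMF_SHIFT_L) & YMF_VOL_MASK;
	*right = (reg >> YMF_SHIFT_R) & YMF_VOL_MASK;
}

int main(void)
{
	unsigned l, r;
	uint32_t full = ymf_pack_volume(0x3fff, 0x3fff);

	ymf_unpack_volume(full, &l, &r);
	/* 0x3fff3fff is the "full volume" value the init code writes to the mixer registers. */
	printf("packed=0x%08x left=0x%04x right=0x%04x\n", (unsigned)full, l, r);
	return 0;
}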
541122.c
/*************************************************************************\
* Copyright (c) 2012 UChicago Argonne LLC, as Operator of Argonne
*     National Laboratory.
* EPICS BASE is distributed subject to a Software License Agreement found
* in file LICENSE that is included with this distribution.
\*************************************************************************/

/*
 *  Author: Andrew Johnson
 *  Date:   28 Sept 2012
 */

#include "dbAccess.h"
#include "printfRecord.h"
#include "epicsExport.h"

static long write_string(printfRecord *prec)
{
    return dbPutLinkLS(&prec->out, prec->val, prec->len);
}

printfdset devPrintfSoft = {
    5, NULL, NULL, NULL, NULL, write_string
};
epicsExportAddress(dset, devPrintfSoft);
701370.c
/*
 * mmt_business_app.c
 *
 *  Created on: Jun 3, 2021
 *      Author: nhnghia
 */

#include <stdio.h>
#include <stdlib.h> /* for exit() */

#include "mmt_business_app_internal.h"

int init_proto() {
	if (!init_proto_ips_data()) {
		fprintf(stderr, "Error initializing protocol PROTO_IPS_DATA\n Exiting\n");
		exit(0);
	}
	return 1;
}

int cleanup_proto() {
	//printf("close s1ap protocol");
	return 0;
}
835394.c
/* $NoKeywords:$ */ /** * @file * * Config Fch SATA (IDE mode) controller * * Init SATA IDE (Native IDE) mode features. * * @xrefitem bom "File Content Label" "Release Content" * @e project: AGESA * @e sub-project: FCH * @e \$Revision: 44324 $ @e \$Date: 2010-12-22 17:16:51 +0800 (Wed, 22 Dec 2010) $ * */ /* ***************************************************************************** * * Copyright (c) 2011, Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **************************************************************************** */ #include "FchPlatform.h" #include "Filecode.h" #define FILECODE PROC_FCH_SATA_SATAIDEMID_FILECODE /** * FchInitMidSataIde - Config SATA controller after PCI * emulation * * * * @param[in] FchDataPtr Fch configuration structure pointer. * */ VOID FchInitMidSataIde ( IN VOID *FchDataPtr ) { UINT32 Bar5; FCH_DATA_BLOCK *LocalCfgPtr; LocalCfgPtr = (FCH_DATA_BLOCK *) FchDataPtr; Bar5 = 0; SataBar5setting (LocalCfgPtr, &Bar5); // //If this is not S3 resume and also if SATA set to one of IDE mode, them implement drive detection workaround. // if ( ! (LocalCfgPtr->Misc.S3Resume) ) { SataDriveDetection (LocalCfgPtr, &Bar5); } }
726436.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_fixed_08.c Label Definition File: CWE680_Integer_Overflow_to_Buffer_Overflow__malloc.label.xml Template File: sources-sink-08.tmpl.c */ /* * @description * CWE: 680 Integer Overflow to Buffer Overflow * BadSource: fixed Fixed value that will cause an integer overflow in the sink * GoodSource: Small number greater than zero that will not cause an integer overflow in the sink * Sink: * BadSink : Attempt to allocate array using length value from source * Flow Variant: 08 Control flow: if(staticReturnsTrue()) and if(staticReturnsFalse()) * * */ #include "std_testcase.h" /* The two function below always return the same value, so a tool * should be able to identify that calls to the functions will always * return a fixed value. */ static int staticReturnsTrue() { return 1; } static int staticReturnsFalse() { return 0; } #ifndef OMITBAD void CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_fixed_08_bad() { int data; /* Initialize data */ data = -1; if(staticReturnsTrue()) { /* FLAW: Set data to a value that will cause an integer overflow in the call to malloc() in the sink */ data = INT_MAX / 2 + 2; /* 1073741825 */ /* NOTE: This value will cause the sink to only allocate 4 bytes of memory, however * the for loop will attempt to access indices 0-1073741824 */ } { size_t i; int *intPointer; /* POTENTIAL FLAW: if data * sizeof(int) > SIZE_MAX, overflows to a small value * so that the for loop doing the initialization causes a buffer overflow */ intPointer = (int*)malloc(data * sizeof(int)); if (intPointer == NULL) {exit(-1);} for (i = 0; i < (size_t)data; i++) { intPointer[i] = 0; /* Potentially writes beyond the boundary of intPointer */ } printIntLine(intPointer[0]); free(intPointer); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B1() - use goodsource and badsink by changing the staticReturnsTrue() to staticReturnsFalse() */ static void goodG2B1() { int data; /* Initialize data */ data = -1; if(staticReturnsFalse()) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { /* FIX: Set data to a relatively small number greater than zero */ data = 20; } { size_t i; int *intPointer; /* POTENTIAL FLAW: if data * sizeof(int) > SIZE_MAX, overflows to a small value * so that the for loop doing the initialization causes a buffer overflow */ intPointer = (int*)malloc(data * sizeof(int)); if (intPointer == NULL) {exit(-1);} for (i = 0; i < (size_t)data; i++) { intPointer[i] = 0; /* Potentially writes beyond the boundary of intPointer */ } printIntLine(intPointer[0]); free(intPointer); } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */ static void goodG2B2() { int data; /* Initialize data */ data = -1; if(staticReturnsTrue()) { /* FIX: Set data to a relatively small number greater than zero */ data = 20; } { size_t i; int *intPointer; /* POTENTIAL FLAW: if data * sizeof(int) > SIZE_MAX, overflows to a small value * so that the for loop doing the initialization causes a buffer overflow */ intPointer = (int*)malloc(data * sizeof(int)); if (intPointer == NULL) {exit(-1);} for (i = 0; i < (size_t)data; i++) { intPointer[i] = 0; /* Potentially writes beyond the boundary of intPointer */ } printIntLine(intPointer[0]); free(intPointer); } } void CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_fixed_08_good() { goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). 
It is only used when building this testcase on * its own for testing or for building a binary to use in testing binary * analysis tools. It is not used when compiling all the testcases as one * application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_fixed_08_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE680_Integer_Overflow_to_Buffer_Overflow__malloc_fixed_08_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
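/*
 * The flaw exercised above is the unchecked multiplication "data * sizeof(int)":
 * with data = INT_MAX / 2 + 2 the product exceeds a 32-bit SIZE_MAX and wraps to
 * a tiny value, so malloc() succeeds with far too little memory.  Below is a
 * sketch of the usual guard (an illustrative helper, not part of the generated
 * test case); modern calloc(count, sizeof(int)) implementations perform an
 * equivalent overflow check internally.
 */
#include <stdint.h>
#include <stdlib.h>

static int *allocIntArrayChecked(int count)
{
    /* Reject non-positive counts and products that cannot fit in size_t. */
    if (count <= 0 || (size_t)count > SIZE_MAX / sizeof(int))
    {
        return NULL;
    }
    return (int *)malloc((size_t)count * sizeof(int));
}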
660947.c
#include <stdio.h>

void funkcja(void);

int main(void)
{
    funkcja();
    funkcja();
    funkcja();
    printf("\n");
    funkcja();
    funkcja();
    printf("\n");
    funkcja();
    printf("\n");

    return 0;
}

void funkcja(void)
{
    printf("Usmiech!");
}
361292.c
#include<stdio.h>

long fibonacci( long n );

int main( void )
{
    long result;
    long number;

    printf( "Enter an integer: " );
    scanf( "%ld", &number );

    result = fibonacci( number );
    printf( "Fibonacci( %ld ) = %ld\n", number, result);

    return 0;
}

long fibonacci( long n )
{
    if ( n == 0 || n == 1 ) {
        return n;
    }
    else {
        return fibonacci( n - 1) + fibonacci( n - 2 );
    }
}
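/*
 * The recursive fibonacci() above recomputes the same subproblems and takes
 * time exponential in n.  For comparison only, an equivalent iterative form
 * (a sketch, not part of the original program) runs in O(n) time and constant
 * space:
 */
long fibonacci_iterative( long n )
{
    long prev = 0, curr = 1, next, i;

    if ( n == 0 ) {
        return 0;
    }
    for ( i = 2; i <= n; i++ ) {
        next = prev + curr;
        prev = curr;
        curr = next;
    }
    return curr;
}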
106786.c
/* Conversion from and to CP1256. Copyright (C) 1998-2015 Free Software Foundation, Inc. This file is part of the GNU C Library. Contributed by Ulrich Drepper <[email protected]>, 1998. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ #include <stdint.h> /* Get the conversion table. */ #define TABLES <cp1256.h> #define CHARSET_NAME "CP1256//" #define HAS_HOLES 1 /* Not all 256 character are defined. */ #include <8bit-gap.c>
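/*
 * The actual CP1256 table lives in <cp1256.h> and is consumed by the generic
 * <8bit-gap.c> conversion engine; HAS_HOLES means some byte values have no
 * Unicode mapping at all.  The toy stand-alone sketch below only illustrates
 * the "table with holes" idea and is not the real gconv module (the sample
 * mappings are shown for illustration only):
 */
#include <stdint.h>

#define EXAMPLE_NO_MAPPING 0u   /* a hole: this byte is undefined in the charset */

static const uint32_t example_high_half[4] = {
    /* 0x80 */ 0x20ACu,   /* EURO SIGN */
    /* 0x81 */ 0x067Eu,   /* ARABIC LETTER PEH */
    /* 0x82 */ 0x201Au,   /* SINGLE LOW-9 QUOTATION MARK */
    /* 0x83 */ 0x0192u,   /* LATIN SMALL LETTER F WITH HOOK */
};

/* Map one byte to UCS-4; returns EXAMPLE_NO_MAPPING for unmapped positions. */
static uint32_t example_to_ucs4(uint8_t byte)
{
    if (byte < 0x80)
        return byte;                          /* the ASCII half maps to itself */
    if (byte - 0x80 < 4)
        return example_high_half[byte - 0x80];
    return EXAMPLE_NO_MAPPING;
}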
289429.c
#include <zmq.h>
#include <string.h> // memcpy
#include <stdlib.h> // malloc, free
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>

#define ZMQ_NUM_IOTHREADS 1

// Reads the message body to a string, returns pointer you must free
char* msg_to_str(zmq_msg_t* msg)
{
    size_t size = zmq_msg_size(msg);
    if (size < 1)
    {
        return NULL;
    }
    char *string = malloc(size + 1);
    if (string == NULL)
    {
        return NULL;
    }
    memcpy(string, zmq_msg_data(msg), size);
    string[size] = 0x0; // Force last byte to null
    return (string);
}

int main(int argc, char *argv[])
{
    if (argc < 3)
    {
        printf("Usage:\n req_test tcp://localhost:7070 card_uid\n");
        return 1;
    }
    int err;
    // TODO: IFDEF on the API version ? we use 2.2 due to raspbian not having 3.2 packages
    void *context = zmq_init(ZMQ_NUM_IOTHREADS);
    void *requester = zmq_socket(context, ZMQ_REQ);
    err = zmq_connect(requester, argv[1]);
    if (err != 0)
    {
        printf("ERROR: zmq_connect failed with %s\n", zmq_strerror(zmq_errno()));
        goto END;
    }

    zmq_msg_t request;
    int uidlen = strlen(argv[2]);
    err = zmq_msg_init_size(&request, uidlen);
    if (err != 0)
    {
        printf("ERROR: zmq_msg_init_size failed with %s\n", zmq_strerror(zmq_errno()));
        goto END;
    }
    memcpy(zmq_msg_data(&request), argv[2], uidlen);

    printf("Sending request\n");
    // zmq_sendmsg returns -1 on error (and the byte count on 3.x), so test for < 0
    err = zmq_sendmsg(requester, &request, 0);
    if (err < 0)
    {
        printf("ERROR: zmq_send failed with %s\n", zmq_strerror(zmq_errno()));
        goto END;
    }
    zmq_msg_close(&request);

    printf("Waiting for response\n");
    int partno = 0;
    while (1)
    {
        partno++;
        zmq_msg_t message;
        // Capture the return value of zmq_msg_init before checking it
        err = zmq_msg_init(&message);
        if (err != 0)
        {
            printf("ERROR: zmq_msg_init failed with %s\n", zmq_strerror(zmq_errno()));
            goto END;
        }
        err = zmq_recvmsg(requester, &message, 0);
        if (err < 0)
        {
            zmq_msg_close (&message);
            printf("ERROR: zmq_recv failed with %s\n", zmq_strerror(zmq_errno()));
            goto END;
        }
        printf("Received part %d, %d bytes\n", partno, (int)zmq_msg_size(&message));

        // Read the body as string (NULL is returned for an empty part)
        char* body = msg_to_str(&message);
        printf("==\n%s\n==\n", body ? body : "");
        free(body);
        // Done with message
        zmq_msg_close (&message);

        // See if we have more parts (int64_t is the ZMQ_RCVMORE option type in the 2.x API)
        int64_t more;
        size_t more_size = sizeof(more);
        err = zmq_getsockopt(requester, ZMQ_RCVMORE, &more, &more_size);
        if (err != 0)
        {
            printf("ERROR: zmq_getsockopt failed with %s\n", zmq_strerror(zmq_errno()));
            goto END;
        }
        if (!more)
        {
            break;
        }
    }
    printf("All %d parts received\n", partno);

END:
    zmq_close(requester);
    zmq_term(context);
    return err;
}
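/*
 * The REQ client above expects a REP-style peer bound at the endpoint it
 * connects to.  A minimal single-part echo responder sketch using the same
 * zmq 2.x/3.x message API (endpoint string and error handling here are
 * illustrative only, not part of the original tool):
 */
#include <zmq.h>

static int run_echo_rep(void *context, const char *bind_addr)
{
    void *responder = zmq_socket(context, ZMQ_REP);
    if (responder == NULL)
        return -1;
    if (zmq_bind(responder, bind_addr) != 0) {
        zmq_close(responder);
        return -1;
    }
    for (;;) {
        zmq_msg_t msg;
        if (zmq_msg_init(&msg) != 0)
            break;
        if (zmq_recvmsg(responder, &msg, 0) < 0) {   /* -1 signals an error */
            zmq_msg_close(&msg);
            break;
        }
        /* Echo the request body straight back as the reply. */
        if (zmq_sendmsg(responder, &msg, 0) < 0) {
            zmq_msg_close(&msg);
            break;
        }
        zmq_msg_close(&msg);
    }
    zmq_close(responder);
    return 0;
}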
586322.c
#include "types.h" #include "stat.h" #include "user.h" int main(int argc, char *argv[]) { int num = atoi(argv[1]); printf(1, "system call counter %d: %d\n", num, getsyscallcounter(num)); exit(); }
769932.c
/* * Public Key layer for writing key files and structures * * Copyright The Mbed TLS Contributors * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common.h" #if defined(MBEDTLS_PK_WRITE_C) #include "mbedtls/pk.h" #include "mbedtls/asn1write.h" #include "mbedtls/oid.h" #include "mbedtls/platform_util.h" #include "mbedtls/error.h" #include <string.h> #if defined(MBEDTLS_RSA_C) #include "mbedtls/rsa.h" #endif #if defined(MBEDTLS_ECP_C) #include "mbedtls/bignum.h" #include "mbedtls/ecp.h" #include "mbedtls/platform_util.h" #endif #if defined(MBEDTLS_ECDSA_C) #include "mbedtls/ecdsa.h" #endif #if defined(MBEDTLS_PEM_WRITE_C) #include "mbedtls/pem.h" #endif #if defined(MBEDTLS_USE_PSA_CRYPTO) #include "psa/crypto.h" #include "mbedtls/psa_util.h" #endif #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #include <stdlib.h> #define mbedtls_calloc calloc #define mbedtls_free free #endif /* Parameter validation macros based on platform_util.h */ #define PK_VALIDATE_RET( cond ) \ MBEDTLS_INTERNAL_VALIDATE_RET( cond, MBEDTLS_ERR_PK_BAD_INPUT_DATA ) #define PK_VALIDATE( cond ) \ MBEDTLS_INTERNAL_VALIDATE( cond ) #if defined(MBEDTLS_RSA_C) /* * RSAPublicKey ::= SEQUENCE { * modulus INTEGER, -- n * publicExponent INTEGER -- e * } */ static int pk_write_rsa_pubkey( unsigned char **p, unsigned char *start, mbedtls_rsa_context *rsa ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; size_t len = 0; mbedtls_mpi T; mbedtls_mpi_init( &T ); /* Export E */ if ( ( ret = mbedtls_rsa_export( rsa, NULL, NULL, NULL, NULL, &T ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( p, start, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export N */ if ( ( ret = mbedtls_rsa_export( rsa, &T, NULL, NULL, NULL, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( p, start, &T ) ) < 0 ) goto end_of_export; len += ret; end_of_export: mbedtls_mpi_free( &T ); if( ret < 0 ) return( ret ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( p, start, len ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( p, start, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ); return( (int) len ); } #endif /* MBEDTLS_RSA_C */ #if defined(MBEDTLS_ECP_C) /* * EC public key is an EC point */ static int pk_write_ec_pubkey( unsigned char **p, unsigned char *start, mbedtls_ecp_keypair *ec ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; size_t len = 0; unsigned char buf[MBEDTLS_ECP_MAX_PT_LEN]; if( ( ret = mbedtls_ecp_point_write_binary( &ec->grp, &ec->Q, MBEDTLS_ECP_PF_UNCOMPRESSED, &len, buf, sizeof( buf ) ) ) != 0 ) { return( ret ); } if( *p < start || (size_t)( *p - start ) < len ) return( MBEDTLS_ERR_ASN1_BUF_TOO_SMALL ); *p -= len; memcpy( *p, buf, len ); return( (int) len ); } /* * ECParameters ::= CHOICE { * namedCurve OBJECT IDENTIFIER * } */ static int pk_write_ec_param( unsigned char **p, unsigned char *start, mbedtls_ecp_keypair *ec ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; size_t len = 0; const char *oid; size_t oid_len; if( ( ret = mbedtls_oid_get_oid_by_ec_grp( ec->grp.id, 
&oid, &oid_len ) ) != 0 ) return( ret ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_oid( p, start, oid, oid_len ) ); return( (int) len ); } /* * privateKey OCTET STRING -- always of length ceil(log2(n)/8) */ static int pk_write_ec_private( unsigned char **p, unsigned char *start, mbedtls_ecp_keypair *ec ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; size_t byte_length = ( ec->grp.pbits + 7 ) / 8; unsigned char tmp[MBEDTLS_ECP_MAX_BYTES]; ret = mbedtls_ecp_write_key( ec, tmp, byte_length ); if( ret != 0 ) goto exit; ret = mbedtls_asn1_write_octet_string( p, start, tmp, byte_length ); exit: mbedtls_platform_zeroize( tmp, byte_length ); return( ret ); } #endif /* MBEDTLS_ECP_C */ int mbedtls_pk_write_pubkey( unsigned char **p, unsigned char *start, const mbedtls_pk_context *key ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; size_t len = 0; PK_VALIDATE_RET( p != NULL ); PK_VALIDATE_RET( *p != NULL ); PK_VALIDATE_RET( start != NULL ); PK_VALIDATE_RET( key != NULL ); #if defined(MBEDTLS_RSA_C) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_RSA ) MBEDTLS_ASN1_CHK_ADD( len, pk_write_rsa_pubkey( p, start, mbedtls_pk_rsa( *key ) ) ); else #endif #if defined(MBEDTLS_ECP_C) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_ECKEY ) MBEDTLS_ASN1_CHK_ADD( len, pk_write_ec_pubkey( p, start, mbedtls_pk_ec( *key ) ) ); else #endif #if defined(MBEDTLS_USE_PSA_CRYPTO) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_OPAQUE ) { size_t buffer_size; psa_key_id_t* key_id = (psa_key_id_t*) key->pk_ctx; if ( *p < start ) return( MBEDTLS_ERR_PK_BAD_INPUT_DATA ); buffer_size = (size_t)( *p - start ); if ( psa_export_public_key( *key_id, start, buffer_size, &len ) != PSA_SUCCESS ) { return( MBEDTLS_ERR_PK_BAD_INPUT_DATA ); } else { *p -= len; memmove( *p, start, len ); } } else #endif /* MBEDTLS_USE_PSA_CRYPTO */ return( MBEDTLS_ERR_PK_FEATURE_UNAVAILABLE ); return( (int) len ); } int mbedtls_pk_write_pubkey_der( mbedtls_pk_context *key, unsigned char *buf, size_t size ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; unsigned char *c; size_t len = 0, par_len = 0, oid_len; mbedtls_pk_type_t pk_type; const char *oid; PK_VALIDATE_RET( key != NULL ); if( size == 0 ) return( MBEDTLS_ERR_ASN1_BUF_TOO_SMALL ); PK_VALIDATE_RET( buf != NULL ); c = buf + size; MBEDTLS_ASN1_CHK_ADD( len, mbedtls_pk_write_pubkey( &c, buf, key ) ); if( c - buf < 1 ) return( MBEDTLS_ERR_ASN1_BUF_TOO_SMALL ); /* * SubjectPublicKeyInfo ::= SEQUENCE { * algorithm AlgorithmIdentifier, * subjectPublicKey BIT STRING } */ *--c = 0; len += 1; MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_BIT_STRING ) ); pk_type = mbedtls_pk_get_type( key ); #if defined(MBEDTLS_ECP_C) if( pk_type == MBEDTLS_PK_ECKEY ) { MBEDTLS_ASN1_CHK_ADD( par_len, pk_write_ec_param( &c, buf, mbedtls_pk_ec( *key ) ) ); } #endif #if defined(MBEDTLS_USE_PSA_CRYPTO) if( pk_type == MBEDTLS_PK_OPAQUE ) { psa_key_attributes_t attributes = PSA_KEY_ATTRIBUTES_INIT; psa_key_type_t key_type; psa_key_id_t key_id; psa_ecc_family_t curve; size_t bits; key_id = *((psa_key_id_t*) key->pk_ctx ); if( PSA_SUCCESS != psa_get_key_attributes( key_id, &attributes ) ) return( MBEDTLS_ERR_PK_HW_ACCEL_FAILED ); key_type = psa_get_key_type( &attributes ); bits = psa_get_key_bits( &attributes ); psa_reset_key_attributes( &attributes ); curve = PSA_KEY_TYPE_ECC_GET_FAMILY( key_type ); if( curve == 0 ) return( MBEDTLS_ERR_PK_FEATURE_UNAVAILABLE ); ret = mbedtls_psa_get_ecc_oid_from_id( curve, bits, &oid, &oid_len 
); if( ret != 0 ) return( MBEDTLS_ERR_PK_FEATURE_UNAVAILABLE ); /* Write EC algorithm parameters; that's akin * to pk_write_ec_param() above. */ MBEDTLS_ASN1_CHK_ADD( par_len, mbedtls_asn1_write_oid( &c, buf, oid, oid_len ) ); /* The rest of the function works as for legacy EC contexts. */ pk_type = MBEDTLS_PK_ECKEY; } #endif /* MBEDTLS_USE_PSA_CRYPTO */ if( ( ret = mbedtls_oid_get_oid_by_pk_alg( pk_type, &oid, &oid_len ) ) != 0 ) { return( ret ); } MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_algorithm_identifier( &c, buf, oid, oid_len, par_len ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ); return( (int) len ); } int mbedtls_pk_write_key_der( mbedtls_pk_context *key, unsigned char *buf, size_t size ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; unsigned char *c; size_t len = 0; PK_VALIDATE_RET( key != NULL ); if( size == 0 ) return( MBEDTLS_ERR_ASN1_BUF_TOO_SMALL ); PK_VALIDATE_RET( buf != NULL ); c = buf + size; #if defined(MBEDTLS_RSA_C) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_RSA ) { mbedtls_mpi T; /* Temporary holding the exported parameters */ mbedtls_rsa_context *rsa = mbedtls_pk_rsa( *key ); /* * Export the parameters one after another to avoid simultaneous copies. */ mbedtls_mpi_init( &T ); /* Export QP */ if( ( ret = mbedtls_rsa_export_crt( rsa, NULL, NULL, &T ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export DQ */ if( ( ret = mbedtls_rsa_export_crt( rsa, NULL, &T, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export DP */ if( ( ret = mbedtls_rsa_export_crt( rsa, &T, NULL, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export Q */ if ( ( ret = mbedtls_rsa_export( rsa, NULL, NULL, &T, NULL, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export P */ if ( ( ret = mbedtls_rsa_export( rsa, NULL, &T, NULL, NULL, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export D */ if ( ( ret = mbedtls_rsa_export( rsa, NULL, NULL, NULL, &T, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export E */ if ( ( ret = mbedtls_rsa_export( rsa, NULL, NULL, NULL, NULL, &T ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; /* Export N */ if ( ( ret = mbedtls_rsa_export( rsa, &T, NULL, NULL, NULL, NULL ) ) != 0 || ( ret = mbedtls_asn1_write_mpi( &c, buf, &T ) ) < 0 ) goto end_of_export; len += ret; end_of_export: mbedtls_mpi_free( &T ); if( ret < 0 ) return( ret ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_int( &c, buf, 0 ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ); } else #endif /* MBEDTLS_RSA_C */ #if defined(MBEDTLS_ECP_C) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_ECKEY ) { mbedtls_ecp_keypair *ec = mbedtls_pk_ec( *key ); size_t pub_len = 0, par_len = 0; /* * RFC 5915, or SEC1 Appendix C.4 * * ECPrivateKey ::= SEQUENCE { * version INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1), * privateKey OCTET STRING, * parameters [0] ECParameters {{ NamedCurve }} OPTIONAL, * publicKey [1] BIT STRING OPTIONAL * } */ /* publicKey */ 
MBEDTLS_ASN1_CHK_ADD( pub_len, pk_write_ec_pubkey( &c, buf, ec ) ); if( c - buf < 1 ) return( MBEDTLS_ERR_ASN1_BUF_TOO_SMALL ); *--c = 0; pub_len += 1; MBEDTLS_ASN1_CHK_ADD( pub_len, mbedtls_asn1_write_len( &c, buf, pub_len ) ); MBEDTLS_ASN1_CHK_ADD( pub_len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_BIT_STRING ) ); MBEDTLS_ASN1_CHK_ADD( pub_len, mbedtls_asn1_write_len( &c, buf, pub_len ) ); MBEDTLS_ASN1_CHK_ADD( pub_len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONTEXT_SPECIFIC | MBEDTLS_ASN1_CONSTRUCTED | 1 ) ); len += pub_len; /* parameters */ MBEDTLS_ASN1_CHK_ADD( par_len, pk_write_ec_param( &c, buf, ec ) ); MBEDTLS_ASN1_CHK_ADD( par_len, mbedtls_asn1_write_len( &c, buf, par_len ) ); MBEDTLS_ASN1_CHK_ADD( par_len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONTEXT_SPECIFIC | MBEDTLS_ASN1_CONSTRUCTED | 0 ) ); len += par_len; /* privateKey */ MBEDTLS_ASN1_CHK_ADD( len, pk_write_ec_private( &c, buf, ec ) ); /* version */ MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_int( &c, buf, 1 ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_len( &c, buf, len ) ); MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_tag( &c, buf, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE ) ); } else #endif /* MBEDTLS_ECP_C */ return( MBEDTLS_ERR_PK_FEATURE_UNAVAILABLE ); return( (int) len ); } #if defined(MBEDTLS_PEM_WRITE_C) #define PEM_BEGIN_PUBLIC_KEY "-----BEGIN PUBLIC KEY-----\n" #define PEM_END_PUBLIC_KEY "-----END PUBLIC KEY-----\n" #define PEM_BEGIN_PRIVATE_KEY_RSA "-----BEGIN RSA PRIVATE KEY-----\n" #define PEM_END_PRIVATE_KEY_RSA "-----END RSA PRIVATE KEY-----\n" #define PEM_BEGIN_PRIVATE_KEY_EC "-----BEGIN EC PRIVATE KEY-----\n" #define PEM_END_PRIVATE_KEY_EC "-----END EC PRIVATE KEY-----\n" /* * Max sizes of key per types. Shown as tag + len (+ content). 
*/ #if defined(MBEDTLS_RSA_C) /* * RSA public keys: * SubjectPublicKeyInfo ::= SEQUENCE { 1 + 3 * algorithm AlgorithmIdentifier, 1 + 1 (sequence) * + 1 + 1 + 9 (rsa oid) * + 1 + 1 (params null) * subjectPublicKey BIT STRING } 1 + 3 + (1 + below) * RSAPublicKey ::= SEQUENCE { 1 + 3 * modulus INTEGER, -- n 1 + 3 + MPI_MAX + 1 * publicExponent INTEGER -- e 1 + 3 + MPI_MAX + 1 * } */ #define RSA_PUB_DER_MAX_BYTES 38 + 2 * MBEDTLS_MPI_MAX_SIZE /* * RSA private keys: * RSAPrivateKey ::= SEQUENCE { 1 + 3 * version Version, 1 + 1 + 1 * modulus INTEGER, 1 + 3 + MPI_MAX + 1 * publicExponent INTEGER, 1 + 3 + MPI_MAX + 1 * privateExponent INTEGER, 1 + 3 + MPI_MAX + 1 * prime1 INTEGER, 1 + 3 + MPI_MAX / 2 + 1 * prime2 INTEGER, 1 + 3 + MPI_MAX / 2 + 1 * exponent1 INTEGER, 1 + 3 + MPI_MAX / 2 + 1 * exponent2 INTEGER, 1 + 3 + MPI_MAX / 2 + 1 * coefficient INTEGER, 1 + 3 + MPI_MAX / 2 + 1 * otherPrimeInfos OtherPrimeInfos OPTIONAL 0 (not supported) * } */ #define MPI_MAX_SIZE_2 MBEDTLS_MPI_MAX_SIZE / 2 + \ MBEDTLS_MPI_MAX_SIZE % 2 #define RSA_PRV_DER_MAX_BYTES 47 + 3 * MBEDTLS_MPI_MAX_SIZE \ + 5 * MPI_MAX_SIZE_2 #else /* MBEDTLS_RSA_C */ #define RSA_PUB_DER_MAX_BYTES 0 #define RSA_PRV_DER_MAX_BYTES 0 #endif /* MBEDTLS_RSA_C */ #if defined(MBEDTLS_ECP_C) /* * EC public keys: * SubjectPublicKeyInfo ::= SEQUENCE { 1 + 2 * algorithm AlgorithmIdentifier, 1 + 1 (sequence) * + 1 + 1 + 7 (ec oid) * + 1 + 1 + 9 (namedCurve oid) * subjectPublicKey BIT STRING 1 + 2 + 1 [1] * + 1 (point format) [1] * + 2 * ECP_MAX (coords) [1] * } */ #define ECP_PUB_DER_MAX_BYTES 30 + 2 * MBEDTLS_ECP_MAX_BYTES /* * EC private keys: * ECPrivateKey ::= SEQUENCE { 1 + 2 * version INTEGER , 1 + 1 + 1 * privateKey OCTET STRING, 1 + 1 + ECP_MAX * parameters [0] ECParameters OPTIONAL, 1 + 1 + (1 + 1 + 9) * publicKey [1] BIT STRING OPTIONAL 1 + 2 + [1] above * } */ #define ECP_PRV_DER_MAX_BYTES 29 + 3 * MBEDTLS_ECP_MAX_BYTES #else /* MBEDTLS_ECP_C */ #define ECP_PUB_DER_MAX_BYTES 0 #define ECP_PRV_DER_MAX_BYTES 0 #endif /* MBEDTLS_ECP_C */ #define PUB_DER_MAX_BYTES RSA_PUB_DER_MAX_BYTES > ECP_PUB_DER_MAX_BYTES ? \ RSA_PUB_DER_MAX_BYTES : ECP_PUB_DER_MAX_BYTES #define PRV_DER_MAX_BYTES RSA_PRV_DER_MAX_BYTES > ECP_PRV_DER_MAX_BYTES ? 
\ RSA_PRV_DER_MAX_BYTES : ECP_PRV_DER_MAX_BYTES int mbedtls_pk_write_pubkey_pem( mbedtls_pk_context *key, unsigned char *buf, size_t size ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; unsigned char output_buf[PUB_DER_MAX_BYTES]; size_t olen = 0; PK_VALIDATE_RET( key != NULL ); PK_VALIDATE_RET( buf != NULL || size == 0 ); if( ( ret = mbedtls_pk_write_pubkey_der( key, output_buf, sizeof(output_buf) ) ) < 0 ) { return( ret ); } if( ( ret = mbedtls_pem_write_buffer( PEM_BEGIN_PUBLIC_KEY, PEM_END_PUBLIC_KEY, output_buf + sizeof(output_buf) - ret, ret, buf, size, &olen ) ) != 0 ) { return( ret ); } return( 0 ); } int mbedtls_pk_write_key_pem( mbedtls_pk_context *key, unsigned char *buf, size_t size ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; unsigned char output_buf[PRV_DER_MAX_BYTES]; const char *begin, *end; size_t olen = 0; PK_VALIDATE_RET( key != NULL ); PK_VALIDATE_RET( buf != NULL || size == 0 ); if( ( ret = mbedtls_pk_write_key_der( key, output_buf, sizeof(output_buf) ) ) < 0 ) return( ret ); #if defined(MBEDTLS_RSA_C) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_RSA ) { begin = PEM_BEGIN_PRIVATE_KEY_RSA; end = PEM_END_PRIVATE_KEY_RSA; } else #endif #if defined(MBEDTLS_ECP_C) if( mbedtls_pk_get_type( key ) == MBEDTLS_PK_ECKEY ) { begin = PEM_BEGIN_PRIVATE_KEY_EC; end = PEM_END_PRIVATE_KEY_EC; } else #endif return( MBEDTLS_ERR_PK_FEATURE_UNAVAILABLE ); if( ( ret = mbedtls_pem_write_buffer( begin, end, output_buf + sizeof(output_buf) - ret, ret, buf, size, &olen ) ) != 0 ) { return( ret ); } return( 0 ); } #endif /* MBEDTLS_PEM_WRITE_C */ #endif /* MBEDTLS_PK_WRITE_C */
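/*
 * Usage sketch for the writers above (illustrative only; assumes a build with
 * MBEDTLS_PK_PARSE_C and MBEDTLS_PEM_WRITE_C enabled).  Note that
 * mbedtls_pk_write_pubkey_der() fills the buffer from the end backwards, which
 * is why mbedtls_pk_write_pubkey_pem() passes
 * "output_buf + sizeof(output_buf) - ret" to the PEM encoder.
 */
#include <stdio.h>
#include "mbedtls/pk.h"

static int example_dump_public_key( const unsigned char *pem_in, size_t pem_in_len )
{
    mbedtls_pk_context pk;
    unsigned char der[1024];
    unsigned char pem[2048];
    int ret;

    mbedtls_pk_init( &pk );

    /* For PEM input, pem_in_len must include the terminating '\0'. */
    ret = mbedtls_pk_parse_public_key( &pk, pem_in, pem_in_len );
    if( ret != 0 )
        goto cleanup;

    ret = mbedtls_pk_write_pubkey_der( &pk, der, sizeof( der ) );
    if( ret < 0 )
        goto cleanup;
    /* The DER data occupies the last 'ret' bytes of 'der'. */
    printf( "DER length: %d bytes (starts at offset %u)\n",
            ret, (unsigned)( sizeof( der ) - (size_t) ret ) );

    ret = mbedtls_pk_write_pubkey_pem( &pk, pem, sizeof( pem ) );
    if( ret == 0 )
        printf( "%s", (const char *) pem );

cleanup:
    mbedtls_pk_free( &pk );
    return( ret );
}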
898685.c
#include<stdio.h>

int main()
{
    long long int n, t, i;
    scanf("%lld", &t);
    for (i = 1; i <= t; i++)
    {
        scanf("%lld", &n);
        printf("%lld\n", (long long int)n / 2 + 1);
    }
    return 0;
}
810981.c
//STM32F103C8T6. 64 MHz
#include "config.h"
#include "stm32f10x.h"
#include "hardware.h"
#include <stdio.h>
#include <stdlib.h>
#include "main.h"
#include "display_spi.h"
#include "uart_handling.h"
#include "draw_monitor.h"
#include "mavlink_handling.h"

/* Private typedef -----------------------------------------------------------*/
typedef enum
{
  DATA_REQUEST_IDLE = 0,
  DATA_REQUEST_WAIT_RX,
  DATA_REQUEST_DONE,
  DATA_REQUEST_FULL_FAIL,
} data_request_state_t;

#define DATA_REQUEST_TIMEOUT_MS    (2000)
#define DATA_REQUESTS_MAX_NUMBER   (5)
#define MONITOR_UPDATE_PERIOD_MS   (8 * 60 * 1000)

/* Private variables ---------------------------------------------------------*/
data_request_state_t data_request_state = DATA_REQUEST_IDLE;
uint32_t data_request_send_time = 0;
uint8_t data_request_counter = 0;
uint8_t monitor_updated = 0;

/* Private function prototypes -----------------------------------------------*/
void main_send_data_request(void);
void main_data_request_handling(void);
void main_monitor_handling(void);

/* Private functions ---------------------------------------------------------*/

int main()
{
  init_all_hardware();
  delay_ms(1000);//for connection by debugger
  uart_handling_init();
  delay_ms(50);
  hardware_enable_power();
  go_to_sleep_mode(1000);

  init_adc_single_measure();
  delay_ms(8);
  dispaly_spi_init();
  displayed_params.battery_voltage = measure_battery_voltage();
  hardware_deinit_adc();
  displayed_params.update_counter = hardware_rtc_read_16_bit_backup_value(BACKUP_UPDATE_CNT_REG);

  if (displayed_params.battery_voltage < BATTER_LOW_THRESHOLD_V)
  {
    displayed_params.low_batt_flag = 1;
  }

  //0 - charging
  if (GPIO_ReadInputDataBit(BATERRY_CHARGE_GPIO, BATERRY_CHARGE_PIN) == 0)
  {
    displayed_params.is_charging_flag = 1;
  }

  displayed_params.ext_temperature_deg = 44;
  displayed_params.temperature1_deg = 44;

  while(1)
  {
    uart_handling_rx_data();
    main_data_request_handling();
    main_monitor_handling();
  }
}

void main_monitor_handling(void)
{
  if (monitor_updated)
    return;

  if ((data_request_state == DATA_REQUEST_DONE) ||
      (data_request_state == DATA_REQUEST_FULL_FAIL))
  {
    if (data_request_state == DATA_REQUEST_FULL_FAIL)
      displayed_params.no_connection_flag = 1;

    monitor_updated = 1;
    uart_handling_periph_deinit();
    draw_monitor();
    hardware_update_backup();
    dispaly_spi_deinit();
    hardware_disable_power();//All processes are completed
    //go_to_sleep_mode(10000);//test, debugger working
    // NVIC_SystemReset();//test, debugger working
    go_to_standby_mode(MONITOR_UPDATE_PERIOD_MS); //MCU will be reset after wakeup
  }
}

void main_data_request_handling(void)
{
  if (data_request_state == DATA_REQUEST_IDLE)
  {
    main_send_data_request();
  }
  else if (data_request_state == DATA_REQUEST_WAIT_RX)
  {
    if (mavlink_is_data_received())
    {
      data_request_state = DATA_REQUEST_DONE;
      return;
    }
    if (CHECK_TIMER(data_request_send_time, DATA_REQUEST_TIMEOUT_MS))
    {
      //timeout
      if (data_request_counter < DATA_REQUESTS_MAX_NUMBER)
        main_send_data_request();
      else
        data_request_state = DATA_REQUEST_FULL_FAIL;
    }
  }
}

void main_send_data_request(void)
{
  mavlink_request_temperatures();
  //mavlink_request_beep();
  SET_TIMESTAMP(data_request_send_time);
  data_request_counter++;
  data_request_state = DATA_REQUEST_WAIT_RX;
}

int putchar(int c)
{
  return c;
}
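/*
 * SET_TIMESTAMP() and CHECK_TIMER() are provided by config.h, which is not
 * part of this file.  On STM32 projects they are typically thin wrappers
 * around a millisecond tick counter; the sketch below (hypothetical EXAMPLE_*
 * names, not taken from this project) shows the wraparound-safe semantics the
 * calls above rely on:
 */
#include <stdint.h>

extern volatile uint32_t g_systick_ms;   /* assumed: incremented once per ms in SysTick_Handler() */

#define EXAMPLE_SET_TIMESTAMP(var)    ((var) = g_systick_ms)
#define EXAMPLE_CHECK_TIMER(var, ms)  ((uint32_t)(g_systick_ms - (var)) >= (uint32_t)(ms))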
724965.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE400_Resource_Exhaustion__listen_socket_for_loop_05.c Label Definition File: CWE400_Resource_Exhaustion.label.xml Template File: sources-sinks-05.tmpl.c */ /* * @description * CWE: 400 Resource Exhaustion * BadSource: listen_socket Read data using a listen socket (server side) * GoodSource: Assign count to be a relatively small number * Sinks: for_loop * GoodSink: Validate count before using it as the loop variant in a for loop * BadSink : Use count as the loop variant in a for loop * Flow Variant: 05 Control flow: if(staticTrue) and if(staticFalse) * * */ #include "std_testcase.h" #ifdef _WIN32 #include <winsock2.h> #include <windows.h> #include <direct.h> #pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */ #define CLOSE_SOCKET closesocket #else #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #define INVALID_SOCKET -1 #define SOCKET_ERROR -1 #define CLOSE_SOCKET close #define SOCKET int #endif #define TCP_PORT 27015 #define LISTEN_BACKLOG 5 #define CHAR_ARRAY_SIZE (3 * sizeof(count) + 2) /* The two variables below are not defined as "const", but are never assigned any other value, so a tool should be able to identify that reads of these will always return their initialized values. */ static int staticTrue = 1; /* true */ static int staticFalse = 0; /* false */ #ifndef OMITBAD void CWE400_Resource_Exhaustion__listen_socket_for_loop_05_bad() { int count; /* Initialize count */ count = -1; if(staticTrue) { { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; SOCKET listenSocket = INVALID_SOCKET; SOCKET acceptSocket = INVALID_SOCKET; char inputBuffer[CHAR_ARRAY_SIZE]; do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read count using a listen socket */ listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listenSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = INADDR_ANY; service.sin_port = htons(TCP_PORT); if (bind(listenSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } if (listen(listenSocket, LISTEN_BACKLOG) == SOCKET_ERROR) { break; } acceptSocket = accept(listenSocket, NULL, NULL); if (acceptSocket == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed */ recvResult = recv(acceptSocket, inputBuffer, CHAR_ARRAY_SIZE - 1, 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* NUL-terminate the string */ inputBuffer[recvResult] = '\0'; /* Convert to int */ count = atoi(inputBuffer); } while (0); if (listenSocket != INVALID_SOCKET) { CLOSE_SOCKET(listenSocket); } if (acceptSocket != INVALID_SOCKET) { CLOSE_SOCKET(acceptSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } } if(staticTrue) { { size_t i = 0; /* POTENTIAL FLAW: For loop using count as the loop variant and no validation */ for (i = 0; i < (size_t)count; i++) { printLine("Hello"); } } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodB2G1() - use badsource and goodsink by changing the second staticTrue to staticFalse */ static void goodB2G1() { int count; /* Initialize count */ count = -1; if(staticTrue) { { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; SOCKET listenSocket = INVALID_SOCKET; SOCKET acceptSocket = INVALID_SOCKET; char 
inputBuffer[CHAR_ARRAY_SIZE]; do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read count using a listen socket */ listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listenSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = INADDR_ANY; service.sin_port = htons(TCP_PORT); if (bind(listenSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } if (listen(listenSocket, LISTEN_BACKLOG) == SOCKET_ERROR) { break; } acceptSocket = accept(listenSocket, NULL, NULL); if (acceptSocket == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed */ recvResult = recv(acceptSocket, inputBuffer, CHAR_ARRAY_SIZE - 1, 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* NUL-terminate the string */ inputBuffer[recvResult] = '\0'; /* Convert to int */ count = atoi(inputBuffer); } while (0); if (listenSocket != INVALID_SOCKET) { CLOSE_SOCKET(listenSocket); } if (acceptSocket != INVALID_SOCKET) { CLOSE_SOCKET(acceptSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } } if(staticFalse) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed string"); } else { { size_t i = 0; /* FIX: Validate count before using it as the for loop variant */ if (count > 0 && count <= 20) { for (i = 0; i < (size_t)count; i++) { printLine("Hello"); } } } } } /* goodB2G2() - use badsource and goodsink by reversing the blocks in the second if */ static void goodB2G2() { int count; /* Initialize count */ count = -1; if(staticTrue) { { #ifdef _WIN32 WSADATA wsaData; int wsaDataInit = 0; #endif int recvResult; struct sockaddr_in service; SOCKET listenSocket = INVALID_SOCKET; SOCKET acceptSocket = INVALID_SOCKET; char inputBuffer[CHAR_ARRAY_SIZE]; do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsaData) != NO_ERROR) { break; } wsaDataInit = 1; #endif /* POTENTIAL FLAW: Read count using a listen socket */ listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listenSocket == INVALID_SOCKET) { break; } memset(&service, 0, sizeof(service)); service.sin_family = AF_INET; service.sin_addr.s_addr = INADDR_ANY; service.sin_port = htons(TCP_PORT); if (bind(listenSocket, (struct sockaddr*)&service, sizeof(service)) == SOCKET_ERROR) { break; } if (listen(listenSocket, LISTEN_BACKLOG) == SOCKET_ERROR) { break; } acceptSocket = accept(listenSocket, NULL, NULL); if (acceptSocket == SOCKET_ERROR) { break; } /* Abort on error or the connection was closed */ recvResult = recv(acceptSocket, inputBuffer, CHAR_ARRAY_SIZE - 1, 0); if (recvResult == SOCKET_ERROR || recvResult == 0) { break; } /* NUL-terminate the string */ inputBuffer[recvResult] = '\0'; /* Convert to int */ count = atoi(inputBuffer); } while (0); if (listenSocket != INVALID_SOCKET) { CLOSE_SOCKET(listenSocket); } if (acceptSocket != INVALID_SOCKET) { CLOSE_SOCKET(acceptSocket); } #ifdef _WIN32 if (wsaDataInit) { WSACleanup(); } #endif } } if(staticTrue) { { size_t i = 0; /* FIX: Validate count before using it as the for loop variant */ if (count > 0 && count <= 20) { for (i = 0; i < (size_t)count; i++) { printLine("Hello"); } } } } } /* goodG2B1() - use goodsource and badsink by changing the first staticTrue to staticFalse */ static void goodG2B1() { int count; /* Initialize count */ count = -1; if(staticFalse) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ printLine("Benign, fixed 
string"); } else { /* FIX: Use a relatively small number */ count = 20; } if(staticTrue) { { size_t i = 0; /* POTENTIAL FLAW: For loop using count as the loop variant and no validation */ for (i = 0; i < (size_t)count; i++) { printLine("Hello"); } } } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the first if */ static void goodG2B2() { int count; /* Initialize count */ count = -1; if(staticTrue) { /* FIX: Use a relatively small number */ count = 20; } if(staticTrue) { { size_t i = 0; /* POTENTIAL FLAW: For loop using count as the loop variant and no validation */ for (i = 0; i < (size_t)count; i++) { printLine("Hello"); } } } } void CWE400_Resource_Exhaustion__listen_socket_for_loop_05_good() { goodB2G1(); goodB2G2(); goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE400_Resource_Exhaustion__listen_socket_for_loop_05_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE400_Resource_Exhaustion__listen_socket_for_loop_05_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
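The GoodSink variants above all rely on the same guard: bound the externally supplied count before it drives the loop. Below is a standalone sketch of that check, using the testcase's 20-iteration cap; the function and constant names are illustrative, not part of the test suite.

/* Minimal illustration of the GoodSink pattern from the testcase above:
 * bound an externally supplied count before using it as a loop limit. */
#include <stdio.h>

#define MAX_ITERATIONS 20

static void print_hello_bounded(int count)
{
    /* FIX-style check: only loop for a small, positive, bounded count. */
    if (count > 0 && count <= MAX_ITERATIONS)
    {
        for (int i = 0; i < count; i++)
            printf("Hello\n");
    }
}

int main(void)
{
    print_hello_bounded(5);        /* prints 5 lines */
    print_hello_bounded(-1);       /* rejected: negative */
    print_hello_bounded(1000000);  /* rejected: would exhaust resources */
    return 0;
}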
5391.c
#define MEM 0
#include "test/misctest.c"
957778.c
/**
  ******************************************************************************
  * @file    TIM/TIM_PWMOutput/Src/main.c
  * @author  MCD Application Team
  * @version V1.3.0
  * @date    18-December-2015
  * @brief   This sample code shows how to use STM32F1xx TIM HAL API to generate
  *          4 signals in PWM.
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; COPYRIGHT(c) 2015 STMicroelectronics</center></h2>
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
  *   1. Redistributions of source code must retain the above copyright notice,
  *      this list of conditions and the following disclaimer.
  *   2. Redistributions in binary form must reproduce the above copyright notice,
  *      this list of conditions and the following disclaimer in the documentation
  *      and/or other materials provided with the distribution.
  *   3. Neither the name of STMicroelectronics nor the names of its contributors
  *      may be used to endorse or promote products derived from this software
  *      without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  ******************************************************************************
  */

/* Includes ------------------------------------------------------------------*/
#include "main.h"

/** @addtogroup STM32F1xx_HAL_Examples
  * @{
  */

/** @addtogroup TIM_PWMOutput
  * @{
  */

/* Private typedef -----------------------------------------------------------*/
#define  PERIOD_VALUE       (uint32_t)(700 - 1)               /* Period Value  */
#define  PULSE1_VALUE       (uint32_t)(PERIOD_VALUE/2)        /* Capture Compare 1 Value  */
#define  PULSE2_VALUE       (uint32_t)(PERIOD_VALUE*37.5/100) /* Capture Compare 2 Value  */
#define  PULSE3_VALUE       (uint32_t)(PERIOD_VALUE/4)        /* Capture Compare 3 Value  */
#define  PULSE4_VALUE       (uint32_t)(PERIOD_VALUE*12.5/100) /* Capture Compare 4 Value  */

/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Timer handler declaration */
TIM_HandleTypeDef    TimHandle;

/* Timer Output Compare Configuration Structure declaration */
TIM_OC_InitTypeDef sConfig;

/* Counter Prescaler value */
uint32_t uhPrescalerValue = 0;

/* Private function prototypes -----------------------------------------------*/
void SystemClock_Config(void);
static void Error_Handler(void);

/* Private functions ---------------------------------------------------------*/

/**
  * @brief  Main program.
  * @param  None
  * @retval None
  */
int main(void)
{
  /* STM32F103xG HAL library initialization:
       - Configure the Flash prefetch
       - Systick timer is configured by default as source of time base, but user
         can eventually implement his proper time base source (a general purpose
         timer for example or other time source), keeping in mind that Time base
         duration should be kept 1ms since PPP_TIMEOUT_VALUEs are defined and
         handled in milliseconds basis.
       - Set NVIC Group Priority to 4
       - Low Level Initialization
     */
  HAL_Init();

  /* Configure the system clock to 72 MHz */
  SystemClock_Config();

  /* Configure LED3 */
  BSP_LED_Init(LED3);

  /* Compute the prescaler value to have TIM2 counter clock equal to 2000000 Hz */
  uhPrescalerValue = (uint32_t)(SystemCoreClock / 2000000) - 1;

  /*##-1- Configure the TIM peripheral #######################################*/
  /* -----------------------------------------------------------------------
  TIM2 Configuration: generate 4 PWM signals with 4 different duty cycles.

    In this example TIM2 input clock (TIM2CLK) is set to APB1 clock (PCLK1) x2,
    since the APB1 prescaler is set to 2.
      TIM2CLK = PCLK1*2
      PCLK1   = HCLK/2
      => TIM2CLK = PCLK1*2 = (HCLK/2)*2 = HCLK = SystemCoreClock

    To get the TIM2 counter clock at 2 MHz, the prescaler is computed as follows:
       Prescaler = (TIM2CLK / TIM2 counter clock) - 1
       Prescaler = (SystemCoreClock / 2 MHz) - 1

    With the period (ARR) set to 699, the TIM2 output clock is:
       TIM2 output clock = TIM2 counter clock / (ARR + 1) = 2 MHz / 700 ~ 2.86 KHz

    TIM2 Channel1 duty cycle = (TIM2_CCR1/ TIM2_ARR + 1)* 100 = 50%
    TIM2 Channel2 duty cycle = (TIM2_CCR2/ TIM2_ARR + 1)* 100 = 37.5%
    TIM2 Channel3 duty cycle = (TIM2_CCR3/ TIM2_ARR + 1)* 100 = 25%
    TIM2 Channel4 duty cycle = (TIM2_CCR4/ TIM2_ARR + 1)* 100 = 12.5%

    Note:
     SystemCoreClock variable holds HCLK frequency and is defined in system_stm32f1xx.c file.
     Each time the core clock (HCLK) changes, the user has to update the SystemCoreClock
     variable value. Otherwise, any configuration based on this variable will be incorrect.
     This variable is updated in three ways:
      1) by calling CMSIS function SystemCoreClockUpdate()
      2) by calling HAL API function HAL_RCC_GetSysClockFreq()
      3) each time HAL_RCC_ClockConfig() is called to configure the system clock frequency
  ----------------------------------------------------------------------- */

  /* Initialize TIMx peripheral as follows:
       + Prescaler = (SystemCoreClock / 2000000) - 1
       + Period = (700 - 1)
       + ClockDivision = 0
       + Counter direction = Up
  */
  TimHandle.Instance = TIMx;

  TimHandle.Init.Prescaler         = uhPrescalerValue;
  TimHandle.Init.Period            = PERIOD_VALUE;
  TimHandle.Init.ClockDivision     = 0;
  TimHandle.Init.CounterMode       = TIM_COUNTERMODE_UP;
  TimHandle.Init.RepetitionCounter = 0;
  if (HAL_TIM_PWM_Init(&TimHandle) != HAL_OK)
  {
    /* Initialization Error */
    Error_Handler();
  }

  /*##-2- Configure the PWM channels #########################################*/
  /* Common configuration for all channels */
  sConfig.OCMode       = TIM_OCMODE_PWM1;
  sConfig.OCPolarity   = TIM_OCPOLARITY_HIGH;
  sConfig.OCFastMode   = TIM_OCFAST_DISABLE;
  sConfig.OCNPolarity  = TIM_OCNPOLARITY_HIGH;
  sConfig.OCNIdleState = TIM_OCNIDLESTATE_RESET;
  sConfig.OCIdleState  = TIM_OCIDLESTATE_RESET;

  /* Set the pulse value for channel 1 */
  sConfig.Pulse = PULSE1_VALUE;
  if (HAL_TIM_PWM_ConfigChannel(&TimHandle, &sConfig, TIM_CHANNEL_1) != HAL_OK)
  {
    /* Configuration Error */
    Error_Handler();
  }

  /* Set the pulse value for channel 2 */
  sConfig.Pulse = PULSE2_VALUE;
  if (HAL_TIM_PWM_ConfigChannel(&TimHandle, &sConfig, TIM_CHANNEL_2) != HAL_OK)
  {
    /* Configuration Error */
    Error_Handler();
  }

  /* Set the pulse value for channel 3 */
  sConfig.Pulse = PULSE3_VALUE;
  if (HAL_TIM_PWM_ConfigChannel(&TimHandle, &sConfig, TIM_CHANNEL_3) != HAL_OK)
  {
    /* Configuration Error */
    Error_Handler();
  }

  /* Set the pulse value for channel 4 */
  sConfig.Pulse = PULSE4_VALUE;
  if (HAL_TIM_PWM_ConfigChannel(&TimHandle, &sConfig, TIM_CHANNEL_4) != HAL_OK)
  {
    /* Configuration Error */
    Error_Handler();
  }

  /*##-3- Start PWM signals generation #######################################*/
  /* Start channel 1 */
  if (HAL_TIM_PWM_Start(&TimHandle, TIM_CHANNEL_1) != HAL_OK)
  {
    /* PWM Generation Error */
    Error_Handler();
  }
  /* Start channel 2 */
  if (HAL_TIM_PWM_Start(&TimHandle, TIM_CHANNEL_2) != HAL_OK)
  {
    /* PWM Generation Error */
    Error_Handler();
  }
  /* Start channel 3 */
  if (HAL_TIM_PWM_Start(&TimHandle, TIM_CHANNEL_3) != HAL_OK)
  {
    /* PWM generation Error */
    Error_Handler();
  }
  /* Start channel 4 */
  if (HAL_TIM_PWM_Start(&TimHandle, TIM_CHANNEL_4) != HAL_OK)
  {
    /* PWM generation Error */
    Error_Handler();
  }

  while (1)
  {
  }
}

/**
  * @brief  This function is executed in case of error occurrence.
  * @param  None
  * @retval None
  */
static void Error_Handler(void)
{
  /* Turn LED3 on */
  BSP_LED_On(LED3);
  while (1)
  {
  }
}

/**
  * @brief  System Clock Configuration
  *         The system Clock is configured as follow :
  *            System Clock source            = PLL (HSE)
  *            SYSCLK(Hz)                     = 72000000
  *            HCLK(Hz)                       = 72000000
  *            AHB Prescaler                  = 1
  *            APB1 Prescaler                 = 2
  *            APB2 Prescaler                 = 1
  *            HSE Frequency(Hz)              = 8000000
  *            HSE PREDIV1                    = 1
  *            PLLMUL                         = 9
  *            Flash Latency(WS)              = 2
  * @param  None
  * @retval None
  */
void SystemClock_Config(void)
{
  RCC_ClkInitTypeDef clkinitstruct = {0};
  RCC_OscInitTypeDef oscinitstruct = {0};

  /* Enable HSE Oscillator and activate PLL with HSE as source */
  oscinitstruct.OscillatorType  = RCC_OSCILLATORTYPE_HSE;
  oscinitstruct.HSEState        = RCC_HSE_ON;
  oscinitstruct.HSEPredivValue  = RCC_HSE_PREDIV_DIV1;
  oscinitstruct.PLL.PLLState    = RCC_PLL_ON;
  oscinitstruct.PLL.PLLSource   = RCC_PLLSOURCE_HSE;
  oscinitstruct.PLL.PLLMUL      = RCC_PLL_MUL9;
  if (HAL_RCC_OscConfig(&oscinitstruct)!= HAL_OK)
  {
    /* Initialization Error */
    while(1);
  }

  /* Select PLL as system clock source and configure the HCLK, PCLK1 and PCLK2
     clocks dividers */
  clkinitstruct.ClockType = (RCC_CLOCKTYPE_SYSCLK | RCC_CLOCKTYPE_HCLK | RCC_CLOCKTYPE_PCLK1 | RCC_CLOCKTYPE_PCLK2);
  clkinitstruct.SYSCLKSource = RCC_SYSCLKSOURCE_PLLCLK;
  clkinitstruct.AHBCLKDivider = RCC_SYSCLK_DIV1;
  clkinitstruct.APB2CLKDivider = RCC_HCLK_DIV1;
  clkinitstruct.APB1CLKDivider = RCC_HCLK_DIV2;
  if (HAL_RCC_ClockConfig(&clkinitstruct, FLASH_LATENCY_2)!= HAL_OK)
  {
    /* Initialization Error */
    while(1);
  }
}

#ifdef  USE_FULL_ASSERT
/**
  * @brief  Reports the name of the source file and the source line number
  *         where the assert_param error has occurred.
  * @param  file: pointer to the source file name
  * @param  line: assert_param error line source number
  * @retval None
  */
void assert_failed(uint8_t *file, uint32_t line)
{
  /* User can add his own implementation to report the file name and line number,
     ex: printf("Wrong parameters value: file %s on line %d\r\n", file, line) */

  /* Infinite loop */
  while (1)
  {
  }
}
#endif

/**
  * @}
  */

/**
  * @}
  */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
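The timing comment in main() can be checked numerically. Below is a small host-side sketch of the arithmetic, assuming SystemCoreClock = 72 MHz as set up by SystemClock_Config() and reusing the PERIOD/PULSE values from this file; the program itself is illustrative and not part of the HAL example.

/* Host-side check of the TIM2 timing arithmetic used above.
 * SystemCoreClock = 72 MHz comes from SystemClock_Config(); the other values
 * mirror the defines in main.c (PERIOD_VALUE = 700 - 1, PULSEx_VALUE). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint32_t system_core_clock = 72000000u;  /* HCLK = TIM2CLK */
    const uint32_t counter_clock     = 2000000u;   /* target counter clock, as in main.c */
    const uint32_t prescaler         = system_core_clock / counter_clock - 1u;  /* 35 */
    const uint32_t period            = 700u - 1u;  /* ARR = 699 */
    const uint32_t pulses[4]         = { period / 2u,
                                         (uint32_t)(period * 37.5 / 100),
                                         period / 4u,
                                         (uint32_t)(period * 12.5 / 100) };

    /* PWM frequency = counter clock / (ARR + 1); duty = CCRx / (ARR + 1). */
    printf("prescaler = %u, PWM frequency = %.1f Hz\n",
           (unsigned) prescaler, (double) counter_clock / (period + 1u));
    for (int ch = 0; ch < 4; ch++)
        printf("channel %d: CCR = %u, duty = %.1f %%\n",
               ch + 1, (unsigned) pulses[ch],
               100.0 * pulses[ch] / (period + 1u));
    return 0;
}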
404288.c
/* OpenGL loader generated by glad 0.1.33 on Mon Feb 24 12:32:18 2020. Language/Generator: C/C++ Specification: gl APIs: gl=4.6 Profile: core Extensions: Loader: True Local files: False Omit khrplatform: False Reproducible: False Commandline: --profile="core" --api="gl=4.6" --generator="c" --spec="gl" --extensions="" Online: https://glad.dav1d.de/#profile=core&language=c&specification=gl&loader=on&api=gl%3D4.6 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <glad/glad.h> static void* get_proc(const char *namez); #if defined(_WIN32) || defined(__CYGWIN__) #ifndef _WINDOWS_ #undef APIENTRY #endif #include <windows.h> static HMODULE libGL; typedef void* (APIENTRYP PFNWGLGETPROCADDRESSPROC_PRIVATE)(const char*); static PFNWGLGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; #ifdef _MSC_VER #ifdef __has_include #if __has_include(<winapifamily.h>) #define HAVE_WINAPIFAMILY 1 #endif #elif _MSC_VER >= 1700 && !_USING_V110_SDK71_ #define HAVE_WINAPIFAMILY 1 #endif #endif #ifdef HAVE_WINAPIFAMILY #include <winapifamily.h> #if !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) && WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) #define IS_UWP 1 #endif #endif static int open_gl(void) { #ifndef IS_UWP libGL = LoadLibraryW(L"opengl32.dll"); if(libGL != NULL) { void (* tmp)(void); tmp = (void(*)(void)) GetProcAddress(libGL, "wglGetProcAddress"); gladGetProcAddressPtr = (PFNWGLGETPROCADDRESSPROC_PRIVATE) tmp; return gladGetProcAddressPtr != NULL; } #endif return 0; } static void close_gl(void) { if(libGL != NULL) { FreeLibrary((HMODULE) libGL); libGL = NULL; } } #else #include <dlfcn.h> static void* libGL; #if !defined(__APPLE__) && !defined(__HAIKU__) typedef void* (APIENTRYP PFNGLXGETPROCADDRESSPROC_PRIVATE)(const char*); static PFNGLXGETPROCADDRESSPROC_PRIVATE gladGetProcAddressPtr; #endif static int open_gl(void) { #ifdef __APPLE__ static const char *NAMES[] = { "../Frameworks/OpenGL.framework/OpenGL", "/Library/Frameworks/OpenGL.framework/OpenGL", "/System/Library/Frameworks/OpenGL.framework/OpenGL", "/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL" }; #else static const char *NAMES[] = {"libGL.so.1", "libGL.so"}; #endif unsigned int index = 0; for(index = 0; index < (sizeof(NAMES) / sizeof(NAMES[0])); index++) { libGL = dlopen(NAMES[index], RTLD_NOW | RTLD_GLOBAL); if(libGL != NULL) { #if defined(__APPLE__) || defined(__HAIKU__) return 1; #else gladGetProcAddressPtr = (PFNGLXGETPROCADDRESSPROC_PRIVATE)dlsym(libGL, "glXGetProcAddressARB"); return gladGetProcAddressPtr != NULL; #endif } } return 0; } static void close_gl(void) { if(libGL != NULL) { dlclose(libGL); libGL = NULL; } } #endif static void* get_proc(const char *namez) { void* result = NULL; if(libGL == NULL) return NULL; #if !defined(__APPLE__) && !defined(__HAIKU__) if(gladGetProcAddressPtr != NULL) { result = gladGetProcAddressPtr(namez); } #endif if(result == NULL) { #if defined(_WIN32) || defined(__CYGWIN__) result = (void*)GetProcAddress((HMODULE) libGL, namez); #else result = dlsym(libGL, namez); #endif } return result; } int gladLoadGL(void) { int status = 0; if(open_gl()) { status = gladLoadGLLoader(&get_proc); close_gl(); } return status; } struct gladGLversionStruct GLVersion = { 0, 0 }; #if defined(GL_ES_VERSION_3_0) || defined(GL_VERSION_3_0) #define _GLAD_IS_SOME_NEW_VERSION 1 #endif static int max_loaded_major; static int max_loaded_minor; static const char *exts = NULL; static int num_exts_i = 0; static char **exts_i = NULL; static int get_exts(void) { #ifdef _GLAD_IS_SOME_NEW_VERSION 
if(max_loaded_major < 3) { #endif exts = (const char *)glGetString(GL_EXTENSIONS); #ifdef _GLAD_IS_SOME_NEW_VERSION } else { unsigned int index; num_exts_i = 0; glGetIntegerv(GL_NUM_EXTENSIONS, &num_exts_i); if (num_exts_i > 0) { exts_i = (char **)malloc((size_t)num_exts_i * (sizeof *exts_i)); } if (exts_i == NULL) { return 0; } for(index = 0; index < (unsigned)num_exts_i; index++) { const char *gl_str_tmp = (const char*)glGetStringi(GL_EXTENSIONS, index); size_t len = strlen(gl_str_tmp); char *local_str = (char*)malloc((len+1) * sizeof(char)); if(local_str != NULL) { memcpy(local_str, gl_str_tmp, (len+1) * sizeof(char)); } exts_i[index] = local_str; } } #endif return 1; } static void free_exts(void) { if (exts_i != NULL) { int index; for(index = 0; index < num_exts_i; index++) { free((char *)exts_i[index]); } free((void *)exts_i); exts_i = NULL; } } static int has_ext(const char *ext) { #ifdef _GLAD_IS_SOME_NEW_VERSION if(max_loaded_major < 3) { #endif const char *extensions; const char *loc; const char *terminator; extensions = exts; if(extensions == NULL || ext == NULL) { return 0; } while(1) { loc = strstr(extensions, ext); if(loc == NULL) { return 0; } terminator = loc + strlen(ext); if((loc == extensions || *(loc - 1) == ' ') && (*terminator == ' ' || *terminator == '\0')) { return 1; } extensions = terminator; } #ifdef _GLAD_IS_SOME_NEW_VERSION } else { int index; if(exts_i == NULL) return 0; for(index = 0; index < num_exts_i; index++) { const char *e = exts_i[index]; if(exts_i[index] != NULL && strcmp(e, ext) == 0) { return 1; } } } #endif return 0; } int GLAD_GL_VERSION_1_0 = 0; int GLAD_GL_VERSION_1_1 = 0; int GLAD_GL_VERSION_1_2 = 0; int GLAD_GL_VERSION_1_3 = 0; int GLAD_GL_VERSION_1_4 = 0; int GLAD_GL_VERSION_1_5 = 0; int GLAD_GL_VERSION_2_0 = 0; int GLAD_GL_VERSION_2_1 = 0; int GLAD_GL_VERSION_3_0 = 0; int GLAD_GL_VERSION_3_1 = 0; int GLAD_GL_VERSION_3_2 = 0; int GLAD_GL_VERSION_3_3 = 0; int GLAD_GL_VERSION_4_0 = 0; int GLAD_GL_VERSION_4_1 = 0; int GLAD_GL_VERSION_4_2 = 0; int GLAD_GL_VERSION_4_3 = 0; int GLAD_GL_VERSION_4_4 = 0; int GLAD_GL_VERSION_4_5 = 0; int GLAD_GL_VERSION_4_6 = 0; PFNGLACTIVESHADERPROGRAMPROC glad_glActiveShaderProgram = NULL; PFNGLACTIVETEXTUREPROC glad_glActiveTexture = NULL; PFNGLATTACHSHADERPROC glad_glAttachShader = NULL; PFNGLBEGINCONDITIONALRENDERPROC glad_glBeginConditionalRender = NULL; PFNGLBEGINQUERYPROC glad_glBeginQuery = NULL; PFNGLBEGINQUERYINDEXEDPROC glad_glBeginQueryIndexed = NULL; PFNGLBEGINTRANSFORMFEEDBACKPROC glad_glBeginTransformFeedback = NULL; PFNGLBINDATTRIBLOCATIONPROC glad_glBindAttribLocation = NULL; PFNGLBINDBUFFERPROC glad_glBindBuffer = NULL; PFNGLBINDBUFFERBASEPROC glad_glBindBufferBase = NULL; PFNGLBINDBUFFERRANGEPROC glad_glBindBufferRange = NULL; PFNGLBINDBUFFERSBASEPROC glad_glBindBuffersBase = NULL; PFNGLBINDBUFFERSRANGEPROC glad_glBindBuffersRange = NULL; PFNGLBINDFRAGDATALOCATIONPROC glad_glBindFragDataLocation = NULL; PFNGLBINDFRAGDATALOCATIONINDEXEDPROC glad_glBindFragDataLocationIndexed = NULL; PFNGLBINDFRAMEBUFFERPROC glad_glBindFramebuffer = NULL; PFNGLBINDIMAGETEXTUREPROC glad_glBindImageTexture = NULL; PFNGLBINDIMAGETEXTURESPROC glad_glBindImageTextures = NULL; PFNGLBINDPROGRAMPIPELINEPROC glad_glBindProgramPipeline = NULL; PFNGLBINDRENDERBUFFERPROC glad_glBindRenderbuffer = NULL; PFNGLBINDSAMPLERPROC glad_glBindSampler = NULL; PFNGLBINDSAMPLERSPROC glad_glBindSamplers = NULL; PFNGLBINDTEXTUREPROC glad_glBindTexture = NULL; PFNGLBINDTEXTUREUNITPROC glad_glBindTextureUnit = NULL; PFNGLBINDTEXTURESPROC 
glad_glBindTextures = NULL; PFNGLBINDTRANSFORMFEEDBACKPROC glad_glBindTransformFeedback = NULL; PFNGLBINDVERTEXARRAYPROC glad_glBindVertexArray = NULL; PFNGLBINDVERTEXBUFFERPROC glad_glBindVertexBuffer = NULL; PFNGLBINDVERTEXBUFFERSPROC glad_glBindVertexBuffers = NULL; PFNGLBLENDCOLORPROC glad_glBlendColor = NULL; PFNGLBLENDEQUATIONPROC glad_glBlendEquation = NULL; PFNGLBLENDEQUATIONSEPARATEPROC glad_glBlendEquationSeparate = NULL; PFNGLBLENDEQUATIONSEPARATEIPROC glad_glBlendEquationSeparatei = NULL; PFNGLBLENDEQUATIONIPROC glad_glBlendEquationi = NULL; PFNGLBLENDFUNCPROC glad_glBlendFunc = NULL; PFNGLBLENDFUNCSEPARATEPROC glad_glBlendFuncSeparate = NULL; PFNGLBLENDFUNCSEPARATEIPROC glad_glBlendFuncSeparatei = NULL; PFNGLBLENDFUNCIPROC glad_glBlendFunci = NULL; PFNGLBLITFRAMEBUFFERPROC glad_glBlitFramebuffer = NULL; PFNGLBLITNAMEDFRAMEBUFFERPROC glad_glBlitNamedFramebuffer = NULL; PFNGLBUFFERDATAPROC glad_glBufferData = NULL; PFNGLBUFFERSTORAGEPROC glad_glBufferStorage = NULL; PFNGLBUFFERSUBDATAPROC glad_glBufferSubData = NULL; PFNGLCHECKFRAMEBUFFERSTATUSPROC glad_glCheckFramebufferStatus = NULL; PFNGLCHECKNAMEDFRAMEBUFFERSTATUSPROC glad_glCheckNamedFramebufferStatus = NULL; PFNGLCLAMPCOLORPROC glad_glClampColor = NULL; PFNGLCLEARPROC glad_glClear = NULL; PFNGLCLEARBUFFERDATAPROC glad_glClearBufferData = NULL; PFNGLCLEARBUFFERSUBDATAPROC glad_glClearBufferSubData = NULL; PFNGLCLEARBUFFERFIPROC glad_glClearBufferfi = NULL; PFNGLCLEARBUFFERFVPROC glad_glClearBufferfv = NULL; PFNGLCLEARBUFFERIVPROC glad_glClearBufferiv = NULL; PFNGLCLEARBUFFERUIVPROC glad_glClearBufferuiv = NULL; PFNGLCLEARCOLORPROC glad_glClearColor = NULL; PFNGLCLEARDEPTHPROC glad_glClearDepth = NULL; PFNGLCLEARDEPTHFPROC glad_glClearDepthf = NULL; PFNGLCLEARNAMEDBUFFERDATAPROC glad_glClearNamedBufferData = NULL; PFNGLCLEARNAMEDBUFFERSUBDATAPROC glad_glClearNamedBufferSubData = NULL; PFNGLCLEARNAMEDFRAMEBUFFERFIPROC glad_glClearNamedFramebufferfi = NULL; PFNGLCLEARNAMEDFRAMEBUFFERFVPROC glad_glClearNamedFramebufferfv = NULL; PFNGLCLEARNAMEDFRAMEBUFFERIVPROC glad_glClearNamedFramebufferiv = NULL; PFNGLCLEARNAMEDFRAMEBUFFERUIVPROC glad_glClearNamedFramebufferuiv = NULL; PFNGLCLEARSTENCILPROC glad_glClearStencil = NULL; PFNGLCLEARTEXIMAGEPROC glad_glClearTexImage = NULL; PFNGLCLEARTEXSUBIMAGEPROC glad_glClearTexSubImage = NULL; PFNGLCLIENTWAITSYNCPROC glad_glClientWaitSync = NULL; PFNGLCLIPCONTROLPROC glad_glClipControl = NULL; PFNGLCOLORMASKPROC glad_glColorMask = NULL; PFNGLCOLORMASKIPROC glad_glColorMaski = NULL; PFNGLCOLORP3UIPROC glad_glColorP3ui = NULL; PFNGLCOLORP3UIVPROC glad_glColorP3uiv = NULL; PFNGLCOLORP4UIPROC glad_glColorP4ui = NULL; PFNGLCOLORP4UIVPROC glad_glColorP4uiv = NULL; PFNGLCOMPILESHADERPROC glad_glCompileShader = NULL; PFNGLCOMPRESSEDTEXIMAGE1DPROC glad_glCompressedTexImage1D = NULL; PFNGLCOMPRESSEDTEXIMAGE2DPROC glad_glCompressedTexImage2D = NULL; PFNGLCOMPRESSEDTEXIMAGE3DPROC glad_glCompressedTexImage3D = NULL; PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC glad_glCompressedTexSubImage1D = NULL; PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC glad_glCompressedTexSubImage2D = NULL; PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC glad_glCompressedTexSubImage3D = NULL; PFNGLCOMPRESSEDTEXTURESUBIMAGE1DPROC glad_glCompressedTextureSubImage1D = NULL; PFNGLCOMPRESSEDTEXTURESUBIMAGE2DPROC glad_glCompressedTextureSubImage2D = NULL; PFNGLCOMPRESSEDTEXTURESUBIMAGE3DPROC glad_glCompressedTextureSubImage3D = NULL; PFNGLCOPYBUFFERSUBDATAPROC glad_glCopyBufferSubData = NULL; PFNGLCOPYIMAGESUBDATAPROC glad_glCopyImageSubData = NULL; 
PFNGLCOPYNAMEDBUFFERSUBDATAPROC glad_glCopyNamedBufferSubData = NULL; PFNGLCOPYTEXIMAGE1DPROC glad_glCopyTexImage1D = NULL; PFNGLCOPYTEXIMAGE2DPROC glad_glCopyTexImage2D = NULL; PFNGLCOPYTEXSUBIMAGE1DPROC glad_glCopyTexSubImage1D = NULL; PFNGLCOPYTEXSUBIMAGE2DPROC glad_glCopyTexSubImage2D = NULL; PFNGLCOPYTEXSUBIMAGE3DPROC glad_glCopyTexSubImage3D = NULL; PFNGLCOPYTEXTURESUBIMAGE1DPROC glad_glCopyTextureSubImage1D = NULL; PFNGLCOPYTEXTURESUBIMAGE2DPROC glad_glCopyTextureSubImage2D = NULL; PFNGLCOPYTEXTURESUBIMAGE3DPROC glad_glCopyTextureSubImage3D = NULL; PFNGLCREATEBUFFERSPROC glad_glCreateBuffers = NULL; PFNGLCREATEFRAMEBUFFERSPROC glad_glCreateFramebuffers = NULL; PFNGLCREATEPROGRAMPROC glad_glCreateProgram = NULL; PFNGLCREATEPROGRAMPIPELINESPROC glad_glCreateProgramPipelines = NULL; PFNGLCREATEQUERIESPROC glad_glCreateQueries = NULL; PFNGLCREATERENDERBUFFERSPROC glad_glCreateRenderbuffers = NULL; PFNGLCREATESAMPLERSPROC glad_glCreateSamplers = NULL; PFNGLCREATESHADERPROC glad_glCreateShader = NULL; PFNGLCREATESHADERPROGRAMVPROC glad_glCreateShaderProgramv = NULL; PFNGLCREATETEXTURESPROC glad_glCreateTextures = NULL; PFNGLCREATETRANSFORMFEEDBACKSPROC glad_glCreateTransformFeedbacks = NULL; PFNGLCREATEVERTEXARRAYSPROC glad_glCreateVertexArrays = NULL; PFNGLCULLFACEPROC glad_glCullFace = NULL; PFNGLDEBUGMESSAGECALLBACKPROC glad_glDebugMessageCallback = NULL; PFNGLDEBUGMESSAGECONTROLPROC glad_glDebugMessageControl = NULL; PFNGLDEBUGMESSAGEINSERTPROC glad_glDebugMessageInsert = NULL; PFNGLDELETEBUFFERSPROC glad_glDeleteBuffers = NULL; PFNGLDELETEFRAMEBUFFERSPROC glad_glDeleteFramebuffers = NULL; PFNGLDELETEPROGRAMPROC glad_glDeleteProgram = NULL; PFNGLDELETEPROGRAMPIPELINESPROC glad_glDeleteProgramPipelines = NULL; PFNGLDELETEQUERIESPROC glad_glDeleteQueries = NULL; PFNGLDELETERENDERBUFFERSPROC glad_glDeleteRenderbuffers = NULL; PFNGLDELETESAMPLERSPROC glad_glDeleteSamplers = NULL; PFNGLDELETESHADERPROC glad_glDeleteShader = NULL; PFNGLDELETESYNCPROC glad_glDeleteSync = NULL; PFNGLDELETETEXTURESPROC glad_glDeleteTextures = NULL; PFNGLDELETETRANSFORMFEEDBACKSPROC glad_glDeleteTransformFeedbacks = NULL; PFNGLDELETEVERTEXARRAYSPROC glad_glDeleteVertexArrays = NULL; PFNGLDEPTHFUNCPROC glad_glDepthFunc = NULL; PFNGLDEPTHMASKPROC glad_glDepthMask = NULL; PFNGLDEPTHRANGEPROC glad_glDepthRange = NULL; PFNGLDEPTHRANGEARRAYVPROC glad_glDepthRangeArrayv = NULL; PFNGLDEPTHRANGEINDEXEDPROC glad_glDepthRangeIndexed = NULL; PFNGLDEPTHRANGEFPROC glad_glDepthRangef = NULL; PFNGLDETACHSHADERPROC glad_glDetachShader = NULL; PFNGLDISABLEPROC glad_glDisable = NULL; PFNGLDISABLEVERTEXARRAYATTRIBPROC glad_glDisableVertexArrayAttrib = NULL; PFNGLDISABLEVERTEXATTRIBARRAYPROC glad_glDisableVertexAttribArray = NULL; PFNGLDISABLEIPROC glad_glDisablei = NULL; PFNGLDISPATCHCOMPUTEPROC glad_glDispatchCompute = NULL; PFNGLDISPATCHCOMPUTEINDIRECTPROC glad_glDispatchComputeIndirect = NULL; PFNGLDRAWARRAYSPROC glad_glDrawArrays = NULL; PFNGLDRAWARRAYSINDIRECTPROC glad_glDrawArraysIndirect = NULL; PFNGLDRAWARRAYSINSTANCEDPROC glad_glDrawArraysInstanced = NULL; PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC glad_glDrawArraysInstancedBaseInstance = NULL; PFNGLDRAWBUFFERPROC glad_glDrawBuffer = NULL; PFNGLDRAWBUFFERSPROC glad_glDrawBuffers = NULL; PFNGLDRAWELEMENTSPROC glad_glDrawElements = NULL; PFNGLDRAWELEMENTSBASEVERTEXPROC glad_glDrawElementsBaseVertex = NULL; PFNGLDRAWELEMENTSINDIRECTPROC glad_glDrawElementsIndirect = NULL; PFNGLDRAWELEMENTSINSTANCEDPROC glad_glDrawElementsInstanced = NULL; 
PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC glad_glDrawElementsInstancedBaseInstance = NULL; PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC glad_glDrawElementsInstancedBaseVertex = NULL; PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC glad_glDrawElementsInstancedBaseVertexBaseInstance = NULL; PFNGLDRAWRANGEELEMENTSPROC glad_glDrawRangeElements = NULL; PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC glad_glDrawRangeElementsBaseVertex = NULL; PFNGLDRAWTRANSFORMFEEDBACKPROC glad_glDrawTransformFeedback = NULL; PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC glad_glDrawTransformFeedbackInstanced = NULL; PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC glad_glDrawTransformFeedbackStream = NULL; PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC glad_glDrawTransformFeedbackStreamInstanced = NULL; PFNGLENABLEPROC glad_glEnable = NULL; PFNGLENABLEVERTEXARRAYATTRIBPROC glad_glEnableVertexArrayAttrib = NULL; PFNGLENABLEVERTEXATTRIBARRAYPROC glad_glEnableVertexAttribArray = NULL; PFNGLENABLEIPROC glad_glEnablei = NULL; PFNGLENDCONDITIONALRENDERPROC glad_glEndConditionalRender = NULL; PFNGLENDQUERYPROC glad_glEndQuery = NULL; PFNGLENDQUERYINDEXEDPROC glad_glEndQueryIndexed = NULL; PFNGLENDTRANSFORMFEEDBACKPROC glad_glEndTransformFeedback = NULL; PFNGLFENCESYNCPROC glad_glFenceSync = NULL; PFNGLFINISHPROC glad_glFinish = NULL; PFNGLFLUSHPROC glad_glFlush = NULL; PFNGLFLUSHMAPPEDBUFFERRANGEPROC glad_glFlushMappedBufferRange = NULL; PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEPROC glad_glFlushMappedNamedBufferRange = NULL; PFNGLFRAMEBUFFERPARAMETERIPROC glad_glFramebufferParameteri = NULL; PFNGLFRAMEBUFFERRENDERBUFFERPROC glad_glFramebufferRenderbuffer = NULL; PFNGLFRAMEBUFFERTEXTUREPROC glad_glFramebufferTexture = NULL; PFNGLFRAMEBUFFERTEXTURE1DPROC glad_glFramebufferTexture1D = NULL; PFNGLFRAMEBUFFERTEXTURE2DPROC glad_glFramebufferTexture2D = NULL; PFNGLFRAMEBUFFERTEXTURE3DPROC glad_glFramebufferTexture3D = NULL; PFNGLFRAMEBUFFERTEXTURELAYERPROC glad_glFramebufferTextureLayer = NULL; PFNGLFRONTFACEPROC glad_glFrontFace = NULL; PFNGLGENBUFFERSPROC glad_glGenBuffers = NULL; PFNGLGENFRAMEBUFFERSPROC glad_glGenFramebuffers = NULL; PFNGLGENPROGRAMPIPELINESPROC glad_glGenProgramPipelines = NULL; PFNGLGENQUERIESPROC glad_glGenQueries = NULL; PFNGLGENRENDERBUFFERSPROC glad_glGenRenderbuffers = NULL; PFNGLGENSAMPLERSPROC glad_glGenSamplers = NULL; PFNGLGENTEXTURESPROC glad_glGenTextures = NULL; PFNGLGENTRANSFORMFEEDBACKSPROC glad_glGenTransformFeedbacks = NULL; PFNGLGENVERTEXARRAYSPROC glad_glGenVertexArrays = NULL; PFNGLGENERATEMIPMAPPROC glad_glGenerateMipmap = NULL; PFNGLGENERATETEXTUREMIPMAPPROC glad_glGenerateTextureMipmap = NULL; PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC glad_glGetActiveAtomicCounterBufferiv = NULL; PFNGLGETACTIVEATTRIBPROC glad_glGetActiveAttrib = NULL; PFNGLGETACTIVESUBROUTINENAMEPROC glad_glGetActiveSubroutineName = NULL; PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC glad_glGetActiveSubroutineUniformName = NULL; PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC glad_glGetActiveSubroutineUniformiv = NULL; PFNGLGETACTIVEUNIFORMPROC glad_glGetActiveUniform = NULL; PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC glad_glGetActiveUniformBlockName = NULL; PFNGLGETACTIVEUNIFORMBLOCKIVPROC glad_glGetActiveUniformBlockiv = NULL; PFNGLGETACTIVEUNIFORMNAMEPROC glad_glGetActiveUniformName = NULL; PFNGLGETACTIVEUNIFORMSIVPROC glad_glGetActiveUniformsiv = NULL; PFNGLGETATTACHEDSHADERSPROC glad_glGetAttachedShaders = NULL; PFNGLGETATTRIBLOCATIONPROC glad_glGetAttribLocation = NULL; PFNGLGETBOOLEANI_VPROC glad_glGetBooleani_v = NULL; PFNGLGETBOOLEANVPROC glad_glGetBooleanv = NULL; 
PFNGLGETBUFFERPARAMETERI64VPROC glad_glGetBufferParameteri64v = NULL; PFNGLGETBUFFERPARAMETERIVPROC glad_glGetBufferParameteriv = NULL; PFNGLGETBUFFERPOINTERVPROC glad_glGetBufferPointerv = NULL; PFNGLGETBUFFERSUBDATAPROC glad_glGetBufferSubData = NULL; PFNGLGETCOMPRESSEDTEXIMAGEPROC glad_glGetCompressedTexImage = NULL; PFNGLGETCOMPRESSEDTEXTUREIMAGEPROC glad_glGetCompressedTextureImage = NULL; PFNGLGETCOMPRESSEDTEXTURESUBIMAGEPROC glad_glGetCompressedTextureSubImage = NULL; PFNGLGETDEBUGMESSAGELOGPROC glad_glGetDebugMessageLog = NULL; PFNGLGETDOUBLEI_VPROC glad_glGetDoublei_v = NULL; PFNGLGETDOUBLEVPROC glad_glGetDoublev = NULL; PFNGLGETERRORPROC glad_glGetError = NULL; PFNGLGETFLOATI_VPROC glad_glGetFloati_v = NULL; PFNGLGETFLOATVPROC glad_glGetFloatv = NULL; PFNGLGETFRAGDATAINDEXPROC glad_glGetFragDataIndex = NULL; PFNGLGETFRAGDATALOCATIONPROC glad_glGetFragDataLocation = NULL; PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetFramebufferAttachmentParameteriv = NULL; PFNGLGETFRAMEBUFFERPARAMETERIVPROC glad_glGetFramebufferParameteriv = NULL; PFNGLGETGRAPHICSRESETSTATUSPROC glad_glGetGraphicsResetStatus = NULL; PFNGLGETINTEGER64I_VPROC glad_glGetInteger64i_v = NULL; PFNGLGETINTEGER64VPROC glad_glGetInteger64v = NULL; PFNGLGETINTEGERI_VPROC glad_glGetIntegeri_v = NULL; PFNGLGETINTEGERVPROC glad_glGetIntegerv = NULL; PFNGLGETINTERNALFORMATI64VPROC glad_glGetInternalformati64v = NULL; PFNGLGETINTERNALFORMATIVPROC glad_glGetInternalformativ = NULL; PFNGLGETMULTISAMPLEFVPROC glad_glGetMultisamplefv = NULL; PFNGLGETNAMEDBUFFERPARAMETERI64VPROC glad_glGetNamedBufferParameteri64v = NULL; PFNGLGETNAMEDBUFFERPARAMETERIVPROC glad_glGetNamedBufferParameteriv = NULL; PFNGLGETNAMEDBUFFERPOINTERVPROC glad_glGetNamedBufferPointerv = NULL; PFNGLGETNAMEDBUFFERSUBDATAPROC glad_glGetNamedBufferSubData = NULL; PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVPROC glad_glGetNamedFramebufferAttachmentParameteriv = NULL; PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVPROC glad_glGetNamedFramebufferParameteriv = NULL; PFNGLGETNAMEDRENDERBUFFERPARAMETERIVPROC glad_glGetNamedRenderbufferParameteriv = NULL; PFNGLGETOBJECTLABELPROC glad_glGetObjectLabel = NULL; PFNGLGETOBJECTPTRLABELPROC glad_glGetObjectPtrLabel = NULL; PFNGLGETPOINTERVPROC glad_glGetPointerv = NULL; PFNGLGETPROGRAMBINARYPROC glad_glGetProgramBinary = NULL; PFNGLGETPROGRAMINFOLOGPROC glad_glGetProgramInfoLog = NULL; PFNGLGETPROGRAMINTERFACEIVPROC glad_glGetProgramInterfaceiv = NULL; PFNGLGETPROGRAMPIPELINEINFOLOGPROC glad_glGetProgramPipelineInfoLog = NULL; PFNGLGETPROGRAMPIPELINEIVPROC glad_glGetProgramPipelineiv = NULL; PFNGLGETPROGRAMRESOURCEINDEXPROC glad_glGetProgramResourceIndex = NULL; PFNGLGETPROGRAMRESOURCELOCATIONPROC glad_glGetProgramResourceLocation = NULL; PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC glad_glGetProgramResourceLocationIndex = NULL; PFNGLGETPROGRAMRESOURCENAMEPROC glad_glGetProgramResourceName = NULL; PFNGLGETPROGRAMRESOURCEIVPROC glad_glGetProgramResourceiv = NULL; PFNGLGETPROGRAMSTAGEIVPROC glad_glGetProgramStageiv = NULL; PFNGLGETPROGRAMIVPROC glad_glGetProgramiv = NULL; PFNGLGETQUERYBUFFEROBJECTI64VPROC glad_glGetQueryBufferObjecti64v = NULL; PFNGLGETQUERYBUFFEROBJECTIVPROC glad_glGetQueryBufferObjectiv = NULL; PFNGLGETQUERYBUFFEROBJECTUI64VPROC glad_glGetQueryBufferObjectui64v = NULL; PFNGLGETQUERYBUFFEROBJECTUIVPROC glad_glGetQueryBufferObjectuiv = NULL; PFNGLGETQUERYINDEXEDIVPROC glad_glGetQueryIndexediv = NULL; PFNGLGETQUERYOBJECTI64VPROC glad_glGetQueryObjecti64v = NULL; PFNGLGETQUERYOBJECTIVPROC glad_glGetQueryObjectiv = NULL; 
PFNGLGETQUERYOBJECTUI64VPROC glad_glGetQueryObjectui64v = NULL; PFNGLGETQUERYOBJECTUIVPROC glad_glGetQueryObjectuiv = NULL; PFNGLGETQUERYIVPROC glad_glGetQueryiv = NULL; PFNGLGETRENDERBUFFERPARAMETERIVPROC glad_glGetRenderbufferParameteriv = NULL; PFNGLGETSAMPLERPARAMETERIIVPROC glad_glGetSamplerParameterIiv = NULL; PFNGLGETSAMPLERPARAMETERIUIVPROC glad_glGetSamplerParameterIuiv = NULL; PFNGLGETSAMPLERPARAMETERFVPROC glad_glGetSamplerParameterfv = NULL; PFNGLGETSAMPLERPARAMETERIVPROC glad_glGetSamplerParameteriv = NULL; PFNGLGETSHADERINFOLOGPROC glad_glGetShaderInfoLog = NULL; PFNGLGETSHADERPRECISIONFORMATPROC glad_glGetShaderPrecisionFormat = NULL; PFNGLGETSHADERSOURCEPROC glad_glGetShaderSource = NULL; PFNGLGETSHADERIVPROC glad_glGetShaderiv = NULL; PFNGLGETSTRINGPROC glad_glGetString = NULL; PFNGLGETSTRINGIPROC glad_glGetStringi = NULL; PFNGLGETSUBROUTINEINDEXPROC glad_glGetSubroutineIndex = NULL; PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC glad_glGetSubroutineUniformLocation = NULL; PFNGLGETSYNCIVPROC glad_glGetSynciv = NULL; PFNGLGETTEXIMAGEPROC glad_glGetTexImage = NULL; PFNGLGETTEXLEVELPARAMETERFVPROC glad_glGetTexLevelParameterfv = NULL; PFNGLGETTEXLEVELPARAMETERIVPROC glad_glGetTexLevelParameteriv = NULL; PFNGLGETTEXPARAMETERIIVPROC glad_glGetTexParameterIiv = NULL; PFNGLGETTEXPARAMETERIUIVPROC glad_glGetTexParameterIuiv = NULL; PFNGLGETTEXPARAMETERFVPROC glad_glGetTexParameterfv = NULL; PFNGLGETTEXPARAMETERIVPROC glad_glGetTexParameteriv = NULL; PFNGLGETTEXTUREIMAGEPROC glad_glGetTextureImage = NULL; PFNGLGETTEXTURELEVELPARAMETERFVPROC glad_glGetTextureLevelParameterfv = NULL; PFNGLGETTEXTURELEVELPARAMETERIVPROC glad_glGetTextureLevelParameteriv = NULL; PFNGLGETTEXTUREPARAMETERIIVPROC glad_glGetTextureParameterIiv = NULL; PFNGLGETTEXTUREPARAMETERIUIVPROC glad_glGetTextureParameterIuiv = NULL; PFNGLGETTEXTUREPARAMETERFVPROC glad_glGetTextureParameterfv = NULL; PFNGLGETTEXTUREPARAMETERIVPROC glad_glGetTextureParameteriv = NULL; PFNGLGETTEXTURESUBIMAGEPROC glad_glGetTextureSubImage = NULL; PFNGLGETTRANSFORMFEEDBACKVARYINGPROC glad_glGetTransformFeedbackVarying = NULL; PFNGLGETTRANSFORMFEEDBACKI64_VPROC glad_glGetTransformFeedbacki64_v = NULL; PFNGLGETTRANSFORMFEEDBACKI_VPROC glad_glGetTransformFeedbacki_v = NULL; PFNGLGETTRANSFORMFEEDBACKIVPROC glad_glGetTransformFeedbackiv = NULL; PFNGLGETUNIFORMBLOCKINDEXPROC glad_glGetUniformBlockIndex = NULL; PFNGLGETUNIFORMINDICESPROC glad_glGetUniformIndices = NULL; PFNGLGETUNIFORMLOCATIONPROC glad_glGetUniformLocation = NULL; PFNGLGETUNIFORMSUBROUTINEUIVPROC glad_glGetUniformSubroutineuiv = NULL; PFNGLGETUNIFORMDVPROC glad_glGetUniformdv = NULL; PFNGLGETUNIFORMFVPROC glad_glGetUniformfv = NULL; PFNGLGETUNIFORMIVPROC glad_glGetUniformiv = NULL; PFNGLGETUNIFORMUIVPROC glad_glGetUniformuiv = NULL; PFNGLGETVERTEXARRAYINDEXED64IVPROC glad_glGetVertexArrayIndexed64iv = NULL; PFNGLGETVERTEXARRAYINDEXEDIVPROC glad_glGetVertexArrayIndexediv = NULL; PFNGLGETVERTEXARRAYIVPROC glad_glGetVertexArrayiv = NULL; PFNGLGETVERTEXATTRIBIIVPROC glad_glGetVertexAttribIiv = NULL; PFNGLGETVERTEXATTRIBIUIVPROC glad_glGetVertexAttribIuiv = NULL; PFNGLGETVERTEXATTRIBLDVPROC glad_glGetVertexAttribLdv = NULL; PFNGLGETVERTEXATTRIBPOINTERVPROC glad_glGetVertexAttribPointerv = NULL; PFNGLGETVERTEXATTRIBDVPROC glad_glGetVertexAttribdv = NULL; PFNGLGETVERTEXATTRIBFVPROC glad_glGetVertexAttribfv = NULL; PFNGLGETVERTEXATTRIBIVPROC glad_glGetVertexAttribiv = NULL; PFNGLGETNCOLORTABLEPROC glad_glGetnColorTable = NULL; PFNGLGETNCOMPRESSEDTEXIMAGEPROC glad_glGetnCompressedTexImage = 
NULL; PFNGLGETNCONVOLUTIONFILTERPROC glad_glGetnConvolutionFilter = NULL; PFNGLGETNHISTOGRAMPROC glad_glGetnHistogram = NULL; PFNGLGETNMAPDVPROC glad_glGetnMapdv = NULL; PFNGLGETNMAPFVPROC glad_glGetnMapfv = NULL; PFNGLGETNMAPIVPROC glad_glGetnMapiv = NULL; PFNGLGETNMINMAXPROC glad_glGetnMinmax = NULL; PFNGLGETNPIXELMAPFVPROC glad_glGetnPixelMapfv = NULL; PFNGLGETNPIXELMAPUIVPROC glad_glGetnPixelMapuiv = NULL; PFNGLGETNPIXELMAPUSVPROC glad_glGetnPixelMapusv = NULL; PFNGLGETNPOLYGONSTIPPLEPROC glad_glGetnPolygonStipple = NULL; PFNGLGETNSEPARABLEFILTERPROC glad_glGetnSeparableFilter = NULL; PFNGLGETNTEXIMAGEPROC glad_glGetnTexImage = NULL; PFNGLGETNUNIFORMDVPROC glad_glGetnUniformdv = NULL; PFNGLGETNUNIFORMFVPROC glad_glGetnUniformfv = NULL; PFNGLGETNUNIFORMIVPROC glad_glGetnUniformiv = NULL; PFNGLGETNUNIFORMUIVPROC glad_glGetnUniformuiv = NULL; PFNGLHINTPROC glad_glHint = NULL; PFNGLINVALIDATEBUFFERDATAPROC glad_glInvalidateBufferData = NULL; PFNGLINVALIDATEBUFFERSUBDATAPROC glad_glInvalidateBufferSubData = NULL; PFNGLINVALIDATEFRAMEBUFFERPROC glad_glInvalidateFramebuffer = NULL; PFNGLINVALIDATENAMEDFRAMEBUFFERDATAPROC glad_glInvalidateNamedFramebufferData = NULL; PFNGLINVALIDATENAMEDFRAMEBUFFERSUBDATAPROC glad_glInvalidateNamedFramebufferSubData = NULL; PFNGLINVALIDATESUBFRAMEBUFFERPROC glad_glInvalidateSubFramebuffer = NULL; PFNGLINVALIDATETEXIMAGEPROC glad_glInvalidateTexImage = NULL; PFNGLINVALIDATETEXSUBIMAGEPROC glad_glInvalidateTexSubImage = NULL; PFNGLISBUFFERPROC glad_glIsBuffer = NULL; PFNGLISENABLEDPROC glad_glIsEnabled = NULL; PFNGLISENABLEDIPROC glad_glIsEnabledi = NULL; PFNGLISFRAMEBUFFERPROC glad_glIsFramebuffer = NULL; PFNGLISPROGRAMPROC glad_glIsProgram = NULL; PFNGLISPROGRAMPIPELINEPROC glad_glIsProgramPipeline = NULL; PFNGLISQUERYPROC glad_glIsQuery = NULL; PFNGLISRENDERBUFFERPROC glad_glIsRenderbuffer = NULL; PFNGLISSAMPLERPROC glad_glIsSampler = NULL; PFNGLISSHADERPROC glad_glIsShader = NULL; PFNGLISSYNCPROC glad_glIsSync = NULL; PFNGLISTEXTUREPROC glad_glIsTexture = NULL; PFNGLISTRANSFORMFEEDBACKPROC glad_glIsTransformFeedback = NULL; PFNGLISVERTEXARRAYPROC glad_glIsVertexArray = NULL; PFNGLLINEWIDTHPROC glad_glLineWidth = NULL; PFNGLLINKPROGRAMPROC glad_glLinkProgram = NULL; PFNGLLOGICOPPROC glad_glLogicOp = NULL; PFNGLMAPBUFFERPROC glad_glMapBuffer = NULL; PFNGLMAPBUFFERRANGEPROC glad_glMapBufferRange = NULL; PFNGLMAPNAMEDBUFFERPROC glad_glMapNamedBuffer = NULL; PFNGLMAPNAMEDBUFFERRANGEPROC glad_glMapNamedBufferRange = NULL; PFNGLMEMORYBARRIERPROC glad_glMemoryBarrier = NULL; PFNGLMEMORYBARRIERBYREGIONPROC glad_glMemoryBarrierByRegion = NULL; PFNGLMINSAMPLESHADINGPROC glad_glMinSampleShading = NULL; PFNGLMULTIDRAWARRAYSPROC glad_glMultiDrawArrays = NULL; PFNGLMULTIDRAWARRAYSINDIRECTPROC glad_glMultiDrawArraysIndirect = NULL; PFNGLMULTIDRAWARRAYSINDIRECTCOUNTPROC glad_glMultiDrawArraysIndirectCount = NULL; PFNGLMULTIDRAWELEMENTSPROC glad_glMultiDrawElements = NULL; PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC glad_glMultiDrawElementsBaseVertex = NULL; PFNGLMULTIDRAWELEMENTSINDIRECTPROC glad_glMultiDrawElementsIndirect = NULL; PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTPROC glad_glMultiDrawElementsIndirectCount = NULL; PFNGLMULTITEXCOORDP1UIPROC glad_glMultiTexCoordP1ui = NULL; PFNGLMULTITEXCOORDP1UIVPROC glad_glMultiTexCoordP1uiv = NULL; PFNGLMULTITEXCOORDP2UIPROC glad_glMultiTexCoordP2ui = NULL; PFNGLMULTITEXCOORDP2UIVPROC glad_glMultiTexCoordP2uiv = NULL; PFNGLMULTITEXCOORDP3UIPROC glad_glMultiTexCoordP3ui = NULL; PFNGLMULTITEXCOORDP3UIVPROC glad_glMultiTexCoordP3uiv = NULL; 
PFNGLMULTITEXCOORDP4UIPROC glad_glMultiTexCoordP4ui = NULL; PFNGLMULTITEXCOORDP4UIVPROC glad_glMultiTexCoordP4uiv = NULL; PFNGLNAMEDBUFFERDATAPROC glad_glNamedBufferData = NULL; PFNGLNAMEDBUFFERSTORAGEPROC glad_glNamedBufferStorage = NULL; PFNGLNAMEDBUFFERSUBDATAPROC glad_glNamedBufferSubData = NULL; PFNGLNAMEDFRAMEBUFFERDRAWBUFFERPROC glad_glNamedFramebufferDrawBuffer = NULL; PFNGLNAMEDFRAMEBUFFERDRAWBUFFERSPROC glad_glNamedFramebufferDrawBuffers = NULL; PFNGLNAMEDFRAMEBUFFERPARAMETERIPROC glad_glNamedFramebufferParameteri = NULL; PFNGLNAMEDFRAMEBUFFERREADBUFFERPROC glad_glNamedFramebufferReadBuffer = NULL; PFNGLNAMEDFRAMEBUFFERRENDERBUFFERPROC glad_glNamedFramebufferRenderbuffer = NULL; PFNGLNAMEDFRAMEBUFFERTEXTUREPROC glad_glNamedFramebufferTexture = NULL; PFNGLNAMEDFRAMEBUFFERTEXTURELAYERPROC glad_glNamedFramebufferTextureLayer = NULL; PFNGLNAMEDRENDERBUFFERSTORAGEPROC glad_glNamedRenderbufferStorage = NULL; PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glNamedRenderbufferStorageMultisample = NULL; PFNGLNORMALP3UIPROC glad_glNormalP3ui = NULL; PFNGLNORMALP3UIVPROC glad_glNormalP3uiv = NULL; PFNGLOBJECTLABELPROC glad_glObjectLabel = NULL; PFNGLOBJECTPTRLABELPROC glad_glObjectPtrLabel = NULL; PFNGLPATCHPARAMETERFVPROC glad_glPatchParameterfv = NULL; PFNGLPATCHPARAMETERIPROC glad_glPatchParameteri = NULL; PFNGLPAUSETRANSFORMFEEDBACKPROC glad_glPauseTransformFeedback = NULL; PFNGLPIXELSTOREFPROC glad_glPixelStoref = NULL; PFNGLPIXELSTOREIPROC glad_glPixelStorei = NULL; PFNGLPOINTPARAMETERFPROC glad_glPointParameterf = NULL; PFNGLPOINTPARAMETERFVPROC glad_glPointParameterfv = NULL; PFNGLPOINTPARAMETERIPROC glad_glPointParameteri = NULL; PFNGLPOINTPARAMETERIVPROC glad_glPointParameteriv = NULL; PFNGLPOINTSIZEPROC glad_glPointSize = NULL; PFNGLPOLYGONMODEPROC glad_glPolygonMode = NULL; PFNGLPOLYGONOFFSETPROC glad_glPolygonOffset = NULL; PFNGLPOLYGONOFFSETCLAMPPROC glad_glPolygonOffsetClamp = NULL; PFNGLPOPDEBUGGROUPPROC glad_glPopDebugGroup = NULL; PFNGLPRIMITIVERESTARTINDEXPROC glad_glPrimitiveRestartIndex = NULL; PFNGLPROGRAMBINARYPROC glad_glProgramBinary = NULL; PFNGLPROGRAMPARAMETERIPROC glad_glProgramParameteri = NULL; PFNGLPROGRAMUNIFORM1DPROC glad_glProgramUniform1d = NULL; PFNGLPROGRAMUNIFORM1DVPROC glad_glProgramUniform1dv = NULL; PFNGLPROGRAMUNIFORM1FPROC glad_glProgramUniform1f = NULL; PFNGLPROGRAMUNIFORM1FVPROC glad_glProgramUniform1fv = NULL; PFNGLPROGRAMUNIFORM1IPROC glad_glProgramUniform1i = NULL; PFNGLPROGRAMUNIFORM1IVPROC glad_glProgramUniform1iv = NULL; PFNGLPROGRAMUNIFORM1UIPROC glad_glProgramUniform1ui = NULL; PFNGLPROGRAMUNIFORM1UIVPROC glad_glProgramUniform1uiv = NULL; PFNGLPROGRAMUNIFORM2DPROC glad_glProgramUniform2d = NULL; PFNGLPROGRAMUNIFORM2DVPROC glad_glProgramUniform2dv = NULL; PFNGLPROGRAMUNIFORM2FPROC glad_glProgramUniform2f = NULL; PFNGLPROGRAMUNIFORM2FVPROC glad_glProgramUniform2fv = NULL; PFNGLPROGRAMUNIFORM2IPROC glad_glProgramUniform2i = NULL; PFNGLPROGRAMUNIFORM2IVPROC glad_glProgramUniform2iv = NULL; PFNGLPROGRAMUNIFORM2UIPROC glad_glProgramUniform2ui = NULL; PFNGLPROGRAMUNIFORM2UIVPROC glad_glProgramUniform2uiv = NULL; PFNGLPROGRAMUNIFORM3DPROC glad_glProgramUniform3d = NULL; PFNGLPROGRAMUNIFORM3DVPROC glad_glProgramUniform3dv = NULL; PFNGLPROGRAMUNIFORM3FPROC glad_glProgramUniform3f = NULL; PFNGLPROGRAMUNIFORM3FVPROC glad_glProgramUniform3fv = NULL; PFNGLPROGRAMUNIFORM3IPROC glad_glProgramUniform3i = NULL; PFNGLPROGRAMUNIFORM3IVPROC glad_glProgramUniform3iv = NULL; PFNGLPROGRAMUNIFORM3UIPROC glad_glProgramUniform3ui = NULL; 
PFNGLPROGRAMUNIFORM3UIVPROC glad_glProgramUniform3uiv = NULL; PFNGLPROGRAMUNIFORM4DPROC glad_glProgramUniform4d = NULL; PFNGLPROGRAMUNIFORM4DVPROC glad_glProgramUniform4dv = NULL; PFNGLPROGRAMUNIFORM4FPROC glad_glProgramUniform4f = NULL; PFNGLPROGRAMUNIFORM4FVPROC glad_glProgramUniform4fv = NULL; PFNGLPROGRAMUNIFORM4IPROC glad_glProgramUniform4i = NULL; PFNGLPROGRAMUNIFORM4IVPROC glad_glProgramUniform4iv = NULL; PFNGLPROGRAMUNIFORM4UIPROC glad_glProgramUniform4ui = NULL; PFNGLPROGRAMUNIFORM4UIVPROC glad_glProgramUniform4uiv = NULL; PFNGLPROGRAMUNIFORMMATRIX2DVPROC glad_glProgramUniformMatrix2dv = NULL; PFNGLPROGRAMUNIFORMMATRIX2FVPROC glad_glProgramUniformMatrix2fv = NULL; PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC glad_glProgramUniformMatrix2x3dv = NULL; PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC glad_glProgramUniformMatrix2x3fv = NULL; PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC glad_glProgramUniformMatrix2x4dv = NULL; PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC glad_glProgramUniformMatrix2x4fv = NULL; PFNGLPROGRAMUNIFORMMATRIX3DVPROC glad_glProgramUniformMatrix3dv = NULL; PFNGLPROGRAMUNIFORMMATRIX3FVPROC glad_glProgramUniformMatrix3fv = NULL; PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC glad_glProgramUniformMatrix3x2dv = NULL; PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC glad_glProgramUniformMatrix3x2fv = NULL; PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC glad_glProgramUniformMatrix3x4dv = NULL; PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC glad_glProgramUniformMatrix3x4fv = NULL; PFNGLPROGRAMUNIFORMMATRIX4DVPROC glad_glProgramUniformMatrix4dv = NULL; PFNGLPROGRAMUNIFORMMATRIX4FVPROC glad_glProgramUniformMatrix4fv = NULL; PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC glad_glProgramUniformMatrix4x2dv = NULL; PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC glad_glProgramUniformMatrix4x2fv = NULL; PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC glad_glProgramUniformMatrix4x3dv = NULL; PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC glad_glProgramUniformMatrix4x3fv = NULL; PFNGLPROVOKINGVERTEXPROC glad_glProvokingVertex = NULL; PFNGLPUSHDEBUGGROUPPROC glad_glPushDebugGroup = NULL; PFNGLQUERYCOUNTERPROC glad_glQueryCounter = NULL; PFNGLREADBUFFERPROC glad_glReadBuffer = NULL; PFNGLREADPIXELSPROC glad_glReadPixels = NULL; PFNGLREADNPIXELSPROC glad_glReadnPixels = NULL; PFNGLRELEASESHADERCOMPILERPROC glad_glReleaseShaderCompiler = NULL; PFNGLRENDERBUFFERSTORAGEPROC glad_glRenderbufferStorage = NULL; PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC glad_glRenderbufferStorageMultisample = NULL; PFNGLRESUMETRANSFORMFEEDBACKPROC glad_glResumeTransformFeedback = NULL; PFNGLSAMPLECOVERAGEPROC glad_glSampleCoverage = NULL; PFNGLSAMPLEMASKIPROC glad_glSampleMaski = NULL; PFNGLSAMPLERPARAMETERIIVPROC glad_glSamplerParameterIiv = NULL; PFNGLSAMPLERPARAMETERIUIVPROC glad_glSamplerParameterIuiv = NULL; PFNGLSAMPLERPARAMETERFPROC glad_glSamplerParameterf = NULL; PFNGLSAMPLERPARAMETERFVPROC glad_glSamplerParameterfv = NULL; PFNGLSAMPLERPARAMETERIPROC glad_glSamplerParameteri = NULL; PFNGLSAMPLERPARAMETERIVPROC glad_glSamplerParameteriv = NULL; PFNGLSCISSORPROC glad_glScissor = NULL; PFNGLSCISSORARRAYVPROC glad_glScissorArrayv = NULL; PFNGLSCISSORINDEXEDPROC glad_glScissorIndexed = NULL; PFNGLSCISSORINDEXEDVPROC glad_glScissorIndexedv = NULL; PFNGLSECONDARYCOLORP3UIPROC glad_glSecondaryColorP3ui = NULL; PFNGLSECONDARYCOLORP3UIVPROC glad_glSecondaryColorP3uiv = NULL; PFNGLSHADERBINARYPROC glad_glShaderBinary = NULL; PFNGLSHADERSOURCEPROC glad_glShaderSource = NULL; PFNGLSHADERSTORAGEBLOCKBINDINGPROC glad_glShaderStorageBlockBinding = NULL; PFNGLSPECIALIZESHADERPROC glad_glSpecializeShader = NULL; PFNGLSTENCILFUNCPROC glad_glStencilFunc = 
NULL; PFNGLSTENCILFUNCSEPARATEPROC glad_glStencilFuncSeparate = NULL; PFNGLSTENCILMASKPROC glad_glStencilMask = NULL; PFNGLSTENCILMASKSEPARATEPROC glad_glStencilMaskSeparate = NULL; PFNGLSTENCILOPPROC glad_glStencilOp = NULL; PFNGLSTENCILOPSEPARATEPROC glad_glStencilOpSeparate = NULL; PFNGLTEXBUFFERPROC glad_glTexBuffer = NULL; PFNGLTEXBUFFERRANGEPROC glad_glTexBufferRange = NULL; PFNGLTEXCOORDP1UIPROC glad_glTexCoordP1ui = NULL; PFNGLTEXCOORDP1UIVPROC glad_glTexCoordP1uiv = NULL; PFNGLTEXCOORDP2UIPROC glad_glTexCoordP2ui = NULL; PFNGLTEXCOORDP2UIVPROC glad_glTexCoordP2uiv = NULL; PFNGLTEXCOORDP3UIPROC glad_glTexCoordP3ui = NULL; PFNGLTEXCOORDP3UIVPROC glad_glTexCoordP3uiv = NULL; PFNGLTEXCOORDP4UIPROC glad_glTexCoordP4ui = NULL; PFNGLTEXCOORDP4UIVPROC glad_glTexCoordP4uiv = NULL; PFNGLTEXIMAGE1DPROC glad_glTexImage1D = NULL; PFNGLTEXIMAGE2DPROC glad_glTexImage2D = NULL; PFNGLTEXIMAGE2DMULTISAMPLEPROC glad_glTexImage2DMultisample = NULL; PFNGLTEXIMAGE3DPROC glad_glTexImage3D = NULL; PFNGLTEXIMAGE3DMULTISAMPLEPROC glad_glTexImage3DMultisample = NULL; PFNGLTEXPARAMETERIIVPROC glad_glTexParameterIiv = NULL; PFNGLTEXPARAMETERIUIVPROC glad_glTexParameterIuiv = NULL; PFNGLTEXPARAMETERFPROC glad_glTexParameterf = NULL; PFNGLTEXPARAMETERFVPROC glad_glTexParameterfv = NULL; PFNGLTEXPARAMETERIPROC glad_glTexParameteri = NULL; PFNGLTEXPARAMETERIVPROC glad_glTexParameteriv = NULL; PFNGLTEXSTORAGE1DPROC glad_glTexStorage1D = NULL; PFNGLTEXSTORAGE2DPROC glad_glTexStorage2D = NULL; PFNGLTEXSTORAGE2DMULTISAMPLEPROC glad_glTexStorage2DMultisample = NULL; PFNGLTEXSTORAGE3DPROC glad_glTexStorage3D = NULL; PFNGLTEXSTORAGE3DMULTISAMPLEPROC glad_glTexStorage3DMultisample = NULL; PFNGLTEXSUBIMAGE1DPROC glad_glTexSubImage1D = NULL; PFNGLTEXSUBIMAGE2DPROC glad_glTexSubImage2D = NULL; PFNGLTEXSUBIMAGE3DPROC glad_glTexSubImage3D = NULL; PFNGLTEXTUREBARRIERPROC glad_glTextureBarrier = NULL; PFNGLTEXTUREBUFFERPROC glad_glTextureBuffer = NULL; PFNGLTEXTUREBUFFERRANGEPROC glad_glTextureBufferRange = NULL; PFNGLTEXTUREPARAMETERIIVPROC glad_glTextureParameterIiv = NULL; PFNGLTEXTUREPARAMETERIUIVPROC glad_glTextureParameterIuiv = NULL; PFNGLTEXTUREPARAMETERFPROC glad_glTextureParameterf = NULL; PFNGLTEXTUREPARAMETERFVPROC glad_glTextureParameterfv = NULL; PFNGLTEXTUREPARAMETERIPROC glad_glTextureParameteri = NULL; PFNGLTEXTUREPARAMETERIVPROC glad_glTextureParameteriv = NULL; PFNGLTEXTURESTORAGE1DPROC glad_glTextureStorage1D = NULL; PFNGLTEXTURESTORAGE2DPROC glad_glTextureStorage2D = NULL; PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC glad_glTextureStorage2DMultisample = NULL; PFNGLTEXTURESTORAGE3DPROC glad_glTextureStorage3D = NULL; PFNGLTEXTURESTORAGE3DMULTISAMPLEPROC glad_glTextureStorage3DMultisample = NULL; PFNGLTEXTURESUBIMAGE1DPROC glad_glTextureSubImage1D = NULL; PFNGLTEXTURESUBIMAGE2DPROC glad_glTextureSubImage2D = NULL; PFNGLTEXTURESUBIMAGE3DPROC glad_glTextureSubImage3D = NULL; PFNGLTEXTUREVIEWPROC glad_glTextureView = NULL; PFNGLTRANSFORMFEEDBACKBUFFERBASEPROC glad_glTransformFeedbackBufferBase = NULL; PFNGLTRANSFORMFEEDBACKBUFFERRANGEPROC glad_glTransformFeedbackBufferRange = NULL; PFNGLTRANSFORMFEEDBACKVARYINGSPROC glad_glTransformFeedbackVaryings = NULL; PFNGLUNIFORM1DPROC glad_glUniform1d = NULL; PFNGLUNIFORM1DVPROC glad_glUniform1dv = NULL; PFNGLUNIFORM1FPROC glad_glUniform1f = NULL; PFNGLUNIFORM1FVPROC glad_glUniform1fv = NULL; PFNGLUNIFORM1IPROC glad_glUniform1i = NULL; PFNGLUNIFORM1IVPROC glad_glUniform1iv = NULL; PFNGLUNIFORM1UIPROC glad_glUniform1ui = NULL; PFNGLUNIFORM1UIVPROC glad_glUniform1uiv = NULL; 
PFNGLUNIFORM2DPROC glad_glUniform2d = NULL; PFNGLUNIFORM2DVPROC glad_glUniform2dv = NULL; PFNGLUNIFORM2FPROC glad_glUniform2f = NULL; PFNGLUNIFORM2FVPROC glad_glUniform2fv = NULL; PFNGLUNIFORM2IPROC glad_glUniform2i = NULL; PFNGLUNIFORM2IVPROC glad_glUniform2iv = NULL; PFNGLUNIFORM2UIPROC glad_glUniform2ui = NULL; PFNGLUNIFORM2UIVPROC glad_glUniform2uiv = NULL; PFNGLUNIFORM3DPROC glad_glUniform3d = NULL; PFNGLUNIFORM3DVPROC glad_glUniform3dv = NULL; PFNGLUNIFORM3FPROC glad_glUniform3f = NULL; PFNGLUNIFORM3FVPROC glad_glUniform3fv = NULL; PFNGLUNIFORM3IPROC glad_glUniform3i = NULL; PFNGLUNIFORM3IVPROC glad_glUniform3iv = NULL; PFNGLUNIFORM3UIPROC glad_glUniform3ui = NULL; PFNGLUNIFORM3UIVPROC glad_glUniform3uiv = NULL; PFNGLUNIFORM4DPROC glad_glUniform4d = NULL; PFNGLUNIFORM4DVPROC glad_glUniform4dv = NULL; PFNGLUNIFORM4FPROC glad_glUniform4f = NULL; PFNGLUNIFORM4FVPROC glad_glUniform4fv = NULL; PFNGLUNIFORM4IPROC glad_glUniform4i = NULL; PFNGLUNIFORM4IVPROC glad_glUniform4iv = NULL; PFNGLUNIFORM4UIPROC glad_glUniform4ui = NULL; PFNGLUNIFORM4UIVPROC glad_glUniform4uiv = NULL; PFNGLUNIFORMBLOCKBINDINGPROC glad_glUniformBlockBinding = NULL; PFNGLUNIFORMMATRIX2DVPROC glad_glUniformMatrix2dv = NULL; PFNGLUNIFORMMATRIX2FVPROC glad_glUniformMatrix2fv = NULL; PFNGLUNIFORMMATRIX2X3DVPROC glad_glUniformMatrix2x3dv = NULL; PFNGLUNIFORMMATRIX2X3FVPROC glad_glUniformMatrix2x3fv = NULL; PFNGLUNIFORMMATRIX2X4DVPROC glad_glUniformMatrix2x4dv = NULL; PFNGLUNIFORMMATRIX2X4FVPROC glad_glUniformMatrix2x4fv = NULL; PFNGLUNIFORMMATRIX3DVPROC glad_glUniformMatrix3dv = NULL; PFNGLUNIFORMMATRIX3FVPROC glad_glUniformMatrix3fv = NULL; PFNGLUNIFORMMATRIX3X2DVPROC glad_glUniformMatrix3x2dv = NULL; PFNGLUNIFORMMATRIX3X2FVPROC glad_glUniformMatrix3x2fv = NULL; PFNGLUNIFORMMATRIX3X4DVPROC glad_glUniformMatrix3x4dv = NULL; PFNGLUNIFORMMATRIX3X4FVPROC glad_glUniformMatrix3x4fv = NULL; PFNGLUNIFORMMATRIX4DVPROC glad_glUniformMatrix4dv = NULL; PFNGLUNIFORMMATRIX4FVPROC glad_glUniformMatrix4fv = NULL; PFNGLUNIFORMMATRIX4X2DVPROC glad_glUniformMatrix4x2dv = NULL; PFNGLUNIFORMMATRIX4X2FVPROC glad_glUniformMatrix4x2fv = NULL; PFNGLUNIFORMMATRIX4X3DVPROC glad_glUniformMatrix4x3dv = NULL; PFNGLUNIFORMMATRIX4X3FVPROC glad_glUniformMatrix4x3fv = NULL; PFNGLUNIFORMSUBROUTINESUIVPROC glad_glUniformSubroutinesuiv = NULL; PFNGLUNMAPBUFFERPROC glad_glUnmapBuffer = NULL; PFNGLUNMAPNAMEDBUFFERPROC glad_glUnmapNamedBuffer = NULL; PFNGLUSEPROGRAMPROC glad_glUseProgram = NULL; PFNGLUSEPROGRAMSTAGESPROC glad_glUseProgramStages = NULL; PFNGLVALIDATEPROGRAMPROC glad_glValidateProgram = NULL; PFNGLVALIDATEPROGRAMPIPELINEPROC glad_glValidateProgramPipeline = NULL; PFNGLVERTEXARRAYATTRIBBINDINGPROC glad_glVertexArrayAttribBinding = NULL; PFNGLVERTEXARRAYATTRIBFORMATPROC glad_glVertexArrayAttribFormat = NULL; PFNGLVERTEXARRAYATTRIBIFORMATPROC glad_glVertexArrayAttribIFormat = NULL; PFNGLVERTEXARRAYATTRIBLFORMATPROC glad_glVertexArrayAttribLFormat = NULL; PFNGLVERTEXARRAYBINDINGDIVISORPROC glad_glVertexArrayBindingDivisor = NULL; PFNGLVERTEXARRAYELEMENTBUFFERPROC glad_glVertexArrayElementBuffer = NULL; PFNGLVERTEXARRAYVERTEXBUFFERPROC glad_glVertexArrayVertexBuffer = NULL; PFNGLVERTEXARRAYVERTEXBUFFERSPROC glad_glVertexArrayVertexBuffers = NULL; PFNGLVERTEXATTRIB1DPROC glad_glVertexAttrib1d = NULL; PFNGLVERTEXATTRIB1DVPROC glad_glVertexAttrib1dv = NULL; PFNGLVERTEXATTRIB1FPROC glad_glVertexAttrib1f = NULL; PFNGLVERTEXATTRIB1FVPROC glad_glVertexAttrib1fv = NULL; PFNGLVERTEXATTRIB1SPROC glad_glVertexAttrib1s = NULL; PFNGLVERTEXATTRIB1SVPROC 
glad_glVertexAttrib1sv = NULL; PFNGLVERTEXATTRIB2DPROC glad_glVertexAttrib2d = NULL; PFNGLVERTEXATTRIB2DVPROC glad_glVertexAttrib2dv = NULL; PFNGLVERTEXATTRIB2FPROC glad_glVertexAttrib2f = NULL; PFNGLVERTEXATTRIB2FVPROC glad_glVertexAttrib2fv = NULL; PFNGLVERTEXATTRIB2SPROC glad_glVertexAttrib2s = NULL; PFNGLVERTEXATTRIB2SVPROC glad_glVertexAttrib2sv = NULL; PFNGLVERTEXATTRIB3DPROC glad_glVertexAttrib3d = NULL; PFNGLVERTEXATTRIB3DVPROC glad_glVertexAttrib3dv = NULL; PFNGLVERTEXATTRIB3FPROC glad_glVertexAttrib3f = NULL; PFNGLVERTEXATTRIB3FVPROC glad_glVertexAttrib3fv = NULL; PFNGLVERTEXATTRIB3SPROC glad_glVertexAttrib3s = NULL; PFNGLVERTEXATTRIB3SVPROC glad_glVertexAttrib3sv = NULL; PFNGLVERTEXATTRIB4NBVPROC glad_glVertexAttrib4Nbv = NULL; PFNGLVERTEXATTRIB4NIVPROC glad_glVertexAttrib4Niv = NULL; PFNGLVERTEXATTRIB4NSVPROC glad_glVertexAttrib4Nsv = NULL; PFNGLVERTEXATTRIB4NUBPROC glad_glVertexAttrib4Nub = NULL; PFNGLVERTEXATTRIB4NUBVPROC glad_glVertexAttrib4Nubv = NULL; PFNGLVERTEXATTRIB4NUIVPROC glad_glVertexAttrib4Nuiv = NULL; PFNGLVERTEXATTRIB4NUSVPROC glad_glVertexAttrib4Nusv = NULL; PFNGLVERTEXATTRIB4BVPROC glad_glVertexAttrib4bv = NULL; PFNGLVERTEXATTRIB4DPROC glad_glVertexAttrib4d = NULL; PFNGLVERTEXATTRIB4DVPROC glad_glVertexAttrib4dv = NULL; PFNGLVERTEXATTRIB4FPROC glad_glVertexAttrib4f = NULL; PFNGLVERTEXATTRIB4FVPROC glad_glVertexAttrib4fv = NULL; PFNGLVERTEXATTRIB4IVPROC glad_glVertexAttrib4iv = NULL; PFNGLVERTEXATTRIB4SPROC glad_glVertexAttrib4s = NULL; PFNGLVERTEXATTRIB4SVPROC glad_glVertexAttrib4sv = NULL; PFNGLVERTEXATTRIB4UBVPROC glad_glVertexAttrib4ubv = NULL; PFNGLVERTEXATTRIB4UIVPROC glad_glVertexAttrib4uiv = NULL; PFNGLVERTEXATTRIB4USVPROC glad_glVertexAttrib4usv = NULL; PFNGLVERTEXATTRIBBINDINGPROC glad_glVertexAttribBinding = NULL; PFNGLVERTEXATTRIBDIVISORPROC glad_glVertexAttribDivisor = NULL; PFNGLVERTEXATTRIBFORMATPROC glad_glVertexAttribFormat = NULL; PFNGLVERTEXATTRIBI1IPROC glad_glVertexAttribI1i = NULL; PFNGLVERTEXATTRIBI1IVPROC glad_glVertexAttribI1iv = NULL; PFNGLVERTEXATTRIBI1UIPROC glad_glVertexAttribI1ui = NULL; PFNGLVERTEXATTRIBI1UIVPROC glad_glVertexAttribI1uiv = NULL; PFNGLVERTEXATTRIBI2IPROC glad_glVertexAttribI2i = NULL; PFNGLVERTEXATTRIBI2IVPROC glad_glVertexAttribI2iv = NULL; PFNGLVERTEXATTRIBI2UIPROC glad_glVertexAttribI2ui = NULL; PFNGLVERTEXATTRIBI2UIVPROC glad_glVertexAttribI2uiv = NULL; PFNGLVERTEXATTRIBI3IPROC glad_glVertexAttribI3i = NULL; PFNGLVERTEXATTRIBI3IVPROC glad_glVertexAttribI3iv = NULL; PFNGLVERTEXATTRIBI3UIPROC glad_glVertexAttribI3ui = NULL; PFNGLVERTEXATTRIBI3UIVPROC glad_glVertexAttribI3uiv = NULL; PFNGLVERTEXATTRIBI4BVPROC glad_glVertexAttribI4bv = NULL; PFNGLVERTEXATTRIBI4IPROC glad_glVertexAttribI4i = NULL; PFNGLVERTEXATTRIBI4IVPROC glad_glVertexAttribI4iv = NULL; PFNGLVERTEXATTRIBI4SVPROC glad_glVertexAttribI4sv = NULL; PFNGLVERTEXATTRIBI4UBVPROC glad_glVertexAttribI4ubv = NULL; PFNGLVERTEXATTRIBI4UIPROC glad_glVertexAttribI4ui = NULL; PFNGLVERTEXATTRIBI4UIVPROC glad_glVertexAttribI4uiv = NULL; PFNGLVERTEXATTRIBI4USVPROC glad_glVertexAttribI4usv = NULL; PFNGLVERTEXATTRIBIFORMATPROC glad_glVertexAttribIFormat = NULL; PFNGLVERTEXATTRIBIPOINTERPROC glad_glVertexAttribIPointer = NULL; PFNGLVERTEXATTRIBL1DPROC glad_glVertexAttribL1d = NULL; PFNGLVERTEXATTRIBL1DVPROC glad_glVertexAttribL1dv = NULL; PFNGLVERTEXATTRIBL2DPROC glad_glVertexAttribL2d = NULL; PFNGLVERTEXATTRIBL2DVPROC glad_glVertexAttribL2dv = NULL; PFNGLVERTEXATTRIBL3DPROC glad_glVertexAttribL3d = NULL; PFNGLVERTEXATTRIBL3DVPROC glad_glVertexAttribL3dv = NULL; 
PFNGLVERTEXATTRIBL4DPROC glad_glVertexAttribL4d = NULL; PFNGLVERTEXATTRIBL4DVPROC glad_glVertexAttribL4dv = NULL; PFNGLVERTEXATTRIBLFORMATPROC glad_glVertexAttribLFormat = NULL; PFNGLVERTEXATTRIBLPOINTERPROC glad_glVertexAttribLPointer = NULL; PFNGLVERTEXATTRIBP1UIPROC glad_glVertexAttribP1ui = NULL; PFNGLVERTEXATTRIBP1UIVPROC glad_glVertexAttribP1uiv = NULL; PFNGLVERTEXATTRIBP2UIPROC glad_glVertexAttribP2ui = NULL; PFNGLVERTEXATTRIBP2UIVPROC glad_glVertexAttribP2uiv = NULL; PFNGLVERTEXATTRIBP3UIPROC glad_glVertexAttribP3ui = NULL; PFNGLVERTEXATTRIBP3UIVPROC glad_glVertexAttribP3uiv = NULL; PFNGLVERTEXATTRIBP4UIPROC glad_glVertexAttribP4ui = NULL; PFNGLVERTEXATTRIBP4UIVPROC glad_glVertexAttribP4uiv = NULL; PFNGLVERTEXATTRIBPOINTERPROC glad_glVertexAttribPointer = NULL; PFNGLVERTEXBINDINGDIVISORPROC glad_glVertexBindingDivisor = NULL; PFNGLVERTEXP2UIPROC glad_glVertexP2ui = NULL; PFNGLVERTEXP2UIVPROC glad_glVertexP2uiv = NULL; PFNGLVERTEXP3UIPROC glad_glVertexP3ui = NULL; PFNGLVERTEXP3UIVPROC glad_glVertexP3uiv = NULL; PFNGLVERTEXP4UIPROC glad_glVertexP4ui = NULL; PFNGLVERTEXP4UIVPROC glad_glVertexP4uiv = NULL; PFNGLVIEWPORTPROC glad_glViewport = NULL; PFNGLVIEWPORTARRAYVPROC glad_glViewportArrayv = NULL; PFNGLVIEWPORTINDEXEDFPROC glad_glViewportIndexedf = NULL; PFNGLVIEWPORTINDEXEDFVPROC glad_glViewportIndexedfv = NULL; PFNGLWAITSYNCPROC glad_glWaitSync = NULL; static void load_GL_VERSION_1_0(GLADloadproc load) { if(!GLAD_GL_VERSION_1_0) return; glad_glCullFace = (PFNGLCULLFACEPROC)load("glCullFace"); glad_glFrontFace = (PFNGLFRONTFACEPROC)load("glFrontFace"); glad_glHint = (PFNGLHINTPROC)load("glHint"); glad_glLineWidth = (PFNGLLINEWIDTHPROC)load("glLineWidth"); glad_glPointSize = (PFNGLPOINTSIZEPROC)load("glPointSize"); glad_glPolygonMode = (PFNGLPOLYGONMODEPROC)load("glPolygonMode"); glad_glScissor = (PFNGLSCISSORPROC)load("glScissor"); glad_glTexParameterf = (PFNGLTEXPARAMETERFPROC)load("glTexParameterf"); glad_glTexParameterfv = (PFNGLTEXPARAMETERFVPROC)load("glTexParameterfv"); glad_glTexParameteri = (PFNGLTEXPARAMETERIPROC)load("glTexParameteri"); glad_glTexParameteriv = (PFNGLTEXPARAMETERIVPROC)load("glTexParameteriv"); glad_glTexImage1D = (PFNGLTEXIMAGE1DPROC)load("glTexImage1D"); glad_glTexImage2D = (PFNGLTEXIMAGE2DPROC)load("glTexImage2D"); glad_glDrawBuffer = (PFNGLDRAWBUFFERPROC)load("glDrawBuffer"); glad_glClear = (PFNGLCLEARPROC)load("glClear"); glad_glClearColor = (PFNGLCLEARCOLORPROC)load("glClearColor"); glad_glClearStencil = (PFNGLCLEARSTENCILPROC)load("glClearStencil"); glad_glClearDepth = (PFNGLCLEARDEPTHPROC)load("glClearDepth"); glad_glStencilMask = (PFNGLSTENCILMASKPROC)load("glStencilMask"); glad_glColorMask = (PFNGLCOLORMASKPROC)load("glColorMask"); glad_glDepthMask = (PFNGLDEPTHMASKPROC)load("glDepthMask"); glad_glDisable = (PFNGLDISABLEPROC)load("glDisable"); glad_glEnable = (PFNGLENABLEPROC)load("glEnable"); glad_glFinish = (PFNGLFINISHPROC)load("glFinish"); glad_glFlush = (PFNGLFLUSHPROC)load("glFlush"); glad_glBlendFunc = (PFNGLBLENDFUNCPROC)load("glBlendFunc"); glad_glLogicOp = (PFNGLLOGICOPPROC)load("glLogicOp"); glad_glStencilFunc = (PFNGLSTENCILFUNCPROC)load("glStencilFunc"); glad_glStencilOp = (PFNGLSTENCILOPPROC)load("glStencilOp"); glad_glDepthFunc = (PFNGLDEPTHFUNCPROC)load("glDepthFunc"); glad_glPixelStoref = (PFNGLPIXELSTOREFPROC)load("glPixelStoref"); glad_glPixelStorei = (PFNGLPIXELSTOREIPROC)load("glPixelStorei"); glad_glReadBuffer = (PFNGLREADBUFFERPROC)load("glReadBuffer"); glad_glReadPixels = (PFNGLREADPIXELSPROC)load("glReadPixels"); 
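/* Each assignment in these loaders asks the caller-supplied GLADloadproc for
 * the named entry point and stores whatever it returns, NULL included; the
 * if(!GLAD_GL_VERSION_1_0) guard at the top of this function skips the lookups
 * entirely when version detection did not report at least GL 1.0. */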
glad_glGetBooleanv = (PFNGLGETBOOLEANVPROC)load("glGetBooleanv"); glad_glGetDoublev = (PFNGLGETDOUBLEVPROC)load("glGetDoublev"); glad_glGetError = (PFNGLGETERRORPROC)load("glGetError"); glad_glGetFloatv = (PFNGLGETFLOATVPROC)load("glGetFloatv"); glad_glGetIntegerv = (PFNGLGETINTEGERVPROC)load("glGetIntegerv"); glad_glGetString = (PFNGLGETSTRINGPROC)load("glGetString"); glad_glGetTexImage = (PFNGLGETTEXIMAGEPROC)load("glGetTexImage"); glad_glGetTexParameterfv = (PFNGLGETTEXPARAMETERFVPROC)load("glGetTexParameterfv"); glad_glGetTexParameteriv = (PFNGLGETTEXPARAMETERIVPROC)load("glGetTexParameteriv"); glad_glGetTexLevelParameterfv = (PFNGLGETTEXLEVELPARAMETERFVPROC)load("glGetTexLevelParameterfv"); glad_glGetTexLevelParameteriv = (PFNGLGETTEXLEVELPARAMETERIVPROC)load("glGetTexLevelParameteriv"); glad_glIsEnabled = (PFNGLISENABLEDPROC)load("glIsEnabled"); glad_glDepthRange = (PFNGLDEPTHRANGEPROC)load("glDepthRange"); glad_glViewport = (PFNGLVIEWPORTPROC)load("glViewport"); } static void load_GL_VERSION_1_1(GLADloadproc load) { if(!GLAD_GL_VERSION_1_1) return; glad_glDrawArrays = (PFNGLDRAWARRAYSPROC)load("glDrawArrays"); glad_glDrawElements = (PFNGLDRAWELEMENTSPROC)load("glDrawElements"); glad_glPolygonOffset = (PFNGLPOLYGONOFFSETPROC)load("glPolygonOffset"); glad_glCopyTexImage1D = (PFNGLCOPYTEXIMAGE1DPROC)load("glCopyTexImage1D"); glad_glCopyTexImage2D = (PFNGLCOPYTEXIMAGE2DPROC)load("glCopyTexImage2D"); glad_glCopyTexSubImage1D = (PFNGLCOPYTEXSUBIMAGE1DPROC)load("glCopyTexSubImage1D"); glad_glCopyTexSubImage2D = (PFNGLCOPYTEXSUBIMAGE2DPROC)load("glCopyTexSubImage2D"); glad_glTexSubImage1D = (PFNGLTEXSUBIMAGE1DPROC)load("glTexSubImage1D"); glad_glTexSubImage2D = (PFNGLTEXSUBIMAGE2DPROC)load("glTexSubImage2D"); glad_glBindTexture = (PFNGLBINDTEXTUREPROC)load("glBindTexture"); glad_glDeleteTextures = (PFNGLDELETETEXTURESPROC)load("glDeleteTextures"); glad_glGenTextures = (PFNGLGENTEXTURESPROC)load("glGenTextures"); glad_glIsTexture = (PFNGLISTEXTUREPROC)load("glIsTexture"); } static void load_GL_VERSION_1_2(GLADloadproc load) { if(!GLAD_GL_VERSION_1_2) return; glad_glDrawRangeElements = (PFNGLDRAWRANGEELEMENTSPROC)load("glDrawRangeElements"); glad_glTexImage3D = (PFNGLTEXIMAGE3DPROC)load("glTexImage3D"); glad_glTexSubImage3D = (PFNGLTEXSUBIMAGE3DPROC)load("glTexSubImage3D"); glad_glCopyTexSubImage3D = (PFNGLCOPYTEXSUBIMAGE3DPROC)load("glCopyTexSubImage3D"); } static void load_GL_VERSION_1_3(GLADloadproc load) { if(!GLAD_GL_VERSION_1_3) return; glad_glActiveTexture = (PFNGLACTIVETEXTUREPROC)load("glActiveTexture"); glad_glSampleCoverage = (PFNGLSAMPLECOVERAGEPROC)load("glSampleCoverage"); glad_glCompressedTexImage3D = (PFNGLCOMPRESSEDTEXIMAGE3DPROC)load("glCompressedTexImage3D"); glad_glCompressedTexImage2D = (PFNGLCOMPRESSEDTEXIMAGE2DPROC)load("glCompressedTexImage2D"); glad_glCompressedTexImage1D = (PFNGLCOMPRESSEDTEXIMAGE1DPROC)load("glCompressedTexImage1D"); glad_glCompressedTexSubImage3D = (PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC)load("glCompressedTexSubImage3D"); glad_glCompressedTexSubImage2D = (PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC)load("glCompressedTexSubImage2D"); glad_glCompressedTexSubImage1D = (PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC)load("glCompressedTexSubImage1D"); glad_glGetCompressedTexImage = (PFNGLGETCOMPRESSEDTEXIMAGEPROC)load("glGetCompressedTexImage"); } static void load_GL_VERSION_1_4(GLADloadproc load) { if(!GLAD_GL_VERSION_1_4) return; glad_glBlendFuncSeparate = (PFNGLBLENDFUNCSEPARATEPROC)load("glBlendFuncSeparate"); glad_glMultiDrawArrays = 
(PFNGLMULTIDRAWARRAYSPROC)load("glMultiDrawArrays"); glad_glMultiDrawElements = (PFNGLMULTIDRAWELEMENTSPROC)load("glMultiDrawElements"); glad_glPointParameterf = (PFNGLPOINTPARAMETERFPROC)load("glPointParameterf"); glad_glPointParameterfv = (PFNGLPOINTPARAMETERFVPROC)load("glPointParameterfv"); glad_glPointParameteri = (PFNGLPOINTPARAMETERIPROC)load("glPointParameteri"); glad_glPointParameteriv = (PFNGLPOINTPARAMETERIVPROC)load("glPointParameteriv"); glad_glBlendColor = (PFNGLBLENDCOLORPROC)load("glBlendColor"); glad_glBlendEquation = (PFNGLBLENDEQUATIONPROC)load("glBlendEquation"); } static void load_GL_VERSION_1_5(GLADloadproc load) { if(!GLAD_GL_VERSION_1_5) return; glad_glGenQueries = (PFNGLGENQUERIESPROC)load("glGenQueries"); glad_glDeleteQueries = (PFNGLDELETEQUERIESPROC)load("glDeleteQueries"); glad_glIsQuery = (PFNGLISQUERYPROC)load("glIsQuery"); glad_glBeginQuery = (PFNGLBEGINQUERYPROC)load("glBeginQuery"); glad_glEndQuery = (PFNGLENDQUERYPROC)load("glEndQuery"); glad_glGetQueryiv = (PFNGLGETQUERYIVPROC)load("glGetQueryiv"); glad_glGetQueryObjectiv = (PFNGLGETQUERYOBJECTIVPROC)load("glGetQueryObjectiv"); glad_glGetQueryObjectuiv = (PFNGLGETQUERYOBJECTUIVPROC)load("glGetQueryObjectuiv"); glad_glBindBuffer = (PFNGLBINDBUFFERPROC)load("glBindBuffer"); glad_glDeleteBuffers = (PFNGLDELETEBUFFERSPROC)load("glDeleteBuffers"); glad_glGenBuffers = (PFNGLGENBUFFERSPROC)load("glGenBuffers"); glad_glIsBuffer = (PFNGLISBUFFERPROC)load("glIsBuffer"); glad_glBufferData = (PFNGLBUFFERDATAPROC)load("glBufferData"); glad_glBufferSubData = (PFNGLBUFFERSUBDATAPROC)load("glBufferSubData"); glad_glGetBufferSubData = (PFNGLGETBUFFERSUBDATAPROC)load("glGetBufferSubData"); glad_glMapBuffer = (PFNGLMAPBUFFERPROC)load("glMapBuffer"); glad_glUnmapBuffer = (PFNGLUNMAPBUFFERPROC)load("glUnmapBuffer"); glad_glGetBufferParameteriv = (PFNGLGETBUFFERPARAMETERIVPROC)load("glGetBufferParameteriv"); glad_glGetBufferPointerv = (PFNGLGETBUFFERPOINTERVPROC)load("glGetBufferPointerv"); } static void load_GL_VERSION_2_0(GLADloadproc load) { if(!GLAD_GL_VERSION_2_0) return; glad_glBlendEquationSeparate = (PFNGLBLENDEQUATIONSEPARATEPROC)load("glBlendEquationSeparate"); glad_glDrawBuffers = (PFNGLDRAWBUFFERSPROC)load("glDrawBuffers"); glad_glStencilOpSeparate = (PFNGLSTENCILOPSEPARATEPROC)load("glStencilOpSeparate"); glad_glStencilFuncSeparate = (PFNGLSTENCILFUNCSEPARATEPROC)load("glStencilFuncSeparate"); glad_glStencilMaskSeparate = (PFNGLSTENCILMASKSEPARATEPROC)load("glStencilMaskSeparate"); glad_glAttachShader = (PFNGLATTACHSHADERPROC)load("glAttachShader"); glad_glBindAttribLocation = (PFNGLBINDATTRIBLOCATIONPROC)load("glBindAttribLocation"); glad_glCompileShader = (PFNGLCOMPILESHADERPROC)load("glCompileShader"); glad_glCreateProgram = (PFNGLCREATEPROGRAMPROC)load("glCreateProgram"); glad_glCreateShader = (PFNGLCREATESHADERPROC)load("glCreateShader"); glad_glDeleteProgram = (PFNGLDELETEPROGRAMPROC)load("glDeleteProgram"); glad_glDeleteShader = (PFNGLDELETESHADERPROC)load("glDeleteShader"); glad_glDetachShader = (PFNGLDETACHSHADERPROC)load("glDetachShader"); glad_glDisableVertexAttribArray = (PFNGLDISABLEVERTEXATTRIBARRAYPROC)load("glDisableVertexAttribArray"); glad_glEnableVertexAttribArray = (PFNGLENABLEVERTEXATTRIBARRAYPROC)load("glEnableVertexAttribArray"); glad_glGetActiveAttrib = (PFNGLGETACTIVEATTRIBPROC)load("glGetActiveAttrib"); glad_glGetActiveUniform = (PFNGLGETACTIVEUNIFORMPROC)load("glGetActiveUniform"); glad_glGetAttachedShaders = (PFNGLGETATTACHEDSHADERSPROC)load("glGetAttachedShaders"); 
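/* GL 2.0 is where the GLSL program and shader object API enters core, so this
 * loader resolves the compile, link and introspection entry points together
 * with the glUniform* upload family and the glVertexAttrib* setters below. */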
glad_glGetAttribLocation = (PFNGLGETATTRIBLOCATIONPROC)load("glGetAttribLocation"); glad_glGetProgramiv = (PFNGLGETPROGRAMIVPROC)load("glGetProgramiv"); glad_glGetProgramInfoLog = (PFNGLGETPROGRAMINFOLOGPROC)load("glGetProgramInfoLog"); glad_glGetShaderiv = (PFNGLGETSHADERIVPROC)load("glGetShaderiv"); glad_glGetShaderInfoLog = (PFNGLGETSHADERINFOLOGPROC)load("glGetShaderInfoLog"); glad_glGetShaderSource = (PFNGLGETSHADERSOURCEPROC)load("glGetShaderSource"); glad_glGetUniformLocation = (PFNGLGETUNIFORMLOCATIONPROC)load("glGetUniformLocation"); glad_glGetUniformfv = (PFNGLGETUNIFORMFVPROC)load("glGetUniformfv"); glad_glGetUniformiv = (PFNGLGETUNIFORMIVPROC)load("glGetUniformiv"); glad_glGetVertexAttribdv = (PFNGLGETVERTEXATTRIBDVPROC)load("glGetVertexAttribdv"); glad_glGetVertexAttribfv = (PFNGLGETVERTEXATTRIBFVPROC)load("glGetVertexAttribfv"); glad_glGetVertexAttribiv = (PFNGLGETVERTEXATTRIBIVPROC)load("glGetVertexAttribiv"); glad_glGetVertexAttribPointerv = (PFNGLGETVERTEXATTRIBPOINTERVPROC)load("glGetVertexAttribPointerv"); glad_glIsProgram = (PFNGLISPROGRAMPROC)load("glIsProgram"); glad_glIsShader = (PFNGLISSHADERPROC)load("glIsShader"); glad_glLinkProgram = (PFNGLLINKPROGRAMPROC)load("glLinkProgram"); glad_glShaderSource = (PFNGLSHADERSOURCEPROC)load("glShaderSource"); glad_glUseProgram = (PFNGLUSEPROGRAMPROC)load("glUseProgram"); glad_glUniform1f = (PFNGLUNIFORM1FPROC)load("glUniform1f"); glad_glUniform2f = (PFNGLUNIFORM2FPROC)load("glUniform2f"); glad_glUniform3f = (PFNGLUNIFORM3FPROC)load("glUniform3f"); glad_glUniform4f = (PFNGLUNIFORM4FPROC)load("glUniform4f"); glad_glUniform1i = (PFNGLUNIFORM1IPROC)load("glUniform1i"); glad_glUniform2i = (PFNGLUNIFORM2IPROC)load("glUniform2i"); glad_glUniform3i = (PFNGLUNIFORM3IPROC)load("glUniform3i"); glad_glUniform4i = (PFNGLUNIFORM4IPROC)load("glUniform4i"); glad_glUniform1fv = (PFNGLUNIFORM1FVPROC)load("glUniform1fv"); glad_glUniform2fv = (PFNGLUNIFORM2FVPROC)load("glUniform2fv"); glad_glUniform3fv = (PFNGLUNIFORM3FVPROC)load("glUniform3fv"); glad_glUniform4fv = (PFNGLUNIFORM4FVPROC)load("glUniform4fv"); glad_glUniform1iv = (PFNGLUNIFORM1IVPROC)load("glUniform1iv"); glad_glUniform2iv = (PFNGLUNIFORM2IVPROC)load("glUniform2iv"); glad_glUniform3iv = (PFNGLUNIFORM3IVPROC)load("glUniform3iv"); glad_glUniform4iv = (PFNGLUNIFORM4IVPROC)load("glUniform4iv"); glad_glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC)load("glUniformMatrix2fv"); glad_glUniformMatrix3fv = (PFNGLUNIFORMMATRIX3FVPROC)load("glUniformMatrix3fv"); glad_glUniformMatrix4fv = (PFNGLUNIFORMMATRIX4FVPROC)load("glUniformMatrix4fv"); glad_glValidateProgram = (PFNGLVALIDATEPROGRAMPROC)load("glValidateProgram"); glad_glVertexAttrib1d = (PFNGLVERTEXATTRIB1DPROC)load("glVertexAttrib1d"); glad_glVertexAttrib1dv = (PFNGLVERTEXATTRIB1DVPROC)load("glVertexAttrib1dv"); glad_glVertexAttrib1f = (PFNGLVERTEXATTRIB1FPROC)load("glVertexAttrib1f"); glad_glVertexAttrib1fv = (PFNGLVERTEXATTRIB1FVPROC)load("glVertexAttrib1fv"); glad_glVertexAttrib1s = (PFNGLVERTEXATTRIB1SPROC)load("glVertexAttrib1s"); glad_glVertexAttrib1sv = (PFNGLVERTEXATTRIB1SVPROC)load("glVertexAttrib1sv"); glad_glVertexAttrib2d = (PFNGLVERTEXATTRIB2DPROC)load("glVertexAttrib2d"); glad_glVertexAttrib2dv = (PFNGLVERTEXATTRIB2DVPROC)load("glVertexAttrib2dv"); glad_glVertexAttrib2f = (PFNGLVERTEXATTRIB2FPROC)load("glVertexAttrib2f"); glad_glVertexAttrib2fv = (PFNGLVERTEXATTRIB2FVPROC)load("glVertexAttrib2fv"); glad_glVertexAttrib2s = (PFNGLVERTEXATTRIB2SPROC)load("glVertexAttrib2s"); glad_glVertexAttrib2sv = 
(PFNGLVERTEXATTRIB2SVPROC)load("glVertexAttrib2sv"); glad_glVertexAttrib3d = (PFNGLVERTEXATTRIB3DPROC)load("glVertexAttrib3d"); glad_glVertexAttrib3dv = (PFNGLVERTEXATTRIB3DVPROC)load("glVertexAttrib3dv"); glad_glVertexAttrib3f = (PFNGLVERTEXATTRIB3FPROC)load("glVertexAttrib3f"); glad_glVertexAttrib3fv = (PFNGLVERTEXATTRIB3FVPROC)load("glVertexAttrib3fv"); glad_glVertexAttrib3s = (PFNGLVERTEXATTRIB3SPROC)load("glVertexAttrib3s"); glad_glVertexAttrib3sv = (PFNGLVERTEXATTRIB3SVPROC)load("glVertexAttrib3sv"); glad_glVertexAttrib4Nbv = (PFNGLVERTEXATTRIB4NBVPROC)load("glVertexAttrib4Nbv"); glad_glVertexAttrib4Niv = (PFNGLVERTEXATTRIB4NIVPROC)load("glVertexAttrib4Niv"); glad_glVertexAttrib4Nsv = (PFNGLVERTEXATTRIB4NSVPROC)load("glVertexAttrib4Nsv"); glad_glVertexAttrib4Nub = (PFNGLVERTEXATTRIB4NUBPROC)load("glVertexAttrib4Nub"); glad_glVertexAttrib4Nubv = (PFNGLVERTEXATTRIB4NUBVPROC)load("glVertexAttrib4Nubv"); glad_glVertexAttrib4Nuiv = (PFNGLVERTEXATTRIB4NUIVPROC)load("glVertexAttrib4Nuiv"); glad_glVertexAttrib4Nusv = (PFNGLVERTEXATTRIB4NUSVPROC)load("glVertexAttrib4Nusv"); glad_glVertexAttrib4bv = (PFNGLVERTEXATTRIB4BVPROC)load("glVertexAttrib4bv"); glad_glVertexAttrib4d = (PFNGLVERTEXATTRIB4DPROC)load("glVertexAttrib4d"); glad_glVertexAttrib4dv = (PFNGLVERTEXATTRIB4DVPROC)load("glVertexAttrib4dv"); glad_glVertexAttrib4f = (PFNGLVERTEXATTRIB4FPROC)load("glVertexAttrib4f"); glad_glVertexAttrib4fv = (PFNGLVERTEXATTRIB4FVPROC)load("glVertexAttrib4fv"); glad_glVertexAttrib4iv = (PFNGLVERTEXATTRIB4IVPROC)load("glVertexAttrib4iv"); glad_glVertexAttrib4s = (PFNGLVERTEXATTRIB4SPROC)load("glVertexAttrib4s"); glad_glVertexAttrib4sv = (PFNGLVERTEXATTRIB4SVPROC)load("glVertexAttrib4sv"); glad_glVertexAttrib4ubv = (PFNGLVERTEXATTRIB4UBVPROC)load("glVertexAttrib4ubv"); glad_glVertexAttrib4uiv = (PFNGLVERTEXATTRIB4UIVPROC)load("glVertexAttrib4uiv"); glad_glVertexAttrib4usv = (PFNGLVERTEXATTRIB4USVPROC)load("glVertexAttrib4usv"); glad_glVertexAttribPointer = (PFNGLVERTEXATTRIBPOINTERPROC)load("glVertexAttribPointer"); } static void load_GL_VERSION_2_1(GLADloadproc load) { if(!GLAD_GL_VERSION_2_1) return; glad_glUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC)load("glUniformMatrix2x3fv"); glad_glUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC)load("glUniformMatrix3x2fv"); glad_glUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC)load("glUniformMatrix2x4fv"); glad_glUniformMatrix4x2fv = (PFNGLUNIFORMMATRIX4X2FVPROC)load("glUniformMatrix4x2fv"); glad_glUniformMatrix3x4fv = (PFNGLUNIFORMMATRIX3X4FVPROC)load("glUniformMatrix3x4fv"); glad_glUniformMatrix4x3fv = (PFNGLUNIFORMMATRIX4X3FVPROC)load("glUniformMatrix4x3fv"); } static void load_GL_VERSION_3_0(GLADloadproc load) { if(!GLAD_GL_VERSION_3_0) return; glad_glColorMaski = (PFNGLCOLORMASKIPROC)load("glColorMaski"); glad_glGetBooleani_v = (PFNGLGETBOOLEANI_VPROC)load("glGetBooleani_v"); glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC)load("glGetIntegeri_v"); glad_glEnablei = (PFNGLENABLEIPROC)load("glEnablei"); glad_glDisablei = (PFNGLDISABLEIPROC)load("glDisablei"); glad_glIsEnabledi = (PFNGLISENABLEDIPROC)load("glIsEnabledi"); glad_glBeginTransformFeedback = (PFNGLBEGINTRANSFORMFEEDBACKPROC)load("glBeginTransformFeedback"); glad_glEndTransformFeedback = (PFNGLENDTRANSFORMFEEDBACKPROC)load("glEndTransformFeedback"); glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC)load("glBindBufferRange"); glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC)load("glBindBufferBase"); glad_glTransformFeedbackVaryings = 
(PFNGLTRANSFORMFEEDBACKVARYINGSPROC)load("glTransformFeedbackVaryings"); glad_glGetTransformFeedbackVarying = (PFNGLGETTRANSFORMFEEDBACKVARYINGPROC)load("glGetTransformFeedbackVarying"); glad_glClampColor = (PFNGLCLAMPCOLORPROC)load("glClampColor"); glad_glBeginConditionalRender = (PFNGLBEGINCONDITIONALRENDERPROC)load("glBeginConditionalRender"); glad_glEndConditionalRender = (PFNGLENDCONDITIONALRENDERPROC)load("glEndConditionalRender"); glad_glVertexAttribIPointer = (PFNGLVERTEXATTRIBIPOINTERPROC)load("glVertexAttribIPointer"); glad_glGetVertexAttribIiv = (PFNGLGETVERTEXATTRIBIIVPROC)load("glGetVertexAttribIiv"); glad_glGetVertexAttribIuiv = (PFNGLGETVERTEXATTRIBIUIVPROC)load("glGetVertexAttribIuiv"); glad_glVertexAttribI1i = (PFNGLVERTEXATTRIBI1IPROC)load("glVertexAttribI1i"); glad_glVertexAttribI2i = (PFNGLVERTEXATTRIBI2IPROC)load("glVertexAttribI2i"); glad_glVertexAttribI3i = (PFNGLVERTEXATTRIBI3IPROC)load("glVertexAttribI3i"); glad_glVertexAttribI4i = (PFNGLVERTEXATTRIBI4IPROC)load("glVertexAttribI4i"); glad_glVertexAttribI1ui = (PFNGLVERTEXATTRIBI1UIPROC)load("glVertexAttribI1ui"); glad_glVertexAttribI2ui = (PFNGLVERTEXATTRIBI2UIPROC)load("glVertexAttribI2ui"); glad_glVertexAttribI3ui = (PFNGLVERTEXATTRIBI3UIPROC)load("glVertexAttribI3ui"); glad_glVertexAttribI4ui = (PFNGLVERTEXATTRIBI4UIPROC)load("glVertexAttribI4ui"); glad_glVertexAttribI1iv = (PFNGLVERTEXATTRIBI1IVPROC)load("glVertexAttribI1iv"); glad_glVertexAttribI2iv = (PFNGLVERTEXATTRIBI2IVPROC)load("glVertexAttribI2iv"); glad_glVertexAttribI3iv = (PFNGLVERTEXATTRIBI3IVPROC)load("glVertexAttribI3iv"); glad_glVertexAttribI4iv = (PFNGLVERTEXATTRIBI4IVPROC)load("glVertexAttribI4iv"); glad_glVertexAttribI1uiv = (PFNGLVERTEXATTRIBI1UIVPROC)load("glVertexAttribI1uiv"); glad_glVertexAttribI2uiv = (PFNGLVERTEXATTRIBI2UIVPROC)load("glVertexAttribI2uiv"); glad_glVertexAttribI3uiv = (PFNGLVERTEXATTRIBI3UIVPROC)load("glVertexAttribI3uiv"); glad_glVertexAttribI4uiv = (PFNGLVERTEXATTRIBI4UIVPROC)load("glVertexAttribI4uiv"); glad_glVertexAttribI4bv = (PFNGLVERTEXATTRIBI4BVPROC)load("glVertexAttribI4bv"); glad_glVertexAttribI4sv = (PFNGLVERTEXATTRIBI4SVPROC)load("glVertexAttribI4sv"); glad_glVertexAttribI4ubv = (PFNGLVERTEXATTRIBI4UBVPROC)load("glVertexAttribI4ubv"); glad_glVertexAttribI4usv = (PFNGLVERTEXATTRIBI4USVPROC)load("glVertexAttribI4usv"); glad_glGetUniformuiv = (PFNGLGETUNIFORMUIVPROC)load("glGetUniformuiv"); glad_glBindFragDataLocation = (PFNGLBINDFRAGDATALOCATIONPROC)load("glBindFragDataLocation"); glad_glGetFragDataLocation = (PFNGLGETFRAGDATALOCATIONPROC)load("glGetFragDataLocation"); glad_glUniform1ui = (PFNGLUNIFORM1UIPROC)load("glUniform1ui"); glad_glUniform2ui = (PFNGLUNIFORM2UIPROC)load("glUniform2ui"); glad_glUniform3ui = (PFNGLUNIFORM3UIPROC)load("glUniform3ui"); glad_glUniform4ui = (PFNGLUNIFORM4UIPROC)load("glUniform4ui"); glad_glUniform1uiv = (PFNGLUNIFORM1UIVPROC)load("glUniform1uiv"); glad_glUniform2uiv = (PFNGLUNIFORM2UIVPROC)load("glUniform2uiv"); glad_glUniform3uiv = (PFNGLUNIFORM3UIVPROC)load("glUniform3uiv"); glad_glUniform4uiv = (PFNGLUNIFORM4UIVPROC)load("glUniform4uiv"); glad_glTexParameterIiv = (PFNGLTEXPARAMETERIIVPROC)load("glTexParameterIiv"); glad_glTexParameterIuiv = (PFNGLTEXPARAMETERIUIVPROC)load("glTexParameterIuiv"); glad_glGetTexParameterIiv = (PFNGLGETTEXPARAMETERIIVPROC)load("glGetTexParameterIiv"); glad_glGetTexParameterIuiv = (PFNGLGETTEXPARAMETERIUIVPROC)load("glGetTexParameterIuiv"); glad_glClearBufferiv = (PFNGLCLEARBUFFERIVPROC)load("glClearBufferiv"); glad_glClearBufferuiv = 
(PFNGLCLEARBUFFERUIVPROC)load("glClearBufferuiv"); glad_glClearBufferfv = (PFNGLCLEARBUFFERFVPROC)load("glClearBufferfv"); glad_glClearBufferfi = (PFNGLCLEARBUFFERFIPROC)load("glClearBufferfi"); glad_glGetStringi = (PFNGLGETSTRINGIPROC)load("glGetStringi"); glad_glIsRenderbuffer = (PFNGLISRENDERBUFFERPROC)load("glIsRenderbuffer"); glad_glBindRenderbuffer = (PFNGLBINDRENDERBUFFERPROC)load("glBindRenderbuffer"); glad_glDeleteRenderbuffers = (PFNGLDELETERENDERBUFFERSPROC)load("glDeleteRenderbuffers"); glad_glGenRenderbuffers = (PFNGLGENRENDERBUFFERSPROC)load("glGenRenderbuffers"); glad_glRenderbufferStorage = (PFNGLRENDERBUFFERSTORAGEPROC)load("glRenderbufferStorage"); glad_glGetRenderbufferParameteriv = (PFNGLGETRENDERBUFFERPARAMETERIVPROC)load("glGetRenderbufferParameteriv"); glad_glIsFramebuffer = (PFNGLISFRAMEBUFFERPROC)load("glIsFramebuffer"); glad_glBindFramebuffer = (PFNGLBINDFRAMEBUFFERPROC)load("glBindFramebuffer"); glad_glDeleteFramebuffers = (PFNGLDELETEFRAMEBUFFERSPROC)load("glDeleteFramebuffers"); glad_glGenFramebuffers = (PFNGLGENFRAMEBUFFERSPROC)load("glGenFramebuffers"); glad_glCheckFramebufferStatus = (PFNGLCHECKFRAMEBUFFERSTATUSPROC)load("glCheckFramebufferStatus"); glad_glFramebufferTexture1D = (PFNGLFRAMEBUFFERTEXTURE1DPROC)load("glFramebufferTexture1D"); glad_glFramebufferTexture2D = (PFNGLFRAMEBUFFERTEXTURE2DPROC)load("glFramebufferTexture2D"); glad_glFramebufferTexture3D = (PFNGLFRAMEBUFFERTEXTURE3DPROC)load("glFramebufferTexture3D"); glad_glFramebufferRenderbuffer = (PFNGLFRAMEBUFFERRENDERBUFFERPROC)load("glFramebufferRenderbuffer"); glad_glGetFramebufferAttachmentParameteriv = (PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC)load("glGetFramebufferAttachmentParameteriv"); glad_glGenerateMipmap = (PFNGLGENERATEMIPMAPPROC)load("glGenerateMipmap"); glad_glBlitFramebuffer = (PFNGLBLITFRAMEBUFFERPROC)load("glBlitFramebuffer"); glad_glRenderbufferStorageMultisample = (PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC)load("glRenderbufferStorageMultisample"); glad_glFramebufferTextureLayer = (PFNGLFRAMEBUFFERTEXTURELAYERPROC)load("glFramebufferTextureLayer"); glad_glMapBufferRange = (PFNGLMAPBUFFERRANGEPROC)load("glMapBufferRange"); glad_glFlushMappedBufferRange = (PFNGLFLUSHMAPPEDBUFFERRANGEPROC)load("glFlushMappedBufferRange"); glad_glBindVertexArray = (PFNGLBINDVERTEXARRAYPROC)load("glBindVertexArray"); glad_glDeleteVertexArrays = (PFNGLDELETEVERTEXARRAYSPROC)load("glDeleteVertexArrays"); glad_glGenVertexArrays = (PFNGLGENVERTEXARRAYSPROC)load("glGenVertexArrays"); glad_glIsVertexArray = (PFNGLISVERTEXARRAYPROC)load("glIsVertexArray"); } static void load_GL_VERSION_3_1(GLADloadproc load) { if(!GLAD_GL_VERSION_3_1) return; glad_glDrawArraysInstanced = (PFNGLDRAWARRAYSINSTANCEDPROC)load("glDrawArraysInstanced"); glad_glDrawElementsInstanced = (PFNGLDRAWELEMENTSINSTANCEDPROC)load("glDrawElementsInstanced"); glad_glTexBuffer = (PFNGLTEXBUFFERPROC)load("glTexBuffer"); glad_glPrimitiveRestartIndex = (PFNGLPRIMITIVERESTARTINDEXPROC)load("glPrimitiveRestartIndex"); glad_glCopyBufferSubData = (PFNGLCOPYBUFFERSUBDATAPROC)load("glCopyBufferSubData"); glad_glGetUniformIndices = (PFNGLGETUNIFORMINDICESPROC)load("glGetUniformIndices"); glad_glGetActiveUniformsiv = (PFNGLGETACTIVEUNIFORMSIVPROC)load("glGetActiveUniformsiv"); glad_glGetActiveUniformName = (PFNGLGETACTIVEUNIFORMNAMEPROC)load("glGetActiveUniformName"); glad_glGetUniformBlockIndex = (PFNGLGETUNIFORMBLOCKINDEXPROC)load("glGetUniformBlockIndex"); glad_glGetActiveUniformBlockiv = 
(PFNGLGETACTIVEUNIFORMBLOCKIVPROC)load("glGetActiveUniformBlockiv"); glad_glGetActiveUniformBlockName = (PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC)load("glGetActiveUniformBlockName"); glad_glUniformBlockBinding = (PFNGLUNIFORMBLOCKBINDINGPROC)load("glUniformBlockBinding"); glad_glBindBufferRange = (PFNGLBINDBUFFERRANGEPROC)load("glBindBufferRange"); glad_glBindBufferBase = (PFNGLBINDBUFFERBASEPROC)load("glBindBufferBase"); glad_glGetIntegeri_v = (PFNGLGETINTEGERI_VPROC)load("glGetIntegeri_v"); } static void load_GL_VERSION_3_2(GLADloadproc load) { if(!GLAD_GL_VERSION_3_2) return; glad_glDrawElementsBaseVertex = (PFNGLDRAWELEMENTSBASEVERTEXPROC)load("glDrawElementsBaseVertex"); glad_glDrawRangeElementsBaseVertex = (PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC)load("glDrawRangeElementsBaseVertex"); glad_glDrawElementsInstancedBaseVertex = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC)load("glDrawElementsInstancedBaseVertex"); glad_glMultiDrawElementsBaseVertex = (PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC)load("glMultiDrawElementsBaseVertex"); glad_glProvokingVertex = (PFNGLPROVOKINGVERTEXPROC)load("glProvokingVertex"); glad_glFenceSync = (PFNGLFENCESYNCPROC)load("glFenceSync"); glad_glIsSync = (PFNGLISSYNCPROC)load("glIsSync"); glad_glDeleteSync = (PFNGLDELETESYNCPROC)load("glDeleteSync"); glad_glClientWaitSync = (PFNGLCLIENTWAITSYNCPROC)load("glClientWaitSync"); glad_glWaitSync = (PFNGLWAITSYNCPROC)load("glWaitSync"); glad_glGetInteger64v = (PFNGLGETINTEGER64VPROC)load("glGetInteger64v"); glad_glGetSynciv = (PFNGLGETSYNCIVPROC)load("glGetSynciv"); glad_glGetInteger64i_v = (PFNGLGETINTEGER64I_VPROC)load("glGetInteger64i_v"); glad_glGetBufferParameteri64v = (PFNGLGETBUFFERPARAMETERI64VPROC)load("glGetBufferParameteri64v"); glad_glFramebufferTexture = (PFNGLFRAMEBUFFERTEXTUREPROC)load("glFramebufferTexture"); glad_glTexImage2DMultisample = (PFNGLTEXIMAGE2DMULTISAMPLEPROC)load("glTexImage2DMultisample"); glad_glTexImage3DMultisample = (PFNGLTEXIMAGE3DMULTISAMPLEPROC)load("glTexImage3DMultisample"); glad_glGetMultisamplefv = (PFNGLGETMULTISAMPLEFVPROC)load("glGetMultisamplefv"); glad_glSampleMaski = (PFNGLSAMPLEMASKIPROC)load("glSampleMaski"); } static void load_GL_VERSION_3_3(GLADloadproc load) { if(!GLAD_GL_VERSION_3_3) return; glad_glBindFragDataLocationIndexed = (PFNGLBINDFRAGDATALOCATIONINDEXEDPROC)load("glBindFragDataLocationIndexed"); glad_glGetFragDataIndex = (PFNGLGETFRAGDATAINDEXPROC)load("glGetFragDataIndex"); glad_glGenSamplers = (PFNGLGENSAMPLERSPROC)load("glGenSamplers"); glad_glDeleteSamplers = (PFNGLDELETESAMPLERSPROC)load("glDeleteSamplers"); glad_glIsSampler = (PFNGLISSAMPLERPROC)load("glIsSampler"); glad_glBindSampler = (PFNGLBINDSAMPLERPROC)load("glBindSampler"); glad_glSamplerParameteri = (PFNGLSAMPLERPARAMETERIPROC)load("glSamplerParameteri"); glad_glSamplerParameteriv = (PFNGLSAMPLERPARAMETERIVPROC)load("glSamplerParameteriv"); glad_glSamplerParameterf = (PFNGLSAMPLERPARAMETERFPROC)load("glSamplerParameterf"); glad_glSamplerParameterfv = (PFNGLSAMPLERPARAMETERFVPROC)load("glSamplerParameterfv"); glad_glSamplerParameterIiv = (PFNGLSAMPLERPARAMETERIIVPROC)load("glSamplerParameterIiv"); glad_glSamplerParameterIuiv = (PFNGLSAMPLERPARAMETERIUIVPROC)load("glSamplerParameterIuiv"); glad_glGetSamplerParameteriv = (PFNGLGETSAMPLERPARAMETERIVPROC)load("glGetSamplerParameteriv"); glad_glGetSamplerParameterIiv = (PFNGLGETSAMPLERPARAMETERIIVPROC)load("glGetSamplerParameterIiv"); glad_glGetSamplerParameterfv = (PFNGLGETSAMPLERPARAMETERFVPROC)load("glGetSamplerParameterfv"); glad_glGetSamplerParameterIuiv = 
(PFNGLGETSAMPLERPARAMETERIUIVPROC)load("glGetSamplerParameterIuiv"); glad_glQueryCounter = (PFNGLQUERYCOUNTERPROC)load("glQueryCounter"); glad_glGetQueryObjecti64v = (PFNGLGETQUERYOBJECTI64VPROC)load("glGetQueryObjecti64v"); glad_glGetQueryObjectui64v = (PFNGLGETQUERYOBJECTUI64VPROC)load("glGetQueryObjectui64v"); glad_glVertexAttribDivisor = (PFNGLVERTEXATTRIBDIVISORPROC)load("glVertexAttribDivisor"); glad_glVertexAttribP1ui = (PFNGLVERTEXATTRIBP1UIPROC)load("glVertexAttribP1ui"); glad_glVertexAttribP1uiv = (PFNGLVERTEXATTRIBP1UIVPROC)load("glVertexAttribP1uiv"); glad_glVertexAttribP2ui = (PFNGLVERTEXATTRIBP2UIPROC)load("glVertexAttribP2ui"); glad_glVertexAttribP2uiv = (PFNGLVERTEXATTRIBP2UIVPROC)load("glVertexAttribP2uiv"); glad_glVertexAttribP3ui = (PFNGLVERTEXATTRIBP3UIPROC)load("glVertexAttribP3ui"); glad_glVertexAttribP3uiv = (PFNGLVERTEXATTRIBP3UIVPROC)load("glVertexAttribP3uiv"); glad_glVertexAttribP4ui = (PFNGLVERTEXATTRIBP4UIPROC)load("glVertexAttribP4ui"); glad_glVertexAttribP4uiv = (PFNGLVERTEXATTRIBP4UIVPROC)load("glVertexAttribP4uiv"); glad_glVertexP2ui = (PFNGLVERTEXP2UIPROC)load("glVertexP2ui"); glad_glVertexP2uiv = (PFNGLVERTEXP2UIVPROC)load("glVertexP2uiv"); glad_glVertexP3ui = (PFNGLVERTEXP3UIPROC)load("glVertexP3ui"); glad_glVertexP3uiv = (PFNGLVERTEXP3UIVPROC)load("glVertexP3uiv"); glad_glVertexP4ui = (PFNGLVERTEXP4UIPROC)load("glVertexP4ui"); glad_glVertexP4uiv = (PFNGLVERTEXP4UIVPROC)load("glVertexP4uiv"); glad_glTexCoordP1ui = (PFNGLTEXCOORDP1UIPROC)load("glTexCoordP1ui"); glad_glTexCoordP1uiv = (PFNGLTEXCOORDP1UIVPROC)load("glTexCoordP1uiv"); glad_glTexCoordP2ui = (PFNGLTEXCOORDP2UIPROC)load("glTexCoordP2ui"); glad_glTexCoordP2uiv = (PFNGLTEXCOORDP2UIVPROC)load("glTexCoordP2uiv"); glad_glTexCoordP3ui = (PFNGLTEXCOORDP3UIPROC)load("glTexCoordP3ui"); glad_glTexCoordP3uiv = (PFNGLTEXCOORDP3UIVPROC)load("glTexCoordP3uiv"); glad_glTexCoordP4ui = (PFNGLTEXCOORDP4UIPROC)load("glTexCoordP4ui"); glad_glTexCoordP4uiv = (PFNGLTEXCOORDP4UIVPROC)load("glTexCoordP4uiv"); glad_glMultiTexCoordP1ui = (PFNGLMULTITEXCOORDP1UIPROC)load("glMultiTexCoordP1ui"); glad_glMultiTexCoordP1uiv = (PFNGLMULTITEXCOORDP1UIVPROC)load("glMultiTexCoordP1uiv"); glad_glMultiTexCoordP2ui = (PFNGLMULTITEXCOORDP2UIPROC)load("glMultiTexCoordP2ui"); glad_glMultiTexCoordP2uiv = (PFNGLMULTITEXCOORDP2UIVPROC)load("glMultiTexCoordP2uiv"); glad_glMultiTexCoordP3ui = (PFNGLMULTITEXCOORDP3UIPROC)load("glMultiTexCoordP3ui"); glad_glMultiTexCoordP3uiv = (PFNGLMULTITEXCOORDP3UIVPROC)load("glMultiTexCoordP3uiv"); glad_glMultiTexCoordP4ui = (PFNGLMULTITEXCOORDP4UIPROC)load("glMultiTexCoordP4ui"); glad_glMultiTexCoordP4uiv = (PFNGLMULTITEXCOORDP4UIVPROC)load("glMultiTexCoordP4uiv"); glad_glNormalP3ui = (PFNGLNORMALP3UIPROC)load("glNormalP3ui"); glad_glNormalP3uiv = (PFNGLNORMALP3UIVPROC)load("glNormalP3uiv"); glad_glColorP3ui = (PFNGLCOLORP3UIPROC)load("glColorP3ui"); glad_glColorP3uiv = (PFNGLCOLORP3UIVPROC)load("glColorP3uiv"); glad_glColorP4ui = (PFNGLCOLORP4UIPROC)load("glColorP4ui"); glad_glColorP4uiv = (PFNGLCOLORP4UIVPROC)load("glColorP4uiv"); glad_glSecondaryColorP3ui = (PFNGLSECONDARYCOLORP3UIPROC)load("glSecondaryColorP3ui"); glad_glSecondaryColorP3uiv = (PFNGLSECONDARYCOLORP3UIVPROC)load("glSecondaryColorP3uiv"); } static void load_GL_VERSION_4_0(GLADloadproc load) { if(!GLAD_GL_VERSION_4_0) return; glad_glMinSampleShading = (PFNGLMINSAMPLESHADINGPROC)load("glMinSampleShading"); glad_glBlendEquationi = (PFNGLBLENDEQUATIONIPROC)load("glBlendEquationi"); glad_glBlendEquationSeparatei = 
(PFNGLBLENDEQUATIONSEPARATEIPROC)load("glBlendEquationSeparatei"); glad_glBlendFunci = (PFNGLBLENDFUNCIPROC)load("glBlendFunci"); glad_glBlendFuncSeparatei = (PFNGLBLENDFUNCSEPARATEIPROC)load("glBlendFuncSeparatei"); glad_glDrawArraysIndirect = (PFNGLDRAWARRAYSINDIRECTPROC)load("glDrawArraysIndirect"); glad_glDrawElementsIndirect = (PFNGLDRAWELEMENTSINDIRECTPROC)load("glDrawElementsIndirect"); glad_glUniform1d = (PFNGLUNIFORM1DPROC)load("glUniform1d"); glad_glUniform2d = (PFNGLUNIFORM2DPROC)load("glUniform2d"); glad_glUniform3d = (PFNGLUNIFORM3DPROC)load("glUniform3d"); glad_glUniform4d = (PFNGLUNIFORM4DPROC)load("glUniform4d"); glad_glUniform1dv = (PFNGLUNIFORM1DVPROC)load("glUniform1dv"); glad_glUniform2dv = (PFNGLUNIFORM2DVPROC)load("glUniform2dv"); glad_glUniform3dv = (PFNGLUNIFORM3DVPROC)load("glUniform3dv"); glad_glUniform4dv = (PFNGLUNIFORM4DVPROC)load("glUniform4dv"); glad_glUniformMatrix2dv = (PFNGLUNIFORMMATRIX2DVPROC)load("glUniformMatrix2dv"); glad_glUniformMatrix3dv = (PFNGLUNIFORMMATRIX3DVPROC)load("glUniformMatrix3dv"); glad_glUniformMatrix4dv = (PFNGLUNIFORMMATRIX4DVPROC)load("glUniformMatrix4dv"); glad_glUniformMatrix2x3dv = (PFNGLUNIFORMMATRIX2X3DVPROC)load("glUniformMatrix2x3dv"); glad_glUniformMatrix2x4dv = (PFNGLUNIFORMMATRIX2X4DVPROC)load("glUniformMatrix2x4dv"); glad_glUniformMatrix3x2dv = (PFNGLUNIFORMMATRIX3X2DVPROC)load("glUniformMatrix3x2dv"); glad_glUniformMatrix3x4dv = (PFNGLUNIFORMMATRIX3X4DVPROC)load("glUniformMatrix3x4dv"); glad_glUniformMatrix4x2dv = (PFNGLUNIFORMMATRIX4X2DVPROC)load("glUniformMatrix4x2dv"); glad_glUniformMatrix4x3dv = (PFNGLUNIFORMMATRIX4X3DVPROC)load("glUniformMatrix4x3dv"); glad_glGetUniformdv = (PFNGLGETUNIFORMDVPROC)load("glGetUniformdv"); glad_glGetSubroutineUniformLocation = (PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC)load("glGetSubroutineUniformLocation"); glad_glGetSubroutineIndex = (PFNGLGETSUBROUTINEINDEXPROC)load("glGetSubroutineIndex"); glad_glGetActiveSubroutineUniformiv = (PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC)load("glGetActiveSubroutineUniformiv"); glad_glGetActiveSubroutineUniformName = (PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC)load("glGetActiveSubroutineUniformName"); glad_glGetActiveSubroutineName = (PFNGLGETACTIVESUBROUTINENAMEPROC)load("glGetActiveSubroutineName"); glad_glUniformSubroutinesuiv = (PFNGLUNIFORMSUBROUTINESUIVPROC)load("glUniformSubroutinesuiv"); glad_glGetUniformSubroutineuiv = (PFNGLGETUNIFORMSUBROUTINEUIVPROC)load("glGetUniformSubroutineuiv"); glad_glGetProgramStageiv = (PFNGLGETPROGRAMSTAGEIVPROC)load("glGetProgramStageiv"); glad_glPatchParameteri = (PFNGLPATCHPARAMETERIPROC)load("glPatchParameteri"); glad_glPatchParameterfv = (PFNGLPATCHPARAMETERFVPROC)load("glPatchParameterfv"); glad_glBindTransformFeedback = (PFNGLBINDTRANSFORMFEEDBACKPROC)load("glBindTransformFeedback"); glad_glDeleteTransformFeedbacks = (PFNGLDELETETRANSFORMFEEDBACKSPROC)load("glDeleteTransformFeedbacks"); glad_glGenTransformFeedbacks = (PFNGLGENTRANSFORMFEEDBACKSPROC)load("glGenTransformFeedbacks"); glad_glIsTransformFeedback = (PFNGLISTRANSFORMFEEDBACKPROC)load("glIsTransformFeedback"); glad_glPauseTransformFeedback = (PFNGLPAUSETRANSFORMFEEDBACKPROC)load("glPauseTransformFeedback"); glad_glResumeTransformFeedback = (PFNGLRESUMETRANSFORMFEEDBACKPROC)load("glResumeTransformFeedback"); glad_glDrawTransformFeedback = (PFNGLDRAWTRANSFORMFEEDBACKPROC)load("glDrawTransformFeedback"); glad_glDrawTransformFeedbackStream = (PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC)load("glDrawTransformFeedbackStream"); glad_glBeginQueryIndexed = 
(PFNGLBEGINQUERYINDEXEDPROC)load("glBeginQueryIndexed"); glad_glEndQueryIndexed = (PFNGLENDQUERYINDEXEDPROC)load("glEndQueryIndexed"); glad_glGetQueryIndexediv = (PFNGLGETQUERYINDEXEDIVPROC)load("glGetQueryIndexediv"); } static void load_GL_VERSION_4_1(GLADloadproc load) { if(!GLAD_GL_VERSION_4_1) return; glad_glReleaseShaderCompiler = (PFNGLRELEASESHADERCOMPILERPROC)load("glReleaseShaderCompiler"); glad_glShaderBinary = (PFNGLSHADERBINARYPROC)load("glShaderBinary"); glad_glGetShaderPrecisionFormat = (PFNGLGETSHADERPRECISIONFORMATPROC)load("glGetShaderPrecisionFormat"); glad_glDepthRangef = (PFNGLDEPTHRANGEFPROC)load("glDepthRangef"); glad_glClearDepthf = (PFNGLCLEARDEPTHFPROC)load("glClearDepthf"); glad_glGetProgramBinary = (PFNGLGETPROGRAMBINARYPROC)load("glGetProgramBinary"); glad_glProgramBinary = (PFNGLPROGRAMBINARYPROC)load("glProgramBinary"); glad_glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC)load("glProgramParameteri"); glad_glUseProgramStages = (PFNGLUSEPROGRAMSTAGESPROC)load("glUseProgramStages"); glad_glActiveShaderProgram = (PFNGLACTIVESHADERPROGRAMPROC)load("glActiveShaderProgram"); glad_glCreateShaderProgramv = (PFNGLCREATESHADERPROGRAMVPROC)load("glCreateShaderProgramv"); glad_glBindProgramPipeline = (PFNGLBINDPROGRAMPIPELINEPROC)load("glBindProgramPipeline"); glad_glDeleteProgramPipelines = (PFNGLDELETEPROGRAMPIPELINESPROC)load("glDeleteProgramPipelines"); glad_glGenProgramPipelines = (PFNGLGENPROGRAMPIPELINESPROC)load("glGenProgramPipelines"); glad_glIsProgramPipeline = (PFNGLISPROGRAMPIPELINEPROC)load("glIsProgramPipeline"); glad_glGetProgramPipelineiv = (PFNGLGETPROGRAMPIPELINEIVPROC)load("glGetProgramPipelineiv"); glad_glProgramParameteri = (PFNGLPROGRAMPARAMETERIPROC)load("glProgramParameteri"); glad_glProgramUniform1i = (PFNGLPROGRAMUNIFORM1IPROC)load("glProgramUniform1i"); glad_glProgramUniform1iv = (PFNGLPROGRAMUNIFORM1IVPROC)load("glProgramUniform1iv"); glad_glProgramUniform1f = (PFNGLPROGRAMUNIFORM1FPROC)load("glProgramUniform1f"); glad_glProgramUniform1fv = (PFNGLPROGRAMUNIFORM1FVPROC)load("glProgramUniform1fv"); glad_glProgramUniform1d = (PFNGLPROGRAMUNIFORM1DPROC)load("glProgramUniform1d"); glad_glProgramUniform1dv = (PFNGLPROGRAMUNIFORM1DVPROC)load("glProgramUniform1dv"); glad_glProgramUniform1ui = (PFNGLPROGRAMUNIFORM1UIPROC)load("glProgramUniform1ui"); glad_glProgramUniform1uiv = (PFNGLPROGRAMUNIFORM1UIVPROC)load("glProgramUniform1uiv"); glad_glProgramUniform2i = (PFNGLPROGRAMUNIFORM2IPROC)load("glProgramUniform2i"); glad_glProgramUniform2iv = (PFNGLPROGRAMUNIFORM2IVPROC)load("glProgramUniform2iv"); glad_glProgramUniform2f = (PFNGLPROGRAMUNIFORM2FPROC)load("glProgramUniform2f"); glad_glProgramUniform2fv = (PFNGLPROGRAMUNIFORM2FVPROC)load("glProgramUniform2fv"); glad_glProgramUniform2d = (PFNGLPROGRAMUNIFORM2DPROC)load("glProgramUniform2d"); glad_glProgramUniform2dv = (PFNGLPROGRAMUNIFORM2DVPROC)load("glProgramUniform2dv"); glad_glProgramUniform2ui = (PFNGLPROGRAMUNIFORM2UIPROC)load("glProgramUniform2ui"); glad_glProgramUniform2uiv = (PFNGLPROGRAMUNIFORM2UIVPROC)load("glProgramUniform2uiv"); glad_glProgramUniform3i = (PFNGLPROGRAMUNIFORM3IPROC)load("glProgramUniform3i"); glad_glProgramUniform3iv = (PFNGLPROGRAMUNIFORM3IVPROC)load("glProgramUniform3iv"); glad_glProgramUniform3f = (PFNGLPROGRAMUNIFORM3FPROC)load("glProgramUniform3f"); glad_glProgramUniform3fv = (PFNGLPROGRAMUNIFORM3FVPROC)load("glProgramUniform3fv"); glad_glProgramUniform3d = (PFNGLPROGRAMUNIFORM3DPROC)load("glProgramUniform3d"); glad_glProgramUniform3dv = 
(PFNGLPROGRAMUNIFORM3DVPROC)load("glProgramUniform3dv"); glad_glProgramUniform3ui = (PFNGLPROGRAMUNIFORM3UIPROC)load("glProgramUniform3ui"); glad_glProgramUniform3uiv = (PFNGLPROGRAMUNIFORM3UIVPROC)load("glProgramUniform3uiv"); glad_glProgramUniform4i = (PFNGLPROGRAMUNIFORM4IPROC)load("glProgramUniform4i"); glad_glProgramUniform4iv = (PFNGLPROGRAMUNIFORM4IVPROC)load("glProgramUniform4iv"); glad_glProgramUniform4f = (PFNGLPROGRAMUNIFORM4FPROC)load("glProgramUniform4f"); glad_glProgramUniform4fv = (PFNGLPROGRAMUNIFORM4FVPROC)load("glProgramUniform4fv"); glad_glProgramUniform4d = (PFNGLPROGRAMUNIFORM4DPROC)load("glProgramUniform4d"); glad_glProgramUniform4dv = (PFNGLPROGRAMUNIFORM4DVPROC)load("glProgramUniform4dv"); glad_glProgramUniform4ui = (PFNGLPROGRAMUNIFORM4UIPROC)load("glProgramUniform4ui"); glad_glProgramUniform4uiv = (PFNGLPROGRAMUNIFORM4UIVPROC)load("glProgramUniform4uiv"); glad_glProgramUniformMatrix2fv = (PFNGLPROGRAMUNIFORMMATRIX2FVPROC)load("glProgramUniformMatrix2fv"); glad_glProgramUniformMatrix3fv = (PFNGLPROGRAMUNIFORMMATRIX3FVPROC)load("glProgramUniformMatrix3fv"); glad_glProgramUniformMatrix4fv = (PFNGLPROGRAMUNIFORMMATRIX4FVPROC)load("glProgramUniformMatrix4fv"); glad_glProgramUniformMatrix2dv = (PFNGLPROGRAMUNIFORMMATRIX2DVPROC)load("glProgramUniformMatrix2dv"); glad_glProgramUniformMatrix3dv = (PFNGLPROGRAMUNIFORMMATRIX3DVPROC)load("glProgramUniformMatrix3dv"); glad_glProgramUniformMatrix4dv = (PFNGLPROGRAMUNIFORMMATRIX4DVPROC)load("glProgramUniformMatrix4dv"); glad_glProgramUniformMatrix2x3fv = (PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC)load("glProgramUniformMatrix2x3fv"); glad_glProgramUniformMatrix3x2fv = (PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC)load("glProgramUniformMatrix3x2fv"); glad_glProgramUniformMatrix2x4fv = (PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC)load("glProgramUniformMatrix2x4fv"); glad_glProgramUniformMatrix4x2fv = (PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC)load("glProgramUniformMatrix4x2fv"); glad_glProgramUniformMatrix3x4fv = (PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC)load("glProgramUniformMatrix3x4fv"); glad_glProgramUniformMatrix4x3fv = (PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC)load("glProgramUniformMatrix4x3fv"); glad_glProgramUniformMatrix2x3dv = (PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC)load("glProgramUniformMatrix2x3dv"); glad_glProgramUniformMatrix3x2dv = (PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC)load("glProgramUniformMatrix3x2dv"); glad_glProgramUniformMatrix2x4dv = (PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC)load("glProgramUniformMatrix2x4dv"); glad_glProgramUniformMatrix4x2dv = (PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC)load("glProgramUniformMatrix4x2dv"); glad_glProgramUniformMatrix3x4dv = (PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC)load("glProgramUniformMatrix3x4dv"); glad_glProgramUniformMatrix4x3dv = (PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC)load("glProgramUniformMatrix4x3dv"); glad_glValidateProgramPipeline = (PFNGLVALIDATEPROGRAMPIPELINEPROC)load("glValidateProgramPipeline"); glad_glGetProgramPipelineInfoLog = (PFNGLGETPROGRAMPIPELINEINFOLOGPROC)load("glGetProgramPipelineInfoLog"); glad_glVertexAttribL1d = (PFNGLVERTEXATTRIBL1DPROC)load("glVertexAttribL1d"); glad_glVertexAttribL2d = (PFNGLVERTEXATTRIBL2DPROC)load("glVertexAttribL2d"); glad_glVertexAttribL3d = (PFNGLVERTEXATTRIBL3DPROC)load("glVertexAttribL3d"); glad_glVertexAttribL4d = (PFNGLVERTEXATTRIBL4DPROC)load("glVertexAttribL4d"); glad_glVertexAttribL1dv = (PFNGLVERTEXATTRIBL1DVPROC)load("glVertexAttribL1dv"); glad_glVertexAttribL2dv = (PFNGLVERTEXATTRIBL2DVPROC)load("glVertexAttribL2dv"); glad_glVertexAttribL3dv = 
(PFNGLVERTEXATTRIBL3DVPROC)load("glVertexAttribL3dv"); glad_glVertexAttribL4dv = (PFNGLVERTEXATTRIBL4DVPROC)load("glVertexAttribL4dv"); glad_glVertexAttribLPointer = (PFNGLVERTEXATTRIBLPOINTERPROC)load("glVertexAttribLPointer"); glad_glGetVertexAttribLdv = (PFNGLGETVERTEXATTRIBLDVPROC)load("glGetVertexAttribLdv"); glad_glViewportArrayv = (PFNGLVIEWPORTARRAYVPROC)load("glViewportArrayv"); glad_glViewportIndexedf = (PFNGLVIEWPORTINDEXEDFPROC)load("glViewportIndexedf"); glad_glViewportIndexedfv = (PFNGLVIEWPORTINDEXEDFVPROC)load("glViewportIndexedfv"); glad_glScissorArrayv = (PFNGLSCISSORARRAYVPROC)load("glScissorArrayv"); glad_glScissorIndexed = (PFNGLSCISSORINDEXEDPROC)load("glScissorIndexed"); glad_glScissorIndexedv = (PFNGLSCISSORINDEXEDVPROC)load("glScissorIndexedv"); glad_glDepthRangeArrayv = (PFNGLDEPTHRANGEARRAYVPROC)load("glDepthRangeArrayv"); glad_glDepthRangeIndexed = (PFNGLDEPTHRANGEINDEXEDPROC)load("glDepthRangeIndexed"); glad_glGetFloati_v = (PFNGLGETFLOATI_VPROC)load("glGetFloati_v"); glad_glGetDoublei_v = (PFNGLGETDOUBLEI_VPROC)load("glGetDoublei_v"); } static void load_GL_VERSION_4_2(GLADloadproc load) { if(!GLAD_GL_VERSION_4_2) return; glad_glDrawArraysInstancedBaseInstance = (PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC)load("glDrawArraysInstancedBaseInstance"); glad_glDrawElementsInstancedBaseInstance = (PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC)load("glDrawElementsInstancedBaseInstance"); glad_glDrawElementsInstancedBaseVertexBaseInstance = (PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC)load("glDrawElementsInstancedBaseVertexBaseInstance"); glad_glGetInternalformativ = (PFNGLGETINTERNALFORMATIVPROC)load("glGetInternalformativ"); glad_glGetActiveAtomicCounterBufferiv = (PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC)load("glGetActiveAtomicCounterBufferiv"); glad_glBindImageTexture = (PFNGLBINDIMAGETEXTUREPROC)load("glBindImageTexture"); glad_glMemoryBarrier = (PFNGLMEMORYBARRIERPROC)load("glMemoryBarrier"); glad_glTexStorage1D = (PFNGLTEXSTORAGE1DPROC)load("glTexStorage1D"); glad_glTexStorage2D = (PFNGLTEXSTORAGE2DPROC)load("glTexStorage2D"); glad_glTexStorage3D = (PFNGLTEXSTORAGE3DPROC)load("glTexStorage3D"); glad_glDrawTransformFeedbackInstanced = (PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC)load("glDrawTransformFeedbackInstanced"); glad_glDrawTransformFeedbackStreamInstanced = (PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC)load("glDrawTransformFeedbackStreamInstanced"); } static void load_GL_VERSION_4_3(GLADloadproc load) { if(!GLAD_GL_VERSION_4_3) return; glad_glClearBufferData = (PFNGLCLEARBUFFERDATAPROC)load("glClearBufferData"); glad_glClearBufferSubData = (PFNGLCLEARBUFFERSUBDATAPROC)load("glClearBufferSubData"); glad_glDispatchCompute = (PFNGLDISPATCHCOMPUTEPROC)load("glDispatchCompute"); glad_glDispatchComputeIndirect = (PFNGLDISPATCHCOMPUTEINDIRECTPROC)load("glDispatchComputeIndirect"); glad_glCopyImageSubData = (PFNGLCOPYIMAGESUBDATAPROC)load("glCopyImageSubData"); glad_glFramebufferParameteri = (PFNGLFRAMEBUFFERPARAMETERIPROC)load("glFramebufferParameteri"); glad_glGetFramebufferParameteriv = (PFNGLGETFRAMEBUFFERPARAMETERIVPROC)load("glGetFramebufferParameteriv"); glad_glGetInternalformati64v = (PFNGLGETINTERNALFORMATI64VPROC)load("glGetInternalformati64v"); glad_glInvalidateTexSubImage = (PFNGLINVALIDATETEXSUBIMAGEPROC)load("glInvalidateTexSubImage"); glad_glInvalidateTexImage = (PFNGLINVALIDATETEXIMAGEPROC)load("glInvalidateTexImage"); glad_glInvalidateBufferSubData = (PFNGLINVALIDATEBUFFERSUBDATAPROC)load("glInvalidateBufferSubData"); glad_glInvalidateBufferData = 
(PFNGLINVALIDATEBUFFERDATAPROC)load("glInvalidateBufferData"); glad_glInvalidateFramebuffer = (PFNGLINVALIDATEFRAMEBUFFERPROC)load("glInvalidateFramebuffer"); glad_glInvalidateSubFramebuffer = (PFNGLINVALIDATESUBFRAMEBUFFERPROC)load("glInvalidateSubFramebuffer"); glad_glMultiDrawArraysIndirect = (PFNGLMULTIDRAWARRAYSINDIRECTPROC)load("glMultiDrawArraysIndirect"); glad_glMultiDrawElementsIndirect = (PFNGLMULTIDRAWELEMENTSINDIRECTPROC)load("glMultiDrawElementsIndirect"); glad_glGetProgramInterfaceiv = (PFNGLGETPROGRAMINTERFACEIVPROC)load("glGetProgramInterfaceiv"); glad_glGetProgramResourceIndex = (PFNGLGETPROGRAMRESOURCEINDEXPROC)load("glGetProgramResourceIndex"); glad_glGetProgramResourceName = (PFNGLGETPROGRAMRESOURCENAMEPROC)load("glGetProgramResourceName"); glad_glGetProgramResourceiv = (PFNGLGETPROGRAMRESOURCEIVPROC)load("glGetProgramResourceiv"); glad_glGetProgramResourceLocation = (PFNGLGETPROGRAMRESOURCELOCATIONPROC)load("glGetProgramResourceLocation"); glad_glGetProgramResourceLocationIndex = (PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC)load("glGetProgramResourceLocationIndex"); glad_glShaderStorageBlockBinding = (PFNGLSHADERSTORAGEBLOCKBINDINGPROC)load("glShaderStorageBlockBinding"); glad_glTexBufferRange = (PFNGLTEXBUFFERRANGEPROC)load("glTexBufferRange"); glad_glTexStorage2DMultisample = (PFNGLTEXSTORAGE2DMULTISAMPLEPROC)load("glTexStorage2DMultisample"); glad_glTexStorage3DMultisample = (PFNGLTEXSTORAGE3DMULTISAMPLEPROC)load("glTexStorage3DMultisample"); glad_glTextureView = (PFNGLTEXTUREVIEWPROC)load("glTextureView"); glad_glBindVertexBuffer = (PFNGLBINDVERTEXBUFFERPROC)load("glBindVertexBuffer"); glad_glVertexAttribFormat = (PFNGLVERTEXATTRIBFORMATPROC)load("glVertexAttribFormat"); glad_glVertexAttribIFormat = (PFNGLVERTEXATTRIBIFORMATPROC)load("glVertexAttribIFormat"); glad_glVertexAttribLFormat = (PFNGLVERTEXATTRIBLFORMATPROC)load("glVertexAttribLFormat"); glad_glVertexAttribBinding = (PFNGLVERTEXATTRIBBINDINGPROC)load("glVertexAttribBinding"); glad_glVertexBindingDivisor = (PFNGLVERTEXBINDINGDIVISORPROC)load("glVertexBindingDivisor"); glad_glDebugMessageControl = (PFNGLDEBUGMESSAGECONTROLPROC)load("glDebugMessageControl"); glad_glDebugMessageInsert = (PFNGLDEBUGMESSAGEINSERTPROC)load("glDebugMessageInsert"); glad_glDebugMessageCallback = (PFNGLDEBUGMESSAGECALLBACKPROC)load("glDebugMessageCallback"); glad_glGetDebugMessageLog = (PFNGLGETDEBUGMESSAGELOGPROC)load("glGetDebugMessageLog"); glad_glPushDebugGroup = (PFNGLPUSHDEBUGGROUPPROC)load("glPushDebugGroup"); glad_glPopDebugGroup = (PFNGLPOPDEBUGGROUPPROC)load("glPopDebugGroup"); glad_glObjectLabel = (PFNGLOBJECTLABELPROC)load("glObjectLabel"); glad_glGetObjectLabel = (PFNGLGETOBJECTLABELPROC)load("glGetObjectLabel"); glad_glObjectPtrLabel = (PFNGLOBJECTPTRLABELPROC)load("glObjectPtrLabel"); glad_glGetObjectPtrLabel = (PFNGLGETOBJECTPTRLABELPROC)load("glGetObjectPtrLabel"); glad_glGetPointerv = (PFNGLGETPOINTERVPROC)load("glGetPointerv"); } static void load_GL_VERSION_4_4(GLADloadproc load) { if(!GLAD_GL_VERSION_4_4) return; glad_glBufferStorage = (PFNGLBUFFERSTORAGEPROC)load("glBufferStorage"); glad_glClearTexImage = (PFNGLCLEARTEXIMAGEPROC)load("glClearTexImage"); glad_glClearTexSubImage = (PFNGLCLEARTEXSUBIMAGEPROC)load("glClearTexSubImage"); glad_glBindBuffersBase = (PFNGLBINDBUFFERSBASEPROC)load("glBindBuffersBase"); glad_glBindBuffersRange = (PFNGLBINDBUFFERSRANGEPROC)load("glBindBuffersRange"); glad_glBindTextures = (PFNGLBINDTEXTURESPROC)load("glBindTextures"); glad_glBindSamplers = 
(PFNGLBINDSAMPLERSPROC)load("glBindSamplers"); glad_glBindImageTextures = (PFNGLBINDIMAGETEXTURESPROC)load("glBindImageTextures"); glad_glBindVertexBuffers = (PFNGLBINDVERTEXBUFFERSPROC)load("glBindVertexBuffers"); } static void load_GL_VERSION_4_5(GLADloadproc load) { if(!GLAD_GL_VERSION_4_5) return; glad_glClipControl = (PFNGLCLIPCONTROLPROC)load("glClipControl"); glad_glCreateTransformFeedbacks = (PFNGLCREATETRANSFORMFEEDBACKSPROC)load("glCreateTransformFeedbacks"); glad_glTransformFeedbackBufferBase = (PFNGLTRANSFORMFEEDBACKBUFFERBASEPROC)load("glTransformFeedbackBufferBase"); glad_glTransformFeedbackBufferRange = (PFNGLTRANSFORMFEEDBACKBUFFERRANGEPROC)load("glTransformFeedbackBufferRange"); glad_glGetTransformFeedbackiv = (PFNGLGETTRANSFORMFEEDBACKIVPROC)load("glGetTransformFeedbackiv"); glad_glGetTransformFeedbacki_v = (PFNGLGETTRANSFORMFEEDBACKI_VPROC)load("glGetTransformFeedbacki_v"); glad_glGetTransformFeedbacki64_v = (PFNGLGETTRANSFORMFEEDBACKI64_VPROC)load("glGetTransformFeedbacki64_v"); glad_glCreateBuffers = (PFNGLCREATEBUFFERSPROC)load("glCreateBuffers"); glad_glNamedBufferStorage = (PFNGLNAMEDBUFFERSTORAGEPROC)load("glNamedBufferStorage"); glad_glNamedBufferData = (PFNGLNAMEDBUFFERDATAPROC)load("glNamedBufferData"); glad_glNamedBufferSubData = (PFNGLNAMEDBUFFERSUBDATAPROC)load("glNamedBufferSubData"); glad_glCopyNamedBufferSubData = (PFNGLCOPYNAMEDBUFFERSUBDATAPROC)load("glCopyNamedBufferSubData"); glad_glClearNamedBufferData = (PFNGLCLEARNAMEDBUFFERDATAPROC)load("glClearNamedBufferData"); glad_glClearNamedBufferSubData = (PFNGLCLEARNAMEDBUFFERSUBDATAPROC)load("glClearNamedBufferSubData"); glad_glMapNamedBuffer = (PFNGLMAPNAMEDBUFFERPROC)load("glMapNamedBuffer"); glad_glMapNamedBufferRange = (PFNGLMAPNAMEDBUFFERRANGEPROC)load("glMapNamedBufferRange"); glad_glUnmapNamedBuffer = (PFNGLUNMAPNAMEDBUFFERPROC)load("glUnmapNamedBuffer"); glad_glFlushMappedNamedBufferRange = (PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEPROC)load("glFlushMappedNamedBufferRange"); glad_glGetNamedBufferParameteriv = (PFNGLGETNAMEDBUFFERPARAMETERIVPROC)load("glGetNamedBufferParameteriv"); glad_glGetNamedBufferParameteri64v = (PFNGLGETNAMEDBUFFERPARAMETERI64VPROC)load("glGetNamedBufferParameteri64v"); glad_glGetNamedBufferPointerv = (PFNGLGETNAMEDBUFFERPOINTERVPROC)load("glGetNamedBufferPointerv"); glad_glGetNamedBufferSubData = (PFNGLGETNAMEDBUFFERSUBDATAPROC)load("glGetNamedBufferSubData"); glad_glCreateFramebuffers = (PFNGLCREATEFRAMEBUFFERSPROC)load("glCreateFramebuffers"); glad_glNamedFramebufferRenderbuffer = (PFNGLNAMEDFRAMEBUFFERRENDERBUFFERPROC)load("glNamedFramebufferRenderbuffer"); glad_glNamedFramebufferParameteri = (PFNGLNAMEDFRAMEBUFFERPARAMETERIPROC)load("glNamedFramebufferParameteri"); glad_glNamedFramebufferTexture = (PFNGLNAMEDFRAMEBUFFERTEXTUREPROC)load("glNamedFramebufferTexture"); glad_glNamedFramebufferTextureLayer = (PFNGLNAMEDFRAMEBUFFERTEXTURELAYERPROC)load("glNamedFramebufferTextureLayer"); glad_glNamedFramebufferDrawBuffer = (PFNGLNAMEDFRAMEBUFFERDRAWBUFFERPROC)load("glNamedFramebufferDrawBuffer"); glad_glNamedFramebufferDrawBuffers = (PFNGLNAMEDFRAMEBUFFERDRAWBUFFERSPROC)load("glNamedFramebufferDrawBuffers"); glad_glNamedFramebufferReadBuffer = (PFNGLNAMEDFRAMEBUFFERREADBUFFERPROC)load("glNamedFramebufferReadBuffer"); glad_glInvalidateNamedFramebufferData = (PFNGLINVALIDATENAMEDFRAMEBUFFERDATAPROC)load("glInvalidateNamedFramebufferData"); glad_glInvalidateNamedFramebufferSubData = (PFNGLINVALIDATENAMEDFRAMEBUFFERSUBDATAPROC)load("glInvalidateNamedFramebufferSubData"); 
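/* The glCreate*, glNamed*, glTexture* and glVertexArray* entries resolved in
 * this function are the GL 4.5 direct state access (DSA) forms, which act on
 * object names passed as arguments instead of on the currently bound object. */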
glad_glClearNamedFramebufferiv = (PFNGLCLEARNAMEDFRAMEBUFFERIVPROC)load("glClearNamedFramebufferiv"); glad_glClearNamedFramebufferuiv = (PFNGLCLEARNAMEDFRAMEBUFFERUIVPROC)load("glClearNamedFramebufferuiv"); glad_glClearNamedFramebufferfv = (PFNGLCLEARNAMEDFRAMEBUFFERFVPROC)load("glClearNamedFramebufferfv"); glad_glClearNamedFramebufferfi = (PFNGLCLEARNAMEDFRAMEBUFFERFIPROC)load("glClearNamedFramebufferfi"); glad_glBlitNamedFramebuffer = (PFNGLBLITNAMEDFRAMEBUFFERPROC)load("glBlitNamedFramebuffer"); glad_glCheckNamedFramebufferStatus = (PFNGLCHECKNAMEDFRAMEBUFFERSTATUSPROC)load("glCheckNamedFramebufferStatus"); glad_glGetNamedFramebufferParameteriv = (PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVPROC)load("glGetNamedFramebufferParameteriv"); glad_glGetNamedFramebufferAttachmentParameteriv = (PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVPROC)load("glGetNamedFramebufferAttachmentParameteriv"); glad_glCreateRenderbuffers = (PFNGLCREATERENDERBUFFERSPROC)load("glCreateRenderbuffers"); glad_glNamedRenderbufferStorage = (PFNGLNAMEDRENDERBUFFERSTORAGEPROC)load("glNamedRenderbufferStorage"); glad_glNamedRenderbufferStorageMultisample = (PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEPROC)load("glNamedRenderbufferStorageMultisample"); glad_glGetNamedRenderbufferParameteriv = (PFNGLGETNAMEDRENDERBUFFERPARAMETERIVPROC)load("glGetNamedRenderbufferParameteriv"); glad_glCreateTextures = (PFNGLCREATETEXTURESPROC)load("glCreateTextures"); glad_glTextureBuffer = (PFNGLTEXTUREBUFFERPROC)load("glTextureBuffer"); glad_glTextureBufferRange = (PFNGLTEXTUREBUFFERRANGEPROC)load("glTextureBufferRange"); glad_glTextureStorage1D = (PFNGLTEXTURESTORAGE1DPROC)load("glTextureStorage1D"); glad_glTextureStorage2D = (PFNGLTEXTURESTORAGE2DPROC)load("glTextureStorage2D"); glad_glTextureStorage3D = (PFNGLTEXTURESTORAGE3DPROC)load("glTextureStorage3D"); glad_glTextureStorage2DMultisample = (PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC)load("glTextureStorage2DMultisample"); glad_glTextureStorage3DMultisample = (PFNGLTEXTURESTORAGE3DMULTISAMPLEPROC)load("glTextureStorage3DMultisample"); glad_glTextureSubImage1D = (PFNGLTEXTURESUBIMAGE1DPROC)load("glTextureSubImage1D"); glad_glTextureSubImage2D = (PFNGLTEXTURESUBIMAGE2DPROC)load("glTextureSubImage2D"); glad_glTextureSubImage3D = (PFNGLTEXTURESUBIMAGE3DPROC)load("glTextureSubImage3D"); glad_glCompressedTextureSubImage1D = (PFNGLCOMPRESSEDTEXTURESUBIMAGE1DPROC)load("glCompressedTextureSubImage1D"); glad_glCompressedTextureSubImage2D = (PFNGLCOMPRESSEDTEXTURESUBIMAGE2DPROC)load("glCompressedTextureSubImage2D"); glad_glCompressedTextureSubImage3D = (PFNGLCOMPRESSEDTEXTURESUBIMAGE3DPROC)load("glCompressedTextureSubImage3D"); glad_glCopyTextureSubImage1D = (PFNGLCOPYTEXTURESUBIMAGE1DPROC)load("glCopyTextureSubImage1D"); glad_glCopyTextureSubImage2D = (PFNGLCOPYTEXTURESUBIMAGE2DPROC)load("glCopyTextureSubImage2D"); glad_glCopyTextureSubImage3D = (PFNGLCOPYTEXTURESUBIMAGE3DPROC)load("glCopyTextureSubImage3D"); glad_glTextureParameterf = (PFNGLTEXTUREPARAMETERFPROC)load("glTextureParameterf"); glad_glTextureParameterfv = (PFNGLTEXTUREPARAMETERFVPROC)load("glTextureParameterfv"); glad_glTextureParameteri = (PFNGLTEXTUREPARAMETERIPROC)load("glTextureParameteri"); glad_glTextureParameterIiv = (PFNGLTEXTUREPARAMETERIIVPROC)load("glTextureParameterIiv"); glad_glTextureParameterIuiv = (PFNGLTEXTUREPARAMETERIUIVPROC)load("glTextureParameterIuiv"); glad_glTextureParameteriv = (PFNGLTEXTUREPARAMETERIVPROC)load("glTextureParameteriv"); glad_glGenerateTextureMipmap = 
(PFNGLGENERATETEXTUREMIPMAPPROC)load("glGenerateTextureMipmap"); glad_glBindTextureUnit = (PFNGLBINDTEXTUREUNITPROC)load("glBindTextureUnit"); glad_glGetTextureImage = (PFNGLGETTEXTUREIMAGEPROC)load("glGetTextureImage"); glad_glGetCompressedTextureImage = (PFNGLGETCOMPRESSEDTEXTUREIMAGEPROC)load("glGetCompressedTextureImage"); glad_glGetTextureLevelParameterfv = (PFNGLGETTEXTURELEVELPARAMETERFVPROC)load("glGetTextureLevelParameterfv"); glad_glGetTextureLevelParameteriv = (PFNGLGETTEXTURELEVELPARAMETERIVPROC)load("glGetTextureLevelParameteriv"); glad_glGetTextureParameterfv = (PFNGLGETTEXTUREPARAMETERFVPROC)load("glGetTextureParameterfv"); glad_glGetTextureParameterIiv = (PFNGLGETTEXTUREPARAMETERIIVPROC)load("glGetTextureParameterIiv"); glad_glGetTextureParameterIuiv = (PFNGLGETTEXTUREPARAMETERIUIVPROC)load("glGetTextureParameterIuiv"); glad_glGetTextureParameteriv = (PFNGLGETTEXTUREPARAMETERIVPROC)load("glGetTextureParameteriv"); glad_glCreateVertexArrays = (PFNGLCREATEVERTEXARRAYSPROC)load("glCreateVertexArrays"); glad_glDisableVertexArrayAttrib = (PFNGLDISABLEVERTEXARRAYATTRIBPROC)load("glDisableVertexArrayAttrib"); glad_glEnableVertexArrayAttrib = (PFNGLENABLEVERTEXARRAYATTRIBPROC)load("glEnableVertexArrayAttrib"); glad_glVertexArrayElementBuffer = (PFNGLVERTEXARRAYELEMENTBUFFERPROC)load("glVertexArrayElementBuffer"); glad_glVertexArrayVertexBuffer = (PFNGLVERTEXARRAYVERTEXBUFFERPROC)load("glVertexArrayVertexBuffer"); glad_glVertexArrayVertexBuffers = (PFNGLVERTEXARRAYVERTEXBUFFERSPROC)load("glVertexArrayVertexBuffers"); glad_glVertexArrayAttribBinding = (PFNGLVERTEXARRAYATTRIBBINDINGPROC)load("glVertexArrayAttribBinding"); glad_glVertexArrayAttribFormat = (PFNGLVERTEXARRAYATTRIBFORMATPROC)load("glVertexArrayAttribFormat"); glad_glVertexArrayAttribIFormat = (PFNGLVERTEXARRAYATTRIBIFORMATPROC)load("glVertexArrayAttribIFormat"); glad_glVertexArrayAttribLFormat = (PFNGLVERTEXARRAYATTRIBLFORMATPROC)load("glVertexArrayAttribLFormat"); glad_glVertexArrayBindingDivisor = (PFNGLVERTEXARRAYBINDINGDIVISORPROC)load("glVertexArrayBindingDivisor"); glad_glGetVertexArrayiv = (PFNGLGETVERTEXARRAYIVPROC)load("glGetVertexArrayiv"); glad_glGetVertexArrayIndexediv = (PFNGLGETVERTEXARRAYINDEXEDIVPROC)load("glGetVertexArrayIndexediv"); glad_glGetVertexArrayIndexed64iv = (PFNGLGETVERTEXARRAYINDEXED64IVPROC)load("glGetVertexArrayIndexed64iv"); glad_glCreateSamplers = (PFNGLCREATESAMPLERSPROC)load("glCreateSamplers"); glad_glCreateProgramPipelines = (PFNGLCREATEPROGRAMPIPELINESPROC)load("glCreateProgramPipelines"); glad_glCreateQueries = (PFNGLCREATEQUERIESPROC)load("glCreateQueries"); glad_glGetQueryBufferObjecti64v = (PFNGLGETQUERYBUFFEROBJECTI64VPROC)load("glGetQueryBufferObjecti64v"); glad_glGetQueryBufferObjectiv = (PFNGLGETQUERYBUFFEROBJECTIVPROC)load("glGetQueryBufferObjectiv"); glad_glGetQueryBufferObjectui64v = (PFNGLGETQUERYBUFFEROBJECTUI64VPROC)load("glGetQueryBufferObjectui64v"); glad_glGetQueryBufferObjectuiv = (PFNGLGETQUERYBUFFEROBJECTUIVPROC)load("glGetQueryBufferObjectuiv"); glad_glMemoryBarrierByRegion = (PFNGLMEMORYBARRIERBYREGIONPROC)load("glMemoryBarrierByRegion"); glad_glGetTextureSubImage = (PFNGLGETTEXTURESUBIMAGEPROC)load("glGetTextureSubImage"); glad_glGetCompressedTextureSubImage = (PFNGLGETCOMPRESSEDTEXTURESUBIMAGEPROC)load("glGetCompressedTextureSubImage"); glad_glGetGraphicsResetStatus = (PFNGLGETGRAPHICSRESETSTATUSPROC)load("glGetGraphicsResetStatus"); glad_glGetnCompressedTexImage = (PFNGLGETNCOMPRESSEDTEXIMAGEPROC)load("glGetnCompressedTexImage"); glad_glGetnTexImage = 
(PFNGLGETNTEXIMAGEPROC)load("glGetnTexImage"); glad_glGetnUniformdv = (PFNGLGETNUNIFORMDVPROC)load("glGetnUniformdv"); glad_glGetnUniformfv = (PFNGLGETNUNIFORMFVPROC)load("glGetnUniformfv"); glad_glGetnUniformiv = (PFNGLGETNUNIFORMIVPROC)load("glGetnUniformiv"); glad_glGetnUniformuiv = (PFNGLGETNUNIFORMUIVPROC)load("glGetnUniformuiv"); glad_glReadnPixels = (PFNGLREADNPIXELSPROC)load("glReadnPixels"); glad_glGetnMapdv = (PFNGLGETNMAPDVPROC)load("glGetnMapdv"); glad_glGetnMapfv = (PFNGLGETNMAPFVPROC)load("glGetnMapfv"); glad_glGetnMapiv = (PFNGLGETNMAPIVPROC)load("glGetnMapiv"); glad_glGetnPixelMapfv = (PFNGLGETNPIXELMAPFVPROC)load("glGetnPixelMapfv"); glad_glGetnPixelMapuiv = (PFNGLGETNPIXELMAPUIVPROC)load("glGetnPixelMapuiv"); glad_glGetnPixelMapusv = (PFNGLGETNPIXELMAPUSVPROC)load("glGetnPixelMapusv"); glad_glGetnPolygonStipple = (PFNGLGETNPOLYGONSTIPPLEPROC)load("glGetnPolygonStipple"); glad_glGetnColorTable = (PFNGLGETNCOLORTABLEPROC)load("glGetnColorTable"); glad_glGetnConvolutionFilter = (PFNGLGETNCONVOLUTIONFILTERPROC)load("glGetnConvolutionFilter"); glad_glGetnSeparableFilter = (PFNGLGETNSEPARABLEFILTERPROC)load("glGetnSeparableFilter"); glad_glGetnHistogram = (PFNGLGETNHISTOGRAMPROC)load("glGetnHistogram"); glad_glGetnMinmax = (PFNGLGETNMINMAXPROC)load("glGetnMinmax"); glad_glTextureBarrier = (PFNGLTEXTUREBARRIERPROC)load("glTextureBarrier"); } static void load_GL_VERSION_4_6(GLADloadproc load) { if(!GLAD_GL_VERSION_4_6) return; glad_glSpecializeShader = (PFNGLSPECIALIZESHADERPROC)load("glSpecializeShader"); glad_glMultiDrawArraysIndirectCount = (PFNGLMULTIDRAWARRAYSINDIRECTCOUNTPROC)load("glMultiDrawArraysIndirectCount"); glad_glMultiDrawElementsIndirectCount = (PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTPROC)load("glMultiDrawElementsIndirectCount"); glad_glPolygonOffsetClamp = (PFNGLPOLYGONOFFSETCLAMPPROC)load("glPolygonOffsetClamp"); } static int find_extensionsGL(void) { if (!get_exts()) return 0; (void)&has_ext; free_exts(); return 1; } static void find_coreGL(void) { /* Thank you @elmindreda * https://github.com/elmindreda/greg/blob/master/templates/greg.c.in#L176 * https://github.com/glfw/glfw/blob/master/src/context.c#L36 */ int i, major, minor; const char* version; const char* prefixes[] = { "OpenGL ES-CM ", "OpenGL ES-CL ", "OpenGL ES ", NULL }; version = (const char*) glGetString(GL_VERSION); if (!version) return; for (i = 0; prefixes[i]; i++) { const size_t length = strlen(prefixes[i]); if (strncmp(version, prefixes[i], length) == 0) { version += length; break; } } /* PR #18 */ #ifdef _MSC_VER sscanf_s(version, "%d.%d", &major, &minor); #else sscanf(version, "%d.%d", &major, &minor); #endif GLVersion.major = major; GLVersion.minor = minor; max_loaded_major = major; max_loaded_minor = minor; GLAD_GL_VERSION_1_0 = (major == 1 && minor >= 0) || major > 1; GLAD_GL_VERSION_1_1 = (major == 1 && minor >= 1) || major > 1; GLAD_GL_VERSION_1_2 = (major == 1 && minor >= 2) || major > 1; GLAD_GL_VERSION_1_3 = (major == 1 && minor >= 3) || major > 1; GLAD_GL_VERSION_1_4 = (major == 1 && minor >= 4) || major > 1; GLAD_GL_VERSION_1_5 = (major == 1 && minor >= 5) || major > 1; GLAD_GL_VERSION_2_0 = (major == 2 && minor >= 0) || major > 2; GLAD_GL_VERSION_2_1 = (major == 2 && minor >= 1) || major > 2; GLAD_GL_VERSION_3_0 = (major == 3 && minor >= 0) || major > 3; GLAD_GL_VERSION_3_1 = (major == 3 && minor >= 1) || major > 3; GLAD_GL_VERSION_3_2 = (major == 3 && minor >= 2) || major > 3; GLAD_GL_VERSION_3_3 = (major == 3 && minor >= 3) || major > 3; GLAD_GL_VERSION_4_0 = (major == 4 && minor >= 
0) || major > 4; GLAD_GL_VERSION_4_1 = (major == 4 && minor >= 1) || major > 4; GLAD_GL_VERSION_4_2 = (major == 4 && minor >= 2) || major > 4; GLAD_GL_VERSION_4_3 = (major == 4 && minor >= 3) || major > 4; GLAD_GL_VERSION_4_4 = (major == 4 && minor >= 4) || major > 4; GLAD_GL_VERSION_4_5 = (major == 4 && minor >= 5) || major > 4; GLAD_GL_VERSION_4_6 = (major == 4 && minor >= 6) || major > 4; if (GLVersion.major > 4 || (GLVersion.major >= 4 && GLVersion.minor >= 6)) { max_loaded_major = 4; max_loaded_minor = 6; } } int gladLoadGLLoader(GLADloadproc load) { GLVersion.major = 0; GLVersion.minor = 0; glGetString = (PFNGLGETSTRINGPROC)load("glGetString"); if(glGetString == NULL) return 0; if(glGetString(GL_VERSION) == NULL) return 0; find_coreGL(); load_GL_VERSION_1_0(load); load_GL_VERSION_1_1(load); load_GL_VERSION_1_2(load); load_GL_VERSION_1_3(load); load_GL_VERSION_1_4(load); load_GL_VERSION_1_5(load); load_GL_VERSION_2_0(load); load_GL_VERSION_2_1(load); load_GL_VERSION_3_0(load); load_GL_VERSION_3_1(load); load_GL_VERSION_3_2(load); load_GL_VERSION_3_3(load); load_GL_VERSION_4_0(load); load_GL_VERSION_4_1(load); load_GL_VERSION_4_2(load); load_GL_VERSION_4_3(load); load_GL_VERSION_4_4(load); load_GL_VERSION_4_5(load); load_GL_VERSION_4_6(load); if (!find_extensionsGL()) return 0; return GLVersion.major != 0 || GLVersion.minor != 0; }
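/*
 * Usage sketch (editorial addition, not part of the generated loader): the code
 * above only resolves entry points once a GL context is current and it is handed
 * a "get proc address" callback.  The GLFW calls below are an assumption used
 * purely for illustration; any function that returns GL entry points by name can
 * be cast to GLADloadproc.
 */
#include <GLFW/glfw3.h>
#include <stdio.h>

int init_gl(GLFWwindow *window)
{
    glfwMakeContextCurrent(window);   /* the loader needs a current context */
    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
        fprintf(stderr, "glad: failed to resolve GL entry points\n");
        return 0;
    }
    /* find_coreGL() filled these in from the GL_VERSION string */
    printf("Loaded OpenGL %d.%d\n", GLVersion.major, GLVersion.minor);
    return 1;
}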
682152.c
#include <std.h>

inherit POTION;

void create()
{
    ::create();
    set_short("A vial containing a white potion");
    set_long("The white potion swirls in the crystal vial.\n");
    set("color", "white");
    set("effect_time", 0);
}

int do_effect()
{
    int healing;

    if(drinker->query_hp() < drinker->query_max_hp()) {
        healing = random(4) + random(4) + 2;
        tell_object(drinker, "Suddenly, you begin to feel better.\n");
        tell_object(drinker, "You have gained back "+healing+" hit points.\n");
        drinker->add_hp(healing);
    } else {
        tell_object(drinker, "You feel no different than before.\n");
    }
    return 1;
}

int do_wear_off()
{
    return 1;
}
4019.c
// SFM 04/06/2013 Code module added in merge from M.Grecu's code // SFM 05/06/2013 Modifications from LW to facilitate using job names // SFM 06/27/2013 Parameter name changes from W.Olson; reduce unused code // SFM 07/19/2013 Large volume of code added for M.Grecu // #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hdf.h> #include <mfhdf.h> #include "TKheaders.h" #include "TK_2BCMB_hdf5.h" #ifdef GFOR extern int __nbinmod_MOD_imemb; #define nbins __nbinmod_MOD_imemb //begin WSO 9/15/13 extern float __missingmod_MOD_missing_r4; #define missing_r4c __missingmod_MOD_missing_r4 extern short __missingmod_MOD_missing_i2; #define missing_i2c __missingmod_MOD_missing_i2 extern long __missingmod_MOD_missing_i4; #define missing_i4c __missingmod_MOD_missing_i4 extern int __nbinmod_MOD_ntransition; #define ntransitions __nbinmod_MOD_ntransition //end WSO 9/15/13 #endif #ifdef IFORT extern int nbinmod_mp_nbin_; //begin WSO 8/8/13 extern int nbinmod_mp_ntransition_; //end WSO 8/8/13 #define nbins nbinmod_mp_nbin_ //begin WSO 8/8/13 #define ntransitions nbinmod_mp_ntransition_ //end WSO 8/8/13 //begin WSO 9/15/13 extern float missingmod_mp_missing_r4_; #define missing_r4c missingmod_mp_missing_r4_ extern short missingmod_mp_missing_i2_; #define missing_i2c missingmod_mp_missing_i2_ extern long missingmod_mp_missing_i4_; #define missing_i4c missingmod_mp_missing_i4_ //end WSO 9/15/13 #endif //begin WSO 04/07/2013 //Note that there were many structure/variable name changes in this //version to be compatible with TKIO 3.50.8 //All S1 and S2 were changed to NS and MS, respectively //The variable ending "Out" was removed because a separate Input structure //was created //end WSO 04/07/2013 extern TKINFO dprtkfile; TKINFO ctkfile; TKINFO ctkfileIn; //L2BCMB_SWATHS swath; //L2BCMB_SWATHS swath300[300]; //L2BCMBX_SWATHS swathx300[300]; L2ADPR_SWATHS dprswath; L2ADPRX_SWATHS dprxswath; L2BCMB_SWATHS swath1; L2AKu_NS L2AKuData; L2AKuX_FS L2AKuDataX; void setlatlons1_300_(int *isc,float *lat, float *lon, float *sfcPrecip, float *sfcPrecipStd, float *piaOut) { int i; extern L2BCMB_SWATHS swath300[300]; for(i=0;i<49;i++) { swath300[*isc].NS.Latitude[i]=lat[i]; swath300[*isc].NS.Longitude[i]=lon[i]; if(swath300[*isc].NS.Longitude[i]>180) swath300[*isc].NS.Longitude[i]-=360; swath300[*isc].NS.surfPrecipTotRate[i]=sfcPrecip[i]; swath300[*isc].NS.surfPrecipTotRateSigma[i]=sfcPrecipStd[i]; swath300[*isc].NS.pia[i]=piaOut[i]; } } void setlatlons2_300_(int *isc,float *lat, float *lon, float *sfcPrecip, float *sfcPrecipStd, float *piaOutKu, float *piaOutKa) { int i; extern L2BCMB_SWATHS swath300[300]; for(i=12;i<37;i++) { swath300[*isc].MS.Latitude[i-12]=lat[i]; swath300[*isc].MS.Longitude[i-12]=lon[i]; if(swath300[*isc].MS.Longitude[i-12]>180) swath300[*isc].MS.Longitude[i-12]-=360; swath300[*isc].MS.surfPrecipTotRate[i-12]=sfcPrecip[i]; swath300[*isc].MS.surfPrecipTotRateSigma[i-12]=sfcPrecipStd[i]; swath300[*isc].MS.pia[i-12][0]=piaOutKu[i]; swath300[*isc].MS.pia[i-12][1]=piaOutKa[i]; } } void copyrrates1_300_(int *isc,float *rrate, float *rratestd, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].NS.precipTotRate[*i][k]=rrate[k]; swath300[*isc].NS.precipTotRateSigma[*i][k]=rratestd[k]; } } void copysflfract_300_(int *isc,float *lfract, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfLiqRateFrac[*i]=*lfract; if(*i>=12 && *i<=37) swath300[*isc].MS.surfLiqRateFrac[*i-12]=*lfract; } void copyzka_300_(int *isc,float *zka, int *i) { int 
k; extern L2ADPR_SWATHS dprswath; for(k=0;k<nbins;k++) { dprswath.MS.PRE.zFactorMeasured[*i][2*k]=(int)(zka[k]*100); dprswath.MS.PRE.zFactorMeasured[*i][2*k+1]=(int)(zka[k]*100); // printf("%g ",zka[k]); } } void copypiaka_300_(int *isc,float *piaKa, int *i) { int k; extern L2ADPR_SWATHS dprswath; for(k=0;k<nbins;k++) { dprswath.MS.SRT.pathAtten[*i]=*piaKa; } } void copytruerrate_300_(int *isc,float *rrate, int *i) { int k; extern L2ADPR_SWATHS dprswath; for(k=0;k<nbins;k++) { dprswath.NS.SLV.precipRate[*i][2*k]=(int)(rrate[k]*100); dprswath.NS.SLV.precipRate[*i][2*k+1]=(int)(rrate[k]*100); } } //begin WSO 8/30/13 void copyenvsfqvs1_300_(int *isc,float *envQv, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfaceVaporDensity[*i]=envQv[nbins-1]; } void copyenvsfqvs2_300_(int *isc,float *envQv, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.surfaceVaporDensity[*i]=envQv[nbins-1]; } void copyenvqvs1_300_(int *isc,float *envQv, short *envnodes, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<10;k++) swath300[*isc].NS.vaporDensity[*i][k]=envQv[envnodes[k]-1]; } void copyenvqvs2_300_(int *isc,float *envQv, short *envnodes, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<10;k++) swath300[*isc].MS.vaporDensity[*i][k]=envQv[envnodes[k]-1]; } void copyenvpresss1_300_(int *isc,float *envQv, short *envnodes, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<10;k++) swath300[*isc].NS.airPressure[*i][k]=envQv[envnodes[k]-1]; } void copyenvpresss2_300_(int *isc,float *envQv, short *envnodes, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<10;k++) swath300[*isc].MS.airPressure[*i][k]=envQv[envnodes[k]-1]; } void copyenvtemps1_300_(int *isc,float *envQv, short *envnodes, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<10;k++) { swath300[*isc].NS.envParamNode[*i][k]=envnodes[k]-1; swath300[*isc].NS.airTemperature[*i][k]=envQv[envnodes[k]-1]; } } void copyenvtemps2_300_(int *isc,float *envQv, short *envnodes, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<10;k++) { swath300[*isc].MS.envParamNode[*i][k]=envnodes[k]-1; swath300[*isc].MS.airTemperature[*i][k]=envQv[envnodes[k]-1]; } } void copyenvsftemps1_300_(int *isc,float *envQv, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfaceAirTemperature[*i]=envQv[nbins-1]; } void copyenvsftemps2_300_(int *isc,float *envQv, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.surfaceAirTemperature[*i]=envQv[nbins-1]; } //end WSO 8/30/13 void copypwcs1_300_(int *isc,float *rrate, float *rratestd, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].NS.precipTotWaterCont[*i][k]=rrate[k]; swath300[*isc].NS.precipTotWaterContSigma[*i][k]=rratestd[k]; } } //begin WSO 8/7/13 void copylwcfracs1_300_(int *isc,float *mlwc_frac, float *mrate_frac, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<ntransitions;k++) { swath300[*isc].NS.liqMassFracTrans[*i][k]=mlwc_frac[k]; swath300[*isc].NS.liqRateFracTrans[*i][k]=mrate_frac[k]; } } void copysfcrainliqfracs1_300_(int *isc,float *sfcrainliq_frac, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfLiqRateFrac[*i]=*sfcrainliq_frac; } //end WSO 8/7/13 void copyd0s1_300_(int *isc,float *dm, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].NS.precipTotPSDparamHigh[*i][k]=dm[k]; } } void copyzckus1_300_(int *isc,float *zc, int *i) { int k; 
extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { if(zc[k] > -90.) swath300[*isc].NS.correctedReflectFactor[*i][k] = zc[k]; else //begin WSO 9/17/13 standardized missing flags swath300[*isc].NS.correctedReflectFactor[*i][k] = missing_r4c; //end WSO 9/17/13 } } void copynodess1_300_(int *isc,int *node, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<5;k++) { swath300[*isc].NS.phaseBinNodes[*i][k]=node[k]; } } void copyrrates2_300_(int *isc,float *rrate, float *rratestd, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].MS.precipTotRate[*i][k]=rrate[k]; swath300[*isc].MS.precipTotRateSigma[*i][k]=rratestd[k]; } } void copypwcs2_300_(int *isc,float *rrate, float *rratestd, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { //begin WSO 4/18/2013 //changed NS to MS swath300[*isc].MS.precipTotWaterCont[*i][k]=rrate[k]; swath300[*isc].MS.precipTotWaterContSigma[*i][k]=rratestd[k]; //end WSO 4/18/2013 } } //begin WSO 8/7/13 void copylwcfracs2_300_(int *isc,float *mlwc_frac, float *mrate_frac, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<ntransitions;k++) { swath300[*isc].MS.liqMassFracTrans[*i][k]=mlwc_frac[k]; swath300[*isc].MS.liqRateFracTrans[*i][k]=mrate_frac[k]; } } void copysfcrainliqfracs2_300_(int *isc,float *sfcrainliq_frac, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.surfLiqRateFrac[*i]=*sfcrainliq_frac; } //end WSO 8/7/13 void copyd0s2_300_(int *isc,float *dm, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { // SFM 05/06/2013 Changed NS to MS to match M.Grecu code from 04/19/2013 swath300[*isc].MS.precipTotPSDparamHigh[*i][k]=dm[k]; } } void copyzckus2_300_(int *isc,float *zku, float *zka, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { if(zku[k] > -90.) swath300[*isc].MS.correctedReflectFactor[*i][k][0] = zku[k]; else //begin WSO 9/17/13 standardized missing flags swath300[*isc].MS.correctedReflectFactor[*i][k][0] = missing_r4c; //end WSO 9/17/13 if(zka[k] > -90.) 
swath300[*isc].MS.correctedReflectFactor[*i][k][1] = zka[k]; else //begin WSO 9/17/13 standardized missing flags swath300[*isc].MS.correctedReflectFactor[*i][k][1] = missing_r4c; //end WSO 9/17/13 } } void copynodess2_300_(int *isc,int *node, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<5;k++) { swath300[*isc].MS.phaseBinNodes[*i][k]=node[k]; } } void rewind_300_(int *isc,int *ic) { extern TKINFO granuleHandle2AKu; int status = TKseek(&granuleHandle2AKu, *ic, TK_ABS_SCAN_OFF); } void rewindc_300_(int *isc,int *ic) { extern TKINFO granuleHandle2AKu; printf("rewind cmb\n"); int status = TKseek(&ctkfile, *ic, TK_ABS_SCAN_OFF); } //begin WSO 9/8/13 rewind DPR file void rewind_dpr_300_(int *isc,int *ic) { extern TKINFO dprtkfile; int status_dpr = TKseek(&dprtkfile, *ic, TK_ABS_SCAN_OFF); } //end WSO 9/8/13 // SFM begin 12/13/2013; add flag to call sequence void frominput_300_(int *isc,long *st_2adpr) { // SFM begin 12/13/2013 extern TKINFO granuleHandle2AKu; extern L2AKu_NS L2AKuData; extern L2BCMB_SWATHS swath300[300]; //begin WSO 9/1/13 extern L2ADPR_SWATHS dprswath; //end WSO 9/1/13 int j; int status, status_dpr ; //for diagnostic float dummyPIA[49]; int k, printPIA[49]; //end for diagnostic // // SFM begin 12/13/2013; add conditional to dpr read status=TKreadScan(&granuleHandle2AKu,&L2AKuData); if (*st_2adpr == 0) status_dpr=TKreadScan(&dprtkfile,&dprswath); // SFM begin 12/13/2013 for( j=0; j<49; j++) { //swath300[*isc].NS.Input.piaEffective[j]=L2AKuData.SRT.pathAtten[j]; swath300[*isc].NS.Input.piaEffective[j]=L2AKuData.SRT.PIAhybrid[j]; //MG 7/31/18, use hybrid PIA //begin WSO 9/5/13 remove flag assignment // swath300[*isc].NS.Input.piaEffectiveSigma[j]=-99; //end WSO 9/5/13 // swath300[*isc].NS.Input.piaEffectiveReliabFlag[j]= // L2AKuData.SRT.reliabFlag[j]; swath300[*isc].NS.Input.piaEffectiveReliabFlag[j]= L2AKuData.SRT.reliabFlagHY[j]; //WSO 8/2/18 use hybrid flag swath300[*isc].NS.Input.precipitationType[j]= L2AKuData.CSF.typePrecip[j]; swath300[*isc].NS.Input.precipTypeQualityFlag[j]= L2AKuData.CSF.qualityTypePrecip[j]; swath300[*isc].NS.Input.surfaceElevation[j]=L2AKuData.PRE.elevation[j]; swath300[*isc].NS.Input.localZenithAngle[j]=L2AKuData.PRE.localZenithAngle[j]; swath300[*isc].NS.Input.surfaceType[j]=L2AKuData.PRE.landSurfaceType[j]; //begin WSO 9/28/13 use alternate rain flag that includes missing for bad scans // swath300[*isc].NS.Input.precipitationFlag[j]=L2AKuData.PRE.flagPrecip[j]; //end WSO 9/28/13 swath300[*isc].NS.Input.surfaceRangeBin[j]=(L2AKuData.PRE.binRealSurface[j]-1)/2; // MG 04/11/2014 swath300[*isc].NS.Input.stormTopBin[j]=(L2AKuData.PRE.binStormTop[j]-1)/2; // MG 04/11/2014 if(swath300[*isc].NS.Input.stormTopBin[j]<0) swath300[*isc].NS.Input.stormTopBin[j]=missing_i2c; swath300[*isc].NS.Input.stormTopAltitude[j]=L2AKuData.PRE.heightStormTop[j]; //begin WSO 09/30/15 add one bin to the binClutterFreeBottom to temporarily compensate for //the subtraction of one bin by the radar team // swath300[*isc].NS.Input.lowestClutterFreeBin[j]= // (L2AKuData.PRE.binClutterFreeBottom[j]-1)/2; // MG 04/11/2014 //begin WSO 10/19/15 subtract one 125 m bin from binClutterFreeBottom, and //restore V3 definition of lowestClutterFreeBin as a test // swath300[*isc].NS.Input.lowestClutterFreeBin[j]= // (L2AKuData.PRE.binClutterFreeBottom[j])/2; swath300[*isc].NS.Input.lowestClutterFreeBin[j]= (L2AKuData.PRE.binClutterFreeBottom[j] - 2)/2; //end WSO 10/15/15 //end WSO 09/30/15 //begin WSO 9/17/13 correction for two bin average location in combined 
swath300[*isc].NS.Input.ellipsoidBinOffset[j]= L2AKuData.PRE.ellipsoidBinOffset[j] + 0.125/2.; //end WSO 9/17/13 //begin WSO 8/19/13 swath300[*isc].NS.Input.zeroDegAltitude[j] = L2AKuData.VER.heightZeroDeg[j]; swath300[*isc].NS.Input.zeroDegBin[j] = (L2AKuData.VER.binZeroDeg[j]-1)/2; // MG 04/11/2014 //end WSO 8/19/13 if(j>=12 && j<37) { swath300[*isc].MS.Input.surfaceElevation[j-12]= L2AKuData.PRE.elevation[j]; swath300[*isc].MS.Input.localZenithAngle[j-12]= L2AKuData.PRE.localZenithAngle[j]; swath300[*isc].MS.Input.surfaceType[j-12]= L2AKuData.PRE.landSurfaceType[j]; //begin WSO 9/28/13 use alternate rain flag that includes missing for bad scans // swath300[*isc].MS.Input.precipitationFlag[j-12][0]= // L2AKuData.PRE.flagPrecip[j]; // swath300[*isc].MS.Input.precipitationFlag[j-12][1]= // L2AKuData.PRE.flagPrecip[j]; //end WSO 9/28/13 swath300[*isc].MS.Input.surfaceRangeBin[j-12][0]= (L2AKuData.PRE.binRealSurface[j]-1)/2; // MG 04/11/2014 swath300[*isc].MS.Input.surfaceRangeBin[j-12][1]= (L2AKuData.PRE.binRealSurface[j]-1)/2; // MG 04/11/2014 swath300[*isc].MS.Input.stormTopBin[j-12][0]= (L2AKuData.PRE.binStormTop[j]-1)/2; // MG 04/11/2014 swath300[*isc].MS.Input.stormTopBin[j-12][1]= // MG 04/11/2014 (L2AKuData.PRE.binStormTop[j]-1)/2; // MG 04/11/2014 if(swath300[*isc].MS.Input.stormTopBin[j-12][0]<0) swath300[*isc].MS.Input.stormTopBin[j-12][0]=missing_i2c; if(swath300[*isc].MS.Input.stormTopBin[j-12][1]<0) swath300[*isc].MS.Input.stormTopBin[j-12][1]=missing_i2c; swath300[*isc].MS.Input.stormTopAltitude[j-12][0]= L2AKuData.PRE.heightStormTop[j]; swath300[*isc].MS.Input.stormTopAltitude[j-12][1]= L2AKuData.PRE.heightStormTop[j]; //begin WSO 09/30/15 add one bin to the binClutterFreeBottom to temporarily compensate for //the subtraction of one bin by the radar team // swath300[*isc].MS.Input.lowestClutterFreeBin[j-12][0]= // (L2AKuData.PRE.binClutterFreeBottom[j]-1)/2; // MG 04/11/2014 // swath300[*isc].MS.Input.lowestClutterFreeBin[j-12][1]= // (L2AKuData.PRE.binClutterFreeBottom[j]-1)/2; // MG 04/11/2014 //begin WSO 10/19/15 subtract one 125 m bin from binClutterFreeBottom, and // restore V3 definition of lowestClutterFreeBin in test // swath300[*isc].MS.Input.lowestClutterFreeBin[j-12][0]= // (L2AKuData.PRE.binClutterFreeBottom[j])/2; // swath300[*isc].MS.Input.lowestClutterFreeBin[j-12][1]= // (L2AKuData.PRE.binClutterFreeBottom[j])/2; swath300[*isc].MS.Input.lowestClutterFreeBin[j-12][0]= (L2AKuData.PRE.binClutterFreeBottom[j] - 2)/2; swath300[*isc].MS.Input.lowestClutterFreeBin[j-12][1]= (L2AKuData.PRE.binClutterFreeBottom[j] - 2)/2; //end WSO 10/19/15 //end WSO 09/30/15 //begin WSO 9/17/13 correction for two bin average location in combined swath300[*isc].MS.Input.ellipsoidBinOffset[j-12][0]= L2AKuData.PRE.ellipsoidBinOffset[j] + 0.125/2.; swath300[*isc].MS.Input.ellipsoidBinOffset[j-12][1]= L2AKuData.PRE.ellipsoidBinOffset[j] + 0.125/2.; //end WSO 9/17/13 //begin WSO 9/5/13 reset pia's using DPR output swath300[*isc].MS.Input.piaEffective[j-12][0]= dprswath.NS.SRT.pathAtten[j]; swath300[*isc].MS.Input.piaEffective[j-12][1]= dprswath.MS.SRT.pathAtten[j-12]; //begin WSO 9/5/13 remove flag assignment // swath300[*isc].MS.Input.piaEffectiveSigma[j-12][0]=-99; //end WSO 9/5/13 swath300[*isc].MS.Input.piaEffectiveReliabFlag[j-12][0]= dprswath.NS.SRT.reliabFlag[j]; swath300[*isc].MS.Input.piaEffectiveReliabFlag[j-12][1]= dprswath.MS.SRT.reliabFlag[j-12]; //end WSO 9/5/13 swath300[*isc].MS.Input.precipitationType[j-12]= L2AKuData.CSF.typePrecip[j]; 
swath300[*isc].MS.Input.precipTypeQualityFlag[j-12]= L2AKuData.CSF.qualityTypePrecip[j]; //begin WSO 8/19/13 need to update toolkit swath300[*isc].MS.Input.zeroDegAltitude[j-12] = L2AKuData.VER.heightZeroDeg[j]; swath300[*isc].MS.Input.zeroDegBin[j-12][0] = (L2AKuData.VER.binZeroDeg[j]-1)/2; // MG 04/11/2014 //end WSO 8/19/13 } //diagnostic assignment dummyPIA[j] = dprswath.NS.SRT.pathAtten[j]; //end diagnostic } //diagnostic // if(L2AKuData.Latitude[24] > 30. && L2AKuData.Latitude[24] < 40. && L2AKuData.Longitude[24] > -165. && L2AKuData.Longitude[24] <-155.) // { // for(k=0;k<49;k++) // if(dummyPIA[k] < -99.) // { // printPIA[k] = 99; // } // else // printPIA[k] = dummyPIA[k]*10.; // printf("lon: %10.2f, ", L2AKuData.Longitude[24]); // for(k=0;k<49;k++) // printf("%2i", printPIA[k]); // printf("\n"); // } //end diagnostic //begin WSO 9/1/13 scanStatus variables copied from 2AKu swath300[*isc].NS.scanStatus.FractionalGranuleNumber = L2AKuData.scanStatus.FractionalGranuleNumber; swath300[*isc].NS.scanStatus.SCorientation = L2AKuData.scanStatus.SCorientation; swath300[*isc].NS.scanStatus.acsModeMidScan = L2AKuData.scanStatus.acsModeMidScan; swath300[*isc].NS.scanStatus.dataQuality = L2AKuData.scanStatus.dataQuality; swath300[*isc].NS.scanStatus.dataWarning = L2AKuData.scanStatus.dataWarning; swath300[*isc].NS.scanStatus.geoError = L2AKuData.scanStatus.geoError; swath300[*isc].NS.scanStatus.geoWarning = L2AKuData.scanStatus.geoWarning; swath300[*isc].NS.scanStatus.limitErrorFlag = L2AKuData.scanStatus.limitErrorFlag; swath300[*isc].NS.scanStatus.missing = L2AKuData.scanStatus.missing; swath300[*isc].NS.scanStatus.modeStatus = L2AKuData.scanStatus.modeStatus; swath300[*isc].NS.scanStatus.operationalMode = L2AKuData.scanStatus.operationalMode; swath300[*isc].NS.scanStatus.pointingStatus = L2AKuData.scanStatus.pointingStatus; swath300[*isc].NS.scanStatus.targetSelectionMidScan = L2AKuData.scanStatus.targetSelectionMidScan; //from 2ADPR swath300[*isc].MS.scanStatus.FractionalGranuleNumber = dprswath.MS.scanStatus.FractionalGranuleNumber; swath300[*isc].MS.scanStatus.SCorientation = dprswath.MS.scanStatus.SCorientation; swath300[*isc].MS.scanStatus.acsModeMidScan = dprswath.MS.scanStatus.acsModeMidScan; swath300[*isc].MS.scanStatus.dataQuality = dprswath.MS.scanStatus.dataQuality; swath300[*isc].MS.scanStatus.dataWarning = dprswath.MS.scanStatus.dataWarning; swath300[*isc].MS.scanStatus.geoError = dprswath.MS.scanStatus.geoError; swath300[*isc].MS.scanStatus.geoWarning = dprswath.MS.scanStatus.geoWarning; swath300[*isc].MS.scanStatus.limitErrorFlag = dprswath.MS.scanStatus.limitErrorFlag; swath300[*isc].MS.scanStatus.missing = dprswath.MS.scanStatus.missing; swath300[*isc].MS.scanStatus.modeStatus = dprswath.MS.scanStatus.modeStatus; swath300[*isc].MS.scanStatus.operationalMode = dprswath.MS.scanStatus.operationalMode; swath300[*isc].MS.scanStatus.pointingStatus = dprswath.MS.scanStatus.pointingStatus; swath300[*isc].MS.scanStatus.targetSelectionMidScan = dprswath.MS.scanStatus.targetSelectionMidScan; //end WSO 9/1/13 } void copyscantime_300_(int *isc,int *i) { extern L2BCMB_SWATHS swath300[300]; extern int DayOfMonth[300], DayOfYear[300], Hour[300], MilliSecond[300], Minute[300], Month[300], Second[300], Year[300], SecondOfDay[300]; extern NAVIGATION navigation[300]; swath300[*isc].NS.ScanTime.DayOfMonth=DayOfMonth[*i]; swath300[*isc].NS.ScanTime.DayOfYear=DayOfYear[*i]; swath300[*isc].NS.ScanTime.Hour=Hour[*i]; swath300[*isc].NS.ScanTime.MilliSecond=MilliSecond[*i]; 
swath300[*isc].NS.ScanTime.Minute=Minute[*i]; swath300[*isc].NS.ScanTime.Month=Month[*i]; swath300[*isc].NS.ScanTime.Second=Second[*i]; swath300[*isc].NS.ScanTime.Year=Year[*i]; swath300[*isc].NS.ScanTime.SecondOfDay=SecondOfDay[*i]; memcpy(&swath300[*isc].NS.navigation, &navigation[*i], sizeof(NAVIGATION)); //begin WSO 04/07/2013 //added MS swath300[300] scantimes swath300[*isc].MS.ScanTime.DayOfMonth=DayOfMonth[*i]; swath300[*isc].MS.ScanTime.DayOfYear=DayOfYear[*i]; swath300[*isc].MS.ScanTime.Hour=Hour[*i]; swath300[*isc].MS.ScanTime.MilliSecond=MilliSecond[*i]; swath300[*isc].MS.ScanTime.Minute=Minute[*i]; swath300[*isc].MS.ScanTime.Month=Month[*i]; swath300[*isc].MS.ScanTime.Second=Second[*i]; swath300[*isc].MS.ScanTime.Year=Year[*i]; swath300[*isc].MS.ScanTime.SecondOfDay=SecondOfDay[*i]; memcpy(&swath300[*isc].MS.navigation, &navigation[*i], sizeof(NAVIGATION)); //end WSO 04/07/2013 } void copypreciptype_300_(int *isc,int *ptype, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //swath300[*isc].S1.precipitationType[*i]=*ptype; } void copyw10_300_(int *isc,float *w10, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.tenMeterWindSpeed[*i]=*w10; } void copyw10sigma_300_(int *isc,float *w10s, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.tenMeterWindSigma[*i]=*w10s; } void copyw10small_300_(int *isc,float *w10, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.tenMeterWindSpeed[*i]=*w10; } void copyw10smallsigma_300_(int *isc,float *w10s, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.tenMeterWindSigma[*i]=*w10s; } void writedprscan_300_(int *isc) { int ret; ret= TKwriteScan(&dprtkfile,&dprswath); } // begin SFM 12/26/2013 void write_empty_300_(int *isc) // brief utility to put empty keyword into output file header // when needed { char emptygranuletext[100]; strcpy(emptygranuletext,"EMPTY") ; TKsetMetaString(&ctkfile, "FileHeader", "EmptyGranule", emptygranuletext); } // end SFM 12/26/2013 // begin SFM 11/27/2013 void writescan_300_(int *isc) { int ret; char emptygranuletext[100]; extern L2BCMB_SWATHS swath300[300]; // TKgetMetaString(&ctkfile, "FileHeader", "EmptyGranule", // emptygranuletext); // if (strncmp(emptygranuletext,"NOT_EMPTY",9) == 0) ret= TKwriteScan(&ctkfile,&swath300); } // end SFM 11/27/2013 void copysfcairtemps1_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfaceAirTemperature[*i]=*sfcVar; } void copysfcairtemps2_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.surfaceAirTemperature[*i]=*sfcVar; } void copysfcairpresss1_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfaceAirPressure[*i]=*sfcVar; } void copysfcairpresss2_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.surfaceAirPressure[*i]=*sfcVar; } void copyskintemps1_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.skinTemperature[*i]=*sfcVar; } void copyskintemps2_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.skinTemperature[*i]=*sfcVar; } //write skin temperature estimate uncertainty void copyskintempsigmas1_300_(int *isc,float *skinsigma, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.skinTempSigma[*i] = *skinsigma; } void copyskintempsigmas2_300_(int *isc,float *skinsigma, int *i) 
{ int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.skinTempSigma[*i] = *skinsigma; } //write column vapor estimate uncertainty void copycolumnvaporsigmas1_300_(int *isc,float *colvaporsigma, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.columnVaporSigma[*i] = *colvaporsigma; } void copycolumnvaporsigmas2_300_(int *isc,float *colvaporsigma, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.columnVaporSigma[*i] = *colvaporsigma; } //write column cloud liquid estimate uncerainty void copycolumncloudliqsigmas1_300_(int *isc,float *colcldsigma, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.columnCloudLiqSigma[*i] = *colcldsigma; } void copycolumncloudliqsigmas2_300_(int *isc,float *colcldsigma, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.columnCloudLiqSigma[*i] = *colcldsigma; } //write algorithm type flag void copyalgotypes1_300_(int *isc,int *algotype, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.FLG.algoType[*i] = *algotype; } void copyalgotypes2_300_(int *isc,int *algotype, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.FLG.algoType[*i] = *algotype; } //write error of non-raining data fit void copyerrorofdatafits1_300_(int *isc,float *erroroffit, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.errorOfDataFit[*i] = *erroroffit; } void copyerrorofdatafits2_300_(int *isc,float *erroroffit, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.errorOfDataFit[*i] = *erroroffit; } void copysfcemissouts1_300_(int *isc,float *tbout, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //begin WSO 10/13/15 change loops to output HF // emissivities for(k=0;k<13;k++) //begin WSO 9/16/13 if(tbout[k] > 0.) swath300[*isc].NS.surfEmissivity[*i][k]=tbout[k]; else swath300[*isc].NS.surfEmissivity[*i][k]=missing_r4c; //begin WSO 7/28/16 remove extra channels // for(k=13;k<15;k++) // swath300[*isc].NS.surfEmissivity[*i][k]=missing_r4c; //end WSO 7/28/16 //end WSO 9/16/13 //end WSO 10/13/15 } void copysfcemissouts1sigma_300_(int *isc,float *tbout, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //begin WSO 10/13/15 change loops to output HF // emissivities for(k=0;k<13;k++) //begin WSO 9/16/13 if(tbout[k] > 0.) swath300[*isc].NS.surfEmissSigma[*i][k]=tbout[k]; else swath300[*isc].NS.surfEmissSigma[*i][k]=missing_r4c; //begin WSO 7/28/16 remove extra channels // for(k=13;k<15;k++) // swath300[*isc].NS.surfEmissivity[*i][k]=missing_r4c; //end WSO 7/28/16 //end WSO 9/16/13 //end WSO 10/13/15 } void copysfcemissouts2_300_(int *isc,float *tbout, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //begin WSO 10/13/15 change loops to output HF // emissivities for(k=0;k<13;k++) //begin WSO 9/16/13 if(tbout[k] > 0.) swath300[*isc].MS.surfEmissivity[*i][k]=tbout[k]; else swath300[*isc].MS.surfEmissivity[*i][k]=missing_r4c; //begin WSO 7/28/16 remove extra channels // for(k=13;k<15;k++) // swath300[*isc].MS.surfEmissivity[*i][k]=missing_r4c; //end WSO 7/28/16 //end WSO 9/16/13 //end WSO 10/13/15 } void copysfcemissouts2sigma_300_(int *isc,float *tbout, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //begin WSO 10/13/15 change loops to output HF // emissivities for(k=0;k<13;k++) //begin WSO 9/16/13 if(tbout[k] > 0.) 
swath300[*isc].MS.surfEmissSigma[*i][k]=tbout[k]; else swath300[*isc].MS.surfEmissSigma[*i][k]=missing_r4c; //begin WSO 7/28/16 remove extra channels // for(k=13;k<15;k++) // swath300[*isc].MS.surfEmissivity[*i][k]=missing_r4c; //end WSO 7/28/16 //end WSO 9/16/13 //end WSO 10/13/15 } void copytbouts1_300_(int *isc,float *tbout, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //begin WSO 9/16/13 for(k=0;k<13;k++) if(tbout[k] > -90.) swath300[*isc].NS.simulatedBrightTemp[*i][k]=tbout[k]; else swath300[*isc].NS.simulatedBrightTemp[*i][k]=missing_r4c; //begin WSO 7/28/16 remove extra channels // for(k=13;k<15;k++) // swath300[*isc].NS.simulatedBrightTemp[*i][k]=missing_r4c; //end WSO 7/28/16 //end WSO 9/16/13 } void copytbouts2_300_(int *isc,float *tbout, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; //begin WSO 9/16/13 for(k=0;k<13;k++) if(tbout[k] > -90.) swath300[*isc].MS.simulatedBrightTemp[*i][k]=tbout[k]; else swath300[*isc].MS.simulatedBrightTemp[*i][k]=missing_r4c; //for(k=0;k<2;k++) // swath300[*isc].MS.simulatedBrightTemp[*i][k]=missing_r4c; //begin WSO 7/28/16 remove extra channels // for(k=13;k<15;k++) // swath300[*isc].MS.simulatedBrightTemp[*i][k]=missing_r4c; //end WSO 7/28/16 //end WSO 9/16/13 } void copyrainflags1_300_(int *isc,int *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.Input.precipitationFlag[*i]=*sfcVar; } void copyrainflags2_300_(int *isc,int *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.Input.precipitationFlag[*i][0]=*sfcVar; swath300[*isc].MS.Input.precipitationFlag[*i][1]=*sfcVar; } //begin WSO 8/20/14 write new ioquality flags void copyioqualitys1_300_(int *isc,int *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.FLG.ioQuality[*i]=*sfcVar; } void copyioqualitys2_300_(int *isc,int *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.FLG.ioQuality[*i]=*sfcVar; } //end WSO 8/20/14 // //begin WSO 3/17/17 write snow ice cover flags void copysnowices1_300_(int *isc,int *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.Input.snowIceCover[*i]=*sfcVar; } void copysnowices2_300_(int *isc,int *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.Input.snowIceCover[*i]=*sfcVar; } //end WSO 3/17/17 void copysfcliqfracts1_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.surfLiqRateFrac[*i]=*sfcVar; } void copysfcliqfracts2_300_(int *isc,float *sfcVar, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.surfLiqRateFrac[*i]=*sfcVar; } void copycldwaters1_300_(int *isc,float *var1d, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].NS.cloudLiqWaterCont[*i][k]=var1d[k]; } } void copycldwaters2_300_(int *isc,float *var1d, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].MS.cloudLiqWaterCont[*i][k]=var1d[k]; } } void copycldices1_300_(int *isc,float *var1d, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].NS.cloudIceWaterCont[*i][k]=var1d[k]; } } void copycldices2_300_(int *isc,float *var1d, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<nbins;k++) { swath300[*isc].MS.cloudIceWaterCont[*i][k]=var1d[k]; } } //begin WSO 9/5/13 new copy routine for SRT and DSRT pia effective sigma's void copysigmapias1_300_(int *isc,float *sigmapia, int *i) { extern L2BCMB_SWATHS swath300[300]; 
//diagnostic // printf("in writeCMBOut i: %5i, sigmapia: %10.4f\n", *sigmapia, *i); //end diagnostic swath300[*isc].NS.Input.piaEffectiveSigma[*i] = *sigmapia; } void copysigmapias2_300_(int *isc,float *sigmapiaku, float *sigmapiaka, int *i) { extern L2BCMB_SWATHS swath300[300]; // diagnostic // printf("in writeCMBOut i: %5i, sigmapiaku: %10.4f, sigmapiaka: %10.4f\n", // *sigmapiaku, *sigmapiaka, *i); //end diagnostic swath300[*isc].MS.Input.piaEffectiveSigma[*i][0] = *sigmapiaku; swath300[*isc].MS.Input.piaEffectiveSigma[*i][1] = *sigmapiaka; } //end WSO 9/5/13 //write principal components void copyprincomps1_300_(int *isc,float *princomp, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<5;k++) { swath300[*isc].NS.aPriori.prinComp[*i][k] = princomp[k]; } } // void copyprincomps2_300_(int *isc,float *princomp, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<5;k++) { swath300[*isc].MS.aPriori.prinComp[*i][k] = princomp[k]; } } //write profile class void copyprofclasss1_300_(int *isc,int *profclass, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.aPriori.profClass[*i] = *profclass; } void copyprofclasss2_300_(int *isc,int *profclass, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.aPriori.profClass[*i] = *profclass; } //write surface precip bias ratio void copysurfprecipbiasratios1_300_(int *isc,float *biasratio, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.aPriori.surfPrecipBiasRatio[*i] = *biasratio; } void copysurfprecipbiasratios2_300_(int *isc,float *biasratio, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.aPriori.surfPrecipBiasRatio[*i] = *biasratio; } //write initial log10 of the PSD intercept void copyinitnws1_300_(int *isc,float *initlogNw, int *n9, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<9;k++) { if(n9[k]>1 && n9[k]<=88) { swath300[*isc].NS.aPriori.initNw[*i][k] = initlogNw[n9[k]-1]; } else { swath300[*isc].NS.aPriori.initNw[*i][k] = missing_r4c; } } } void copyinitnws2_300_(int *isc,float *initlogNw, int *n9, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<9;k++) { if(n9[k]>1 && n9[k]<=88) { swath300[*isc].MS.aPriori.initNw[*i][k] = initlogNw[n9[k]-1]; } else { swath300[*isc].MS.aPriori.initNw[*i][k] = missing_r4c; } } } //write sub-footprint variability parameter void copysubfootvariabilitys1_300_(int *isc,float *subfoot, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.nubfPIAfactor[*i] = *subfoot; } void copysubfootvariabilitys2_300_(int *isc,float *subfoot, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.nubfPIAfactor[*i] = *subfoot; } //write multiple scattering flag void copymultiscatcalcs1_300_(int *isc,int *multiscat, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.FLG.multiScatCalc[*i] = *multiscat; } void copymultiscatcalcs2_300_(int *isc,int *multiscat, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.FLG.multiScatCalc[*i] = *multiscat; } //write multiple scattering surface parameter void copymultiscatsurfaces1_300_(int *isc,float *multisfc, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].NS.multiScatMaxContrib[*i] = *multisfc; } void copymultiscatsurfaces2_300_(int *isc,float *multisfc, int *i) { extern L2BCMB_SWATHS swath300[300]; swath300[*isc].MS.multiScatMaxContrib[*i] = *multisfc; } // //begin WSO 2/8/17 copy routine for measured sigma-zeros void copysigmazeros1_300_(int *isc,float *sigmazeroku, int *i) { extern L2BCMB_SWATHS swath300[300]; 
swath300[*isc].NS.Input.sigmaZeroMeasured[*i] = *sigmazeroku; } void copysigmazeros2_300_(int *isc,float *sigmazeroku, float *sigmazeroka, int *i) { extern L2BCMB_SWATHS swath300[300]; // swath300[*isc].MS.Input.sigmaZeroMeasured[*i][0] = *sigmazeroku; // swath300[*isc].MS.Input.sigmaZeroMeasured[*i][1] = *sigmazeroka; swath300[*isc].MS.Input.sigmaZeroMeasured[*i] = *sigmazeroka; } //end WSO 2/8/17 //begin WSO 8/19/13 modified copy routines to include nodes void copylognws1_300_(int *isc,float *logNw, int *n9, int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<9;k++) { //begin WSO 9/7/14 added upper limit for n9 in next line if(n9[k]>1 && n9[k]<=88) { swath300[*isc].NS.PSDparamLowNode[*i][k] = n9[k]-1; swath300[*isc].NS.precipTotPSDparamLow[*i][k][0]=logNw[n9[k]-1]; } else { swath300[*isc].NS.PSDparamLowNode[*i][k] = missing_i2c; swath300[*isc].NS.precipTotPSDparamLow[*i][k][0]= missing_r4c; } } } void copylognws2_300_(int *isc,float *logNw, int *n9,int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<9;k++) { if(n9[k]>0) { swath300[*isc].MS.PSDparamLowNode[*i][k] = n9[k]-1; swath300[*isc].MS.precipTotPSDparamLow[*i][k][0]=logNw[n9[k]-1]; } else { swath300[*isc].MS.PSDparamLowNode[*i][k] = missing_i2c; swath300[*isc].MS.precipTotPSDparamLow[*i][k][0]= missing_r4c; } } } //end WSO 8/19/13 //begin WSO 8/19/13 add mu as second low-resolution parameter void copymus1_300_(int *isc,float *mu_prof, int *n9,int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<9;k++) { if(n9[k]>1) { swath300[*isc].NS.precipTotPSDparamLow[*i][k][1]=mu_prof[n9[k]-1]; } else { swath300[*isc].NS.precipTotPSDparamLow[*i][k][1]=missing_r4c; } } } void copymus2_300_(int *isc,float *mu_prof, int *n9,int *i) { int k; extern L2BCMB_SWATHS swath300[300]; for(k=0;k<9;k++) { if(n9[k]>0) { swath300[*isc].MS.precipTotPSDparamLow[*i][k][1]=mu_prof[n9[k]-1]; } else { swath300[*isc].MS.precipTotPSDparamLow[*i][k][1]=missing_r4c; } } } //end WSO 8/19/13 void idiot_check_300_(int *isc,int *number, char *ident) { printf(" sfm idiot check %i %s \n",number,ident); }
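/*
 * Interop sketch (editorial, not part of the module above): every routine in this
 * file follows the Fortran-to-C convention used by gfortran and ifort -- a trailing
 * underscore on the C name and pointer-only argument lists, because Fortran passes
 * all actuals by reference.  The wrapper below is a hypothetical minimal example of
 * the same pattern; "example_copy_300" is an illustration only, not a routine of
 * this module.
 */
void example_copy_300_(int *isc, float *value, int *i)
{
    extern L2BCMB_SWATHS swath300[300];

    /* dereference everything: the Fortran caller passed scalars by reference,
     * e.g.  call example_copy_300(isc, w10, i)  (no trailing underscore there) */
    swath300[*isc].NS.tenMeterWindSpeed[*i] = *value;
}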
224138.c
/* * Copyright (C) 2018 Intel Corporation. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <sys/param.h> #include <sys/stat.h> #include <errno.h> #include <paths.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <stdbool.h> #include <fcntl.h> #include <dirent.h> #include <time.h> #include "dm.h" #include "log.h" #define DISK_PREFIX "disk_log: " #define LOG_PATH_NODE "/var/log/acrn-dm/" #define LOG_NAME_PREFIX "%s_log_" #define LOG_NAME_FMT "%s%s_log_%d" /* %s-->vm1/vm2..., %d-->1/2/3/4... */ #define LOG_DELIMITER "\n\n----------------new vm instance------------------\n\n\0" #define FILE_NAME_LENGTH 96 #define LOG_SIZE_LIMIT 0x200000 /* one log file size limit */ #define LOG_FILES_COUNT 8 static int disk_fd = -1; static uint32_t cur_log_size; static uint16_t cur_file_index; static uint8_t disk_log_level = LOG_DEBUG; static bool disk_log_enabled = false; #define DISK_LOG_MAX_LEN (MAX_ONE_LOG_SIZE + 32) #define INDEX_AFTER(a, b) ((short int)b - (short int)a < 0) static bool is_disk_log_enabled(void) { return disk_log_enabled; } static uint8_t get_disk_log_level(void) { return disk_log_level; } static int probe_disk_log_file(void) { char file_name[FILE_NAME_LENGTH]; struct dirent *pdir; struct stat st; int length; uint16_t index = 0, tmp; bool is_first_file = true; DIR *dir; if (stat(LOG_PATH_NODE, &st)) { if (mkdir(LOG_PATH_NODE, 0644)) { printf(DISK_PREFIX"create path: %s failed! Error: %s\n", LOG_PATH_NODE, strerror(errno)); return -1; } } dir = opendir(LOG_PATH_NODE); if (!dir) { printf(DISK_PREFIX" open %s failed! Error: %s\n", LOG_PATH_NODE, strerror(errno)); return -1; } snprintf(file_name, FILE_NAME_LENGTH - 1, LOG_NAME_PREFIX, vmname); length = strlen(file_name); while ((pdir = readdir(dir)) != NULL) { if (!(pdir->d_type & DT_REG)) continue; if (strncmp(pdir->d_name, file_name, length) != 0) continue; tmp = (uint16_t)atoi(pdir->d_name + length); if (is_first_file) { is_first_file = false; index = tmp; } else if (INDEX_AFTER(tmp, index)) { index = tmp; } } snprintf(file_name, FILE_NAME_LENGTH - 1, LOG_NAME_FMT, LOG_PATH_NODE, vmname, index); disk_fd = open(file_name, O_RDWR | O_CREAT | O_APPEND, 0644); if (disk_fd < 0) { printf(DISK_PREFIX" open %s failed! Error: %s\n", file_name, strerror(errno)); return -1; } if (write(disk_fd, LOG_DELIMITER, strlen(LOG_DELIMITER)) < 0) { printf(DISK_PREFIX" write %s failed! Error: %s\n", file_name, strerror(errno)); return -1; } fstat(disk_fd, &st); cur_log_size = st.st_size; cur_file_index = index; return 0; } static int init_disk_logger(bool enable, uint8_t log_level) { disk_log_enabled = enable; disk_log_level = log_level; return 1; } static void deinit_disk_logger(void) { if (disk_fd > 0) { disk_log_enabled = false; fsync(disk_fd); close(disk_fd); disk_fd = -1; } } static void write_to_disk(const char *fmt, va_list args) { char buffer[DISK_LOG_MAX_LEN]; char *file_name = buffer; char *buf; int len; int write_cnt; struct timespec times = {0, 0}; if ((disk_fd < 0) && disk_log_enabled) { /** * usually this probe just be called once in DM whole life; but we need use vmname in * probe_disk_log_file, it can't be called in init_disk_logger for vmname not inited then, * so call it here. 
*/ if (probe_disk_log_file() < 0) { disk_log_enabled = false; return; } } len = vasprintf(&buf, fmt, args); if (len < 0) return; clock_gettime(CLOCK_MONOTONIC, &times); len = snprintf(buffer, DISK_LOG_MAX_LEN, "[%5lu.%06lu] ", times.tv_sec, times.tv_nsec / 1000); if (len < 0 || len >= DISK_LOG_MAX_LEN) { free(buf); return; } len = strnlen(buffer, DISK_LOG_MAX_LEN); strncpy(buffer + len, buf, DISK_LOG_MAX_LEN - len); buffer[DISK_LOG_MAX_LEN - 1] = '\0'; free(buf); write_cnt = write(disk_fd, buffer, strnlen(buffer, DISK_LOG_MAX_LEN)); if (write_cnt < 0) { perror(DISK_PREFIX"write disk failed"); close(disk_fd); disk_fd = -1; return; } cur_log_size += write_cnt; if (cur_log_size > LOG_SIZE_LIMIT) { cur_file_index++; /* remove the first old log file, to add a new one */ snprintf(file_name, FILE_NAME_LENGTH - 1, LOG_NAME_FMT, LOG_PATH_NODE, vmname, (uint16_t)(cur_file_index - LOG_FILES_COUNT)); remove(file_name); snprintf(file_name, FILE_NAME_LENGTH - 1, LOG_NAME_FMT, LOG_PATH_NODE, vmname, cur_file_index); close(disk_fd); disk_fd = open(file_name, O_RDWR | O_CREAT, 0644); if (disk_fd < 0) { printf(DISK_PREFIX" open %s failed! Error: %s\n", file_name, strerror(errno)); return; } cur_log_size = 0; } } static struct logger_ops logger_disk = { .name = "disk", .is_enabled = is_disk_log_enabled, .get_log_level = get_disk_log_level, .init = init_disk_logger, .deinit = deinit_disk_logger, .output = write_to_disk, }; DEFINE_LOGGER_DEVICE(logger_disk);
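/*
 * Sketch (editorial, not part of acrn-dm): INDEX_AFTER() above compares log file
 * indices with serial-number arithmetic, so rotation keeps working after the
 * uint16_t counter wraps past 65535.  The self-test below assumes only a
 * two's-complement 16-bit short, which is what the signed cast relies on.
 */
#include <assert.h>
#include <stdint.h>

static void index_after_selftest(void)
{
    uint16_t newest = 2, oldest = 65534;   /* counter wrapped: 65534, 65535, 0, 1, 2 */

    /* a plain '>' would call 65534 the newest file; the signed 16-bit
     * difference used by INDEX_AFTER() correctly picks 2 instead */
    assert(INDEX_AFTER(newest, oldest));
    assert(!INDEX_AFTER(oldest, newest));
}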
422646.c
#include "config.h" #include "packet.h" struct PacketStat packet_stat; conf_t* conf = NULL; char ip1[30]; char ip2[30]; uint64_t seq_count;
405100.c
/* this daemon handles all race abilities and stats // Celtron */ /* Added 2nd rebirth races -- Rag 25.2.2004 */ /* 3rd rebirth race -- Rag 1.6.2004 */ #define XP_RATE_D "/daemons/xp_rate_d" mapping races; reset(arg) { if(arg) return; races = ([ ]); /* hpr spr str dex con int wis size skc spc xp prot bg-sk */ /* Fighters */ races += ([ "giant": ({"high", "low", "90", "45", "75", "30", "45", "huge", "med", "high", "80", "", "attack", }) ]); races += ([ "ogre": ({"high", "low", "75", "60", "60", "45", "30", "large", "low", "high", "95", "low", "stun", }) ]); races += ([ "orc": ({"high", "low", "60", "60", "60", "45", "30", "normal", "low", "high","105", "", "blades", }) ]); races += ([ "troll": ({"spec", "low", "75", "75", "75", "30", "45", "large", "high","high", "90", "low", "bludgeons", }) ]); /* Semi-fighters */ races += ([ "dwarf": ({"high", "med", "75", "45", "60", "30", "45", "small", "spec", "med", "90", "high","axes", }) ]); races += ([ "human": ({"high", "med", "60", "60", "60", "60", "60", "normal", "med", "med","100", "", "parry", }) ]); races += ([ "hobbit": ({"high", "med", "45", "75", "60", "30", "45", "very small","low", "med","100", "", "dodge", }) ]); races += ([ "dark elf": ({"med", "high", "60", "75", "60", "75", "60", "normal", "med", "med", "80", "", "critical", }) ]); /* Clerics */ races += ([ "ent": ({"med", "high", "60", "30", "60", "45", "75", "huge", "high", "spec","85", "low", "cast minor", }) ]); races += ([ "lizardman": ({"med", "high", "45", "30", "45", "60", "75", "large", "high", "low","100", "", "chanting", }) ]); races += ([ "wood elf": ({"med", "high", "30", "45", "60", "45", "90", "small", "high", "med", "90", "", "cast heal", }) ]); /* Mages */ races += ([ "catfolk": ({"low", "high", "30", "45", "45", "75", "45", "small", "high", "low","105", "", "cast essence", }) ]); races += ([ "high elf": ({"low", "high", "45", "30", "60", "90", "60", "normal", "high", "med", "85", "", "cast bolt", }) ]); races += ([ "gnome": ({"low", "high", "45", "30", "60", "75", "45", "very small","high", "spec","90", "", "cast fire", }) ]); races += ([ "mind flayer":({"low", "spec", "45", "30", "45", "75", "60", "normal", "high", "low", "90", "", "cast ice", }) ]); /* REBIRTH RACES */ races += ([ "golem": ({"high", "med", "90", "45", "90", "45", "45", "huge", "low", "high","90", "high","berserk", }) ]); races += ([ "angel": ({"high", "high","75", "75", "75", "75", "75", "normal", "med", "med", "90", "", "tumble", }) ]); races += ([ "lich": ({"med", "high","45", "45", "60", "90", "90", "normal", "high", "low","90", "","mana control",}) ]); races += ([ "sprite": ({"high", "very high","30", "90", "60", "90", "75", "very small","high", "high","90","high","foresee attack",}) ]); /* REBIRTH 2 RACES */ races += ([ "demon": ({"spec", "med", "90", "60", "90", "45", "45", "huge", "spec", "med","90", "high", "tremendous blow", }) ]); races += ([ "spirit": ({"med", "spec", "45", "60", "60", "90", "90", "large", "med", "spec","90", "high", "cast storm", }) ]); /* REBIRTH 3 RACE */ //races += ([ "avatar": ({"spec", "spec", "90", "75", "90", "75", "90", "huge", "spec", "spec","80","high", "reflect spell", }) ]); /* races += ([ "": ({"", "", "", "", "", "", "", "", "", "", "", "", "", }) ]); */ /* put your tune here. 
*/ races["dwarf"] = ({"high", "med", "75", "45", "60", "30", "45", "small", "spec", "med", "85", "high","axes", }); /* reason bg */ } query_races() { return m_indices(races); } get_stat_slot(string stat) { string slots; int i; slots = ({ "hpr", "spr", "str", "dex", "con", "int", "wis", "size", "skill_cost", "spell_cost", "exp_rate", "resist", "bonus_skill", }); if(stat == "hpregen" || stat == "hp_regen") stat = "hpr"; if(stat == "spregen" || stat == "sp_regen") stat = "spr"; while(i<sizeof(slots)) { if(slots[i] == stat) return i; i += 1; } return -1; } query_race_info(string race) { string tmp, adj, stats, file, str; int value, i, tmp_xp; file = race; while(sscanf(file,"%s %s",file,tmp) == 2) { file = file + "_" + tmp; } file = "/data/races/"+file; if(file_size(file) == -1) { write("No such race.\n"); return 0; } cat(file); /* Show race desc */ write("\n"); stats = ({ "Strength", "Intelligence", "Dexterity", "Wisdom", "Constitution", "Size", "Hpregen", "Spregen", }); str = ""; while(i<sizeof(stats)) { if(stats[i] == "Size") str += "Size: "+races[race][7]; else if(i >= 6) str += stats[i]+": "+change_adj(races[race][i-6]); else str += stats[i]+": "+get_adj(query_race_stat(race, lower_case(extract(stats[i],0,2))) ); if(i/2*2 == i) { str += extract(" ", 0, 35-strlen(str)); } else { write(str+"\n"); str = ""; } i += 1; } write("\n"); tmp_xp = XP_RATE_D->query_race_xp_rate(race); if(!tmp_xp) tmp_xp = races[race][10]; write("They receive "+tmp_xp+"% of experience gained.\n"); write("They learn craft skills "+change_adj(races[race][8], 1)+".\n"); write("They master arcane powers "+change_adj(races[race][9], 1)+".\n"); write("They have some knowledge in skill '"+races[race][12]+"'.\n"); if(query_race_stat(race, "can_eat_corpses")) write("They can survive by eating corpses.\n"); adj = races[race][11]; if(adj != "") write("They have a "+adj+" resistance against natural elements.\n"); } change_adj(string str, int alt) { if(alt) { switch(str) { case "spec": return "very easily"; case "low": return "in short time"; case "med": return "with some practice"; case "high": return "with much trouble"; } } switch(str) { case "med": return "mediocre"; case "spec": return "very high"; } return str; } /* huom! 
value on numero stringina */ get_adj(int value) { switch(value) { case 30: return "poor"; case 45: return "low"; case 60: return "average"; case 75: return "good"; case 90: return "excellent"; } return "invalid value"; } query_race_stat(string race, string stat, int info) { int slot, value; string data; if(!race || !stat) return 0; if(race == "visitor" || this_player()->query_guest()) return 0; /* infran saa nopeesti */ if(stat == "infra") { switch(race) { case "dark elf": return 3; case "spirit": return 3; case "demon": return 3; case "avatar": return 3; case "mind flayer": return 2; case "dwarf": return 2; case "troll": return 2; case "gnome": return 2; case "wood elf": return 1; case "high elf": return 1; case "catfolk": return 1; case "orc": return 1; } return 0; } if(stat == "can_eat_corpses") { switch(race) { case "orc": case "troll": case "mind flayer": case "catfolk": case "demon": return 1; } return 0; } if(stat == "rebirth") { switch(race) { case "sprite": case "golem": case "angel": case "lich": return 1; case "demon": case "spirit": return 2; case "avatar": return 3; } return 0; } slot = get_stat_slot(stat); if(!race || slot == -1) log_file("RACE_STATS", ctime(time())+" "+this_player()->query_name()+" race: "+race+" slot: "+slot+" stat: "+stat+ " prev_ob: "+file_name(previous_object())+"\n", 1); data = races[race][slot]; slot += 1; /* hp & sp regen */ if(slot == 1 || slot == 2) { if(info) return data; switch(data) { case "low": return 30; case "med": return 80; case "high": return 150; case "spec": return 200; } return -1; } /* stats */ if(slot >= 3 && slot <= 7) { if(sscanf(data, "%d", value) != 1) return -1; return value; } /* size */ if(slot == 8) { if(info) return data; switch(data) { case "very small": return 25; case "small": return 40; case "normal": return 60; case "large": return 80; case "huge": return 100; } return -1; } /* skill and spellcosts in percents */ if(slot == 9 || slot == 10) { if(info) return data; switch(data) { case "high": return 300; case "med": return 180; case "low": return 100; case "spec": return 75; } return -1; } /* xprate */ if(slot == 11) { int tmp_rate; if(sscanf(data, "%d", value) != 1) return -1; tmp_rate = XP_RATE_D->query_race_xp_rate(race); if(tmp_rate) return tmp_rate; return value; } /* nat prot */ if(slot == 12) { if(info) return data; switch(data) { case "low": return 20; case "high": return 33; default: return 0; } } /* background skill */ if(slot == 13) return data; /* should not end here */ return -1; }
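/*
 * Editor's note: the daemon above (LPC, not C) stores race attributes as
 * adjective strings and converts them to numbers in query_race_stat().
 * A minimal C sketch of the same lookup idea follows; the values mirror the
 * hp/sp-regen switch in the daemon, but the function itself is hypothetical
 * and not part of the original mudlib.
 */
#include <stdio.h>
#include <string.h>

static int regen_value(const char *adj)
{
    /* same mapping as the daemon's "hpr"/"spr" slots */
    if (strcmp(adj, "low") == 0)  return 30;
    if (strcmp(adj, "med") == 0)  return 80;
    if (strcmp(adj, "high") == 0) return 150;
    if (strcmp(adj, "spec") == 0) return 200;
    return -1;                    /* unknown adjective */
}

int main(void)
{
    printf("dwarf hpr: %d\n", regen_value("high"));   /* prints 150 */
    return 0;
}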
990578.c
/* * Linux 386 fpu support * Mimic Plan9 floating point support */ #include "lib9.h" void setfcr(ulong fcr) { __asm__( "xorb $0x3f, %%al\n\t" "pushw %%ax\n\t" "fwait\n\t" "fldcw (%%esp)\n\t" "popw %%ax\n\t" : /* no output */ : "al" (fcr) ); } ulong getfcr(void) { ulong fcr = 0; __asm__( "pushl %%eax\n\t" "fwait\n\t" "fstcw (%%esp)\n\t" "popl %%eax\n\t" "xorb $0x3f, %%al\n\t" : "=a" (fcr) : "eax" (fcr) ); return fcr; } ulong getfsr(void) { ulong fsr = -1; __asm__( "fwait\n\t" "fstsw (%%eax)\n\t" "movl (%%eax), %%eax\n\t" "andl $0xffff, %%eax\n\t" : "=a" (fsr) : "eax" (&fsr) ); return fsr; } void setfsr(ulong fsr) { __asm__("fclex\n\t"); }
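/*
 * Editor's note: the lib9 routines above poke the x87 control/status words
 * directly with inline assembly. As a point of comparison only (not a drop-in
 * replacement -- the Plan 9 FCR bit layout differs), C99 <fenv.h> exposes
 * similar rounding and exception control portably. Hypothetical sketch:
 */
#include <fenv.h>
#include <stdio.h>

int main(void)
{
    fesetround(FE_TOWARDZERO);        /* rounding mode, cf. the FCR RC bits */
    feclearexcept(FE_ALL_EXCEPT);     /* cf. setfsr(): clear sticky flags */

    volatile double x = 1.0, y = 3.0;
    double q = x / y;                 /* inexact result */

    if (fetestexcept(FE_INEXACT))     /* cf. getfsr(): query sticky flags */
        printf("inexact raised, q = %.17g\n", q);
    return 0;
}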
344887.c
/* * linux/fs/ext2/balloc.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card ([email protected]) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * Enhanced block allocation by Stephen Tweedie ([email protected]), 1993 * Big-endian to little-endian byte-swapping/bitmaps by * David S. Miller ([email protected]), 1995 */ #include <linux/config.h> #include <linux/fs.h> #include <linux/ext2_fs.h> #include <linux/locks.h> #include <linux/quotaops.h> /* * balloc.c contains the blocks allocation and deallocation routines */ /* * The free blocks are managed by bitmaps. A file system contains several * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap * block for inodes, N blocks for the inode table and data blocks. * * The file system contains group descriptors which are located after the * super block. Each descriptor contains the number of the bitmap block and * the free blocks count in the block. The descriptors are loaded in memory * when a file system is mounted (see ext2_read_super). */ #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb, unsigned int block_group, struct buffer_head ** bh) { unsigned long group_desc; unsigned long desc; struct ext2_group_desc * gdp; if (block_group >= sb->u.ext2_sb.s_groups_count) { ext2_error (sb, "ext2_get_group_desc", "block_group >= groups_count - " "block_group = %d, groups_count = %lu", block_group, sb->u.ext2_sb.s_groups_count); return NULL; } group_desc = block_group / EXT2_DESC_PER_BLOCK(sb); desc = block_group % EXT2_DESC_PER_BLOCK(sb); if (!sb->u.ext2_sb.s_group_desc[group_desc]) { ext2_error (sb, "ext2_get_group_desc", "Group descriptor not loaded - " "block_group = %d, group_desc = %lu, desc = %lu", block_group, group_desc, desc); return NULL; } gdp = (struct ext2_group_desc *) sb->u.ext2_sb.s_group_desc[group_desc]->b_data; if (bh) *bh = sb->u.ext2_sb.s_group_desc[group_desc]; return gdp + desc; } /* * Read the bitmap for a given block_group, reading into the specified * slot in the superblock's bitmap cache. * * Return >=0 on success or a -ve error code. */ static int read_block_bitmap (struct super_block * sb, unsigned int block_group, unsigned long bitmap_nr) { struct ext2_group_desc * gdp; struct buffer_head * bh = NULL; int retval = -EIO; gdp = ext2_get_group_desc (sb, block_group, NULL); if (!gdp) goto error_out; retval = 0; bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap)); if (!bh) { ext2_error (sb, "read_block_bitmap", "Cannot read block bitmap - " "block_group = %d, block_bitmap = %lu", block_group, (unsigned long) gdp->bg_block_bitmap); retval = -EIO; } /* * On IO error, just leave a zero in the superblock's block pointer for * this group. The IO will be retried next time. */ error_out: sb->u.ext2_sb.s_block_bitmap_number[bitmap_nr] = block_group; sb->u.ext2_sb.s_block_bitmap[bitmap_nr] = bh; return retval; } /* * load_block_bitmap loads the block bitmap for a blocks group * * It maintains a cache for the last bitmaps loaded. This cache is managed * with a LRU algorithm. * * Notes: * 1/ There is one cache per mounted file system. * 2/ If the file system contains less than EXT2_MAX_GROUP_LOADED groups, * this function reads the bitmap without maintaining a LRU cache. * * Return the slot used to store the bitmap, or a -ve error code. 
*/ static int __load_block_bitmap (struct super_block * sb, unsigned int block_group) { int i, j, retval = 0; unsigned long block_bitmap_number; struct buffer_head * block_bitmap; if (block_group >= sb->u.ext2_sb.s_groups_count) ext2_panic (sb, "load_block_bitmap", "block_group >= groups_count - " "block_group = %d, groups_count = %lu", block_group, sb->u.ext2_sb.s_groups_count); if (sb->u.ext2_sb.s_groups_count <= EXT2_MAX_GROUP_LOADED) { if (sb->u.ext2_sb.s_block_bitmap[block_group]) { if (sb->u.ext2_sb.s_block_bitmap_number[block_group] == block_group) return block_group; ext2_error (sb, "__load_block_bitmap", "block_group != block_bitmap_number"); } retval = read_block_bitmap (sb, block_group, block_group); if (retval < 0) return retval; return block_group; } for (i = 0; i < sb->u.ext2_sb.s_loaded_block_bitmaps && sb->u.ext2_sb.s_block_bitmap_number[i] != block_group; i++) ; if (i < sb->u.ext2_sb.s_loaded_block_bitmaps && sb->u.ext2_sb.s_block_bitmap_number[i] == block_group) { block_bitmap_number = sb->u.ext2_sb.s_block_bitmap_number[i]; block_bitmap = sb->u.ext2_sb.s_block_bitmap[i]; for (j = i; j > 0; j--) { sb->u.ext2_sb.s_block_bitmap_number[j] = sb->u.ext2_sb.s_block_bitmap_number[j - 1]; sb->u.ext2_sb.s_block_bitmap[j] = sb->u.ext2_sb.s_block_bitmap[j - 1]; } sb->u.ext2_sb.s_block_bitmap_number[0] = block_bitmap_number; sb->u.ext2_sb.s_block_bitmap[0] = block_bitmap; /* * There's still one special case here --- if block_bitmap == 0 * then our last attempt to read the bitmap failed and we have * just ended up caching that failure. Try again to read it. */ if (!block_bitmap) retval = read_block_bitmap (sb, block_group, 0); } else { if (sb->u.ext2_sb.s_loaded_block_bitmaps < EXT2_MAX_GROUP_LOADED) sb->u.ext2_sb.s_loaded_block_bitmaps++; else brelse (sb->u.ext2_sb.s_block_bitmap[EXT2_MAX_GROUP_LOADED - 1]); for (j = sb->u.ext2_sb.s_loaded_block_bitmaps - 1; j > 0; j--) { sb->u.ext2_sb.s_block_bitmap_number[j] = sb->u.ext2_sb.s_block_bitmap_number[j - 1]; sb->u.ext2_sb.s_block_bitmap[j] = sb->u.ext2_sb.s_block_bitmap[j - 1]; } retval = read_block_bitmap (sb, block_group, 0); } return retval; } /* * Load the block bitmap for a given block group. First of all do a couple * of fast lookups for common cases and then pass the request onto the guts * of the bitmap loader. * * Return the slot number of the group in the superblock bitmap cache's on * success, or a -ve error code. * * There is still one inconsistency here --- if the number of groups in this * filesystems is <= EXT2_MAX_GROUP_LOADED, then we have no way of * differentiating between a group for which we have never performed a bitmap * IO request, and a group for which the last bitmap read request failed. */ static inline int load_block_bitmap (struct super_block * sb, unsigned int block_group) { int slot; /* * Do the lookup for the slot. First of all, check if we're asking * for the same slot as last time, and did we succeed that last time? */ if (sb->u.ext2_sb.s_loaded_block_bitmaps > 0 && sb->u.ext2_sb.s_block_bitmap_number[0] == block_group && sb->u.ext2_sb.s_block_bitmap[0]) { return 0; } /* * Or can we do a fast lookup based on a loaded group on a filesystem * small enough to be mapped directly into the superblock? */ else if (sb->u.ext2_sb.s_groups_count <= EXT2_MAX_GROUP_LOADED && sb->u.ext2_sb.s_block_bitmap_number[block_group] == block_group && sb->u.ext2_sb.s_block_bitmap[block_group]) { slot = block_group; } /* * If not, then do a full lookup for this block group. 
*/ else { slot = __load_block_bitmap (sb, block_group); } /* * <0 means we just got an error */ if (slot < 0) return slot; /* * If it's a valid slot, we may still have cached a previous IO error, * in which case the bh in the superblock cache will be zero. */ if (!sb->u.ext2_sb.s_block_bitmap[slot]) return -EIO; /* * Must have been read in OK to get this far. */ return slot; } /* Free given blocks, update quota and i_blocks field */ void ext2_free_blocks (struct inode * inode, unsigned long block, unsigned long count) { struct buffer_head * bh; struct buffer_head * bh2; unsigned long block_group; unsigned long bit; unsigned long i; int bitmap_nr; unsigned long overflow; struct super_block * sb; struct ext2_group_desc * gdp; struct ext2_super_block * es; sb = inode->i_sb; if (!sb) { printk ("ext2_free_blocks: nonexistent device"); return; } lock_super (sb); es = sb->u.ext2_sb.s_es; if (block < le32_to_cpu(es->s_first_data_block) || (block + count) > le32_to_cpu(es->s_blocks_count)) { ext2_error (sb, "ext2_free_blocks", "Freeing blocks not in datazone - " "block = %lu, count = %lu", block, count); goto error_return; } ext2_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1); do_more: overflow = 0; block_group = (block - le32_to_cpu(es->s_first_data_block)) / EXT2_BLOCKS_PER_GROUP(sb); bit = (block - le32_to_cpu(es->s_first_data_block)) % EXT2_BLOCKS_PER_GROUP(sb); /* * Check to see if we are freeing blocks across a group * boundary. */ if (bit + count > EXT2_BLOCKS_PER_GROUP(sb)) { overflow = bit + count - EXT2_BLOCKS_PER_GROUP(sb); count -= overflow; } bitmap_nr = load_block_bitmap (sb, block_group); if (bitmap_nr < 0) goto error_return; bh = sb->u.ext2_sb.s_block_bitmap[bitmap_nr]; gdp = ext2_get_group_desc (sb, block_group, &bh2); if (!gdp) goto error_return; if (in_range (le32_to_cpu(gdp->bg_block_bitmap), block, count) || in_range (le32_to_cpu(gdp->bg_inode_bitmap), block, count) || in_range (block, le32_to_cpu(gdp->bg_inode_table), sb->u.ext2_sb.s_itb_per_group) || in_range (block + count - 1, le32_to_cpu(gdp->bg_inode_table), sb->u.ext2_sb.s_itb_per_group)) ext2_error (sb, "ext2_free_blocks", "Freeing blocks in system zones - " "Block = %lu, count = %lu", block, count); for (i = 0; i < count; i++) { if (!ext2_clear_bit (bit + i, bh->b_data)) ext2_error (sb, "ext2_free_blocks", "bit already cleared for block %lu", block + i); else { DQUOT_FREE_BLOCK(inode, 1); gdp->bg_free_blocks_count = cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)+1); es->s_free_blocks_count = cpu_to_le32(le32_to_cpu(es->s_free_blocks_count)+1); } } mark_buffer_dirty(bh2); mark_buffer_dirty(sb->u.ext2_sb.s_sbh); mark_buffer_dirty(bh); if (sb->s_flags & MS_SYNCHRONOUS) { ll_rw_block (WRITE, 1, &bh); wait_on_buffer (bh); } if (overflow) { block += count; count = overflow; goto do_more; } sb->s_dirt = 1; error_return: unlock_super (sb); return; } /* * ext2_new_block uses a goal block to assist allocation. If the goal is * free, or there is a free block within 32 blocks of the goal, that block * is allocated. Otherwise a forward search is made for a free block; within * each block group the search first looks for an entire free byte in the block * bitmap, and then for any free bit if that fails. * This function also updates quota and i_blocks field. 
*/ int ext2_new_block (struct inode * inode, unsigned long goal, u32 * prealloc_count, u32 * prealloc_block, int * err) { struct buffer_head * bh; struct buffer_head * bh2; char * p, * r; int i, j, k, tmp; int bitmap_nr; struct super_block * sb; struct ext2_group_desc * gdp; struct ext2_super_block * es; #ifdef EXT2FS_DEBUG static int goal_hits = 0, goal_attempts = 0; #endif *err = -ENOSPC; sb = inode->i_sb; if (!sb) { printk ("ext2_new_block: nonexistent device"); return 0; } lock_super (sb); es = sb->u.ext2_sb.s_es; if (le32_to_cpu(es->s_free_blocks_count) <= le32_to_cpu(es->s_r_blocks_count) && ((sb->u.ext2_sb.s_resuid != current->fsuid) && (sb->u.ext2_sb.s_resgid == 0 || !in_group_p (sb->u.ext2_sb.s_resgid)) && !capable(CAP_SYS_RESOURCE))) goto out; ext2_debug ("goal=%lu.\n", goal); repeat: /* * First, test whether the goal block is free. */ if (goal < le32_to_cpu(es->s_first_data_block) || goal >= le32_to_cpu(es->s_blocks_count)) goal = le32_to_cpu(es->s_first_data_block); i = (goal - le32_to_cpu(es->s_first_data_block)) / EXT2_BLOCKS_PER_GROUP(sb); gdp = ext2_get_group_desc (sb, i, &bh2); if (!gdp) goto io_error; if (le16_to_cpu(gdp->bg_free_blocks_count) > 0) { j = ((goal - le32_to_cpu(es->s_first_data_block)) % EXT2_BLOCKS_PER_GROUP(sb)); #ifdef EXT2FS_DEBUG if (j) goal_attempts++; #endif bitmap_nr = load_block_bitmap (sb, i); if (bitmap_nr < 0) goto io_error; bh = sb->u.ext2_sb.s_block_bitmap[bitmap_nr]; ext2_debug ("goal is at %d:%d.\n", i, j); if (!ext2_test_bit(j, bh->b_data)) { ext2_debug("goal bit allocated, %d hits\n",++goal_hits); goto got_block; } if (j) { /* * The goal was occupied; search forward for a free * block within the next XX blocks. * * end_goal is more or less random, but it has to be * less than EXT2_BLOCKS_PER_GROUP. Aligning up to the * next 64-bit boundary is simple.. */ int end_goal = (j + 63) & ~63; j = ext2_find_next_zero_bit(bh->b_data, end_goal, j); if (j < end_goal) goto got_block; } ext2_debug ("Bit not found near goal\n"); /* * There has been no free block found in the near vicinity * of the goal: do a search forward through the block groups, * searching in each group first for an entire free byte in * the bitmap and then for any free bit. * * Search first in the remainder of the current group; then, * cyclicly search through the rest of the groups. */ p = ((char *) bh->b_data) + (j >> 3); r = memscan(p, 0, (EXT2_BLOCKS_PER_GROUP(sb) - j + 7) >> 3); k = (r - ((char *) bh->b_data)) << 3; if (k < EXT2_BLOCKS_PER_GROUP(sb)) { j = k; goto search_back; } k = ext2_find_next_zero_bit ((unsigned long *) bh->b_data, EXT2_BLOCKS_PER_GROUP(sb), j); if (k < EXT2_BLOCKS_PER_GROUP(sb)) { j = k; goto got_block; } } ext2_debug ("Bit not found in block group %d.\n", i); /* * Now search the rest of the groups. We assume that * i and gdp correctly point to the last group visited. 
*/ for (k = 0; k < sb->u.ext2_sb.s_groups_count; k++) { i++; if (i >= sb->u.ext2_sb.s_groups_count) i = 0; gdp = ext2_get_group_desc (sb, i, &bh2); if (!gdp) goto io_error; if (le16_to_cpu(gdp->bg_free_blocks_count) > 0) break; } if (k >= sb->u.ext2_sb.s_groups_count) goto out; bitmap_nr = load_block_bitmap (sb, i); if (bitmap_nr < 0) goto io_error; bh = sb->u.ext2_sb.s_block_bitmap[bitmap_nr]; r = memscan(bh->b_data, 0, EXT2_BLOCKS_PER_GROUP(sb) >> 3); j = (r - bh->b_data) << 3; if (j < EXT2_BLOCKS_PER_GROUP(sb)) goto search_back; else j = ext2_find_first_zero_bit ((unsigned long *) bh->b_data, EXT2_BLOCKS_PER_GROUP(sb)); if (j >= EXT2_BLOCKS_PER_GROUP(sb)) { ext2_error (sb, "ext2_new_block", "Free blocks count corrupted for block group %d", i); goto out; } search_back: /* * We have succeeded in finding a free byte in the block * bitmap. Now search backwards up to 7 bits to find the * start of this group of free blocks. */ for (k = 0; k < 7 && j > 0 && !ext2_test_bit (j - 1, bh->b_data); k++, j--); got_block: ext2_debug ("using block group %d(%d)\n", i, gdp->bg_free_blocks_count); /* * Check quota for allocation of this block. */ if(DQUOT_ALLOC_BLOCK(inode, 1)) { *err = -EDQUOT; goto out; } tmp = j + i * EXT2_BLOCKS_PER_GROUP(sb) + le32_to_cpu(es->s_first_data_block); if (tmp == le32_to_cpu(gdp->bg_block_bitmap) || tmp == le32_to_cpu(gdp->bg_inode_bitmap) || in_range (tmp, le32_to_cpu(gdp->bg_inode_table), sb->u.ext2_sb.s_itb_per_group)) ext2_error (sb, "ext2_new_block", "Allocating block in system zone - " "block = %u", tmp); if (ext2_set_bit (j, bh->b_data)) { ext2_warning (sb, "ext2_new_block", "bit already set for block %d", j); DQUOT_FREE_BLOCK(inode, 1); goto repeat; } ext2_debug ("found bit %d\n", j); /* * Do block preallocation now if required. */ #ifdef EXT2_PREALLOCATE /* Writer: ->i_prealloc* */ if (prealloc_count && !*prealloc_count) { int prealloc_goal; unsigned long next_block = tmp + 1; prealloc_goal = es->s_prealloc_blocks ? es->s_prealloc_blocks : EXT2_DEFAULT_PREALLOC_BLOCKS; *prealloc_block = next_block; /* Writer: end */ for (k = 1; k < prealloc_goal && (j + k) < EXT2_BLOCKS_PER_GROUP(sb); k++, next_block++) { if (DQUOT_PREALLOC_BLOCK(inode, 1)) break; /* Writer: ->i_prealloc* */ if (*prealloc_block + *prealloc_count != next_block || ext2_set_bit (j + k, bh->b_data)) { /* Writer: end */ DQUOT_FREE_BLOCK(inode, 1); break; } (*prealloc_count)++; /* Writer: end */ } /* * As soon as we go for per-group spinlocks we'll need these * done inside the loop above. */ gdp->bg_free_blocks_count = cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - (k - 1)); es->s_free_blocks_count = cpu_to_le32(le32_to_cpu(es->s_free_blocks_count) - (k - 1)); ext2_debug ("Preallocated a further %lu bits.\n", (k - 1)); } #endif j = tmp; mark_buffer_dirty(bh); if (sb->s_flags & MS_SYNCHRONOUS) { ll_rw_block (WRITE, 1, &bh); wait_on_buffer (bh); } if (j >= le32_to_cpu(es->s_blocks_count)) { ext2_error (sb, "ext2_new_block", "block(%d) >= blocks count(%d) - " "block_group = %d, es == %p ",j, le32_to_cpu(es->s_blocks_count), i, es); goto out; } ext2_debug ("allocating block %d. 
" "Goal hits %d of %d.\n", j, goal_hits, goal_attempts); gdp->bg_free_blocks_count = cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); mark_buffer_dirty(bh2); es->s_free_blocks_count = cpu_to_le32(le32_to_cpu(es->s_free_blocks_count) - 1); mark_buffer_dirty(sb->u.ext2_sb.s_sbh); sb->s_dirt = 1; unlock_super (sb); *err = 0; return j; io_error: *err = -EIO; out: unlock_super (sb); return 0; } unsigned long ext2_count_free_blocks (struct super_block * sb) { #ifdef EXT2FS_DEBUG struct ext2_super_block * es; unsigned long desc_count, bitmap_count, x; int bitmap_nr; struct ext2_group_desc * gdp; int i; lock_super (sb); es = sb->u.ext2_sb.s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++) { gdp = ext2_get_group_desc (sb, i, NULL); if (!gdp) continue; desc_count += le16_to_cpu(gdp->bg_free_blocks_count); bitmap_nr = load_block_bitmap (sb, i); if (bitmap_nr < 0) continue; x = ext2_count_free (sb->u.ext2_sb.s_block_bitmap[bitmap_nr], sb->s_blocksize); printk ("group %d: stored = %d, counted = %lu\n", i, le16_to_cpu(gdp->bg_free_blocks_count), x); bitmap_count += x; } printk("ext2_count_free_blocks: stored = %lu, computed = %lu, %lu\n", le32_to_cpu(es->s_free_blocks_count), desc_count, bitmap_count); unlock_super (sb); return bitmap_count; #else return le32_to_cpu(sb->u.ext2_sb.s_es->s_free_blocks_count); #endif } static inline int block_in_use (unsigned long block, struct super_block * sb, unsigned char * map) { return ext2_test_bit ((block - le32_to_cpu(sb->u.ext2_sb.s_es->s_first_data_block)) % EXT2_BLOCKS_PER_GROUP(sb), map); } static inline int test_root(int a, int b) { if (a == 0) return 1; while (1) { if (a == 1) return 1; if (a % b) return 0; a = a / b; } } int ext2_group_sparse(int group) { return (test_root(group, 3) || test_root(group, 5) || test_root(group, 7)); } /** * ext2_bg_has_super - number of blocks used by the superblock in group * @sb: superblock for filesystem * @group: group number to check * * Return the number of blocks used by the superblock (primary or backup) * in this group. Currently this will be only 0 or 1. */ int ext2_bg_has_super(struct super_block *sb, int group) { if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&& !ext2_group_sparse(group)) return 0; return 1; } /** * ext2_bg_num_gdb - number of blocks used by the group table in group * @sb: superblock for filesystem * @group: group number to check * * Return the number of blocks used by the group descriptor table * (primary or backup) in this group. In the future there may be a * different number of descriptor blocks in each group. 
*/ unsigned long ext2_bg_num_gdb(struct super_block *sb, int group) { if (EXT2_HAS_RO_COMPAT_FEATURE(sb,EXT2_FEATURE_RO_COMPAT_SPARSE_SUPER)&& !ext2_group_sparse(group)) return 0; return EXT2_SB(sb)->s_gdb_count; } #ifdef CONFIG_EXT2_CHECK /* Called at mount-time, super-block is locked */ void ext2_check_blocks_bitmap (struct super_block * sb) { struct buffer_head * bh; struct ext2_super_block * es; unsigned long desc_count, bitmap_count, x, j; unsigned long desc_blocks; int bitmap_nr; struct ext2_group_desc * gdp; int i; es = sb->u.ext2_sb.s_es; desc_count = 0; bitmap_count = 0; gdp = NULL; for (i = 0; i < sb->u.ext2_sb.s_groups_count; i++) { gdp = ext2_get_group_desc (sb, i, NULL); if (!gdp) continue; desc_count += le16_to_cpu(gdp->bg_free_blocks_count); bitmap_nr = load_block_bitmap (sb, i); if (bitmap_nr < 0) continue; bh = EXT2_SB(sb)->s_block_bitmap[bitmap_nr]; if (ext2_bg_has_super(sb, i) && !ext2_test_bit(0, bh->b_data)) ext2_error(sb, __FUNCTION__, "Superblock in group %d is marked free", i); desc_blocks = ext2_bg_num_gdb(sb, i); for (j = 0; j < desc_blocks; j++) if (!ext2_test_bit(j + 1, bh->b_data)) ext2_error(sb, __FUNCTION__, "Descriptor block #%ld in group " "%d is marked free", j, i); if (!block_in_use (le32_to_cpu(gdp->bg_block_bitmap), sb, bh->b_data)) ext2_error (sb, "ext2_check_blocks_bitmap", "Block bitmap for group %d is marked free", i); if (!block_in_use (le32_to_cpu(gdp->bg_inode_bitmap), sb, bh->b_data)) ext2_error (sb, "ext2_check_blocks_bitmap", "Inode bitmap for group %d is marked free", i); for (j = 0; j < sb->u.ext2_sb.s_itb_per_group; j++) if (!block_in_use (le32_to_cpu(gdp->bg_inode_table) + j, sb, bh->b_data)) ext2_error (sb, "ext2_check_blocks_bitmap", "Block #%ld of the inode table in " "group %d is marked free", j, i); x = ext2_count_free (bh, sb->s_blocksize); if (le16_to_cpu(gdp->bg_free_blocks_count) != x) ext2_error (sb, "ext2_check_blocks_bitmap", "Wrong free blocks count for group %d, " "stored = %d, counted = %lu", i, le16_to_cpu(gdp->bg_free_blocks_count), x); bitmap_count += x; } if (le32_to_cpu(es->s_free_blocks_count) != bitmap_count) ext2_error (sb, "ext2_check_blocks_bitmap", "Wrong free blocks count in super block, " "stored = %lu, counted = %lu", (unsigned long) le32_to_cpu(es->s_free_blocks_count), bitmap_count); } #endif
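/*
 * Editor's note: __load_block_bitmap() above keeps up to EXT2_MAX_GROUP_LOADED
 * bitmap buffers in a small array ordered by recency, shifting entries down by
 * one and reinserting the hit (or the freshly read bitmap) at slot 0. The
 * following is a standalone, hypothetical sketch of that move-to-front
 * bookkeeping on plain integers, just to make the shuffle visible; it is not
 * kernel code.
 */
#include <stdio.h>

#define CACHE_SLOTS 8

static int cache[CACHE_SLOTS];
static int loaded;                      /* how many slots are in use */

/* Ensure `group` is cached and move it to slot 0 (most recently used). */
static int cache_lookup(int group)
{
    int i, j;

    for (i = 0; i < loaded && cache[i] != group; i++)
        ;
    if (i == loaded) {                  /* miss: take a new slot or evict the tail */
        if (loaded < CACHE_SLOTS)
            loaded++;
        i = loaded - 1;
        cache[i] = group;
    }
    /* move-to-front: shift slots [0..i) down by one, put the entry at slot 0 */
    for (j = i; j > 0; j--)
        cache[j] = cache[j - 1];
    cache[0] = group;
    return 0;
}

int main(void)
{
    int groups[] = { 3, 5, 3, 7, 5 };
    for (unsigned k = 0; k < sizeof groups / sizeof groups[0]; k++)
        cache_lookup(groups[k]);
    for (int k = 0; k < loaded; k++)
        printf("%d ", cache[k]);        /* most recent first: 5 7 3 */
    putchar('\n');
    return 0;
}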
645352.c
/******************************************************************************* * * DO NOT EDIT THIS FILE! * This file is auto-generated by fltg from INTERNAL/fltg/xgs/tnl/TNL_MPLS_DST_MAC.tbl.ltl * Edits to this file will be lost when it is regenerated. * * Copyright: (c) 2018 Broadcom. All Rights Reserved. "Broadcom" refers to * Broadcom Limited and/or its subsidiaries. * * Broadcom Switch Software License * * This license governs the use of the accompanying Broadcom software. Your * use of the software indicates your acceptance of the terms and conditions * of this license. If you do not agree to the terms and conditions of this * license, do not use the software. * 1. Definitions * "Licensor" means any person or entity that distributes its Work. * "Software" means the original work of authorship made available under * this license. * "Work" means the Software and any additions to or derivative works of * the Software that are made available under this license. * The terms "reproduce," "reproduction," "derivative works," and * "distribution" have the meaning as provided under U.S. copyright law. * Works, including the Software, are "made available" under this license * by including in or with the Work either (a) a copyright notice * referencing the applicability of this license to the Work, or (b) a copy * of this license. * 2. Grant of Copyright License * Subject to the terms and conditions of this license, each Licensor * grants to you a perpetual, worldwide, non-exclusive, and royalty-free * copyright license to reproduce, prepare derivative works of, publicly * display, publicly perform, sublicense and distribute its Work and any * resulting derivative works in any form. * 3. Grant of Patent License * Subject to the terms and conditions of this license, each Licensor * grants to you a perpetual, worldwide, non-exclusive, and royalty-free * patent license to make, have made, use, offer to sell, sell, import, and * otherwise transfer its Work, in whole or in part. This patent license * applies only to the patent claims licensable by Licensor that would be * infringed by Licensor's Work (or portion thereof) individually and * excluding any combinations with any other materials or technology. * If you institute patent litigation against any Licensor (including a * cross-claim or counterclaim in a lawsuit) to enforce any patents that * you allege are infringed by any Work, then your patent license from such * Licensor to the Work shall terminate as of the date such litigation is * filed. * 4. Redistribution * You may reproduce or distribute the Work only if (a) you do so under * this License, (b) you include a complete copy of this License with your * distribution, and (c) you retain without modification any copyright, * patent, trademark, or attribution notices that are present in the Work. * 5. Derivative Works * You may specify that additional or different terms apply to the use, * reproduction, and distribution of your derivative works of the Work * ("Your Terms") only if (a) Your Terms provide that the limitations of * Section 7 apply to your derivative works, and (b) you identify the * specific derivative works that are subject to Your Terms. * Notwithstanding Your Terms, this license (including the redistribution * requirements in Section 4) will continue to apply to the Work itself. * 6. 
Trademarks * This license does not grant any rights to use any Licensor's or its * affiliates' names, logos, or trademarks, except as necessary to * reproduce the notices described in this license. * 7. Limitations * Platform. The Work and any derivative works thereof may only be used, or * intended for use, with a Broadcom switch integrated circuit. * No Reverse Engineering. You will not use the Work to disassemble, * reverse engineer, decompile, or attempt to ascertain the underlying * technology of a Broadcom switch integrated circuit. * 8. Termination * If you violate any term of this license, then your rights under this * license (including the license grants of Sections 2 and 3) will * terminate immediately. * 9. Disclaimer of Warranty * THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR * NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER * THIS LICENSE. SOME STATES' CONSUMER LAWS DO NOT ALLOW EXCLUSION OF AN * IMPLIED WARRANTY, SO THIS DISCLAIMER MAY NOT APPLY TO YOU. * 10. Limitation of Liability * EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL * THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE * SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, * INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF * OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK * (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, * LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER * COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include <bcmltd/bcmltd_internal.h> #include "bcmltd_strpool.h" static const bcmltd_field_rep_t bcmltd_tnl_mpls_dst_mac_t_fields[] = { { .name = bcmltd_strpool_tnl_mpls_dst_mac_id, /* TNL_MPLS_DST_MAC_ID */ .flags = BCMLTD_FIELD_F_KEY, .width = 16, .depth = 0, .desc = "Index into the TNL_MPLS_DST_MAC table.", }, { .name = bcmltd_strpool_dst_mac, /* DST_MAC */ .flags = 0, .width = 48, .depth = 0, .desc = "Destination MAC address.", }, }; const bcmltd_table_rep_t bcmltd_tnl_mpls_dst_mac_t = { .name = bcmltd_strpool_tnl_mpls_dst_mac, /* TNL_MPLS_DST_MAC */ .flags = 0, .fields = 2, .field = bcmltd_tnl_mpls_dst_mac_t_fields, .desc = "The TNL_MPLS_DST_MAC table is used to set the destination MAC used during\n MPLS packet modification.\n", };
662414.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// MERGE SORT:
// Sorting works by splitting the list into two equal halves and then joining them back in order: divide and conquer.
// The list is split in half recursively until only isolated elements remain: merge_sort.
// The isolated elements are then joined two by two in sorted order until a single sorted list remains: merge.
// There is no distinct best or worst case: it always performs the same amount of work regardless of the data.
// Its behaviour is predictable: no matter how the data is arranged beforehand, the running time and the work done are the same.
// It uses O(n) memory when implemented with linked lists (it only needs a temporary vector to hold the pointers to each element) -> linear.
// It costs O(n*log(n)) time in the average and worst case, because of the repeated splitting.
// It is not adaptive: even with nearly sorted data it costs the same as with completely unsorted data.
// (Insertion sort, by contrast, is adaptive -- its cost drops when the data is already nearly sorted; selection sort is not.)
// It does not need random access: it never searches for elements, it only splits them.

void merge(int *arr, int beg, int mid, int end)
{
    int *temp, size, ptr1, ptr2;
    // Flags that mark when each half has been fully consumed: end1 and end2 become 1
    int end1 = 0, end2 = 0;

    // Point at the two subarrays to be compared
    ptr1 = beg;
    ptr2 = mid + 1;

    // Allocate a temporary array large enough for both halves
    size = end - beg + 1;
    temp = (int *)malloc(size * sizeof(int));

    if (temp != NULL) {
        for (int i = 0; i < size; i++) {
            if (!end1 && !end2) {
                // Merge in order, advancing ptr1 and ptr2
                if (arr[ptr1] < arr[ptr2]) {
                    temp[i] = arr[ptr1++];
                } else {
                    temp[i] = arr[ptr2++];
                }
                // Check whether either half has been exhausted
                if (ptr1 > mid) end1 = 1;
                if (ptr2 > end) end2 = 1;
            } else {
                // One half is done: copy the remainder of the other
                if (!end1) {
                    temp[i] = arr[ptr1++];
                } else {
                    temp[i] = arr[ptr2++];
                }
            }
        }
        // Copy from the temporary array back into the original once this range is fully merged
        for (int j = 0, k = beg; j < size; j++, k++) {
            arr[k] = temp[j];
        }
    }
    free(temp);
}

void merge_sort(int *arr, int beg, int end)
{
    int mid;
    if (beg < end) {
        mid = floor((beg + end) / 2); // Find the middle index
        // Divide: split the array recursively
        merge_sort(arr, beg, mid);
        merge_sort(arr, mid + 1, end);
        // Conquer: merge the sorted halves
        merge(arr, beg, mid, end);
    }
}

int main(int argc, char *argv[])
{
    int arr[5] = {500, 1, 50, 23, 76};
    merge_sort(arr, 0, 4); // Array, first index (0) and last index (5-1)
    printf("The sorted array is: \n");
    for (int i = 0; i < 5; i++) {
        printf("%d, ", arr[i]);
    }
    return 0;
}
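/*
 * Editor's note: the header comment above mentions that merge sort also works
 * on linked lists, where the merge step only relinks nodes instead of copying
 * into a temporary array. A hypothetical sketch of that merge step follows
 * (the recursive split is omitted); the node type and functions are mine,
 * not the author's.
 */
#include <stdio.h>
#include <stdlib.h>

struct node { int value; struct node *next; };

/* Merge two already-sorted lists by relinking nodes -- no element copies. */
static struct node *merge_lists(struct node *a, struct node *b)
{
    struct node head = { 0, NULL };     /* dummy head simplifies the loop */
    struct node *tail = &head;

    while (a && b) {
        if (a->value <= b->value) { tail->next = a; a = a->next; }
        else                      { tail->next = b; b = b->next; }
        tail = tail->next;
    }
    tail->next = a ? a : b;             /* append whichever list remains */
    return head.next;
}

static struct node *push(struct node *head, int v)
{
    struct node *n = malloc(sizeof *n);
    n->value = v;
    n->next = head;
    return n;
}

int main(void)
{
    /* 1->50->500 and 23->76, built in sorted order (nodes are never freed in this sketch) */
    struct node *a = push(push(push(NULL, 500), 50), 1);
    struct node *b = push(push(NULL, 76), 23);
    for (struct node *p = merge_lists(a, b); p; p = p->next)
        printf("%d ", p->value);        /* 1 23 50 76 500 */
    putchar('\n');
    return 0;
}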
42487.c
/*
 * MCP9600_cfg.c
 *
 *  Created on: 14-Dec-2020
 *      Author: Mrunal Ahirao
 *  Description: This file has the mapping between MCP I2C functions and driver functions. This will help in making
 *               this library platform independent.
 */
#include "MCP9600_cfg.h"

/**
 Description: This function writes data on the I2C bus.
 Parameters(in): uint8_t *ptrToData: Pointer to the data buffer which is to be written on the I2C bus.
 Parameters(out): None
 Return Value: t_MCP_Status: SUCCESS or FAILURE
 */
t_MCP_Status I2C_Write(uint8_t *ptrToData)
{
    t_MCP_Status status = SUCCESS;

    /* Note: the transfer length is fixed at 8 bytes here, so the caller's buffer must hold at least 8 bytes. */
    if(STATUS_SUCCESS != LPI2C_DRV_MasterSendDataBlocking(INST_LPI2C1, ptrToData, 8, true, I2C_TMOUT))
    {
        status = FAILURE;
    }
    return status;
}

/**
 Description: This function reads data from the I2C bus.
 Parameters(in): uint8_t *ptrToRxBuf: Pointer to the buffer which receives the bytes read from the I2C bus.
                 uint32_t size: This is the number of bytes to be read from the I2C bus.
 Parameters(out): None
 Return Value: t_MCP_Status: SUCCESS or FAILURE
 */
t_MCP_Status I2C_Read(uint8_t *ptrToRxBuf, uint32_t size)
{
    t_MCP_Status status = SUCCESS;

    if(STATUS_SUCCESS != LPI2C_DRV_MasterReceiveDataBlocking(INST_LPI2C1, ptrToRxBuf, size, true, I2C_TMOUT))
    {
        status = FAILURE;
    }
    return status;
}
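/*
 * Editor's note: a hypothetical usage sketch showing how the I2C_Read wrapper
 * above might be combined with the MCP9600's hot-junction temperature format
 * (16-bit two's complement, 0.0625 degC/LSB, per the datasheet as I recall it
 * -- verify before relying on it). It assumes MCP9600_cfg.h declares the types
 * used here and that the register pointer has already been set to the
 * hot-junction register; I2C_Write is not used for that 1-byte pointer write
 * because it always sends a fixed 8 bytes.
 */
#include <stdint.h>
#include "MCP9600_cfg.h"

float MCP9600_ReadHotJunction_Sketch(void)
{
    uint8_t raw[2] = { 0U, 0U };

    if (FAILURE == I2C_Read(raw, 2U)) {
        return -999.0f;                        /* sentinel on bus error */
    }

    /* big-endian register: high byte first; int16_t cast sign-extends negatives */
    int16_t counts = (int16_t)(((uint16_t)raw[0] << 8) | raw[1]);
    return (float)counts * 0.0625f;            /* 1 LSB = 1/16 degC */
}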
841641.c
/*! \file system_gd32f4xx.c \brief CMSIS Cortex-M4 Device Peripheral Access Layer Source File for GD32F4xx Device Series */ /* Copyright (c) 2012 ARM LIMITED All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of ARM nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------------------*/ /* This file refers the CMSIS standard, some adjustments are made according to GigaDevice chips */ #include "gd32f4xx.h" /* system frequency define */ #define __IRC16M (IRC16M_VALUE) /* internal 16 MHz RC oscillator frequency */ #define __HXTAL (HXTAL_VALUE) /* high speed crystal oscillator frequency */ #define __SYS_OSC_CLK (__IRC16M) /* main oscillator frequency */ /* select a system clock by uncommenting the following line */ //#define __SYSTEM_CLOCK_IRC16M (uint32_t)(__IRC16M) //#define __SYSTEM_CLOCK_HXTAL (uint32_t)(__HXTAL) //#define __SYSTEM_CLOCK_120M_PLL_IRC16M (uint32_t)(120000000) //#define __SYSTEM_CLOCK_120M_PLL_8M_HXTAL (uint32_t)(120000000) //#define __SYSTEM_CLOCK_120M_PLL_25M_HXTAL (uint32_t)(120000000) //#define __SYSTEM_CLOCK_168M_PLL_IRC16M (uint32_t)(168000000) //#define __SYSTEM_CLOCK_168M_PLL_8M_HXTAL (uint32_t)(168000000) //#define __SYSTEM_CLOCK_168M_PLL_25M_HXTAL (uint32_t)(168000000) //#define __SYSTEM_CLOCK_200M_PLL_IRC16M (uint32_t)(200000000) //#define __SYSTEM_CLOCK_200M_PLL_8M_HXTAL (uint32_t)(200000000) #define __SYSTEM_CLOCK_200M_PLL_25M_HXTAL (uint32_t)(200000000) #define SEL_IRC16M 0x00U #define SEL_HXTAL 0x01U #define SEL_PLLP 0x02U #define RCU_MODIFY {volatile uint32_t i; \ RCU_CFG0 |= RCU_AHB_CKSYS_DIV2; \ for(i=0;i<50000;i++);} /* set the system clock frequency and declare the system clock configuration function */ #ifdef __SYSTEM_CLOCK_IRC16M uint32_t SystemCoreClock = __SYSTEM_CLOCK_IRC16M; static void system_clock_16m_irc16m(void); #elif defined (__SYSTEM_CLOCK_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_HXTAL; static void system_clock_hxtal(void); #elif defined (__SYSTEM_CLOCK_120M_PLL_IRC16M) uint32_t SystemCoreClock = __SYSTEM_CLOCK_120M_PLL_IRC16M; static void system_clock_120m_irc16m(void); #elif defined (__SYSTEM_CLOCK_120M_PLL_8M_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_120M_PLL_8M_HXTAL; static 
void system_clock_120m_8m_hxtal(void); #elif defined (__SYSTEM_CLOCK_120M_PLL_25M_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_120M_PLL_25M_HXTAL; static void system_clock_120m_25m_hxtal(void); #elif defined (__SYSTEM_CLOCK_168M_PLL_IRC16M) uint32_t SystemCoreClock = __SYSTEM_CLOCK_168M_PLL_IRC16M; static void system_clock_168m_irc16m(void); #elif defined (__SYSTEM_CLOCK_168M_PLL_8M_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_168M_PLL_8M_HXTAL; static void system_clock_168m_8m_hxtal(void); #elif defined (__SYSTEM_CLOCK_168M_PLL_25M_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_168M_PLL_25M_HXTAL; static void system_clock_168m_25m_hxtal(void); #elif defined (__SYSTEM_CLOCK_200M_PLL_IRC16M) uint32_t SystemCoreClock = __SYSTEM_CLOCK_200M_PLL_IRC16M; static void system_clock_200m_irc16m(void); #elif defined (__SYSTEM_CLOCK_200M_PLL_8M_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_200M_PLL_8M_HXTAL; static void system_clock_200m_8m_hxtal(void); #elif defined (__SYSTEM_CLOCK_200M_PLL_25M_HXTAL) uint32_t SystemCoreClock = __SYSTEM_CLOCK_200M_PLL_25M_HXTAL; static void system_clock_200m_25m_hxtal(void); #endif /* __SYSTEM_CLOCK_IRC16M */ /* configure the system clock */ static void system_clock_config(void); /*! \brief setup the microcontroller system, initialize the system \param[in] none \param[out] none \retval none */ void SystemInit (void) { /* FPU settings ------------------------------------------------------------*/ #if (__FPU_PRESENT == 1) && (__FPU_USED == 1) SCB->CPACR |= ((3UL << 10*2)|(3UL << 11*2)); /* set CP10 and CP11 Full Access */ #endif /* Reset the RCU clock configuration to the default reset state ------------*/ /* Set IRC16MEN bit */ RCU_CTL |= RCU_CTL_IRC16MEN; RCU_MODIFY /* Reset CFG0 register */ RCU_CFG0 = 0x00000000U; /* Reset HXTALEN, CKMEN and PLLEN bits */ RCU_CTL &= ~(RCU_CTL_PLLEN | RCU_CTL_CKMEN | RCU_CTL_HXTALEN); /* Reset PLLCFGR register */ RCU_PLL = 0x24003010U; /* Reset HSEBYP bit */ RCU_CTL &= ~(RCU_CTL_HXTALBPS); /* Disable all interrupts */ RCU_INT = 0x00000000U; /* Configure the System clock source, PLL Multiplier and Divider factors, AHB/APBx prescalers and Flash settings ----------------------------------*/ system_clock_config(); } /*! \brief configure the system clock \param[in] none \param[out] none \retval none */ static void system_clock_config(void) { #ifdef __SYSTEM_CLOCK_IRC16M system_clock_16m_irc16m(); #elif defined (__SYSTEM_CLOCK_HXTAL) system_clock_hxtal(); #elif defined (__SYSTEM_CLOCK_120M_PLL_IRC16M) system_clock_120m_irc16m(); #elif defined (__SYSTEM_CLOCK_120M_PLL_8M_HXTAL) system_clock_120m_8m_hxtal(); #elif defined (__SYSTEM_CLOCK_120M_PLL_25M_HXTAL) system_clock_120m_25m_hxtal(); #elif defined (__SYSTEM_CLOCK_168M_PLL_IRC16M) system_clock_168m_irc16m(); #elif defined (__SYSTEM_CLOCK_168M_PLL_8M_HXTAL) system_clock_168m_8m_hxtal(); #elif defined (__SYSTEM_CLOCK_168M_PLL_25M_HXTAL) system_clock_168m_25m_hxtal(); #elif defined (__SYSTEM_CLOCK_200M_PLL_IRC16M) system_clock_200m_irc16m(); #elif defined (__SYSTEM_CLOCK_200M_PLL_8M_HXTAL) system_clock_200m_8m_hxtal(); #elif defined (__SYSTEM_CLOCK_200M_PLL_25M_HXTAL) system_clock_200m_25m_hxtal(); #endif /* __SYSTEM_CLOCK_IRC16M */ } #ifdef __SYSTEM_CLOCK_IRC16M /*! 
\brief configure the system clock to 16M by IRC16M \param[in] none \param[out] none \retval none */ static void system_clock_16m_irc16m(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable IRC16M */ RCU_CTL |= RCU_CTL_IRC16MEN; /* wait until IRC16M is stable or the startup time is longer than IRC16M_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_IRC16MSTB); }while((0U == stab_flag) && (IRC16M_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_IRC16MSTB)){ while(1){ } } /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV1; /* APB1 = AHB */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV1; /* select IRC16M as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_IRC16M; /* wait until IRC16M is selected as system clock */ while(0 != (RCU_CFG0 & RCU_SCSS_IRC16M)){ } } #elif defined (__SYSTEM_CLOCK_HXTAL) /*! \brief configure the system clock to HXTAL \param[in] none \param[out] none \retval none */ static void system_clock_hxtal(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_HXTALSTB); }while((0U == stab_flag) && (HXTAL_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV1; /* APB1 = AHB */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV1; /* select HXTAL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_HXTAL; /* wait until HXTAL is selected as system clock */ while(0 == (RCU_CFG0 & RCU_SCSS_HXTAL)){ } } #elif defined (__SYSTEM_CLOCK_120M_PLL_IRC16M) /*! \brief configure the system clock to 120M by PLL which selects IRC16M as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_120m_irc16m(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable IRC16M */ RCU_CTL |= RCU_CTL_IRC16MEN; /* wait until IRC16M is stable or the startup time is longer than IRC16M_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_IRC16MSTB); }while((0U == stab_flag) && (IRC16M_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_IRC16MSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* IRC16M is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 16, PLL_N = 240, PLL_P = 2, PLL_Q = 5 */ RCU_PLL = (16U | (240U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_IRC16M) | (5U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 120 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_120M_PLL_8M_HXTAL) /*! 
\brief configure the system clock to 120M by PLL which selects HXTAL(8M) as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_120m_8m_hxtal(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_HXTALSTB); }while((0U == stab_flag) && (HXTAL_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* HXTAL is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 8, PLL_N = 240, PLL_P = 2, PLL_Q = 5 */ RCU_PLL = (8U | (240U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_HXTAL) | (5U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 120 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_120M_PLL_25M_HXTAL) /*! \brief configure the system clock to 120M by PLL which selects HXTAL(25M) as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_120m_25m_hxtal(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_HXTALSTB); }while((0U == stab_flag) && (HXTAL_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* HXTAL is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 25, PLL_N = 240, PLL_P = 2, PLL_Q = 5 */ RCU_PLL = (25U | (240U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_HXTAL) | (5U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 120 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_168M_PLL_IRC16M) /*! 
\brief configure the system clock to 168M by PLL which selects IRC16M as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_168m_irc16m(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable IRC16M */ RCU_CTL |= RCU_CTL_IRC16MEN; /* wait until IRC16M is stable or the startup time is longer than IRC16M_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_IRC16MSTB); }while((0U == stab_flag) && (IRC16M_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_IRC16MSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* IRC16M is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 16, PLL_N = 336, PLL_P = 2, PLL_Q = 7 */ RCU_PLL = (16U | (336U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_IRC16M) | (7U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 168 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_168M_PLL_8M_HXTAL) /*! \brief configure the system clock to 168M by PLL which selects HXTAL(8M) as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_168m_8m_hxtal(void) { uint32_t timeout = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ while((0U == (RCU_CTL & RCU_CTL_HXTALSTB)) && (HXTAL_STARTUP_TIMEOUT != timeout++)){ } /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* HXTAL is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 8, PLL_N = 336, PLL_P = 2, PLL_Q = 7 */ RCU_PLL = (8U | (336 << 6U) | (((2 >> 1U) -1U) << 16U) | (RCU_PLLSRC_HXTAL) | (7 << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 168 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_168M_PLL_25M_HXTAL) /*! 
\brief configure the system clock to 168M by PLL which selects HXTAL(25M) as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_168m_25m_hxtal(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_HXTALSTB); }while((0U == stab_flag) && (HXTAL_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* HXTAL is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 25, PLL_N = 336, PLL_P = 2, PLL_Q = 7 */ RCU_PLL = (25U | (336U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_HXTAL) | (7U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 168 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_200M_PLL_IRC16M) /*! \brief configure the system clock to 200M by PLL which selects IRC16M as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_200m_irc16m(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable IRC16M */ RCU_CTL |= RCU_CTL_IRC16MEN; /* wait until IRC16M is stable or the startup time is longer than IRC16M_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_IRC16MSTB); }while((0U == stab_flag) && (IRC16M_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_IRC16MSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* IRC16M is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 16, PLL_N = 400, PLL_P = 2, PLL_Q = 9 */ RCU_PLL = (16U | (400U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_IRC16M) | (9U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 200 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_200M_PLL_8M_HXTAL) /*! 
\brief configure the system clock to 200M by PLL which selects HXTAL(8M) as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_200m_8m_hxtal(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_HXTALSTB); }while((0U == stab_flag) && (HXTAL_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* HXTAL is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 8, PLL_N = 400, PLL_P = 2, PLL_Q = 9 */ RCU_PLL = (8U | (400U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_HXTAL) | (9U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 200 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #elif defined (__SYSTEM_CLOCK_200M_PLL_25M_HXTAL) /*! \brief configure the system clock to 200M by PLL which selects HXTAL(25M) as its clock source \param[in] none \param[out] none \retval none */ static void system_clock_200m_25m_hxtal(void) { uint32_t timeout = 0U; uint32_t stab_flag = 0U; /* enable HXTAL */ RCU_CTL |= RCU_CTL_HXTALEN; /* wait until HXTAL is stable or the startup time is longer than HXTAL_STARTUP_TIMEOUT */ do{ timeout++; stab_flag = (RCU_CTL & RCU_CTL_HXTALSTB); }while((0U == stab_flag) && (HXTAL_STARTUP_TIMEOUT != timeout)); /* if fail */ if(0U == (RCU_CTL & RCU_CTL_HXTALSTB)){ while(1){ } } RCU_APB1EN |= RCU_APB1EN_PMUEN; PMU_CTL |= PMU_CTL_LDOVS; /* HXTAL is stable */ /* AHB = SYSCLK */ RCU_CFG0 |= RCU_AHB_CKSYS_DIV1; /* APB2 = AHB/2 */ RCU_CFG0 |= RCU_APB2_CKAHB_DIV2; /* APB1 = AHB/4 */ RCU_CFG0 |= RCU_APB1_CKAHB_DIV4; /* Configure the main PLL, PSC = 25, PLL_N = 400, PLL_P = 2, PLL_Q = 9 */ RCU_PLL = (25U | (400U << 6U) | (((2U >> 1U) - 1U) << 16U) | (RCU_PLLSRC_HXTAL) | (9U << 24U)); /* enable PLL */ RCU_CTL |= RCU_CTL_PLLEN; /* wait until PLL is stable */ while(0U == (RCU_CTL & RCU_CTL_PLLSTB)){ } /* Enable the high-drive to extend the clock frequency to 200 Mhz */ PMU_CTL |= PMU_CTL_HDEN; while(0U == (PMU_CS & PMU_CS_HDRF)){ } /* select the high-drive mode */ PMU_CTL |= PMU_CTL_HDS; while(0U == (PMU_CS & PMU_CS_HDSRF)){ } /* select PLL as system clock */ RCU_CFG0 &= ~RCU_CFG0_SCS; RCU_CFG0 |= RCU_CKSYSSRC_PLLP; /* wait until PLL is selected as system clock */ while(0U == (RCU_CFG0 & RCU_SCSS_PLLP)){ } } #endif /* __SYSTEM_CLOCK_IRC16M */ /*! 
\brief update the SystemCoreClock with current core clock retrieved from cpu registers \param[in] none \param[out] none \retval none */ void SystemCoreClockUpdate (void) { uint32_t sws; uint32_t pllpsc, plln, pllsel, pllp, ck_src, idx, clk_exp; /* exponent of AHB, APB1 and APB2 clock divider */ const uint8_t ahb_exp[16] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 6, 7, 8, 9}; sws = GET_BITS(RCU_CFG0, 2, 3); switch(sws){ /* IRC16M is selected as CK_SYS */ case SEL_IRC16M: SystemCoreClock = IRC16M_VALUE; break; /* HXTAL is selected as CK_SYS */ case SEL_HXTAL: SystemCoreClock = HXTAL_VALUE; break; /* PLLP is selected as CK_SYS */ case SEL_PLLP: /* get the value of PLLPSC[5:0] */ pllpsc = GET_BITS(RCU_PLL, 0U, 5U); plln = GET_BITS(RCU_PLL, 6U, 14U); pllp = (GET_BITS(RCU_PLL, 16U, 17U) + 1U) * 2U; /* PLL clock source selection, HXTAL or IRC8M/2 */ pllsel = (RCU_PLL & RCU_PLL_PLLSEL); if (RCU_PLLSRC_HXTAL == pllsel) { ck_src = HXTAL_VALUE; } else { ck_src = IRC16M_VALUE; } SystemCoreClock = ((ck_src / pllpsc) * plln)/pllp; break; /* IRC16M is selected as CK_SYS */ default: SystemCoreClock = IRC16M_VALUE; break; } /* calculate AHB clock frequency */ idx = GET_BITS(RCU_CFG0, 4, 7); clk_exp = ahb_exp[idx]; SystemCoreClock = SystemCoreClock >> clk_exp; }
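/*
 * Editor's note: a small standalone check of the PLL arithmetic used above.
 * For the selected __SYSTEM_CLOCK_200M_PLL_25M_HXTAL configuration the code
 * programs PSC = 25, PLL_N = 400, PLL_P = 2, and SystemCoreClockUpdate()
 * computes CK_SYS = (CK_SRC / PSC) * PLL_N / PLL_P, i.e.
 * (25 MHz / 25) * 400 / 2 = 200 MHz. Hypothetical helper, not part of the
 * GigaDevice file.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pll_output_hz(uint32_t src_hz, uint32_t psc,
                              uint32_t plln, uint32_t pllp)
{
    return ((src_hz / psc) * plln) / pllp;     /* same order as the SDK code */
}

int main(void)
{
    printf("%u Hz\n", (unsigned)pll_output_hz(25000000U, 25U, 400U, 2U));
    return 0;                                   /* prints 200000000 Hz */
}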
1001982.c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int globvar = 6;
char buf[] = "a write to stdout\n";

int main(void)
{
	int var;
	pid_t pid;

	var = 88;
	if (write(STDOUT_FILENO, buf, sizeof(buf) - 1) != sizeof(buf) - 1) {
		perror("Error");
		exit(1);
	}
	puts("before fork");

	if ((pid = fork()) < 0) {
		perror("Error");
		exit(1);
	} else if (pid == 0) {	/* child gets its own copies */
		globvar++;
		var++;
	} else {		/* parent waits so the child usually prints first */
		sleep(2);
	}

	printf("pid = %ld, glob = %d, var = %d\n", (long)getpid(), globvar, var);
	exit(0);
}
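The point of this program is that after fork() the parent and child each operate on their own copy of globvar and var, so the child's increments are never visible in the parent. A plausible run is sketched below as a comment; the PIDs are made up and the exact interleaving depends on scheduling (the parent's sleep(2) merely makes the child likely to print first).

/*
 * $ ./a.out
 * a write to stdout
 * before fork
 * pid = 430, glob = 7, var = 89    <- child's copies were incremented
 * pid = 429, glob = 6, var = 88    <- parent's copies are unchanged
 */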
866107.c
/*
 * RELIC is an Efficient LIbrary for Cryptography
 * Copyright (C) 2007, 2008, 2009 RELIC Authors
 *
 * This file is part of RELIC. RELIC is legal property of its developers,
 * whose names are not listed here. Please refer to the COPYRIGHT file
 * for contact information.
 *
 * RELIC is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * RELIC is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with RELIC. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * @file
 *
 * Implementation of the low-level modular reduction functions.
 *
 * @version $Id: relic_fb_rdc_low.c 1538 2013-08-28 04:26:56Z dfaranha $
 * @ingroup fb
 */

#include "relic_fb.h"
#include "relic_fb_low.h"
#include "relic_util.h"

/*============================================================================*/
/* Public definitions                                                         */
/*============================================================================*/

void fb_rdc1_low(dig_t *c, dig_t *a) {
	int fa, fb, fc;
	int sh, lh, rh, sa, la, ra, sb, lb, rb, sc, lc, rc;
	dig_t d;

	fb_poly_get_rdc(&fa, &fb, &fc);

	sh = lh = rh = sa = la = ra = sb = lb = rb = sc = lc = rc = 0;

	SPLIT(rh, sh, FB_BITS, FB_DIG_LOG);
	sh++;
	lh = FB_DIGIT - rh;

	SPLIT(ra, sa, FB_BITS - fa, FB_DIG_LOG);
	sa++;
	la = FB_DIGIT - ra;

	if (fb != -1) {
		SPLIT(rb, sb, FB_BITS - fb, FB_DIG_LOG);
		sb++;
		lb = FB_DIGIT - rb;

		SPLIT(rc, sc, FB_BITS - fc, FB_DIG_LOG);
		sc++;
		lc = FB_DIGIT - rc;
	}

	d = a[FB_DIGS];
	a[FB_DIGS] = 0;

	if (rh == 0) {
		a[FB_DIGS - sh + 1] ^= d;
	} else {
		a[FB_DIGS - sh + 1] ^= (d >> rh);
		a[FB_DIGS - sh] ^= (d << lh);
	}

	if (ra == 0) {
		a[FB_DIGS - sa + 1] ^= d;
	} else {
		a[FB_DIGS - sa + 1] ^= (d >> ra);
		a[FB_DIGS - sa] ^= (d << la);
	}

	if (fb != -1) {
		if (rb == 0) {
			a[FB_DIGS - sb + 1] ^= d;
		} else {
			a[FB_DIGS - sb + 1] ^= (d >> rb);
			a[FB_DIGS - sb] ^= (d << lb);
		}
		if (rc == 0) {
			a[FB_DIGS - sc + 1] ^= d;
		} else {
			a[FB_DIGS - sc + 1] ^= (d >> rc);
			a[FB_DIGS - sc] ^= (d << lc);
		}
	}

	d = a[sh - 1] >> rh;

	if (d != 0) {
		a[0] ^= d;
		d <<= rh;

		if (ra == 0) {
			a[sh - sa] ^= d;
		} else {
			a[sh - sa] ^= (d >> ra);
			if (sh > sa) {
				a[sh - sa - 1] ^= (d << la);
			}
		}

		if (fb != -1) {
			if (rb == 0) {
				a[sh - sb] ^= d;
			} else {
				a[sh - sb] ^= (d >> rb);
				if (sh > sb) {
					a[sh - sb - 1] ^= (d << lb);
				}
			}
			if (rc == 0) {
				a[sh - sc] ^= d;
			} else {
				a[sh - sc] ^= (d >> rc);
				if (sh > sc) {
					a[sh - sc - 1] ^= (d << lc);
				}
			}
		}
		a[sh - 1] ^= d;
	}

	fb_copy(c, a);
}
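fb_rdc1_low() applies a shift-and-XOR reduction one word at a time, at the bit offsets derived from the reduction polynomial returned by fb_poly_get_rdc(). As a much smaller, self-contained illustration of the same technique (this is not RELIC's API, just a toy), the sketch below reduces a carry-less 8-bit product bit by bit modulo the AES polynomial x^8 + x^4 + x^3 + x + 1:

#include <stdint.h>
#include <stdio.h>

/* carry-less (polynomial) product of two degree < 8 polynomials over GF(2) */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
	uint16_t r = 0;

	for (int i = 0; i < 8; i++) {
		if (b & (1u << i)) {
			r ^= (uint16_t)((uint16_t)a << i);
		}
	}
	return r;
}

/* reduce a degree < 15 polynomial modulo x^8 + x^4 + x^3 + x + 1 (0x11B) */
static uint8_t reduce_aes(uint16_t v)
{
	for (int i = 14; i >= 8; i--) {
		if (v & (1u << i)) {
			v ^= (uint16_t)(0x11Bu << (i - 8));	/* XOR the shifted modulus, clearing bit i */
		}
	}
	return (uint8_t)v;
}

int main(void)
{
	/* {57} * {83} = {c1} in GF(2^8), the worked example from FIPS-197 */
	printf("%02x\n", reduce_aes(clmul8(0x57, 0x83)));
	return 0;
}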
948634.c
/*
 * OF helpers for the GPIO API
 *
 * Copyright (c) 2007-2008  MontaVista Software, Inc.
 *
 * Author: Anton Vorontsov <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>

/**
 * of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
 * @np:		device node to get GPIO from
 * @propname:	property name containing gpio specifier(s)
 * @index:	index of the GPIO
 * @flags:	a flags pointer to fill in
 *
 * Returns the GPIO number to use with the Linux generic GPIO API, or a
 * negative errno value on error. If @flags is not NULL the function also
 * fills in flags for the GPIO.
 */
int of_get_named_gpio_flags(struct device_node *np, const char *propname,
			    int index, enum of_gpio_flags *flags)
{
	int ret;
	struct gpio_chip *gc;
	struct of_phandle_args gpiospec;

	ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
					 &gpiospec);
	if (ret) {
		pr_debug("%s: can't parse gpios property\n", __func__);
		goto err0;
	}

	gc = of_node_to_gpiochip(gpiospec.np);
	if (!gc) {
		pr_debug("%s: gpio controller %s isn't registered\n",
			 np->full_name, gpiospec.np->full_name);
		ret = -ENODEV;
		goto err1;
	}

	if (gpiospec.args_count != gc->of_gpio_n_cells) {
		pr_debug("%s: wrong #gpio-cells for %s\n",
			 np->full_name, gpiospec.np->full_name);
		ret = -EINVAL;
		goto err1;
	}

	/* .xlate might decide to not fill in the flags, so clear it. */
	if (flags)
		*flags = 0;

	ret = gc->of_xlate(gc, &gpiospec, flags);
	if (ret < 0)
		goto err1;

	ret += gc->base;
err1:
	of_node_put(gpiospec.np);
err0:
	pr_debug("%s exited with status %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);

/**
 * of_gpio_count - Count GPIOs for a device
 * @np:		device node to count GPIOs for
 *
 * The function returns the count of GPIOs specified for a node.
 *
 * Note that empty GPIO specifiers count too. For example,
 *
 * gpios = <0
 *          &pio1 1 2
 *          0
 *          &pio2 3 4>;
 *
 * defines four GPIOs (so this function will return 4), two of which
 * are not specified.
 */
unsigned int of_gpio_count(struct device_node *np)
{
	unsigned int cnt = 0;

	do {
		int ret;

		ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells",
						 cnt, NULL);
		/* A hole in the gpios = <> counts anyway. */
		if (ret < 0 && ret != -EEXIST)
			break;
	} while (++cnt);

	return cnt;
}
EXPORT_SYMBOL(of_gpio_count);

/**
 * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags
 * @gc:		pointer to the gpio_chip structure
 * @np:		device node of the GPIO chip
 * @gpio_spec:	gpio specifier as found in the device tree
 * @flags:	a flags pointer to fill in
 *
 * This is a simple translation function, suitable for most 1:1 mapped
 * gpio chips. This function performs only one sanity check: whether gpio
 * is less than ngpios (that is specified in the gpio_chip).
 */
int of_gpio_simple_xlate(struct gpio_chip *gc,
			 const struct of_phandle_args *gpiospec, u32 *flags)
{
	/*
	 * We're discouraging gpio_cells < 2, since that way you'll have to
	 * write your own xlate function (that will have to retrieve the GPIO
	 * number and the flags from a single gpio cell -- this is possible,
	 * but not recommended).
	 */
	if (gc->of_gpio_n_cells < 2) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
		return -EINVAL;

	if (gpiospec->args[0] >= gc->ngpio)
		return -EINVAL;

	if (flags)
		*flags = gpiospec->args[1];

	return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);

/**
 * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank)
 * @np:		device node of the GPIO chip
 * @mm_gc:	pointer to the of_mm_gpio_chip allocated structure
 *
 * To use this function you should allocate and fill mm_gc with:
 *
 * 1) In the gpio_chip structure:
 *    - all the callbacks
 *    - of_gpio_n_cells
 *    - of_xlate callback (optional)
 *
 * 2) In the of_mm_gpio_chip structure:
 *    - save_regs callback (optional)
 *
 * On success, this function will map the bank's memory and do all the
 * necessary work for you. You'll then be able to use .regs to manage
 * GPIOs from the callbacks.
 */
int of_mm_gpiochip_add(struct device_node *np,
		       struct of_mm_gpio_chip *mm_gc)
{
	int ret = -ENOMEM;
	struct gpio_chip *gc = &mm_gc->gc;

	gc->label = kstrdup(np->full_name, GFP_KERNEL);
	if (!gc->label)
		goto err0;

	mm_gc->regs = of_iomap(np, 0);
	if (!mm_gc->regs)
		goto err1;

	gc->base = -1;

	if (mm_gc->save_regs)
		mm_gc->save_regs(mm_gc);

	mm_gc->gc.of_node = np;

	ret = gpiochip_add(gc);
	if (ret)
		goto err2;

	return 0;
err2:
	iounmap(mm_gc->regs);
err1:
	kfree(gc->label);
err0:
	pr_err("%s: GPIO chip registration failed with status %d\n",
	       np->full_name, ret);
	return ret;
}
EXPORT_SYMBOL(of_mm_gpiochip_add);

void of_gpiochip_add(struct gpio_chip *chip)
{
	if ((!chip->of_node) && (chip->dev))
		chip->of_node = chip->dev->of_node;

	if (!chip->of_node)
		return;

	if (!chip->of_xlate) {
		chip->of_gpio_n_cells = 2;
		chip->of_xlate = of_gpio_simple_xlate;
	}

	of_node_get(chip->of_node);
}

void of_gpiochip_remove(struct gpio_chip *chip)
{
	if (chip->of_node)
		of_node_put(chip->of_node);
}

/* Private function for resolving node pointer to gpio_chip */
static int of_gpiochip_is_match(struct gpio_chip *chip, void *data)
{
	return chip->of_node == data;
}

struct gpio_chip *of_node_to_gpiochip(struct device_node *np)
{
	return gpiochip_find(np, of_gpiochip_is_match);
}
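A hedged usage sketch (the function name and the "reset-gpios" property are illustrative, not taken from this file; it assumes <linux/gpio.h> and <linux/of_gpio.h> from the same kernel era): a driver's probe path would typically resolve a named GPIO with of_get_named_gpio_flags() and then claim it through the legacy gpio_* API.

/* hypothetical probe fragment */
static int example_claim_reset_gpio(struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio;
	int ret;

	gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
	if (gpio < 0)
		return gpio;		/* propagate the error (e.g. -ENODEV, -EINVAL) */

	ret = gpio_request(gpio, "example-reset");
	if (ret)
		return ret;

	/* honor the active-low flag filled in by the chip's xlate callback */
	return gpio_direction_output(gpio, (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0);
}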
192715.c
/**
 * @file base_test.c
 * @brief Unit tests for the base.h file.
 * @author Dominique LaSalle <[email protected]>
 * Copyright 2016
 * @version 1
 * @date 2016-07-10
 */

#include "test.h"
#include "graph.h"
#include "base.h"

int test(void)
{
  graph_type graph;
  graphdist_type dist;

  dist.nthreads = 7;
  dist.shift = 5;
  dist.offset = 1 << dist.shift;
  dist.mask = dist.offset - 1;

  graph.dist = dist;

  /* gvtx_to_lvtx test */
  TESTEQUALS(0,gvtx_to_lvtx(dist.offset,dist),"%"PF_VTX_T);
  TESTEQUALS(0,gvtx_to_lvtx(dist.offset*2,dist),"%"PF_VTX_T);
  TESTEQUALS(5,gvtx_to_lvtx(dist.offset*2+5,dist),"%"PF_VTX_T);

  /* lvtx_to_gvtx test */
  TESTEQUALS(dist.offset,lvtx_to_gvtx(0,0,dist),"%"PF_VTX_T);
  TESTEQUALS(2*dist.offset,lvtx_to_gvtx(0,1,dist),"%"PF_VTX_T);
  TESTEQUALS(dist.offset+10,lvtx_to_gvtx(10,0,dist),"%"PF_VTX_T);

  /* gvtx_to_tid test */
  TESTEQUALS(0,gvtx_to_tid(dist.offset,dist),"%"PF_TID_T);
  TESTEQUALS(1,gvtx_to_tid(dist.offset*2,dist),"%"PF_TID_T);
  TESTEQUALS(2,gvtx_to_tid(dist.offset*3+10,dist),"%"PF_TID_T);

  /* max_gvtx test */
  TESTEQUALS((dist.nthreads+1)*(dist.offset),max_gvtx(&graph),"%"PF_VTX_T);

  return 0;
}
333389.c
/* vi: set sw=4 ts=4: */
/*
 * Utility routines.
 *
 * Copyright (C) 2008 Bernhard Reutner-Fischer
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
#include "libbb.h"

/* Open file and write string str to it, close file.
 * Die on any open or write error.
 */
void FAST_FUNC xopen_xwrite_close(const char *file, const char *str)
{
	int fd = xopen(file, O_WRONLY);

	xwrite_str(fd, str);
	close(fd);
}
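A hedged usage sketch, with a hypothetical sysfs path and value: applets typically use this helper for one-shot writes to kernel interfaces where any open or write failure should abort the applet (xopen() and xwrite_str() die on error).

/* hypothetical call site inside an applet */
xopen_xwrite_close("/sys/class/leds/led0/brightness", "1");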