filename: string (length 3–9)
code: string (length 4–1.05M)
395267.c
/** * @file * Ethernet Interface Skeleton * */ /* * Copyright (c) 2001-2004 Swedish Institute of Computer Science. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. * * This file is part of the lwIP TCP/IP stack. * * Author: Adam Dunkels <[email protected]> * */ /* * This file is a skeleton for developing Ethernet network interface * drivers for lwIP. Add code to the low_level functions and do a * search-and-replace for the word "ethernetif" to replace it with * something that better describes your network interface. */ #include "lwip/opt.h" #if 0 /* don't build, this is only a skeleton, see previous comment */ #include "lwip/def.h" #include "lwip/mem.h" #include "lwip/pbuf.h" #include "lwip/sys.h" #include <lwip/stats.h> #include <lwip/snmp.h> #include "netif/etharp.h" #include "netif/ppp_oe.h" /* Define those to better describe your network interface. */ #define IFNAME0 'e' #define IFNAME1 'n' /** * Helper struct to hold private data used to operate your ethernet interface. * Keeping the ethernet address of the MAC in this struct is not necessary * as it is already kept in the struct netif. * But this is only an example, anyway... */ struct ethernetif { struct eth_addr *ethaddr; /* Add whatever per-interface state that is needed here. */ }; /* Forward declarations. */ static void ethernetif_input(struct netif *netif); /** * In this function, the hardware should be initialized. * Called from ethernetif_init(). * * @param netif the already initialized lwip network interface structure * for this ethernetif */ static void low_level_init(struct netif *netif) { struct ethernetif *ethernetif = netif->state; /* set MAC hardware address length */ netif->hwaddr_len = ETHARP_HWADDR_LEN; /* set MAC hardware address */ netif->hwaddr[0] =; ... netif->hwaddr[5] =; /* maximum transfer unit */ netif->mtu = 1500; /* device capabilities */ /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */ netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP; /* Do whatever else is needed to initialize interface. */ } /** * This function should do the actual transmission of the packet. The packet is * contained in the pbuf that is passed to the function. 
This pbuf * might be chained. * * @param netif the lwip network interface structure for this ethernetif * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) * @return ERR_OK if the packet could be sent * an err_t value if the packet couldn't be sent * * @note Returning ERR_MEM here if a DMA queue of your MAC is full can lead to * strange results. You might consider waiting for space in the DMA queue * to become availale since the stack doesn't retry to send a packet * dropped because of memory failure (except for the TCP timers). */ static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct ethernetif *ethernetif = netif->state; struct pbuf *q; initiate transfer(); #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif for (q = p; q != NULL; q = q->next) { /* Send the data from the pbuf to the interface, one pbuf at a time. The size of the data in each pbuf is kept in the ->len variable. */ send data from(q->payload, q->len); } signal that packet should be sent(); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.xmit); return ERR_OK; } /** * Should allocate a pbuf and transfer the bytes of the incoming * packet from the interface into the pbuf. * * @param netif the lwip network interface structure for this ethernetif * @return a pbuf filled with the received packet (including MAC header) * NULL on memory error */ static struct pbuf *low_level_input(struct netif *netif) { struct ethernetif *ethernetif = netif->state; struct pbuf *p, *q; u16_t len; /* Obtain the size of the packet and put it into the "len" variable. */ len =; #if ETH_PAD_SIZE len += ETH_PAD_SIZE; /* allow room for Ethernet padding */ #endif /* We allocate a pbuf chain of pbufs from the pool. */ p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL); if (p != NULL) { #if ETH_PAD_SIZE pbuf_header(p, -ETH_PAD_SIZE); /* drop the padding word */ #endif /* We iterate over the pbuf chain until we have read the entire * packet into the pbuf. */ for (q = p; q != NULL; q = q->next) { /* Read enough bytes to fill this pbuf in the chain. The * available data in the pbuf is given by the q->len * variable. */ read data into(q->payload, q->len); } acknowledge that packet has been read(); #if ETH_PAD_SIZE pbuf_header(p, ETH_PAD_SIZE); /* reclaim the padding word */ #endif LINK_STATS_INC(link.recv); } else { drop packet(); LINK_STATS_INC(link.memerr); LINK_STATS_INC(link.drop); } return p; } /** * This function should be called when a packet is ready to be read * from the interface. It uses the function low_level_input() that * should handle the actual reception of bytes from the network * interface. Then the type of the received packet is determined and * the appropriate input function is called. * * @param netif the lwip network interface structure for this ethernetif */ static void ethernetif_input(struct netif *netif) { struct ethernetif *ethernetif; struct eth_hdr *ethhdr; struct pbuf *p; ethernetif = netif->state; /* move received packet into a new pbuf */ p = low_level_input(netif); /* no packet could be read, silently ignore this */ if (p == NULL) return; /* points to packet payload, which starts with an Ethernet header */ ethhdr = p->payload; switch (htons(ethhdr->type)) { /* IP or ARP packet? */ case ETHTYPE_IP: case ETHTYPE_ARP: #if PPPOE_SUPPORT /* PPPoE packet? 
*/ case ETHTYPE_PPPOEDISC: case ETHTYPE_PPPOE: #endif /* PPPOE_SUPPORT */ /* full packet send to tcpip_thread to process */ if (netif->input(p, netif) != ERR_OK) { LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n")); pbuf_free(p); p = NULL; } break; default: pbuf_free(p); p = NULL; break; } } /** * Should be called at the beginning of the program to set up the * network interface. It calls the function low_level_init() to do the * actual setup of the hardware. * * This function should be passed as a parameter to netif_add(). * * @param netif the lwip network interface structure for this ethernetif * @return ERR_OK if the loopif is initialized * ERR_MEM if private data couldn't be allocated * any other err_t on error */ err_t ethernetif_init(struct netif *netif) { struct ethernetif *ethernetif; LWIP_ASSERT("netif != NULL", (netif != NULL)); ethernetif = mem_malloc(sizeof(struct ethernetif)); if (ethernetif == NULL) { LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_init: out of memory\n")); return ERR_MEM; } #if LWIP_NETIF_HOSTNAME /* Initialize interface hostname */ netif->hostname = "lwip"; #endif /* LWIP_NETIF_HOSTNAME */ /* * Initialize the snmp variables and counters inside the struct netif. * The last argument should be replaced with your link speed, in units * of bits per second. */ NETIF_INIT_SNMP(netif, snmp_ifType_ethernet_csmacd, LINK_SPEED_OF_YOUR_NETIF_IN_BPS); netif->state = ethernetif; netif->name[0] = IFNAME0; netif->name[1] = IFNAME1; /* We directly use etharp_output() here to save a function call. * You can instead declare your own function an call etharp_output() * from it if you have to do some checks before sending (e.g. if link * is available...) */ netif->output = etharp_output; netif->linkoutput = low_level_output; ethernetif->ethaddr = (struct eth_addr *) &(netif->hwaddr[0]); /* initialize the hardware */ low_level_init(netif); return ERR_OK; } #endif /* 0 */
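The skeleton's own comment says ethernetif_init() "should be passed as a parameter to netif_add()". Below is a minimal bring-up sketch under that assumption, using the sequential (tcpip_thread) API; the address typedef (ip_addr_t vs. struct ip_addr), the chosen addresses and the network_bringup() helper name are illustrative and depend on the lwIP version, not on this file.

#include "lwip/netif.h"
#include "lwip/tcpip.h"

err_t ethernetif_init(struct netif *netif);   /* from the (adapted) skeleton above */

static struct netif my_netif;                 /* storage owned by the application */

void network_bringup(void)                    /* hypothetical application helper */
{
    ip_addr_t ipaddr, netmask, gw;            /* struct ip_addr on older lwIP trees */

    IP4_ADDR(&ipaddr,  192, 168,   0,  10);
    IP4_ADDR(&netmask, 255, 255, 255,   0);
    IP4_ADDR(&gw,      192, 168,   0,   1);

    /* ethernetif_init() runs low_level_init() and installs the output hooks;
     * tcpip_input() hands received pbufs over to the tcpip_thread. */
    netif_add(&my_netif, &ipaddr, &netmask, &gw, NULL, ethernetif_init, tcpip_input);
    netif_set_default(&my_netif);
    netif_set_up(&my_netif);
}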
928652.c
/* * Copyright (c) 2017-2019, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause * */ #include "mpu_armv8m_drv.h" #include CMSIS_device_header /* * FixMe: * This is a beta quality driver for MPU in v8M. To be finalized. */ enum mpu_armv8m_error_t mpu_armv8m_enable(struct mpu_armv8m_dev_t *dev, uint32_t privdef_en, uint32_t hfnmi_en) { /*No error checking*/ MPU_Type *mpu = (MPU_Type *)dev->base; /* * FixMe: Set 3 pre-defined MAIR_ATTR for memory. The attributes come * from default memory map, need to check if fine-tune is necessary. * * MAIR0_0: Peripheral, Device-nGnRE. * MAIR0_1: Code, WT RA. Same attr for Outer and Inner. * MAIR0_2: SRAM, WBWA RA. Same attr for Outer and Inner. */ mpu->MAIR0 = (MPU_ARMV8M_MAIR_ATTR_DEVICE_VAL << MPU_MAIR0_Attr0_Pos) | (MPU_ARMV8M_MAIR_ATTR_CODE_VAL << MPU_MAIR0_Attr1_Pos) | (MPU_ARMV8M_MAIR_ATTR_DATA_VAL << MPU_MAIR0_Attr2_Pos); mpu->CTRL = (privdef_en ? MPU_CTRL_PRIVDEFENA_Msk : 0) | (hfnmi_en ? MPU_CTRL_HFNMIENA_Msk : 0); /*Ensure all configuration is written before enable*/ mpu->CTRL |= MPU_CTRL_ENABLE_Msk; /* Enable MPU before next instruction */ __DSB(); __ISB(); return MPU_ARMV8M_OK; } enum mpu_armv8m_error_t mpu_armv8m_disable(struct mpu_armv8m_dev_t *dev) { MPU_Type *mpu = (MPU_Type *)dev->base; /* Reset all fields as enable does full setup */ mpu->CTRL = 0; return MPU_ARMV8M_OK; } enum mpu_armv8m_error_t mpu_armv8m_region_enable( struct mpu_armv8m_dev_t *dev, struct mpu_armv8m_region_cfg_t *region_cfg) { MPU_Type *mpu = (MPU_Type *)dev->base; enum mpu_armv8m_error_t ret_val = MPU_ARMV8M_OK; uint32_t ctrl_before; uint32_t base_cfg; uint32_t limit_cfg; /*FIXME : Add complete error checking*/ if ((region_cfg->region_base & ~MPU_RBAR_BASE_Msk) != 0) { return MPU_ARMV8M_ERROR; } /* region_limit doesn't need to be aligned but the scatter * file needs to be setup to ensure that partitions do not overlap. */ ctrl_before = mpu->CTRL; mpu->CTRL = 0; mpu->RNR = region_cfg->region_nr & MPU_RNR_REGION_Msk; /* This 0s the lower bits of the base address */ base_cfg = region_cfg->region_base & MPU_RBAR_BASE_Msk; base_cfg |= (region_cfg->attr_sh << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk; base_cfg |= (region_cfg->attr_access << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk; base_cfg |= (region_cfg->attr_exec << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk; mpu->RBAR = base_cfg; /*This 0s the lower bits of base address but they are treated as 1 */ limit_cfg = (region_cfg->region_limit-1) & MPU_RLAR_LIMIT_Msk; limit_cfg |= (region_cfg->region_attridx << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk; limit_cfg |= MPU_RLAR_EN_Msk; mpu->RLAR = limit_cfg; /*Restore main MPU control*/ mpu->CTRL = ctrl_before; /* Enable MPU before the next instruction */ __DSB(); __ISB(); return ret_val; } enum mpu_armv8m_error_t mpu_armv8m_region_disable( struct mpu_armv8m_dev_t *dev, uint32_t region_nr) { MPU_Type *mpu = (MPU_Type *)dev->base; enum mpu_armv8m_error_t ret_val = MPU_ARMV8M_OK; uint32_t ctrl_before; /*FIXME : Add complete error checking*/ ctrl_before = mpu->CTRL; mpu->CTRL = 0; mpu->RNR = region_nr & MPU_RNR_REGION_Msk; mpu->RBAR = 0; mpu->RLAR = 0; /*Restore main MPU control*/ mpu->CTRL = ctrl_before; return ret_val; } enum mpu_armv8m_error_t mpu_armv8m_clean(struct mpu_armv8m_dev_t *dev) { MPU_Type *mpu = (MPU_Type *)dev->base; uint32_t i = (mpu->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos; while (i > 0) { mpu_armv8m_region_disable(dev, i-1); i--; } return MPU_ARMV8M_OK; }
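A hedged usage sketch of this driver follows: configure one code region, then turn the MPU on. Only the struct fields and the call order come from the driver above; the attribute enum names (MPU_ARMV8M_MAIR_ATTR_CODE_IDX, MPU_ARMV8M_AP_RO_PRIV_UNPRIV, MPU_ARMV8M_SH_NONE, MPU_ARMV8M_XN_EXEC_OK), the dev_t initializer and MPU_BASE (from the CMSIS device header) are assumptions.

#include "mpu_armv8m_drv.h"

static struct mpu_armv8m_dev_t dev_mpu = { MPU_BASE };   /* MPU_BASE: assumed CMSIS constant */

void setup_code_region(void)                              /* hypothetical helper */
{
    struct mpu_armv8m_region_cfg_t cfg = {
        .region_nr      = 0,
        .region_base    = 0x10000000,         /* must clear the bits outside MPU_RBAR_BASE_Msk */
        .region_limit   = 0x10080000,         /* exclusive end; the driver subtracts 1 for RLAR */
        .region_attridx = MPU_ARMV8M_MAIR_ATTR_CODE_IDX,  /* MAIR0_1 = code, WT RA (set in enable) */
        .attr_access    = MPU_ARMV8M_AP_RO_PRIV_UNPRIV,
        .attr_sh        = MPU_ARMV8M_SH_NONE,
        .attr_exec      = MPU_ARMV8M_XN_EXEC_OK,
    };

    /* region setup first, then the global enable which also programs MAIR0 */
    mpu_armv8m_region_enable(&dev_mpu, &cfg);
    mpu_armv8m_enable(&dev_mpu, 1 /* privdef_en */, 0 /* hfnmi_en */);
}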
555491.c
/// @file /// Test creation, transpose use, and destruction of a multicomponent element restriction /// \test Test creation, transpose use, and destruction of a multicomponent element restriction #include <ceed.h> #include <ceed-backend.h> int main(int argc, char **argv) { Ceed ceed; CeedVector x, y; CeedInt ne = 5; CeedInt ind[2*ne]; CeedInt layout[3]; CeedScalar mult; CeedScalar a[2*(ne*2)]; const CeedScalar *yy; CeedElemRestriction r; CeedInit(argv[1], &ceed); // Setup CeedVectorCreate(ceed, 2*(ne*2), &x); for (CeedInt i=0; i<ne; i++) { ind[2*i+0] = i; ind[2*i+1] = i+1; } CeedElemRestrictionCreate(ceed, ne, 2, 2, ne+1, 2*(ne+1), CEED_MEM_HOST, CEED_USE_POINTER, ind, &r); CeedVectorCreate(ceed, 2*(ne+1), &y); CeedVectorSetValue(y, 0); // Allocates array // Set x data in backend E-layout CeedElemRestrictionGetELayout(r, &layout); for (CeedInt i=0; i<2; i++) // Node for (CeedInt j=0; j<2; j++) // Component for (CeedInt k=0; k<ne; k++) // Element a[i*layout[0] + j*layout[1] + k*layout[2]] = 10*j+(2*k+i+1)/2; CeedVectorSetArray(x, CEED_MEM_HOST, CEED_USE_POINTER, a); // Restrict CeedElemRestrictionApply(r, CEED_TRANSPOSE, x, y, CEED_REQUEST_IMMEDIATE); // Check CeedVectorGetArrayRead(y, CEED_MEM_HOST, &yy); for (CeedInt i=0; i<ne+1; i++) { mult = i>0&&i<ne ? 2 : 1; if (yy[i] != i*mult) // LCOV_EXCL_START printf("Error in restricted array y[%d] = %f != %f\n", i, (double)yy[i], i*mult); // LCOV_EXCL_STOP if (yy[i+ne+1] != (10+i)*mult) // LCOV_EXCL_START printf("Error in restricted array y[%d] = %f != %f\n", i+ne+1, (double)yy[i+ne+1], (10.+i)*mult); // LCOV_EXCL_STOP } CeedVectorRestoreArrayRead(y, &yy); CeedVectorDestroy(&x); CeedVectorDestroy(&y); CeedElemRestrictionDestroy(&r); CeedDestroy(&ceed); return 0; }
593519.c
/* PipeWire * * Copyright © 2018 Wim Taymans * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <string.h> #include "pipewire/private.h" #include "pipewire/protocol.h" #include "pipewire/resource.h" #include "pipewire/type.h" #include <spa/debug/types.h> #define NAME "resource" /** \cond */ struct impl { struct pw_resource this; }; /** \endcond */ SPA_EXPORT struct pw_resource *pw_resource_new(struct pw_impl_client *client, uint32_t id, uint32_t permissions, const char *type, uint32_t version, size_t user_data_size) { struct impl *impl; struct pw_resource *this; int res; impl = calloc(1, sizeof(struct impl) + user_data_size); if (impl == NULL) return NULL; this = &impl->this; this->context = client->context; this->client = client; this->permissions = permissions; this->type = type; this->version = version; this->bound_id = SPA_ID_INVALID; spa_hook_list_init(&this->listener_list); spa_hook_list_init(&this->object_listener_list); if (id == SPA_ID_INVALID) { res = -EINVAL; goto error_clean; } if ((res = pw_map_insert_at(&client->objects, id, this)) < 0) { pw_log_error(NAME" %p: can't add id %u for client %p: %s", this, id, client, spa_strerror(res)); goto error_clean; } this->id = id; if ((res = pw_resource_install_marshal(this, false)) < 0) { pw_log_error(NAME" %p: no marshal for type %s/%d: %s", this, type, version, spa_strerror(res)); goto error_clean; } if (user_data_size > 0) this->user_data = SPA_MEMBER(impl, sizeof(struct impl), void); pw_log_debug(NAME" %p: new %u type %s/%d client:%p marshal:%p", this, id, type, version, client, this->marshal); pw_impl_client_emit_resource_added(client, this); return this; error_clean: free(impl); errno = -res; return NULL; } SPA_EXPORT int pw_resource_install_marshal(struct pw_resource *this, bool implementor) { struct pw_impl_client *client = this->client; const struct pw_protocol_marshal *marshal; marshal = pw_protocol_get_marshal(client->protocol, this->type, this->version, implementor ? 
PW_PROTOCOL_MARSHAL_FLAG_IMPL : 0); if (marshal == NULL) return -EPROTO; this->marshal = marshal; this->type = marshal->type; this->impl = SPA_INTERFACE_INIT( this->type, this->marshal->version, this->marshal->server_marshal, this); return 0; } SPA_EXPORT struct pw_impl_client *pw_resource_get_client(struct pw_resource *resource) { return resource->client; } SPA_EXPORT uint32_t pw_resource_get_id(struct pw_resource *resource) { return resource->id; } SPA_EXPORT uint32_t pw_resource_get_permissions(struct pw_resource *resource) { return resource->permissions; } SPA_EXPORT const char *pw_resource_get_type(struct pw_resource *resource, uint32_t *version) { if (version) *version = resource->version; return resource->type; } SPA_EXPORT struct pw_protocol *pw_resource_get_protocol(struct pw_resource *resource) { return resource->client->protocol; } SPA_EXPORT void *pw_resource_get_user_data(struct pw_resource *resource) { return resource->user_data; } SPA_EXPORT void pw_resource_add_listener(struct pw_resource *resource, struct spa_hook *listener, const struct pw_resource_events *events, void *data) { spa_hook_list_append(&resource->listener_list, listener, events, data); } SPA_EXPORT void pw_resource_add_object_listener(struct pw_resource *resource, struct spa_hook *listener, const void *funcs, void *data) { spa_hook_list_append(&resource->object_listener_list, listener, funcs, data); } SPA_EXPORT struct spa_hook_list *pw_resource_get_object_listeners(struct pw_resource *resource) { return &resource->object_listener_list; } SPA_EXPORT const struct pw_protocol_marshal *pw_resource_get_marshal(struct pw_resource *resource) { return resource->marshal; } SPA_EXPORT int pw_resource_ping(struct pw_resource *resource, int seq) { int res = -EIO; struct pw_impl_client *client = resource->client; if (client->core_resource != NULL) { pw_core_resource_ping(client->core_resource, resource->id, seq); res = client->send_seq; pw_log_debug(NAME" %p: %u seq:%d ping %d", resource, resource->id, seq, res); } return res; } SPA_EXPORT int pw_resource_set_bound_id(struct pw_resource *resource, uint32_t global_id) { struct pw_impl_client *client = resource->client; resource->bound_id = global_id; if (client->core_resource != NULL) { pw_log_debug(NAME" %p: %u global_id:%u", resource, resource->id, global_id); pw_core_resource_bound_id(client->core_resource, resource->id, global_id); } return 0; } SPA_EXPORT uint32_t pw_resource_get_bound_id(struct pw_resource *resource) { return resource->bound_id; } static void SPA_PRINTF_FUNC(4, 0) pw_resource_errorv_id(struct pw_resource *resource, uint32_t id, int res, const char *error, va_list ap) { struct pw_impl_client *client = resource->client; if (client->core_resource != NULL) pw_core_resource_errorv(client->core_resource, id, client->recv_seq, res, error, ap); } SPA_EXPORT void pw_resource_errorf(struct pw_resource *resource, int res, const char *error, ...) { va_list ap; va_start(ap, error); pw_resource_errorv_id(resource, resource->id, res, error, ap); va_end(ap); } SPA_EXPORT void pw_resource_errorf_id(struct pw_resource *resource, uint32_t id, int res, const char *error, ...) 
{ va_list ap; va_start(ap, error); pw_resource_errorv_id(resource, id, res, error, ap); va_end(ap); } SPA_EXPORT void pw_resource_error(struct pw_resource *resource, int res, const char *error) { struct pw_impl_client *client = resource->client; if (client->core_resource != NULL) pw_core_resource_error(client->core_resource, resource->id, client->recv_seq, res, error); } SPA_EXPORT void pw_resource_destroy(struct pw_resource *resource) { struct pw_impl_client *client = resource->client; if (resource->global) { spa_list_remove(&resource->link); resource->global = NULL; } pw_log_debug(NAME" %p: destroy %u", resource, resource->id); pw_resource_emit_destroy(resource); pw_map_insert_at(&client->objects, resource->id, NULL); pw_impl_client_emit_resource_removed(client, resource); if (client->core_resource && !resource->removed) pw_core_resource_remove_id(client->core_resource, resource->id); pw_log_debug(NAME" %p: free %u", resource, resource->id); spa_hook_list_clean(&resource->listener_list); spa_hook_list_clean(&resource->object_listener_list); free(resource); } SPA_EXPORT void pw_resource_remove(struct pw_resource *resource) { resource->removed = true; pw_resource_destroy(resource); }
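A hedged sketch of typical server-side usage of this file's API: a module that already holds a struct pw_impl_client creates a resource for a bound id and listens for its destruction. The pw_resource_* calls, their argument order, and the errno convention are taken from the code above; PW_VERSION_RESOURCE_EVENTS, the .destroy member, PW_PERM_ALL, PW_TYPE_INTERFACE_Node and PW_VERSION_NODE are assumed from the public PipeWire headers, and export_node() is a hypothetical helper.

#include <errno.h>
#include <pipewire/pipewire.h>

struct node_data { int dummy; };                 /* hypothetical per-resource state */

static void resource_destroy(void *data)
{
    pw_log_debug("resource user data %p going away", data);
}

static const struct pw_resource_events resource_events = {
    PW_VERSION_RESOURCE_EVENTS,
    .destroy = resource_destroy,
};

static struct spa_hook resource_listener;

int export_node(struct pw_impl_client *client, uint32_t new_id)
{
    struct pw_resource *resource;

    resource = pw_resource_new(client, new_id, PW_PERM_ALL,
                               PW_TYPE_INTERFACE_Node, PW_VERSION_NODE,
                               sizeof(struct node_data));
    if (resource == NULL)
        return -errno;        /* pw_resource_new() stores -res in errno on failure */

    pw_resource_add_listener(resource, &resource_listener,
                             &resource_events, pw_resource_get_user_data(resource));
    return 0;
}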
732185.c
#include <stdio.h> int main(void) { printf("MY NAME IS AADHYAAN\n"); getchar(); /* portable pause; getch() would need the non-standard conio.h */ return 0; }
806161.c
/**************************************************************************** * * Copyright 2016 Samsung Electronics All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the License. * ****************************************************************************/ /************************************************************************ * kernel/sched/sched_processtimer.c * * Copyright (C) 2007, 2009, 2014-2018 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* ************************************************************************/ /************************************************************************ * Included Files ************************************************************************/ #include <tinyara/config.h> #include <tinyara/compiler.h> #include <time.h> #if CONFIG_RR_INTERVAL > 0 #include <sched.h> #include <tinyara/arch.h> #include <tinyara/ttrace.h> #endif #include "sched/sched.h" #include "wdog/wdog.h" #include "clock/clock.h" /************************************************************************ * Pre-processor Definitions ************************************************************************/ #ifndef CONFIG_SCHED_CPULOAD_TIMECONSTANT #define CONFIG_SCHED_CPULOAD_TIMECONSTANT 2 #endif /************************************************************************ * Private Type Declarations ************************************************************************/ /************************************************************************ * Public Variables ************************************************************************/ /************************************************************************ * Private Variables ************************************************************************/ /************************************************************************ * Private Functions ************************************************************************/ /************************************************************************ * Name: sched_process_timeslice * * Description: * Check if the currently executing task has exceeded its time slice. * * Inputs: * None * * Return Value: * None * ************************************************************************/ #if CONFIG_RR_INTERVAL > 0 static inline void sched_process_timeslice(void) { FAR struct tcb_s *rtcb = this_task(); /* Check if the currently executing task uses round robin * scheduling. */ if ((rtcb->flags & TCB_FLAG_ROUND_ROBIN) != 0) { /* Yes, check if decrementing the timeslice counter * would cause the timeslice to expire */ if (rtcb->timeslice <= 1) { /* Yes, Now check if the task has pre-emption disabled. * If so, then we will freeze the timeslice count at * the value until the next tick after pre-emption * has been enabled. */ if (!rtcb->lockcount) { /* Reset the timeslice in any case. */ rtcb->timeslice = MSEC2TICK(CONFIG_RR_INTERVAL); /* We know we are at the head of the ready to run * prioritized list. We must be the highest priority * task eligible for execution. Check the next task * in the ready to run list. If it is the same * priority, then we need to relinquish the CPU and * give that task a shot. */ if (rtcb->flink && rtcb->flink->sched_priority >= rtcb->sched_priority) { /* Just resetting the task priority to its current * value. This this will cause the task to be * rescheduled behind any other tasks at the same * priority. 
*/ up_reprioritize_rtr(rtcb, rtcb->sched_priority); } } } else { /* Decrement the timeslice counter */ rtcb->timeslice--; } } } #else #define sched_process_timeslice() #endif /************************************************************************ * Public Functions ************************************************************************/ /************************************************************************ * System Timer Hooks * * These are standard interfaces that are exported by the OS * for use by the architecture specific logic * ************************************************************************/ /************************************************************************ * Name: sched_process_timer * * Description: * This function handles system timer events. * The timer interrupt logic itself is implemented in the * architecture specific code, but must call the following OS * function periodically -- the calling interval must be * USEC_PER_TICK * * Inputs: * None * * Return Value: * None * ************************************************************************/ void sched_process_timer(void) { #ifdef CONFIG_WATCHDOG_FOR_IRQ up_wdog_keepalive(); #endif /* Increment the system time (if in the link) */ #ifdef CONFIG_HAVE_WEAKFUNCTIONS if (clock_timer != NULL) #endif { clock_timer(); } #if defined(CONFIG_SCHED_CPULOAD) && !defined(CONFIG_SCHED_CPULOAD_EXTCLK) /* Perform CPU load measurements (before any timer-initiated context * switches can occur) */ #ifdef CONFIG_HAVE_WEAKFUNCTIONS if (sched_process_cpuload != NULL) #endif { sched_process_cpuload(); } #endif /* Check if the currently executing task has exceeded its * timeslice. */ sched_process_timeslice(); /* Process watchdogs */ wd_timer(); }
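The header comment above states the contract for callers: architecture-specific timer interrupt logic must invoke sched_process_timer() once per tick (every USEC_PER_TICK microseconds). A minimal sketch of such a caller is below; the up_timerisr() name, its signature and the local prototype are illustrative assumptions, not taken from this file.

#include <stdint.h>
#include <tinyara/config.h>

void sched_process_timer(void);               /* provided by this file */

int up_timerisr(int irq, uint32_t *regs)      /* hypothetical arch timer ISR */
{
    /* acknowledge the hardware timer interrupt here (device specific) */

    /* advance the system tick: runs clock_timer(), CPU-load sampling,
     * round-robin time-slicing and the watchdog queue, as shown above */
    sched_process_timer();
    return 0;
}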
821627.c
/***************************************************************************/ /* */ /* ftcimage.c */ /* */ /* FreeType Image cache (body). */ /* */ /* Copyright 2000-2001, 2003, 2004, 2006, 2010 by */ /* David Turner, Robert Wilhelm, and Werner Lemberg. */ /* */ /* This file is part of the FreeType project, and may only be used, */ /* modified, and distributed under the terms of the FreeType project */ /* license, original_LICENSE.TXT. By continuing to use, modify, or distribute */ /* this file you indicate that you have read the license and */ /* understand and accept it fully. */ /* */ /***************************************************************************/ #include <ft2build.h> #include FT_CACHE_H #include "ftcimage.h" #include FT_INTERNAL_MEMORY_H #include "ftccback.h" #include "ftcerror.h" /* finalize a given glyph image node */ FT_LOCAL_DEF( void ) ftc_inode_free( FTC_Node ftcinode, FTC_Cache cache ) { FTC_INode inode = (FTC_INode)ftcinode; FT_Memory memory = cache->memory; if ( inode->glyph ) { FT_Done_Glyph( inode->glyph ); inode->glyph = NULL; } FTC_GNode_Done( FTC_GNODE( inode ), cache ); FT_FREE( inode ); } FT_LOCAL_DEF( void ) FTC_INode_Free( FTC_INode inode, FTC_Cache cache ) { ftc_inode_free( FTC_NODE( inode ), cache ); } /* initialize a new glyph image node */ FT_LOCAL_DEF( FT_Error ) FTC_INode_New( FTC_INode *pinode, FTC_GQuery gquery, FTC_Cache cache ) { FT_Memory memory = cache->memory; FT_Error error; FTC_INode inode = NULL; if ( !FT_NEW( inode ) ) { FTC_GNode gnode = FTC_GNODE( inode ); FTC_Family family = gquery->family; FT_UInt gindex = gquery->gindex; FTC_IFamilyClass clazz = FTC_CACHE__IFAMILY_CLASS( cache ); /* initialize its inner fields */ FTC_GNode_Init( gnode, gindex, family ); /* we will now load the glyph image */ error = clazz->family_load_glyph( family, gindex, cache, &inode->glyph ); if ( error ) { FTC_INode_Free( inode, cache ); inode = NULL; } } *pinode = inode; return error; } FT_LOCAL_DEF( FT_Error ) ftc_inode_new( FTC_Node *ftcpinode, FT_Pointer ftcgquery, FTC_Cache cache ) { FTC_INode *pinode = (FTC_INode*)ftcpinode; FTC_GQuery gquery = (FTC_GQuery)ftcgquery; return FTC_INode_New( pinode, gquery, cache ); } FT_LOCAL_DEF( FT_Offset ) ftc_inode_weight( FTC_Node ftcinode, FTC_Cache ftccache ) { FTC_INode inode = (FTC_INode)ftcinode; FT_Offset size = 0; FT_Glyph glyph = inode->glyph; FT_UNUSED( ftccache ); switch ( glyph->format ) { case FT_GLYPH_FORMAT_BITMAP: { FT_BitmapGlyph bitg; bitg = (FT_BitmapGlyph)glyph; size = bitg->bitmap.rows * ft_labs( bitg->bitmap.pitch ) + sizeof ( *bitg ); } break; case FT_GLYPH_FORMAT_OUTLINE: { FT_OutlineGlyph outg; outg = (FT_OutlineGlyph)glyph; size = outg->outline.n_points * ( sizeof ( FT_Vector ) + sizeof ( FT_Byte ) ) + outg->outline.n_contours * sizeof ( FT_Short ) + sizeof ( *outg ); } break; default: ; } size += sizeof ( *inode ); return size; } #if 0 FT_LOCAL_DEF( FT_Offset ) FTC_INode_Weight( FTC_INode inode ) { return ftc_inode_weight( FTC_NODE( inode ), NULL ); } #endif /* 0 */ /* END */
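The callbacks in this file back FreeType's public glyph-image cache, so a hedged sketch of that public API shows what the FTC_INode machinery is used for. The FTC_Manager_New / FTC_ImageCache_New / FTC_ImageCache_Lookup calls are the standard FT_CACHE_H interface; the face-requester scheme, the 16×16 image type and render_cached_glyph() are illustrative and not taken from this file.

#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_CACHE_H
#include FT_GLYPH_H

/* face requester: maps an opaque FTC_FaceID onto a live FT_Face */
static FT_Error my_face_requester(FTC_FaceID face_id, FT_Library library,
                                  FT_Pointer req_data, FT_Face *aface)
{
    FT_UNUSED(req_data);
    return FT_New_Face(library, (const char *)face_id, 0, aface);
}

FT_Error render_cached_glyph(FT_Library library, const char *font_path, FT_UInt gindex)
{
    FTC_Manager      manager;
    FTC_ImageCache   icache;
    FTC_ImageTypeRec type;
    FT_Glyph         glyph;   /* owned by the cache: do not FT_Done_Glyph() it */
    FT_Error         error;

    error = FTC_Manager_New(library, 0, 0, 0, my_face_requester, NULL, &manager);
    if (error) return error;
    error = FTC_ImageCache_New(manager, &icache);
    if (error) goto done;

    type.face_id = (FTC_FaceID)font_path;   /* any stable pointer works as a face id */
    type.width   = 16;
    type.height  = 16;
    type.flags   = FT_LOAD_DEFAULT;

    /* the cache allocates or reuses an FTC_INode (see ftc_inode_new above) per glyph */
    error = FTC_ImageCache_Lookup(icache, &type, gindex, &glyph, NULL);

done:
    FTC_Manager_Done(manager);
    return error;
}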
375721.c
/* * Server-side socket management * * Copyright (C) 1999 Marcus Meissner, Ove Kåven * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA * * FIXME: we use read|write access in all cases. Shouldn't we depend that * on the access of the current handle? */ #include "config.h" #include <assert.h> #include <fcntl.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #include <errno.h> #ifdef HAVE_IFADDRS_H # include <ifaddrs.h> #endif #ifdef HAVE_NET_IF_H # include <net/if.h> #endif #ifdef HAVE_NETINET_IN_H # include <netinet/in.h> #endif #include <poll.h> #include <sys/time.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> #ifdef HAVE_SYS_FILIO_H # include <sys/filio.h> #endif #include <time.h> #include <unistd.h> #include <limits.h> #ifdef HAVE_LINUX_FILTER_H # include <linux/filter.h> #endif #ifdef HAVE_LINUX_RTNETLINK_H # include <linux/rtnetlink.h> #endif #ifdef HAVE_NETIPX_IPX_H # include <netipx/ipx.h> #elif defined(HAVE_LINUX_IPX_H) # ifdef HAVE_ASM_TYPES_H # include <asm/types.h> # endif # ifdef HAVE_LINUX_TYPES_H # include <linux/types.h> # endif # include <linux/ipx.h> #endif #if defined(SOL_IPX) || defined(SO_DEFAULT_HEADERS) # define HAS_IPX #endif #ifdef HAVE_LINUX_IRDA_H # ifdef HAVE_LINUX_TYPES_H # include <linux/types.h> # endif # include <linux/irda.h> # define HAS_IRDA #endif #include "ntstatus.h" #define WIN32_NO_STATUS #include "windef.h" #include "winternl.h" #include "winerror.h" #define USE_WS_PREFIX #include "winsock2.h" #include "ws2tcpip.h" #include "wsipx.h" #include "af_irda.h" #include "wine/afd.h" #include "process.h" #include "file.h" #include "handle.h" #include "thread.h" #include "request.h" #include "user.h" #if defined(linux) && !defined(IP_UNICAST_IF) #define IP_UNICAST_IF 50 #endif static const char magic_loopback_addr[] = {127, 12, 34, 56}; union win_sockaddr { struct WS_sockaddr addr; struct WS_sockaddr_in in; struct WS_sockaddr_in6 in6; struct WS_sockaddr_ipx ipx; SOCKADDR_IRDA irda; }; static struct list poll_list = LIST_INIT( poll_list ); struct poll_req { struct list entry; struct async *async; struct iosb *iosb; struct timeout_user *timeout; timeout_t orig_timeout; int exclusive; unsigned int count; struct { struct sock *sock; int mask; obj_handle_t handle; int flags; unsigned int status; } sockets[1]; }; struct accept_req { struct list entry; struct async *async; struct iosb *iosb; struct sock *sock, *acceptsock; int accepted; unsigned int recv_len, local_len; }; struct connect_req { struct async *async; struct iosb *iosb; struct sock *sock; unsigned int addr_len, send_len, send_cursor; }; enum connection_state { SOCK_LISTENING, SOCK_UNCONNECTED, SOCK_CONNECTING, SOCK_CONNECTED, SOCK_CONNECTIONLESS, }; struct sock { struct object obj; /* object header */ struct fd *fd; /* socket file descriptor */ enum connection_state state; /* connection 
state */ unsigned int mask; /* event mask */ /* pending AFD_POLL_* events which have not yet been reported to the application */ unsigned int pending_events; /* AFD_POLL_* events which have already been reported and should not be * selected for again until reset by a relevant call. * * For example, if AFD_POLL_READ is set here and not in pending_events, it * has already been reported and consumed, and we should not report it * again, even if POLLIN is signaled, until it is reset by e.g recv(). * * If an event has been signaled and not consumed yet, it will be set in * both pending_events and reported_events (as we should only ever report * any event once until it is reset.) */ unsigned int reported_events; unsigned int flags; /* socket flags */ unsigned short proto; /* socket protocol */ unsigned short type; /* socket type */ unsigned short family; /* socket family */ struct event *event; /* event object */ user_handle_t window; /* window to send the message to */ unsigned int message; /* message to send */ obj_handle_t wparam; /* message wparam (socket handle) */ int errors[AFD_POLL_BIT_COUNT]; /* event errors */ timeout_t connect_time;/* time the socket was connected */ struct sock *deferred; /* socket that waits for a deferred accept */ struct async_queue read_q; /* queue for asynchronous reads */ struct async_queue write_q; /* queue for asynchronous writes */ struct async_queue ifchange_q; /* queue for interface change notifications */ struct async_queue accept_q; /* queue for asynchronous accepts */ struct async_queue connect_q; /* queue for asynchronous connects */ struct async_queue poll_q; /* queue for asynchronous polls */ struct object *ifchange_obj; /* the interface change notification object */ struct list ifchange_entry; /* entry in ifchange notification list */ struct list accept_list; /* list of pending accept requests */ struct accept_req *accept_recv_req; /* pending accept-into request which will recv on this socket */ struct connect_req *connect_req; /* pending connection request */ struct poll_req *main_poll; /* main poll */ union win_sockaddr addr; /* socket name */ int addr_len; /* socket name length */ unsigned int rcvbuf; /* advisory recv buffer size */ unsigned int sndbuf; /* advisory send buffer size */ unsigned int rcvtimeo; /* receive timeout in ms */ unsigned int sndtimeo; /* send timeout in ms */ unsigned int rd_shutdown : 1; /* is the read end shut down? */ unsigned int wr_shutdown : 1; /* is the write end shut down? */ unsigned int wr_shutdown_pending : 1; /* is a write shutdown pending? */ unsigned int hangup : 1; /* has the read end received a hangup? */ unsigned int aborted : 1; /* did we get a POLLERR or irregular POLLHUP? */ unsigned int nonblocking : 1; /* is the socket nonblocking? */ unsigned int bound : 1; /* is the socket bound? 
*/ }; static void sock_dump( struct object *obj, int verbose ); static struct fd *sock_get_fd( struct object *obj ); static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle ); static void sock_destroy( struct object *obj ); static struct object *sock_get_ifchange( struct sock *sock ); static void sock_release_ifchange( struct sock *sock ); static int sock_get_poll_events( struct fd *fd ); static void sock_poll_event( struct fd *fd, int event ); static enum server_fd_type sock_get_fd_type( struct fd *fd ); static void sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async ); static void sock_cancel_async( struct fd *fd, struct async *async ); static void sock_queue_async( struct fd *fd, struct async *async, int type, int count ); static void sock_reselect_async( struct fd *fd, struct async_queue *queue ); static int accept_into_socket( struct sock *sock, struct sock *acceptsock ); static struct sock *accept_socket( struct sock *sock ); static int sock_get_ntstatus( int err ); static unsigned int sock_get_error( int err ); static void poll_socket( struct sock *poll_sock, struct async *async, int exclusive, timeout_t timeout, unsigned int count, const struct afd_poll_socket_64 *sockets ); static const struct object_ops sock_ops = { sizeof(struct sock), /* size */ &file_type, /* type */ sock_dump, /* dump */ add_queue, /* add_queue */ remove_queue, /* remove_queue */ default_fd_signaled, /* signaled */ NULL, /* get_esync_fd */ no_satisfied, /* satisfied */ no_signal, /* signal */ sock_get_fd, /* get_fd */ default_map_access, /* map_access */ default_get_sd, /* get_sd */ default_set_sd, /* set_sd */ no_get_full_name, /* get_full_name */ no_lookup_name, /* lookup_name */ no_link_name, /* link_name */ NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ sock_close_handle, /* close_handle */ sock_destroy /* destroy */ }; static const struct fd_ops sock_fd_ops = { sock_get_poll_events, /* get_poll_events */ sock_poll_event, /* poll_event */ sock_get_fd_type, /* get_fd_type */ no_fd_read, /* read */ no_fd_write, /* write */ no_fd_flush, /* flush */ default_fd_get_file_info, /* get_file_info */ no_fd_get_volume_info, /* get_volume_info */ sock_ioctl, /* ioctl */ sock_cancel_async, /* cancel_async */ sock_queue_async, /* queue_async */ sock_reselect_async /* reselect_async */ }; union unix_sockaddr { struct sockaddr addr; struct sockaddr_in in; struct sockaddr_in6 in6; #ifdef HAS_IPX struct sockaddr_ipx ipx; #endif #ifdef HAS_IRDA struct sockaddr_irda irda; #endif }; static int sockaddr_from_unix( const union unix_sockaddr *uaddr, struct WS_sockaddr *wsaddr, socklen_t wsaddrlen ) { memset( wsaddr, 0, wsaddrlen ); switch (uaddr->addr.sa_family) { case AF_INET: { struct WS_sockaddr_in win = {0}; if (wsaddrlen < sizeof(win)) return -1; win.sin_family = WS_AF_INET; win.sin_port = uaddr->in.sin_port; memcpy( &win.sin_addr, &uaddr->in.sin_addr, sizeof(win.sin_addr) ); memcpy( wsaddr, &win, sizeof(win) ); return sizeof(win); } case AF_INET6: { struct WS_sockaddr_in6 win = {0}; if (wsaddrlen < sizeof(win)) return -1; win.sin6_family = WS_AF_INET6; win.sin6_port = uaddr->in6.sin6_port; win.sin6_flowinfo = uaddr->in6.sin6_flowinfo; memcpy( &win.sin6_addr, &uaddr->in6.sin6_addr, sizeof(win.sin6_addr) ); #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID win.sin6_scope_id = uaddr->in6.sin6_scope_id; #endif memcpy( wsaddr, &win, sizeof(win) ); return sizeof(win); } #ifdef HAS_IPX case AF_IPX: { struct WS_sockaddr_ipx win = 
{0}; if (wsaddrlen < sizeof(win)) return -1; win.sa_family = WS_AF_IPX; memcpy( win.sa_netnum, &uaddr->ipx.sipx_network, sizeof(win.sa_netnum) ); memcpy( win.sa_nodenum, &uaddr->ipx.sipx_node, sizeof(win.sa_nodenum) ); win.sa_socket = uaddr->ipx.sipx_port; memcpy( wsaddr, &win, sizeof(win) ); return sizeof(win); } #endif #ifdef HAS_IRDA case AF_IRDA: { SOCKADDR_IRDA win; if (wsaddrlen < sizeof(win)) return -1; win.irdaAddressFamily = WS_AF_IRDA; memcpy( win.irdaDeviceID, &uaddr->irda.sir_addr, sizeof(win.irdaDeviceID) ); if (uaddr->irda.sir_lsap_sel != LSAP_ANY) snprintf( win.irdaServiceName, sizeof(win.irdaServiceName), "LSAP-SEL%u", uaddr->irda.sir_lsap_sel ); else memcpy( win.irdaServiceName, uaddr->irda.sir_name, sizeof(win.irdaServiceName) ); memcpy( wsaddr, &win, sizeof(win) ); return sizeof(win); } #endif case AF_UNSPEC: return 0; default: return -1; } } static socklen_t sockaddr_to_unix( const struct WS_sockaddr *wsaddr, int wsaddrlen, union unix_sockaddr *uaddr ) { memset( uaddr, 0, sizeof(*uaddr) ); switch (wsaddr->sa_family) { case WS_AF_INET: { struct WS_sockaddr_in win = {0}; if (wsaddrlen < sizeof(win)) return 0; memcpy( &win, wsaddr, sizeof(win) ); uaddr->in.sin_family = AF_INET; uaddr->in.sin_port = win.sin_port; memcpy( &uaddr->in.sin_addr, &win.sin_addr, sizeof(win.sin_addr) ); return sizeof(uaddr->in); } case WS_AF_INET6: { struct WS_sockaddr_in6 win = {0}; if (wsaddrlen < sizeof(win)) return 0; memcpy( &win, wsaddr, sizeof(win) ); uaddr->in6.sin6_family = AF_INET6; uaddr->in6.sin6_port = win.sin6_port; uaddr->in6.sin6_flowinfo = win.sin6_flowinfo; memcpy( &uaddr->in6.sin6_addr, &win.sin6_addr, sizeof(win.sin6_addr) ); #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID uaddr->in6.sin6_scope_id = win.sin6_scope_id; #endif return sizeof(uaddr->in6); } #ifdef HAS_IPX case WS_AF_IPX: { struct WS_sockaddr_ipx win = {0}; if (wsaddrlen < sizeof(win)) return 0; memcpy( &win, wsaddr, sizeof(win) ); uaddr->ipx.sipx_family = AF_IPX; memcpy( &uaddr->ipx.sipx_network, win.sa_netnum, sizeof(win.sa_netnum) ); memcpy( &uaddr->ipx.sipx_node, win.sa_nodenum, sizeof(win.sa_nodenum) ); uaddr->ipx.sipx_port = win.sa_socket; return sizeof(uaddr->ipx); } #endif #ifdef HAS_IRDA case WS_AF_IRDA: { SOCKADDR_IRDA win = {0}; unsigned int lsap_sel; if (wsaddrlen < sizeof(win)) return 0; memcpy( &win, wsaddr, sizeof(win) ); uaddr->irda.sir_family = AF_IRDA; if (sscanf( win.irdaServiceName, "LSAP-SEL%u", &lsap_sel ) == 1) uaddr->irda.sir_lsap_sel = lsap_sel; else { uaddr->irda.sir_lsap_sel = LSAP_ANY; memcpy( uaddr->irda.sir_name, win.irdaServiceName, sizeof(win.irdaServiceName) ); } memcpy( &uaddr->irda.sir_addr, win.irdaDeviceID, sizeof(win.irdaDeviceID) ); return sizeof(uaddr->irda); } #endif case WS_AF_UNSPEC: switch (wsaddrlen) { default: /* likely an ipv4 address */ case sizeof(struct WS_sockaddr_in): return sizeof(uaddr->in); #ifdef HAS_IPX case sizeof(struct WS_sockaddr_ipx): return sizeof(uaddr->ipx); #endif #ifdef HAS_IRDA case sizeof(SOCKADDR_IRDA): return sizeof(uaddr->irda); #endif case sizeof(struct WS_sockaddr_in6): return sizeof(uaddr->in6); } default: return 0; } } /* some events are generated at the same time but must be sent in a particular * order (e.g. 
CONNECT must be sent before READ) */ static const enum afd_poll_bit event_bitorder[] = { AFD_POLL_BIT_CONNECT, AFD_POLL_BIT_CONNECT_ERR, AFD_POLL_BIT_ACCEPT, AFD_POLL_BIT_OOB, AFD_POLL_BIT_WRITE, AFD_POLL_BIT_READ, AFD_POLL_BIT_RESET, AFD_POLL_BIT_HUP, AFD_POLL_BIT_CLOSE, }; typedef enum { SOCK_SHUTDOWN_ERROR = -1, SOCK_SHUTDOWN_EOF = 0, SOCK_SHUTDOWN_POLLHUP = 1 } sock_shutdown_t; static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR; static sock_shutdown_t sock_check_pollhup(void) { sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR; int fd[2], n; struct pollfd pfd; char dummy; if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret; if ( shutdown( fd[0], 1 ) ) goto out; pfd.fd = fd[1]; pfd.events = POLLIN; pfd.revents = 0; /* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */ n = poll( &pfd, 1, 1 ); if ( n != 1 ) goto out; /* error or timeout */ if ( pfd.revents & POLLHUP ) ret = SOCK_SHUTDOWN_POLLHUP; else if ( pfd.revents & POLLIN && read( fd[1], &dummy, 1 ) == 0 ) ret = SOCK_SHUTDOWN_EOF; out: close( fd[0] ); close( fd[1] ); return ret; } void sock_init(void) { sock_shutdown_type = sock_check_pollhup(); switch ( sock_shutdown_type ) { case SOCK_SHUTDOWN_EOF: if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" ); break; case SOCK_SHUTDOWN_POLLHUP: if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" ); break; default: fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" ); sock_shutdown_type = SOCK_SHUTDOWN_EOF; } } static int sock_reselect( struct sock *sock ) { int ev = sock_get_poll_events( sock->fd ); if (debug_level) fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev); set_fd_events( sock->fd, ev ); return ev; } static unsigned int afd_poll_flag_to_win32( unsigned int flags ) { static const unsigned int map[] = { FD_READ, /* READ */ FD_OOB, /* OOB */ FD_WRITE, /* WRITE */ FD_CLOSE, /* HUP */ FD_CLOSE, /* RESET */ 0, /* CLOSE */ FD_CONNECT, /* CONNECT */ FD_ACCEPT, /* ACCEPT */ FD_CONNECT, /* CONNECT_ERR */ }; unsigned int i, ret = 0; for (i = 0; i < ARRAY_SIZE(map); ++i) { if (flags & (1 << i)) ret |= map[i]; } return ret; } /* wake anybody waiting on the socket event or send the associated message */ static void sock_wake_up( struct sock *sock ) { unsigned int events = sock->pending_events & sock->mask; int i; if (sock->event) { if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event ); if (events) set_event( sock->event ); } if (sock->window) { if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window ); for (i = 0; i < ARRAY_SIZE(event_bitorder); i++) { enum afd_poll_bit event = event_bitorder[i]; if (events & (1 << event)) { lparam_t lparam = afd_poll_flag_to_win32(1 << event) | (sock_get_error( sock->errors[event] ) << 16); post_message( sock->window, sock->message, sock->wparam, lparam ); } } sock->pending_events = 0; sock_reselect( sock ); } } static inline int sock_error( struct fd *fd ) { unsigned int optval = 0; socklen_t optlen = sizeof(optval); getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen); return optval; } static void free_accept_req( void *private ) { struct accept_req *req = private; list_remove( &req->entry ); if (req->acceptsock) { req->acceptsock->accept_recv_req = NULL; release_object( req->acceptsock ); } release_object( req->async ); release_object( req->iosb ); release_object( req->sock ); free( req ); } static void fill_accept_output( struct accept_req *req ) { const data_size_t 
out_size = req->iosb->out_size; struct async *async = req->async; union unix_sockaddr unix_addr; struct WS_sockaddr *win_addr; unsigned int remote_len; socklen_t unix_len; int fd, size = 0; char *out_data; int win_len; if (!(out_data = mem_alloc( out_size ))) { async_terminate( async, get_error() ); return; } fd = get_unix_fd( req->acceptsock->fd ); if (req->recv_len && (size = recv( fd, out_data, req->recv_len, 0 )) < 0) { if (!req->accepted && errno == EWOULDBLOCK) { req->accepted = 1; sock_reselect( req->acceptsock ); return; } async_terminate( async, sock_get_ntstatus( errno ) ); free( out_data ); return; } if (req->local_len) { if (req->local_len < sizeof(int)) { async_terminate( async, STATUS_BUFFER_TOO_SMALL ); free( out_data ); return; } unix_len = sizeof(unix_addr); win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + sizeof(int)); if (getsockname( fd, &unix_addr.addr, &unix_len ) < 0 || (win_len = sockaddr_from_unix( &unix_addr, win_addr, req->local_len - sizeof(int) )) < 0) { async_terminate( async, sock_get_ntstatus( errno ) ); free( out_data ); return; } memcpy( out_data + req->recv_len, &win_len, sizeof(int) ); } unix_len = sizeof(unix_addr); win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + req->local_len + sizeof(int)); remote_len = out_size - req->recv_len - req->local_len; if (getpeername( fd, &unix_addr.addr, &unix_len ) < 0 || (win_len = sockaddr_from_unix( &unix_addr, win_addr, remote_len - sizeof(int) )) < 0) { async_terminate( async, sock_get_ntstatus( errno ) ); free( out_data ); return; } memcpy( out_data + req->recv_len + req->local_len, &win_len, sizeof(int) ); async_request_complete( req->async, STATUS_SUCCESS, size, out_size, out_data ); } static void complete_async_accept( struct sock *sock, struct accept_req *req ) { struct sock *acceptsock = req->acceptsock; struct async *async = req->async; if (debug_level) fprintf( stderr, "completing accept request for socket %p\n", sock ); if (acceptsock) { if (!accept_into_socket( sock, acceptsock )) { async_terminate( async, get_error() ); return; } fill_accept_output( req ); } else { obj_handle_t handle; if (!(acceptsock = accept_socket( sock ))) { async_terminate( async, get_error() ); return; } handle = alloc_handle_no_access_check( async_get_thread( async )->process, &acceptsock->obj, GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT ); acceptsock->wparam = handle; sock_reselect( acceptsock ); release_object( acceptsock ); if (!handle) { async_terminate( async, get_error() ); return; } async_request_complete_alloc( req->async, STATUS_SUCCESS, 0, sizeof(handle), &handle ); } } static void complete_async_accept_recv( struct accept_req *req ) { if (debug_level) fprintf( stderr, "completing accept recv request for socket %p\n", req->acceptsock ); assert( req->recv_len ); fill_accept_output( req ); } static void free_connect_req( void *private ) { struct connect_req *req = private; req->sock->connect_req = NULL; release_object( req->async ); release_object( req->iosb ); release_object( req->sock ); free( req ); } static void complete_async_connect( struct sock *sock ) { struct connect_req *req = sock->connect_req; const char *in_buffer; size_t len; int ret; if (debug_level) fprintf( stderr, "completing connect request for socket %p\n", sock ); sock->state = SOCK_CONNECTED; if (!req->send_len) { async_terminate( req->async, STATUS_SUCCESS ); return; } in_buffer = (const char *)req->iosb->in_data + sizeof(struct afd_connect_params) + req->addr_len; len = req->send_len - req->send_cursor; ret = 
send( get_unix_fd( sock->fd ), in_buffer + req->send_cursor, len, 0 ); if (ret < 0 && errno != EWOULDBLOCK) async_terminate( req->async, sock_get_ntstatus( errno ) ); else if (ret == len) async_request_complete( req->async, STATUS_SUCCESS, req->send_len, 0, NULL ); else req->send_cursor += ret; } static void free_poll_req( void *private ) { struct poll_req *req = private; unsigned int i; if (req->timeout) remove_timeout_user( req->timeout ); for (i = 0; i < req->count; ++i) release_object( req->sockets[i].sock ); release_object( req->async ); release_object( req->iosb ); list_remove( &req->entry ); free( req ); } static int is_oobinline( struct sock *sock ) { int oobinline; socklen_t len = sizeof(oobinline); return !getsockopt( get_unix_fd( sock->fd ), SOL_SOCKET, SO_OOBINLINE, (char *)&oobinline, &len ) && oobinline; } static int get_poll_flags( struct sock *sock, int event ) { int flags = 0; /* A connection-mode socket which has never been connected does not return * write or hangup events, but Linux reports POLLOUT | POLLHUP. */ if (sock->state == SOCK_UNCONNECTED) event &= ~(POLLOUT | POLLHUP); if (event & POLLIN) { if (sock->state == SOCK_LISTENING) flags |= AFD_POLL_ACCEPT; else flags |= AFD_POLL_READ; } if (event & POLLPRI) flags |= is_oobinline( sock ) ? AFD_POLL_READ : AFD_POLL_OOB; if (event & POLLOUT) flags |= AFD_POLL_WRITE; if (sock->state == SOCK_CONNECTED) flags |= AFD_POLL_CONNECT; if (event & POLLHUP) flags |= AFD_POLL_HUP; if (event & POLLERR) flags |= AFD_POLL_CONNECT_ERR; return flags; } static void complete_async_poll( struct poll_req *req, unsigned int status ) { unsigned int i, signaled_count = 0; for (i = 0; i < req->count; ++i) { struct sock *sock = req->sockets[i].sock; if (sock->main_poll == req) sock->main_poll = NULL; } if (!status) { for (i = 0; i < req->count; ++i) { if (req->sockets[i].flags) ++signaled_count; } } if (is_machine_64bit( async_get_thread( req->async )->process->machine )) { size_t output_size = offsetof( struct afd_poll_params_64, sockets[signaled_count] ); struct afd_poll_params_64 *output; if (!(output = mem_alloc( output_size ))) { async_terminate( req->async, get_error() ); return; } memset( output, 0, output_size ); output->timeout = req->orig_timeout; output->exclusive = req->exclusive; for (i = 0; i < req->count; ++i) { if (!req->sockets[i].flags) continue; output->sockets[output->count].socket = req->sockets[i].handle; output->sockets[output->count].flags = req->sockets[i].flags; output->sockets[output->count].status = req->sockets[i].status; ++output->count; } assert( output->count == signaled_count ); async_request_complete( req->async, status, output_size, output_size, output ); } else { size_t output_size = offsetof( struct afd_poll_params_32, sockets[signaled_count] ); struct afd_poll_params_32 *output; if (!(output = mem_alloc( output_size ))) { async_terminate( req->async, get_error() ); return; } memset( output, 0, output_size ); output->timeout = req->orig_timeout; output->exclusive = req->exclusive; for (i = 0; i < req->count; ++i) { if (!req->sockets[i].flags) continue; output->sockets[output->count].socket = req->sockets[i].handle; output->sockets[output->count].flags = req->sockets[i].flags; output->sockets[output->count].status = req->sockets[i].status; ++output->count; } assert( output->count == signaled_count ); async_request_complete( req->async, status, output_size, output_size, output ); } } static void complete_async_polls( struct sock *sock, int event, int error ) { int flags = get_poll_flags( sock, event ); struct 
poll_req *req, *next; LIST_FOR_EACH_ENTRY_SAFE( req, next, &poll_list, struct poll_req, entry ) { unsigned int i; if (req->iosb->status != STATUS_PENDING) continue; for (i = 0; i < req->count; ++i) { if (req->sockets[i].sock != sock) continue; if (!(req->sockets[i].mask & flags)) continue; if (debug_level) fprintf( stderr, "completing poll for socket %p, wanted %#x got %#x\n", sock, req->sockets[i].mask, flags ); req->sockets[i].flags = req->sockets[i].mask & flags; req->sockets[i].status = sock_get_ntstatus( error ); complete_async_poll( req, STATUS_SUCCESS ); break; } } } static void async_poll_timeout( void *private ) { struct poll_req *req = private; req->timeout = NULL; if (req->iosb->status != STATUS_PENDING) return; complete_async_poll( req, STATUS_TIMEOUT ); } static int sock_dispatch_asyncs( struct sock *sock, int event, int error ) { if (event & (POLLIN | POLLPRI)) { struct accept_req *req; LIST_FOR_EACH_ENTRY( req, &sock->accept_list, struct accept_req, entry ) { if (req->iosb->status == STATUS_PENDING && !req->accepted) { complete_async_accept( sock, req ); break; } } if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING) complete_async_accept_recv( sock->accept_recv_req ); } if ((event & POLLOUT) && sock->connect_req && sock->connect_req->iosb->status == STATUS_PENDING) complete_async_connect( sock ); if (event & (POLLIN | POLLPRI) && async_waiting( &sock->read_q )) { if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock ); async_wake_up( &sock->read_q, STATUS_ALERTED ); event &= ~(POLLIN | POLLPRI); } if (event & POLLOUT && async_waiting( &sock->write_q )) { if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock ); async_wake_up( &sock->write_q, STATUS_ALERTED ); event &= ~POLLOUT; } if (event & (POLLERR | POLLHUP)) { int status = sock_get_ntstatus( error ); struct accept_req *req, *next; if (sock->rd_shutdown || sock->hangup) async_wake_up( &sock->read_q, status ); if (sock->wr_shutdown) async_wake_up( &sock->write_q, status ); LIST_FOR_EACH_ENTRY_SAFE( req, next, &sock->accept_list, struct accept_req, entry ) { if (req->iosb->status == STATUS_PENDING) async_terminate( req->async, status ); } if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING) async_terminate( sock->accept_recv_req->async, status ); if (sock->connect_req) async_terminate( sock->connect_req->async, status ); } return event; } static void post_socket_event( struct sock *sock, enum afd_poll_bit event_bit, int error ) { unsigned int event = (1 << event_bit); if (!(sock->reported_events & event)) { sock->pending_events |= event; sock->reported_events |= event; sock->errors[event_bit] = error; } } static void sock_dispatch_events( struct sock *sock, enum connection_state prevstate, int event, int error ) { switch (prevstate) { case SOCK_UNCONNECTED: break; case SOCK_CONNECTING: if (event & POLLOUT) { post_socket_event( sock, AFD_POLL_BIT_CONNECT, 0 ); sock->errors[AFD_POLL_BIT_CONNECT_ERR] = 0; } if (event & (POLLERR | POLLHUP)) post_socket_event( sock, AFD_POLL_BIT_CONNECT_ERR, error ); break; case SOCK_LISTENING: if (event & (POLLIN | POLLERR | POLLHUP)) post_socket_event( sock, AFD_POLL_BIT_ACCEPT, error ); break; case SOCK_CONNECTED: case SOCK_CONNECTIONLESS: if (event & POLLIN) post_socket_event( sock, AFD_POLL_BIT_READ, 0 ); if (event & POLLOUT) post_socket_event( sock, AFD_POLL_BIT_WRITE, 0 ); if (event & POLLPRI) post_socket_event( sock, AFD_POLL_BIT_OOB, 0 ); if (event & (POLLERR | POLLHUP)) 
post_socket_event( sock, AFD_POLL_BIT_HUP, error ); break; } sock_wake_up( sock ); } static void sock_poll_event( struct fd *fd, int event ) { struct sock *sock = get_fd_user( fd ); int hangup_seen = 0; enum connection_state prevstate = sock->state; int error = 0; assert( sock->obj.ops == &sock_ops ); if (debug_level) fprintf(stderr, "socket %p select event: %x\n", sock, event); /* we may change event later, remove from loop here */ if (event & (POLLERR|POLLHUP)) set_fd_events( sock->fd, -1 ); switch (sock->state) { case SOCK_UNCONNECTED: break; case SOCK_CONNECTING: if (event & (POLLERR|POLLHUP)) { sock->state = SOCK_UNCONNECTED; event &= ~POLLOUT; error = sock_error( fd ); } else if (event & POLLOUT) { sock->state = SOCK_CONNECTED; sock->connect_time = current_time; } break; case SOCK_LISTENING: if (event & (POLLERR|POLLHUP)) error = sock_error( fd ); break; case SOCK_CONNECTED: case SOCK_CONNECTIONLESS: if (sock->type == WS_SOCK_STREAM && (event & POLLIN)) { char dummy; int nr; /* Linux 2.4 doesn't report POLLHUP if only one side of the socket * has been closed, so we need to check for it explicitly here */ nr = recv( get_unix_fd( fd ), &dummy, 1, MSG_PEEK ); if ( nr == 0 ) { hangup_seen = 1; event &= ~POLLIN; } else if ( nr < 0 ) { event &= ~POLLIN; /* EAGAIN can happen if an async recv() falls between the server's poll() call and the invocation of this routine */ if ( errno != EAGAIN ) { error = errno; event |= POLLERR; if ( debug_level ) fprintf( stderr, "recv error on socket %p: %d\n", sock, errno ); } } } if (hangup_seen || (sock_shutdown_type == SOCK_SHUTDOWN_POLLHUP && (event & POLLHUP))) { sock->hangup = 1; } else if (event & (POLLHUP | POLLERR)) { sock->aborted = 1; if (debug_level) fprintf( stderr, "socket %p aborted by error %d, event %#x\n", sock, error, event ); } if (hangup_seen) event |= POLLHUP; break; } complete_async_polls( sock, event, error ); event = sock_dispatch_asyncs( sock, event, error ); sock_dispatch_events( sock, prevstate, event, error ); sock_reselect( sock ); } static void sock_dump( struct object *obj, int verbose ) { struct sock *sock = (struct sock *)obj; assert( obj->ops == &sock_ops ); fprintf( stderr, "Socket fd=%p, state=%x, mask=%x, pending=%x, reported=%x\n", sock->fd, sock->state, sock->mask, sock->pending_events, sock->reported_events ); } static int poll_flags_from_afd( struct sock *sock, int flags ) { int ev = 0; /* A connection-mode socket which has never been connected does * not return write or hangup events, but Linux returns * POLLOUT | POLLHUP. */ if (sock->state == SOCK_UNCONNECTED) return -1; if (flags & (AFD_POLL_READ | AFD_POLL_ACCEPT)) ev |= POLLIN; if ((flags & AFD_POLL_HUP) && sock->type == WS_SOCK_STREAM) ev |= POLLIN; if (flags & AFD_POLL_OOB) ev |= is_oobinline( sock ) ? POLLIN : POLLPRI; if (flags & AFD_POLL_WRITE) ev |= POLLOUT; return ev; } static int sock_get_poll_events( struct fd *fd ) { struct sock *sock = get_fd_user( fd ); unsigned int mask = sock->mask & ~sock->reported_events; struct poll_req *req; int ev = 0; assert( sock->obj.ops == &sock_ops ); if (!sock->type) /* not initialized yet */ return -1; switch (sock->state) { case SOCK_UNCONNECTED: /* A connection-mode Windows socket which has never been connected does * not return any events, but Linux returns POLLOUT | POLLHUP. Hence we * need to return -1 here, to prevent the socket from being polled on at * all. 
*/ return -1; case SOCK_CONNECTING: return POLLOUT; case SOCK_LISTENING: if (!list_empty( &sock->accept_list ) || (mask & AFD_POLL_ACCEPT)) ev |= POLLIN; break; case SOCK_CONNECTED: case SOCK_CONNECTIONLESS: if (sock->hangup && sock->wr_shutdown && !sock->wr_shutdown_pending) { /* Linux returns POLLHUP if a socket is both SHUT_RD and SHUT_WR, or * if both the socket and its peer are SHUT_WR. * * We don't use SHUT_RD, so we can only encounter this in the latter * case. In that case there can't be any pending read requests (they * would have already been completed with a length of zero), the * above condition ensures that we don't have any pending write * requests, and nothing that can change about the socket state that * would complete a pending poll request. */ return -1; } if (sock->aborted) return -1; if (sock->accept_recv_req) { ev |= POLLIN; } else if (async_queued( &sock->read_q )) { if (async_waiting( &sock->read_q )) ev |= POLLIN | POLLPRI; } else { /* Don't ask for POLLIN if we got a hangup. We won't receive more * data anyway, but we will get POLLIN if SOCK_SHUTDOWN_EOF. */ if (!sock->hangup) { if (mask & AFD_POLL_READ) ev |= POLLIN; if (mask & AFD_POLL_OOB) ev |= POLLPRI; } /* We use POLLIN with 0 bytes recv() as hangup indication for stream sockets. */ if (sock->state == SOCK_CONNECTED && (mask & AFD_POLL_HUP) && !(sock->reported_events & AFD_POLL_READ)) ev |= POLLIN; } if (async_queued( &sock->write_q )) { if (async_waiting( &sock->write_q )) ev |= POLLOUT; } else if (!sock->wr_shutdown && (mask & AFD_POLL_WRITE)) { ev |= POLLOUT; } break; } LIST_FOR_EACH_ENTRY( req, &poll_list, struct poll_req, entry ) { unsigned int i; for (i = 0; i < req->count; ++i) { if (req->sockets[i].sock != sock) continue; ev |= poll_flags_from_afd( sock, req->sockets[i].mask ); } } return ev; } static enum server_fd_type sock_get_fd_type( struct fd *fd ) { return FD_TYPE_SOCKET; } static void sock_cancel_async( struct fd *fd, struct async *async ) { struct poll_req *req; LIST_FOR_EACH_ENTRY( req, &poll_list, struct poll_req, entry ) { unsigned int i; if (req->async != async) continue; for (i = 0; i < req->count; i++) { struct sock *sock = req->sockets[i].sock; if (sock->main_poll == req) sock->main_poll = NULL; } } async_terminate( async, STATUS_CANCELLED ); } static void sock_queue_async( struct fd *fd, struct async *async, int type, int count ) { struct sock *sock = get_fd_user( fd ); struct async_queue *queue; assert( sock->obj.ops == &sock_ops ); switch (type) { case ASYNC_TYPE_READ: if (sock->rd_shutdown) { set_error( STATUS_PIPE_DISCONNECTED ); return; } queue = &sock->read_q; break; case ASYNC_TYPE_WRITE: if (sock->wr_shutdown) { set_error( STATUS_PIPE_DISCONNECTED ); return; } queue = &sock->write_q; break; default: set_error( STATUS_INVALID_PARAMETER ); return; } if (sock->state != SOCK_CONNECTED) { set_error( STATUS_PIPE_DISCONNECTED ); return; } queue_async( queue, async ); sock_reselect( sock ); set_error( STATUS_PENDING ); } static void sock_reselect_async( struct fd *fd, struct async_queue *queue ) { struct sock *sock = get_fd_user( fd ); if (sock->wr_shutdown_pending && list_empty( &sock->write_q.queue )) { shutdown( get_unix_fd( sock->fd ), SHUT_WR ); sock->wr_shutdown_pending = 0; } /* Don't reselect the ifchange queue; we always ask for POLLIN. * Don't reselect an uninitialized socket; we can't call set_fd_events() on * a pseudo-fd. 
*/ if (queue != &sock->ifchange_q && sock->type) sock_reselect( sock ); } static struct fd *sock_get_fd( struct object *obj ) { struct sock *sock = (struct sock *)obj; return (struct fd *)grab_object( sock->fd ); } static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle ) { struct sock *sock = (struct sock *)obj; if (sock->obj.handle_count == 1) /* last handle */ { struct accept_req *accept_req, *accept_next; struct poll_req *poll_req, *poll_next; if (sock->accept_recv_req) async_terminate( sock->accept_recv_req->async, STATUS_CANCELLED ); LIST_FOR_EACH_ENTRY_SAFE( accept_req, accept_next, &sock->accept_list, struct accept_req, entry ) async_terminate( accept_req->async, STATUS_CANCELLED ); if (sock->connect_req) async_terminate( sock->connect_req->async, STATUS_CANCELLED ); LIST_FOR_EACH_ENTRY_SAFE( poll_req, poll_next, &poll_list, struct poll_req, entry ) { struct iosb *iosb = poll_req->iosb; BOOL signaled = FALSE; unsigned int i; if (iosb->status != STATUS_PENDING) continue; for (i = 0; i < poll_req->count; ++i) { if (poll_req->sockets[i].sock == sock) { signaled = TRUE; poll_req->sockets[i].flags = AFD_POLL_CLOSE; poll_req->sockets[i].status = 0; } } if (signaled) complete_async_poll( poll_req, STATUS_SUCCESS ); } } return 1; } static void sock_destroy( struct object *obj ) { struct sock *sock = (struct sock *)obj; assert( obj->ops == &sock_ops ); /* FIXME: special socket shutdown stuff? */ if ( sock->deferred ) release_object( sock->deferred ); async_wake_up( &sock->ifchange_q, STATUS_CANCELLED ); sock_release_ifchange( sock ); free_async_queue( &sock->read_q ); free_async_queue( &sock->write_q ); free_async_queue( &sock->ifchange_q ); free_async_queue( &sock->accept_q ); free_async_queue( &sock->connect_q ); free_async_queue( &sock->poll_q ); if (sock->event) release_object( sock->event ); if (sock->fd) { /* shut the socket down to force pending poll() calls in the client to return */ shutdown( get_unix_fd(sock->fd), SHUT_RDWR ); release_object( sock->fd ); } } static struct sock *create_socket(void) { struct sock *sock; if (!(sock = alloc_object( &sock_ops ))) return NULL; sock->fd = NULL; sock->state = SOCK_UNCONNECTED; sock->mask = 0; sock->pending_events = 0; sock->reported_events = 0; sock->flags = 0; sock->proto = 0; sock->type = 0; sock->family = 0; sock->event = NULL; sock->window = 0; sock->message = 0; sock->wparam = 0; sock->connect_time = 0; sock->deferred = NULL; sock->ifchange_obj = NULL; sock->accept_recv_req = NULL; sock->connect_req = NULL; sock->main_poll = NULL; memset( &sock->addr, 0, sizeof(sock->addr) ); sock->addr_len = 0; sock->rd_shutdown = 0; sock->wr_shutdown = 0; sock->wr_shutdown_pending = 0; sock->hangup = 0; sock->aborted = 0; sock->nonblocking = 0; sock->bound = 0; sock->rcvbuf = 0; sock->sndbuf = 0; sock->rcvtimeo = 0; sock->sndtimeo = 0; init_async_queue( &sock->read_q ); init_async_queue( &sock->write_q ); init_async_queue( &sock->ifchange_q ); init_async_queue( &sock->accept_q ); init_async_queue( &sock->connect_q ); init_async_queue( &sock->poll_q ); memset( sock->errors, 0, sizeof(sock->errors) ); list_init( &sock->accept_list ); return sock; } static int get_unix_family( int family ) { switch (family) { case WS_AF_INET: return AF_INET; case WS_AF_INET6: return AF_INET6; #ifdef HAS_IPX case WS_AF_IPX: return AF_IPX; #endif #ifdef AF_IRDA case WS_AF_IRDA: return AF_IRDA; #endif case WS_AF_UNSPEC: return AF_UNSPEC; default: return -1; } } static int get_unix_type( int type ) { switch (type) { case 
WS_SOCK_DGRAM: return SOCK_DGRAM; case WS_SOCK_RAW: return SOCK_RAW; case WS_SOCK_STREAM: return SOCK_STREAM; default: return -1; } } static int get_unix_protocol( int protocol ) { if (protocol >= WS_NSPROTO_IPX && protocol <= WS_NSPROTO_IPX + 255) return protocol; switch (protocol) { case WS_IPPROTO_ICMP: return IPPROTO_ICMP; case WS_IPPROTO_IGMP: return IPPROTO_IGMP; case WS_IPPROTO_IP: return IPPROTO_IP; case WS_IPPROTO_IPV4: return IPPROTO_IPIP; case WS_IPPROTO_IPV6: return IPPROTO_IPV6; case WS_IPPROTO_RAW: return IPPROTO_RAW; case WS_IPPROTO_TCP: return IPPROTO_TCP; case WS_IPPROTO_UDP: return IPPROTO_UDP; default: return -1; } } static void set_dont_fragment( int fd, int level, int value ) { int optname; if (level == IPPROTO_IP) { #ifdef IP_DONTFRAG optname = IP_DONTFRAG; #elif defined(IP_MTU_DISCOVER) && defined(IP_PMTUDISC_DO) && defined(IP_PMTUDISC_DONT) optname = IP_MTU_DISCOVER; value = value ? IP_PMTUDISC_DO : IP_PMTUDISC_DONT; #else return; #endif } else { #ifdef IPV6_DONTFRAG optname = IPV6_DONTFRAG; #elif defined(IPV6_MTU_DISCOVER) && defined(IPV6_PMTUDISC_DO) && defined(IPV6_PMTUDISC_DONT) optname = IPV6_MTU_DISCOVER; value = value ? IPV6_PMTUDISC_DO : IPV6_PMTUDISC_DONT; #else return; #endif } setsockopt( fd, level, optname, &value, sizeof(value) ); } static int init_socket( struct sock *sock, int family, int type, int protocol, unsigned int flags ) { unsigned int options = 0; int sockfd, unix_type, unix_family, unix_protocol, value; socklen_t len; unix_family = get_unix_family( family ); unix_type = get_unix_type( type ); unix_protocol = get_unix_protocol( protocol ); if (unix_protocol < 0) { if (type && unix_type < 0) set_win32_error( WSAESOCKTNOSUPPORT ); else set_win32_error( WSAEPROTONOSUPPORT ); return -1; } if (unix_family < 0) { if (family >= 0 && unix_type < 0) set_win32_error( WSAESOCKTNOSUPPORT ); else set_win32_error( WSAEAFNOSUPPORT ); return -1; } sockfd = socket( unix_family, unix_type, unix_protocol ); if (sockfd == -1) { if (errno == EINVAL) set_win32_error( WSAESOCKTNOSUPPORT ); else set_win32_error( sock_get_error( errno )); return -1; } fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */ if (family == WS_AF_IPX && protocol >= WS_NSPROTO_IPX && protocol <= WS_NSPROTO_IPX + 255) { #ifdef HAS_IPX int ipx_type = protocol - WS_NSPROTO_IPX; #ifdef SOL_IPX setsockopt( sockfd, SOL_IPX, IPX_TYPE, &ipx_type, sizeof(ipx_type) ); #else struct ipx val; /* Should we retrieve val using a getsockopt call and then * set the modified one? */ val.ipx_pt = ipx_type; setsockopt( sockfd, 0, SO_DEFAULT_HEADERS, &val, sizeof(val) ); #endif #endif } if (unix_family == AF_INET || unix_family == AF_INET6) { /* ensure IP_DONTFRAGMENT is disabled for SOCK_DGRAM and SOCK_RAW, enabled for SOCK_STREAM */ if (unix_type == SOCK_DGRAM || unix_type == SOCK_RAW) /* in Linux the global default can be enabled */ set_dont_fragment( sockfd, unix_family == AF_INET6 ? IPPROTO_IPV6 : IPPROTO_IP, FALSE ); else if (unix_type == SOCK_STREAM) set_dont_fragment( sockfd, unix_family == AF_INET6 ? IPPROTO_IPV6 : IPPROTO_IP, TRUE ); } #ifdef IPV6_V6ONLY if (unix_family == AF_INET6) { static const int enable = 1; setsockopt( sockfd, IPPROTO_IPV6, IPV6_V6ONLY, &enable, sizeof(enable) ); } #endif len = sizeof(value); if (!getsockopt( sockfd, SOL_SOCKET, SO_RCVBUF, &value, &len )) sock->rcvbuf = value; len = sizeof(value); if (!getsockopt( sockfd, SOL_SOCKET, SO_SNDBUF, &value, &len )) sock->sndbuf = value; sock->state = (type == WS_SOCK_STREAM ? 
SOCK_UNCONNECTED : SOCK_CONNECTIONLESS); sock->flags = flags; sock->proto = protocol; sock->type = type; sock->family = family; if (sock->fd) { options = get_fd_options( sock->fd ); release_object( sock->fd ); } if (!(sock->fd = create_anonymous_fd( &sock_fd_ops, sockfd, &sock->obj, options ))) { return -1; } /* We can't immediately allow caching for a connection-mode socket, since it * might be accepted into (changing the underlying fd object.) */ if (sock->type != WS_SOCK_STREAM) allow_fd_caching( sock->fd ); return 0; } /* accepts a socket and inits it */ static int accept_new_fd( struct sock *sock ) { /* Try to accept(2). We can't be safe that this an already connected socket * or that accept() is allowed on it. In those cases we will get -1/errno * return. */ struct sockaddr saddr; socklen_t slen = sizeof(saddr); int acceptfd = accept( get_unix_fd(sock->fd), &saddr, &slen ); if (acceptfd != -1) fcntl( acceptfd, F_SETFL, O_NONBLOCK ); else set_error( sock_get_ntstatus( errno )); return acceptfd; } /* accept a socket (creates a new fd) */ static struct sock *accept_socket( struct sock *sock ) { struct sock *acceptsock; int acceptfd; if (get_unix_fd( sock->fd ) == -1) return NULL; if ( sock->deferred ) { acceptsock = sock->deferred; sock->deferred = NULL; } else { union unix_sockaddr unix_addr; socklen_t unix_len; if ((acceptfd = accept_new_fd( sock )) == -1) return NULL; if (!(acceptsock = create_socket())) { close( acceptfd ); return NULL; } /* newly created socket gets the same properties of the listening socket */ acceptsock->state = SOCK_CONNECTED; acceptsock->bound = 1; acceptsock->nonblocking = sock->nonblocking; acceptsock->mask = sock->mask; acceptsock->proto = sock->proto; acceptsock->type = sock->type; acceptsock->family = sock->family; acceptsock->window = sock->window; acceptsock->message = sock->message; acceptsock->connect_time = current_time; if (sock->event) acceptsock->event = (struct event *)grab_object( sock->event ); acceptsock->flags = sock->flags; if (!(acceptsock->fd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj, get_fd_options( sock->fd ) ))) { release_object( acceptsock ); return NULL; } unix_len = sizeof(unix_addr); if (!getsockname( acceptfd, &unix_addr.addr, &unix_len )) acceptsock->addr_len = sockaddr_from_unix( &unix_addr, &acceptsock->addr.addr, sizeof(acceptsock->addr) ); } clear_error(); sock->pending_events &= ~AFD_POLL_ACCEPT; sock->reported_events &= ~AFD_POLL_ACCEPT; sock_reselect( sock ); return acceptsock; } static int accept_into_socket( struct sock *sock, struct sock *acceptsock ) { union unix_sockaddr unix_addr; socklen_t unix_len; int acceptfd; struct fd *newfd; if (get_unix_fd( sock->fd ) == -1) return FALSE; if ( sock->deferred ) { newfd = dup_fd_object( sock->deferred->fd, 0, 0, get_fd_options( acceptsock->fd ) ); if ( !newfd ) return FALSE; set_fd_user( newfd, &sock_fd_ops, &acceptsock->obj ); release_object( sock->deferred ); sock->deferred = NULL; } else { if ((acceptfd = accept_new_fd( sock )) == -1) return FALSE; if (!(newfd = create_anonymous_fd( &sock_fd_ops, acceptfd, &acceptsock->obj, get_fd_options( acceptsock->fd ) ))) return FALSE; } acceptsock->state = SOCK_CONNECTED; acceptsock->pending_events = 0; acceptsock->reported_events = 0; acceptsock->proto = sock->proto; acceptsock->type = sock->type; acceptsock->family = sock->family; acceptsock->wparam = 0; acceptsock->deferred = NULL; acceptsock->connect_time = current_time; fd_copy_completion( acceptsock->fd, newfd ); release_object( acceptsock->fd ); 
acceptsock->fd = newfd; unix_len = sizeof(unix_addr); if (!getsockname( get_unix_fd( newfd ), &unix_addr.addr, &unix_len )) acceptsock->addr_len = sockaddr_from_unix( &unix_addr, &acceptsock->addr.addr, sizeof(acceptsock->addr) ); clear_error(); sock->pending_events &= ~AFD_POLL_ACCEPT; sock->reported_events &= ~AFD_POLL_ACCEPT; sock_reselect( sock ); return TRUE; } #ifdef IP_BOUND_IF static int bind_to_iface_name( int fd, in_addr_t bind_addr, const char *name ) { static const int enable = 1; unsigned int index; if (!(index = if_nametoindex( name ))) return -1; if (setsockopt( fd, IPPROTO_IP, IP_BOUND_IF, &index, sizeof(index) )) return -1; return setsockopt( fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable) ); } #elif defined(IP_UNICAST_IF) && defined(SO_ATTACH_FILTER) && defined(SO_BINDTODEVICE) struct interface_filter { struct sock_filter iface_memaddr; struct sock_filter iface_rule; struct sock_filter ip_memaddr; struct sock_filter ip_rule; struct sock_filter return_keep; struct sock_filter return_dump; }; # define FILTER_JUMP_DUMP(here) (u_char)(offsetof(struct interface_filter, return_dump) \ -offsetof(struct interface_filter, here)-sizeof(struct sock_filter)) \ /sizeof(struct sock_filter) # define FILTER_JUMP_KEEP(here) (u_char)(offsetof(struct interface_filter, return_keep) \ -offsetof(struct interface_filter, here)-sizeof(struct sock_filter)) \ /sizeof(struct sock_filter) # define FILTER_JUMP_NEXT() (u_char)(0) # define SKF_NET_DESTIP 16 /* offset in the network header to the destination IP */ static struct interface_filter generic_interface_filter = { /* This filter rule allows incoming packets on the specified interface, which works for all * remotely generated packets and for locally generated broadcast packets. */ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, SKF_AD_OFF+SKF_AD_IFINDEX), BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0xdeadbeef, FILTER_JUMP_KEEP(iface_rule), FILTER_JUMP_NEXT()), /* This rule allows locally generated packets targeted at the specific IP address of the chosen * adapter (local packets not destined for the broadcast address do not have IFINDEX set) */ BPF_STMT(BPF_LD+BPF_W+BPF_ABS, SKF_NET_OFF+SKF_NET_DESTIP), BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, 0xdeadbeef, FILTER_JUMP_KEEP(ip_rule), FILTER_JUMP_DUMP(ip_rule)), BPF_STMT(BPF_RET+BPF_K, (u_int)-1), /* keep packet */ BPF_STMT(BPF_RET+BPF_K, 0) /* dump packet */ }; static int bind_to_iface_name( int fd, in_addr_t bind_addr, const char *name ) { struct interface_filter specific_interface_filter; struct sock_fprog filter_prog; static const int enable = 1; unsigned int index; in_addr_t ifindex; if (!setsockopt( fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen( name ) + 1 )) return 0; /* SO_BINDTODEVICE requires NET_CAP_RAW until Linux 5.7. 
*/ if (debug_level) fprintf( stderr, "setsockopt SO_BINDTODEVICE fd %d, name %s failed: %s, falling back to SO_REUSE_ADDR\n", fd, name, strerror( errno )); if (!(index = if_nametoindex( name ))) return -1; ifindex = htonl( index ); if (setsockopt( fd, IPPROTO_IP, IP_UNICAST_IF, &ifindex, sizeof(ifindex) ) < 0) return -1; specific_interface_filter = generic_interface_filter; specific_interface_filter.iface_rule.k = index; specific_interface_filter.ip_rule.k = htonl( bind_addr ); filter_prog.len = sizeof(generic_interface_filter) / sizeof(struct sock_filter); filter_prog.filter = (struct sock_filter *)&specific_interface_filter; if (setsockopt( fd, SOL_SOCKET, SO_ATTACH_FILTER, &filter_prog, sizeof(filter_prog) )) return -1; return setsockopt( fd, SOL_SOCKET, SO_REUSEADDR, &enable, sizeof(enable) ); } #else static int bind_to_iface_name( int fd, in_addr_t bind_addr, const char *name ) { errno = EOPNOTSUPP; return -1; } #endif /* LINUX_BOUND_IF */ /* Take bind() calls on any name corresponding to a local network adapter and * restrict the given socket to operating only on the specified interface. This * restriction consists of two components: * 1) An outgoing packet restriction suggesting the egress interface for all * packets. * 2) An incoming packet restriction dropping packets not meant for the * interface. * If the function succeeds in placing these restrictions, then the name for the * bind() may safely be changed to INADDR_ANY, permitting the transmission and * receipt of broadcast packets on the socket. This behavior is only relevant to * UDP sockets and is needed for applications that expect to be able to receive * broadcast packets on a socket that is bound to a specific network interface. */ static int bind_to_interface( struct sock *sock, const struct sockaddr_in *addr ) { in_addr_t bind_addr = addr->sin_addr.s_addr; struct ifaddrs *ifaddrs, *ifaddr; int fd = get_unix_fd( sock->fd ); int err = 0; if (bind_addr == htonl( INADDR_ANY ) || bind_addr == htonl( INADDR_LOOPBACK )) return 0; if (sock->type != WS_SOCK_DGRAM) return 0; if (getifaddrs( &ifaddrs ) < 0) return 0; for (ifaddr = ifaddrs; ifaddr != NULL; ifaddr = ifaddr->ifa_next) { if (ifaddr->ifa_addr && ifaddr->ifa_addr->sa_family == AF_INET && ((struct sockaddr_in *)ifaddr->ifa_addr)->sin_addr.s_addr == bind_addr) { if ((err = bind_to_iface_name( fd, bind_addr, ifaddr->ifa_name )) < 0) { if (debug_level) fprintf( stderr, "failed to bind to interface: %s\n", strerror( errno ) ); } break; } } freeifaddrs( ifaddrs ); return !err; } #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID static unsigned int get_ipv6_interface_index( const struct in6_addr *addr ) { struct ifaddrs *ifaddrs, *ifaddr; if (getifaddrs( &ifaddrs ) < 0) return 0; for (ifaddr = ifaddrs; ifaddr != NULL; ifaddr = ifaddr->ifa_next) { if (ifaddr->ifa_addr && ifaddr->ifa_addr->sa_family == AF_INET6 && !memcmp( &((struct sockaddr_in6 *)ifaddr->ifa_addr)->sin6_addr, addr, sizeof(*addr) )) { unsigned int index = if_nametoindex( ifaddr->ifa_name ); if (!index) { if (debug_level) fprintf( stderr, "Unable to look up interface index for %s: %s\n", ifaddr->ifa_name, strerror( errno ) ); continue; } freeifaddrs( ifaddrs ); return index; } } freeifaddrs( ifaddrs ); return 0; } #endif /* return an errno value mapped to a WSA error */ static unsigned int sock_get_error( int err ) { switch (err) { case EINTR: return WSAEINTR; case EBADF: return WSAEBADF; case EPERM: case EACCES: return WSAEACCES; case EFAULT: return WSAEFAULT; case EINVAL: return WSAEINVAL; case EMFILE: return 
WSAEMFILE; case EINPROGRESS: case EWOULDBLOCK: return WSAEWOULDBLOCK; case EALREADY: return WSAEALREADY; case ENOTSOCK: return WSAENOTSOCK; case EDESTADDRREQ: return WSAEDESTADDRREQ; case EMSGSIZE: return WSAEMSGSIZE; case EPROTOTYPE: return WSAEPROTOTYPE; case ENOPROTOOPT: return WSAENOPROTOOPT; case EPROTONOSUPPORT: return WSAEPROTONOSUPPORT; case ESOCKTNOSUPPORT: return WSAESOCKTNOSUPPORT; case EOPNOTSUPP: return WSAEOPNOTSUPP; case EPFNOSUPPORT: return WSAEPFNOSUPPORT; case EAFNOSUPPORT: return WSAEAFNOSUPPORT; case EADDRINUSE: return WSAEADDRINUSE; case EADDRNOTAVAIL: return WSAEADDRNOTAVAIL; case ENETDOWN: return WSAENETDOWN; case ENETUNREACH: return WSAENETUNREACH; case ENETRESET: return WSAENETRESET; case ECONNABORTED: return WSAECONNABORTED; case EPIPE: case ECONNRESET: return WSAECONNRESET; case ENOBUFS: return WSAENOBUFS; case EISCONN: return WSAEISCONN; case ENOTCONN: return WSAENOTCONN; case ESHUTDOWN: return WSAESHUTDOWN; case ETOOMANYREFS: return WSAETOOMANYREFS; case ETIMEDOUT: return WSAETIMEDOUT; case ECONNREFUSED: return WSAECONNREFUSED; case ELOOP: return WSAELOOP; case ENAMETOOLONG: return WSAENAMETOOLONG; case EHOSTDOWN: return WSAEHOSTDOWN; case EHOSTUNREACH: return WSAEHOSTUNREACH; case ENOTEMPTY: return WSAENOTEMPTY; #ifdef EPROCLIM case EPROCLIM: return WSAEPROCLIM; #endif #ifdef EUSERS case EUSERS: return WSAEUSERS; #endif #ifdef EDQUOT case EDQUOT: return WSAEDQUOT; #endif #ifdef ESTALE case ESTALE: return WSAESTALE; #endif #ifdef EREMOTE case EREMOTE: return WSAEREMOTE; #endif case 0: return 0; default: errno = err; perror("wineserver: sock_get_error() can't map error"); return WSAEFAULT; } } static int sock_get_ntstatus( int err ) { switch ( err ) { case EBADF: return STATUS_INVALID_HANDLE; case EBUSY: return STATUS_DEVICE_BUSY; case EPERM: case EACCES: return STATUS_ACCESS_DENIED; case EFAULT: return STATUS_ACCESS_VIOLATION; case EINVAL: return STATUS_INVALID_PARAMETER; case ENFILE: case EMFILE: return STATUS_TOO_MANY_OPENED_FILES; case EINPROGRESS: case EWOULDBLOCK: return STATUS_DEVICE_NOT_READY; case EALREADY: return STATUS_NETWORK_BUSY; case ENOTSOCK: return STATUS_OBJECT_TYPE_MISMATCH; case EDESTADDRREQ: return STATUS_INVALID_PARAMETER; case EMSGSIZE: return STATUS_BUFFER_OVERFLOW; case EPROTONOSUPPORT: case ESOCKTNOSUPPORT: case EPFNOSUPPORT: case EAFNOSUPPORT: case EPROTOTYPE: return STATUS_NOT_SUPPORTED; case ENOPROTOOPT: return STATUS_INVALID_PARAMETER; case EOPNOTSUPP: return STATUS_NOT_SUPPORTED; case EADDRINUSE: return STATUS_SHARING_VIOLATION; /* Linux returns ENODEV when specifying an invalid sin6_scope_id; * Windows returns STATUS_INVALID_ADDRESS_COMPONENT */ case ENODEV: case EADDRNOTAVAIL: return STATUS_INVALID_ADDRESS_COMPONENT; case ECONNREFUSED: return STATUS_CONNECTION_REFUSED; case ESHUTDOWN: return STATUS_PIPE_DISCONNECTED; case ENOTCONN: return STATUS_INVALID_CONNECTION; case ETIMEDOUT: return STATUS_IO_TIMEOUT; case ENETUNREACH: return STATUS_NETWORK_UNREACHABLE; case EHOSTUNREACH: return STATUS_HOST_UNREACHABLE; case ENETDOWN: return STATUS_NETWORK_BUSY; case EPIPE: case ECONNRESET: return STATUS_CONNECTION_RESET; case ECONNABORTED: return STATUS_CONNECTION_ABORTED; case EISCONN: return STATUS_CONNECTION_ACTIVE; case 0: return STATUS_SUCCESS; default: errno = err; perror("wineserver: sock_get_ntstatus() can't map error"); return STATUS_UNSUCCESSFUL; } } static struct accept_req *alloc_accept_req( struct sock *sock, struct sock *acceptsock, struct async *async, const struct afd_accept_into_params *params ) { struct accept_req *req = 
mem_alloc( sizeof(*req) ); if (req) { req->async = (struct async *)grab_object( async ); req->iosb = async_get_iosb( async ); req->sock = (struct sock *)grab_object( sock ); req->acceptsock = acceptsock; if (acceptsock) grab_object( acceptsock ); req->accepted = 0; req->recv_len = 0; req->local_len = 0; if (params) { req->recv_len = params->recv_len; req->local_len = params->local_len; } } return req; } static void sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async ) { struct sock *sock = get_fd_user( fd ); int unix_fd; assert( sock->obj.ops == &sock_ops ); if (code != IOCTL_AFD_WINE_CREATE && (unix_fd = get_unix_fd( fd )) < 0) return; switch(code) { case IOCTL_AFD_WINE_CREATE: { const struct afd_create_params *params = get_req_data(); if (get_req_data_size() != sizeof(*params)) { set_error( STATUS_INVALID_PARAMETER ); return; } init_socket( sock, params->family, params->type, params->protocol, params->flags ); return; } case IOCTL_AFD_WINE_ACCEPT: { struct sock *acceptsock; obj_handle_t handle; if (get_reply_max_size() != sizeof(handle)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } if (!(acceptsock = accept_socket( sock ))) { struct accept_req *req; if (sock->nonblocking) return; if (get_error() != STATUS_DEVICE_NOT_READY) return; if (!(req = alloc_accept_req( sock, NULL, async, NULL ))) return; list_add_tail( &sock->accept_list, &req->entry ); async_set_completion_callback( async, free_accept_req, req ); queue_async( &sock->accept_q, async ); sock_reselect( sock ); set_error( STATUS_PENDING ); return; } handle = alloc_handle( current->process, &acceptsock->obj, GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT ); acceptsock->wparam = handle; sock_reselect( acceptsock ); release_object( acceptsock ); set_reply_data( &handle, sizeof(handle) ); return; } case IOCTL_AFD_WINE_ACCEPT_INTO: { static const int access = FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES | FILE_READ_DATA; const struct afd_accept_into_params *params = get_req_data(); struct sock *acceptsock; unsigned int remote_len; struct accept_req *req; if (get_req_data_size() != sizeof(*params) || get_reply_max_size() < params->recv_len || get_reply_max_size() - params->recv_len < params->local_len) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } remote_len = get_reply_max_size() - params->recv_len - params->local_len; if (remote_len < sizeof(int)) { set_error( STATUS_INVALID_PARAMETER ); return; } if (!(acceptsock = (struct sock *)get_handle_obj( current->process, params->accept_handle, access, &sock_ops ))) return; if (acceptsock->accept_recv_req) { release_object( acceptsock ); set_error( STATUS_INVALID_PARAMETER ); return; } if (!(req = alloc_accept_req( sock, acceptsock, async, params ))) { release_object( acceptsock ); return; } list_add_tail( &sock->accept_list, &req->entry ); acceptsock->accept_recv_req = req; release_object( acceptsock ); acceptsock->wparam = params->accept_handle; async_set_completion_callback( async, free_accept_req, req ); queue_async( &sock->accept_q, async ); sock_reselect( sock ); set_error( STATUS_PENDING ); return; } case IOCTL_AFD_LISTEN: { const struct afd_listen_params *params = get_req_data(); if (get_req_data_size() < sizeof(*params)) { set_error( STATUS_INVALID_PARAMETER ); return; } if (!sock->bound) { set_error( STATUS_INVALID_PARAMETER ); return; } if (listen( unix_fd, params->backlog ) < 0) { set_error( sock_get_ntstatus( errno ) ); return; } sock->state = SOCK_LISTENING; /* a listening socket can no longer be accepted into */ allow_fd_caching( sock->fd ); /* we 
may already be selecting for AFD_POLL_ACCEPT */ sock_reselect( sock ); return; } case IOCTL_AFD_WINE_CONNECT: { const struct afd_connect_params *params = get_req_data(); const struct WS_sockaddr *addr; union unix_sockaddr unix_addr; struct connect_req *req; socklen_t unix_len; int send_len, ret; if (get_req_data_size() < sizeof(*params) || get_req_data_size() - sizeof(*params) < params->addr_len) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } send_len = get_req_data_size() - sizeof(*params) - params->addr_len; addr = (const struct WS_sockaddr *)(params + 1); if (!params->synchronous && !sock->bound) { set_error( STATUS_INVALID_PARAMETER ); return; } if (sock->accept_recv_req) { set_error( STATUS_INVALID_PARAMETER ); return; } if (sock->connect_req) { set_error( STATUS_INVALID_PARAMETER ); return; } switch (sock->state) { case SOCK_LISTENING: set_error( STATUS_INVALID_PARAMETER ); return; case SOCK_CONNECTING: /* FIXME: STATUS_ADDRESS_ALREADY_ASSOCIATED probably isn't right, * but there's no status code that maps to WSAEALREADY... */ set_error( params->synchronous ? STATUS_ADDRESS_ALREADY_ASSOCIATED : STATUS_INVALID_PARAMETER ); return; case SOCK_CONNECTED: set_error( STATUS_CONNECTION_ACTIVE ); return; case SOCK_UNCONNECTED: case SOCK_CONNECTIONLESS: break; } unix_len = sockaddr_to_unix( addr, params->addr_len, &unix_addr ); if (!unix_len) { set_error( STATUS_INVALID_ADDRESS ); return; } if (unix_addr.addr.sa_family == AF_INET && !memcmp( &unix_addr.in.sin_addr, magic_loopback_addr, 4 )) unix_addr.in.sin_addr.s_addr = htonl( INADDR_LOOPBACK ); ret = connect( unix_fd, &unix_addr.addr, unix_len ); if (ret < 0 && errno != EINPROGRESS) { set_error( sock_get_ntstatus( errno ) ); return; } /* a connected or connecting socket can no longer be accepted into */ allow_fd_caching( sock->fd ); unix_len = sizeof(unix_addr); if (!getsockname( unix_fd, &unix_addr.addr, &unix_len )) sock->addr_len = sockaddr_from_unix( &unix_addr, &sock->addr.addr, sizeof(sock->addr) ); sock->bound = 1; if (!ret) { sock->state = SOCK_CONNECTED; if (!send_len) return; } sock->state = SOCK_CONNECTING; if (params->synchronous && sock->nonblocking) { sock_reselect( sock ); set_error( STATUS_DEVICE_NOT_READY ); return; } if (!(req = mem_alloc( sizeof(*req) ))) return; req->async = (struct async *)grab_object( async ); req->iosb = async_get_iosb( async ); req->sock = (struct sock *)grab_object( sock ); req->addr_len = params->addr_len; req->send_len = send_len; req->send_cursor = 0; async_set_completion_callback( async, free_connect_req, req ); sock->connect_req = req; queue_async( &sock->connect_q, async ); sock_reselect( sock ); set_error( STATUS_PENDING ); return; } case IOCTL_AFD_WINE_SHUTDOWN: { unsigned int how; if (get_req_data_size() < sizeof(int)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } how = *(int *)get_req_data(); if (how > SD_BOTH) { set_error( STATUS_INVALID_PARAMETER ); return; } if (sock->state != SOCK_CONNECTED && sock->state != SOCK_CONNECTIONLESS) { set_error( STATUS_INVALID_CONNECTION ); return; } if (how != SD_SEND) { sock->rd_shutdown = 1; } if (how != SD_RECEIVE) { sock->wr_shutdown = 1; if (list_empty( &sock->write_q.queue )) shutdown( unix_fd, SHUT_WR ); else sock->wr_shutdown_pending = 1; } if (how == SD_BOTH) { if (sock->event) release_object( sock->event ); sock->event = NULL; sock->window = 0; sock->mask = 0; sock->nonblocking = 1; } sock_reselect( sock ); return; } case IOCTL_AFD_WINE_ADDRESS_LIST_CHANGE: { int force_async; if (get_req_data_size() < sizeof(int)) { set_error( 
STATUS_BUFFER_TOO_SMALL ); return; } force_async = *(int *)get_req_data(); if (sock->nonblocking && !force_async) { set_error( STATUS_DEVICE_NOT_READY ); return; } if (!sock_get_ifchange( sock )) return; queue_async( &sock->ifchange_q, async ); set_error( STATUS_PENDING ); return; } case IOCTL_AFD_WINE_FIONBIO: if (get_req_data_size() < sizeof(int)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } if (*(int *)get_req_data()) { sock->nonblocking = 1; } else { if (sock->mask) { set_error( STATUS_INVALID_PARAMETER ); return; } sock->nonblocking = 0; } return; case IOCTL_AFD_GET_EVENTS: { struct afd_get_events_params params = {0}; unsigned int i; if (get_reply_max_size() < sizeof(params)) { set_error( STATUS_INVALID_PARAMETER ); return; } params.flags = sock->pending_events & sock->mask; for (i = 0; i < ARRAY_SIZE( params.status ); ++i) params.status[i] = sock_get_ntstatus( sock->errors[i] ); sock->pending_events = 0; sock_reselect( sock ); set_reply_data( &params, sizeof(params) ); return; } case IOCTL_AFD_EVENT_SELECT: { struct event *event = NULL; obj_handle_t event_handle; int mask; set_async_pending( async ); if (is_machine_64bit( current->process->machine )) { const struct afd_event_select_params_64 *params = get_req_data(); if (get_req_data_size() < sizeof(*params)) { set_error( STATUS_INVALID_PARAMETER ); return; } event_handle = params->event; mask = params->mask; } else { const struct afd_event_select_params_32 *params = get_req_data(); if (get_req_data_size() < sizeof(*params)) { set_error( STATUS_INVALID_PARAMETER ); return; } event_handle = params->event; mask = params->mask; } if ((event_handle || mask) && !(event = get_event_obj( current->process, event_handle, EVENT_MODIFY_STATE ))) { set_error( STATUS_INVALID_PARAMETER ); return; } if (sock->event) release_object( sock->event ); sock->event = event; sock->mask = mask; sock->window = 0; sock->message = 0; sock->wparam = 0; sock->nonblocking = 1; sock_reselect( sock ); /* Explicitly wake the socket up if the mask causes it to become * signaled. Note that reselecting isn't enough, since we might already * have had events recorded in sock->reported_events and we don't want * to select for them again. 
*/ sock_wake_up( sock ); return; } case IOCTL_AFD_WINE_MESSAGE_SELECT: { const struct afd_message_select_params *params = get_req_data(); if (get_req_data_size() < sizeof(params)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } if (sock->event) release_object( sock->event ); if (params->window) { sock->pending_events = 0; sock->reported_events = 0; } sock->event = NULL; sock->mask = params->mask; sock->window = params->window; sock->message = params->message; sock->wparam = params->handle; sock->nonblocking = 1; sock_reselect( sock ); return; } case IOCTL_AFD_BIND: { const struct afd_bind_params *params = get_req_data(); union unix_sockaddr unix_addr, bind_addr; data_size_t in_size; socklen_t unix_len; /* the ioctl is METHOD_NEITHER, so ntdll gives us the output buffer as * input */ if (get_req_data_size() < get_reply_max_size()) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } in_size = get_req_data_size() - get_reply_max_size(); if (in_size < offsetof(struct afd_bind_params, addr.sa_data) || get_reply_max_size() < in_size - sizeof(int)) { set_error( STATUS_INVALID_PARAMETER ); return; } if (sock->bound) { set_error( STATUS_ADDRESS_ALREADY_ASSOCIATED ); return; } unix_len = sockaddr_to_unix( &params->addr, in_size - sizeof(int), &unix_addr ); if (!unix_len) { set_error( STATUS_INVALID_ADDRESS ); return; } bind_addr = unix_addr; if (unix_addr.addr.sa_family == AF_INET) { if (!memcmp( &unix_addr.in.sin_addr, magic_loopback_addr, 4 ) || bind_to_interface( sock, &unix_addr.in )) bind_addr.in.sin_addr.s_addr = htonl( INADDR_ANY ); } else if (unix_addr.addr.sa_family == AF_INET6) { #ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID /* Windows allows specifying zero to use the default scope. Linux * interprets it as an interface index and requires that it be * nonzero. 
*/ if (!unix_addr.in6.sin6_scope_id) bind_addr.in6.sin6_scope_id = get_ipv6_interface_index( &unix_addr.in6.sin6_addr ); #endif } set_async_pending( async ); if (bind( unix_fd, &bind_addr.addr, unix_len ) < 0) { if (errno == EADDRINUSE) { int reuse; socklen_t len = sizeof(reuse); if (!getsockopt( unix_fd, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, &len ) && reuse) errno = EACCES; } set_error( sock_get_ntstatus( errno ) ); return; } sock->bound = 1; unix_len = sizeof(bind_addr); if (!getsockname( unix_fd, &bind_addr.addr, &unix_len )) { /* store the interface or magic loopback address instead of the * actual unix address */ if (bind_addr.addr.sa_family == AF_INET) bind_addr.in.sin_addr = unix_addr.in.sin_addr; sock->addr_len = sockaddr_from_unix( &bind_addr, &sock->addr.addr, sizeof(sock->addr) ); } if (get_reply_max_size() >= sock->addr_len) set_reply_data( &sock->addr, sock->addr_len ); return; } case IOCTL_AFD_GETSOCKNAME: if (!sock->bound) { set_error( STATUS_INVALID_PARAMETER ); return; } if (get_reply_max_size() < sock->addr_len) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } set_reply_data( &sock->addr, sock->addr_len ); return; case IOCTL_AFD_WINE_DEFER: { const obj_handle_t *handle = get_req_data(); struct sock *acceptsock; if (get_req_data_size() < sizeof(*handle)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } acceptsock = (struct sock *)get_handle_obj( current->process, *handle, 0, &sock_ops ); if (!acceptsock) return; sock->deferred = acceptsock; return; } case IOCTL_AFD_WINE_GET_INFO: { struct afd_get_info_params params; if (get_reply_max_size() < sizeof(params)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } params.family = sock->family; params.type = sock->type; params.protocol = sock->proto; set_reply_data( &params, sizeof(params) ); return; } case IOCTL_AFD_WINE_GET_SO_ACCEPTCONN: { int listening = (sock->state == SOCK_LISTENING); if (get_reply_max_size() < sizeof(listening)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } set_reply_data( &listening, sizeof(listening) ); return; } case IOCTL_AFD_WINE_GET_SO_ERROR: { int error; socklen_t len = sizeof(error); unsigned int i; if (get_reply_max_size() < sizeof(error)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } if (getsockopt( unix_fd, SOL_SOCKET, SO_ERROR, (char *)&error, &len ) < 0) { set_error( sock_get_ntstatus( errno ) ); return; } if (!error) { for (i = 0; i < ARRAY_SIZE( sock->errors ); ++i) { if (sock->errors[i]) { error = sock_get_error( sock->errors[i] ); break; } } } set_reply_data( &error, sizeof(error) ); return; } case IOCTL_AFD_WINE_GET_SO_RCVBUF: { int rcvbuf = sock->rcvbuf; if (get_reply_max_size() < sizeof(rcvbuf)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } set_reply_data( &rcvbuf, sizeof(rcvbuf) ); return; } case IOCTL_AFD_WINE_SET_SO_RCVBUF: { DWORD rcvbuf; if (get_req_data_size() < sizeof(rcvbuf)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } rcvbuf = *(DWORD *)get_req_data(); if (!setsockopt( unix_fd, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbuf, sizeof(rcvbuf) )) sock->rcvbuf = rcvbuf; else set_error( sock_get_ntstatus( errno ) ); return; } case IOCTL_AFD_WINE_GET_SO_RCVTIMEO: { DWORD rcvtimeo = sock->rcvtimeo; if (get_reply_max_size() < sizeof(rcvtimeo)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } set_reply_data( &rcvtimeo, sizeof(rcvtimeo) ); return; } case IOCTL_AFD_WINE_SET_SO_RCVTIMEO: { DWORD rcvtimeo; if (get_req_data_size() < sizeof(rcvtimeo)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } rcvtimeo = *(DWORD *)get_req_data(); sock->rcvtimeo = rcvtimeo; return; } case 
IOCTL_AFD_WINE_GET_SO_SNDBUF: { int sndbuf = sock->sndbuf; if (get_reply_max_size() < sizeof(sndbuf)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } set_reply_data( &sndbuf, sizeof(sndbuf) ); return; } case IOCTL_AFD_WINE_SET_SO_SNDBUF: { DWORD sndbuf; if (get_req_data_size() < sizeof(sndbuf)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } sndbuf = *(DWORD *)get_req_data(); #ifdef __APPLE__ if (!sndbuf) { /* setsockopt fails if a zero value is passed */ sock->sndbuf = sndbuf; return; } #endif if (!setsockopt( unix_fd, SOL_SOCKET, SO_SNDBUF, (char *)&sndbuf, sizeof(sndbuf) )) sock->sndbuf = sndbuf; else set_error( sock_get_ntstatus( errno ) ); return; } case IOCTL_AFD_WINE_GET_SO_SNDTIMEO: { DWORD sndtimeo = sock->sndtimeo; if (get_reply_max_size() < sizeof(sndtimeo)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } set_reply_data( &sndtimeo, sizeof(sndtimeo) ); return; } case IOCTL_AFD_WINE_SET_SO_SNDTIMEO: { DWORD sndtimeo; if (get_req_data_size() < sizeof(sndtimeo)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } sndtimeo = *(DWORD *)get_req_data(); sock->sndtimeo = sndtimeo; return; } case IOCTL_AFD_WINE_GET_SO_CONNECT_TIME: { DWORD time = ~0u; if (get_reply_max_size() < sizeof(time)) { set_error( STATUS_BUFFER_TOO_SMALL ); return; } if (sock->state == SOCK_CONNECTED) time = (current_time - sock->connect_time) / 10000000; set_reply_data( &time, sizeof(time) ); return; } case IOCTL_AFD_POLL: { if (get_reply_max_size() < get_req_data_size()) { set_error( STATUS_INVALID_PARAMETER ); return; } if (is_machine_64bit( current->process->machine )) { const struct afd_poll_params_64 *params = get_req_data(); if (get_req_data_size() < sizeof(struct afd_poll_params_64) || get_req_data_size() < offsetof( struct afd_poll_params_64, sockets[params->count] )) { set_error( STATUS_INVALID_PARAMETER ); return; } poll_socket( sock, async, params->exclusive, params->timeout, params->count, params->sockets ); } else { const struct afd_poll_params_32 *params = get_req_data(); struct afd_poll_socket_64 *sockets; unsigned int i; if (get_req_data_size() < sizeof(struct afd_poll_params_32) || get_req_data_size() < offsetof( struct afd_poll_params_32, sockets[params->count] )) { set_error( STATUS_INVALID_PARAMETER ); return; } if (!(sockets = mem_alloc( params->count * sizeof(*sockets) ))) return; for (i = 0; i < params->count; ++i) { sockets[i].socket = params->sockets[i].socket; sockets[i].flags = params->sockets[i].flags; sockets[i].status = params->sockets[i].status; } poll_socket( sock, async, params->exclusive, params->timeout, params->count, sockets ); free( sockets ); } return; } default: set_error( STATUS_NOT_SUPPORTED ); return; } } static int poll_single_socket( struct sock *sock, int mask ) { struct pollfd pollfd; pollfd.fd = get_unix_fd( sock->fd ); pollfd.events = poll_flags_from_afd( sock, mask ); if (pollfd.events < 0 || poll( &pollfd, 1, 0 ) < 0) return 0; if (sock->state == SOCK_CONNECTING && (pollfd.revents & (POLLERR | POLLHUP))) pollfd.revents &= ~POLLOUT; if ((mask & AFD_POLL_HUP) && (pollfd.revents & POLLIN) && sock->type == WS_SOCK_STREAM) { char dummy; if (!recv( get_unix_fd( sock->fd ), &dummy, 1, MSG_PEEK )) { pollfd.revents &= ~POLLIN; pollfd.revents |= POLLHUP; } } return get_poll_flags( sock, pollfd.revents ) & mask; } static void handle_exclusive_poll(struct poll_req *req) { unsigned int i; for (i = 0; i < req->count; ++i) { struct sock *sock = req->sockets[i].sock; struct poll_req *main_poll = sock->main_poll; if (main_poll && main_poll->exclusive && req->exclusive) { 
complete_async_poll( main_poll, STATUS_SUCCESS ); main_poll = NULL; } if (!main_poll) sock->main_poll = req; } } static void poll_socket( struct sock *poll_sock, struct async *async, int exclusive, timeout_t timeout, unsigned int count, const struct afd_poll_socket_64 *sockets ) { BOOL signaled = FALSE; struct poll_req *req; unsigned int i, j; if (!count) { set_error( STATUS_INVALID_PARAMETER ); return; } if (!(req = mem_alloc( offsetof( struct poll_req, sockets[count] ) ))) return; req->timeout = NULL; if (timeout && timeout != TIMEOUT_INFINITE && !(req->timeout = add_timeout_user( timeout, async_poll_timeout, req ))) { free( req ); return; } req->orig_timeout = timeout; for (i = 0; i < count; ++i) { req->sockets[i].sock = (struct sock *)get_handle_obj( current->process, sockets[i].socket, 0, &sock_ops ); if (!req->sockets[i].sock) { for (j = 0; j < i; ++j) release_object( req->sockets[j].sock ); if (req->timeout) remove_timeout_user( req->timeout ); free( req ); return; } req->sockets[i].handle = sockets[i].socket; req->sockets[i].mask = sockets[i].flags; req->sockets[i].flags = 0; } req->exclusive = exclusive; req->count = count; req->async = (struct async *)grab_object( async ); req->iosb = async_get_iosb( async ); handle_exclusive_poll(req); list_add_tail( &poll_list, &req->entry ); async_set_completion_callback( async, free_poll_req, req ); queue_async( &poll_sock->poll_q, async ); for (i = 0; i < count; ++i) { struct sock *sock = req->sockets[i].sock; int mask = req->sockets[i].mask; int flags = poll_single_socket( sock, mask ); if (flags) { signaled = TRUE; req->sockets[i].flags = flags; req->sockets[i].status = sock_get_ntstatus( sock_error( sock->fd ) ); } /* FIXME: do other error conditions deserve a similar treatment? */ if (sock->state != SOCK_CONNECTING && sock->errors[AFD_POLL_BIT_CONNECT_ERR] && (mask & AFD_POLL_CONNECT_ERR)) { signaled = TRUE; req->sockets[i].flags |= AFD_POLL_CONNECT_ERR; req->sockets[i].status = sock_get_ntstatus( sock->errors[AFD_POLL_BIT_CONNECT_ERR] ); } } if (!timeout || signaled) complete_async_poll( req, STATUS_SUCCESS ); for (i = 0; i < req->count; ++i) sock_reselect( req->sockets[i].sock ); set_error( STATUS_PENDING ); } #ifdef HAVE_LINUX_RTNETLINK_H /* only keep one ifchange object around, all sockets waiting for wakeups will look to it */ static struct object *ifchange_object; static void ifchange_dump( struct object *obj, int verbose ); static struct fd *ifchange_get_fd( struct object *obj ); static void ifchange_destroy( struct object *obj ); static int ifchange_get_poll_events( struct fd *fd ); static void ifchange_poll_event( struct fd *fd, int event ); struct ifchange { struct object obj; /* object header */ struct fd *fd; /* interface change file descriptor */ struct list sockets; /* list of sockets to send interface change notifications */ }; static const struct object_ops ifchange_ops = { sizeof(struct ifchange), /* size */ &no_type, /* type */ ifchange_dump, /* dump */ no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ no_satisfied, /* satisfied */ NULL, /* get_esync_fd */ no_signal, /* signal */ ifchange_get_fd, /* get_fd */ default_map_access, /* map_access */ default_get_sd, /* get_sd */ default_set_sd, /* set_sd */ no_get_full_name, /* get_full_name */ no_lookup_name, /* lookup_name */ no_link_name, /* link_name */ NULL, /* unlink_name */ no_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ no_close_handle, /* close_handle */ ifchange_destroy /* destroy */ }; static const struct 
fd_ops ifchange_fd_ops = { ifchange_get_poll_events, /* get_poll_events */ ifchange_poll_event, /* poll_event */ NULL, /* get_fd_type */ no_fd_read, /* read */ no_fd_write, /* write */ no_fd_flush, /* flush */ no_fd_get_file_info, /* get_file_info */ no_fd_get_volume_info, /* get_volume_info */ no_fd_ioctl, /* ioctl */ NULL, /* cancel_async */ NULL, /* queue_async */ NULL /* reselect_async */ }; static void ifchange_dump( struct object *obj, int verbose ) { assert( obj->ops == &ifchange_ops ); fprintf( stderr, "Interface change\n" ); } static struct fd *ifchange_get_fd( struct object *obj ) { struct ifchange *ifchange = (struct ifchange *)obj; return (struct fd *)grab_object( ifchange->fd ); } static void ifchange_destroy( struct object *obj ) { struct ifchange *ifchange = (struct ifchange *)obj; assert( obj->ops == &ifchange_ops ); release_object( ifchange->fd ); /* reset the global ifchange object so that it will be recreated if it is needed again */ assert( obj == ifchange_object ); ifchange_object = NULL; } static int ifchange_get_poll_events( struct fd *fd ) { return POLLIN; } /* wake up all the sockets waiting for a change notification event */ static void ifchange_wake_up( struct object *obj, unsigned int status ) { struct ifchange *ifchange = (struct ifchange *)obj; struct list *ptr, *next; assert( obj->ops == &ifchange_ops ); assert( obj == ifchange_object ); LIST_FOR_EACH_SAFE( ptr, next, &ifchange->sockets ) { struct sock *sock = LIST_ENTRY( ptr, struct sock, ifchange_entry ); assert( sock->ifchange_obj ); async_wake_up( &sock->ifchange_q, status ); /* issue ifchange notification for the socket */ sock_release_ifchange( sock ); /* remove socket from list and decrement ifchange refcount */ } } static void ifchange_poll_event( struct fd *fd, int event ) { struct object *ifchange = get_fd_user( fd ); unsigned int status = STATUS_PENDING; char buffer[PIPE_BUF]; int r; r = recv( get_unix_fd(fd), buffer, sizeof(buffer), MSG_DONTWAIT ); if (r < 0) { if (errno == EWOULDBLOCK || (EWOULDBLOCK != EAGAIN && errno == EAGAIN)) return; /* retry when poll() says the socket is ready */ status = sock_get_ntstatus( errno ); } else if (r > 0) { struct nlmsghdr *nlh; for (nlh = (struct nlmsghdr *)buffer; NLMSG_OK(nlh, r); nlh = NLMSG_NEXT(nlh, r)) { if (nlh->nlmsg_type == NLMSG_DONE) break; if (nlh->nlmsg_type == RTM_NEWADDR || nlh->nlmsg_type == RTM_DELADDR) status = STATUS_SUCCESS; } } else status = STATUS_CANCELLED; if (status != STATUS_PENDING) ifchange_wake_up( ifchange, status ); } #endif /* we only need one of these interface notification objects, all of the sockets dependent upon * it will wake up when a notification event occurs */ static struct object *get_ifchange( void ) { #ifdef HAVE_LINUX_RTNETLINK_H struct ifchange *ifchange; struct sockaddr_nl addr; int unix_fd; if (ifchange_object) { /* increment the refcount for each socket that uses the ifchange object */ return grab_object( ifchange_object ); } /* create the socket we need for processing interface change notifications */ unix_fd = socket( PF_NETLINK, SOCK_RAW, NETLINK_ROUTE ); if (unix_fd == -1) { set_error( sock_get_ntstatus( errno )); return NULL; } fcntl( unix_fd, F_SETFL, O_NONBLOCK ); /* make socket nonblocking */ memset( &addr, 0, sizeof(addr) ); addr.nl_family = AF_NETLINK; addr.nl_groups = RTMGRP_IPV4_IFADDR; /* bind the socket to the special netlink kernel interface */ if (bind( unix_fd, (struct sockaddr *)&addr, sizeof(addr) ) == -1) { close( unix_fd ); set_error( sock_get_ntstatus( errno )); return NULL; } if 
(!(ifchange = alloc_object( &ifchange_ops ))) { close( unix_fd ); set_error( STATUS_NO_MEMORY ); return NULL; } list_init( &ifchange->sockets ); if (!(ifchange->fd = create_anonymous_fd( &ifchange_fd_ops, unix_fd, &ifchange->obj, 0 ))) { release_object( ifchange ); set_error( STATUS_NO_MEMORY ); return NULL; } set_fd_events( ifchange->fd, POLLIN ); /* enable read wakeup on the file descriptor */ /* the ifchange object is now successfully configured */ ifchange_object = &ifchange->obj; return &ifchange->obj; #else set_error( STATUS_NOT_SUPPORTED ); return NULL; #endif } /* add the socket to the interface change notification list */ static void ifchange_add_sock( struct object *obj, struct sock *sock ) { #ifdef HAVE_LINUX_RTNETLINK_H struct ifchange *ifchange = (struct ifchange *)obj; list_add_tail( &ifchange->sockets, &sock->ifchange_entry ); #endif } /* create a new ifchange queue for a specific socket or, if one already exists, reuse the existing one */ static struct object *sock_get_ifchange( struct sock *sock ) { struct object *ifchange; if (sock->ifchange_obj) /* reuse existing ifchange_obj for this socket */ return sock->ifchange_obj; if (!(ifchange = get_ifchange())) return NULL; /* add the socket to the ifchange notification list */ ifchange_add_sock( ifchange, sock ); sock->ifchange_obj = ifchange; return ifchange; } /* destroy an existing ifchange queue for a specific socket */ static void sock_release_ifchange( struct sock *sock ) { if (sock->ifchange_obj) { list_remove( &sock->ifchange_entry ); release_object( sock->ifchange_obj ); sock->ifchange_obj = NULL; } } static void socket_device_dump( struct object *obj, int verbose ); static struct object *socket_device_lookup_name( struct object *obj, struct unicode_str *name, unsigned int attr, struct object *root ); static struct object *socket_device_open_file( struct object *obj, unsigned int access, unsigned int sharing, unsigned int options ); static const struct object_ops socket_device_ops = { sizeof(struct object), /* size */ &device_type, /* type */ socket_device_dump, /* dump */ no_add_queue, /* add_queue */ NULL, /* remove_queue */ NULL, /* signaled */ NULL, /* get_esync_fd */ no_satisfied, /* satisfied */ no_signal, /* signal */ no_get_fd, /* get_fd */ default_map_access, /* map_access */ default_get_sd, /* get_sd */ default_set_sd, /* set_sd */ default_get_full_name, /* get_full_name */ socket_device_lookup_name, /* lookup_name */ directory_link_name, /* link_name */ default_unlink_name, /* unlink_name */ socket_device_open_file, /* open_file */ no_kernel_obj_list, /* get_kernel_obj_list */ no_close_handle, /* close_handle */ no_destroy /* destroy */ }; static void socket_device_dump( struct object *obj, int verbose ) { fputs( "Socket device\n", stderr ); } static struct object *socket_device_lookup_name( struct object *obj, struct unicode_str *name, unsigned int attr, struct object *root ) { if (name) name->len = 0; return NULL; } static struct object *socket_device_open_file( struct object *obj, unsigned int access, unsigned int sharing, unsigned int options ) { struct sock *sock; if (!(sock = create_socket())) return NULL; if (!(sock->fd = alloc_pseudo_fd( &sock_fd_ops, &sock->obj, options ))) { release_object( sock ); return NULL; } return &sock->obj; } struct object *create_socket_device( struct object *root, const struct unicode_str *name, unsigned int attr, const struct security_descriptor *sd ) { return create_named_object( root, &socket_device_ops, name, attr, sd ); } DECL_HANDLER(recv_socket) { struct sock 
*sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops ); unsigned int status = STATUS_PENDING; timeout_t timeout = 0; struct async *async; struct fd *fd; if (!sock) return; fd = sock->fd; if (!req->force_async && !sock->nonblocking && is_fd_overlapped( fd )) timeout = (timeout_t)sock->rcvtimeo * -10000; if (sock->rd_shutdown) status = STATUS_PIPE_DISCONNECTED; else if (!async_queued( &sock->read_q )) { /* If read_q is not empty, we cannot really tell if the already queued * asyncs will not consume all available data; if there's no data * available, the current request won't be immediately satiable. */ struct pollfd pollfd; pollfd.fd = get_unix_fd( sock->fd ); pollfd.events = req->oob ? POLLPRI : POLLIN; pollfd.revents = 0; if (poll(&pollfd, 1, 0) >= 0 && pollfd.revents) { /* Give the client opportunity to complete synchronously. * If it turns out that the I/O request is not actually immediately satiable, * the client may then choose to re-queue the async (with STATUS_PENDING). */ status = STATUS_ALERTED; } } if (status == STATUS_PENDING && !req->force_async && sock->nonblocking) status = STATUS_DEVICE_NOT_READY; sock->pending_events &= ~(req->oob ? AFD_POLL_OOB : AFD_POLL_READ); sock->reported_events &= ~(req->oob ? AFD_POLL_OOB : AFD_POLL_READ); if ((async = create_request_async( fd, get_fd_comp_flags( fd ), &req->async ))) { set_error( status ); if (timeout) async_set_timeout( async, timeout, STATUS_IO_TIMEOUT ); if (status == STATUS_PENDING || status == STATUS_ALERTED) queue_async( &sock->read_q, async ); /* always reselect; we changed reported_events above */ sock_reselect( sock ); reply->wait = async_handoff( async, NULL, 0 ); reply->options = get_fd_options( fd ); reply->nonblocking = sock->nonblocking; release_object( async ); } release_object( sock ); } DECL_HANDLER(send_socket) { struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops ); unsigned int status = req->status; timeout_t timeout = 0; struct async *async; struct fd *fd; if (!sock) return; fd = sock->fd; if (sock->type == WS_SOCK_DGRAM) { /* sendto() and sendmsg() implicitly binds a socket */ union unix_sockaddr unix_addr; socklen_t unix_len = sizeof(unix_addr); if (!sock->bound && !getsockname( get_unix_fd( fd ), &unix_addr.addr, &unix_len )) sock->addr_len = sockaddr_from_unix( &unix_addr, &sock->addr.addr, sizeof(sock->addr) ); sock->bound = 1; } if (status != STATUS_SUCCESS) { /* send() calls only clear and reselect events if unsuccessful. */ sock->pending_events &= ~AFD_POLL_WRITE; sock->reported_events &= ~AFD_POLL_WRITE; } /* If we had a short write and the socket is nonblocking (and the client is * not trying to force the operation to be asynchronous), return success. * Windows actually refuses to send any data in this case, and returns * EWOULDBLOCK, but we have no way of doing that. */ if (status == STATUS_DEVICE_NOT_READY && req->total && sock->nonblocking) status = STATUS_SUCCESS; /* send() returned EWOULDBLOCK or a short write, i.e. cannot send all data yet */ if (status == STATUS_DEVICE_NOT_READY && !sock->nonblocking) { /* Set a timeout on the async if necessary. * * We want to do this *only* if the client gave us STATUS_DEVICE_NOT_READY. * If the client gave us STATUS_PENDING, it expects the async to always * block (it was triggered by WSASend*() with a valid OVERLAPPED * structure) and for the timeout not to be respected. 
*/ if (is_fd_overlapped( fd )) timeout = (timeout_t)sock->sndtimeo * -10000; status = STATUS_PENDING; } if ((status == STATUS_PENDING || status == STATUS_DEVICE_NOT_READY) && sock->wr_shutdown) status = STATUS_PIPE_DISCONNECTED; if ((async = create_request_async( fd, get_fd_comp_flags( fd ), &req->async ))) { if (status == STATUS_SUCCESS) { struct iosb *iosb = async_get_iosb( async ); iosb->result = req->total; release_object( iosb ); } set_error( status ); if (timeout) async_set_timeout( async, timeout, STATUS_IO_TIMEOUT ); if (status == STATUS_PENDING) queue_async( &sock->write_q, async ); /* always reselect; we changed reported_events above */ sock_reselect( sock ); reply->wait = async_handoff( async, NULL, 0 ); reply->options = get_fd_options( fd ); release_object( async ); } release_object( sock ); }
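The recv_socket handler above decides between STATUS_ALERTED and STATUS_PENDING by probing the Unix socket with a zero-timeout poll(). The fragment below is a minimal standalone sketch of that idiom (the helper name read_would_not_block is my own, not part of the Wine server): with a timeout of 0, poll() never blocks and only reports whether the requested events are already pending.

#include <poll.h>
#include <stdbool.h>

/* Sketch only: returns true if a read (or OOB read) on unix_fd would
 * be immediately satisfiable, mirroring the probe in recv_socket. */
static bool read_would_not_block( int unix_fd, bool oob )
{
    struct pollfd pollfd;

    pollfd.fd = unix_fd;
    pollfd.events = oob ? POLLPRI : POLLIN;
    pollfd.revents = 0;

    /* timeout 0: return at once, reporting only current readiness */
    return poll( &pollfd, 1, 0 ) >= 0 && pollfd.revents;
}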
352067.c
#include <stdio.h>

int main()
{
    int ddd;

    scanf("%d", &ddd);

    switch (ddd) {
    case 61:
        printf("Brasilia\n");
        break;
    case 71:
        printf("Salvador\n");
        break;
    case 11:
        printf("Sao Paulo\n");
        break;
    case 21:
        printf("Rio de Janeiro\n");
        break;
    case 32:
        printf("Juiz de Fora\n");
        break;
    case 19:
        printf("Campinas\n");
        break;
    case 27:
        printf("Vitoria\n");
        break;
    case 31:
        printf("Belo Horizonte\n");
        break;
    default:
        printf("DDD nao cadastrado\n");
        break;
    }

    return 0;
}
112235.c
/* $NetBSD$ */ /*- * Copyright (c) 2011 UCHIYAMA Yasushi. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <sys/cdefs.h> __KERNEL_RCSID(0, "$NetBSD$"); #if defined _KERNEL_OPT #include "opt_v7fs.h" #endif #ifdef _KERNEL #include <sys/systm.h> #include <sys/param.h> // errno #else #include <stdio.h> #include <string.h> #include <errno.h> #endif #include "v7fs.h" #include "v7fs_impl.h" #include "v7fs_endian.h" #include "v7fs_superblock.h" #include "v7fs_inode.h" #include "v7fs_datablock.h" #ifdef V7FS_SUPERBLOCK_DEBUG #define DPRINTF(fmt, args...) printf("%s: " fmt, __func__, ##args) #define DPRINTF_(fmt, args...) printf(fmt, ##args) #else #define DPRINTF(fmt, args...) ((void)0) #define DPRINTF_(fmt, args...) ((void)0) #endif static void v7fs_superblock_endian_convert (struct v7fs_self *, struct v7fs_superblock *, struct v7fs_superblock *); int v7fs_superblock_sanity (struct v7fs_self *); // Load superblock from disk. int v7fs_superblock_load (struct v7fs_self *_) { struct v7fs_superblock *disksb; void *buf; int error; if (!(buf = scratch_read (_, V7FS_SUPERBLOCK_SECTOR))) return EIO; disksb = (struct v7fs_superblock *)buf; v7fs_superblock_endian_convert (_, &_->superblock, disksb); scratch_free (_, buf); if ((error = v7fs_superblock_sanity (_))) return error; return 0; } // Writeback superblock to disk. int v7fs_superblock_writeback (struct v7fs_self *_) { struct v7fs_superblock *memsb = &_->superblock; struct v7fs_superblock *disksb; void *buf; int error = 0; if (!memsb->modified) return 0; if (!(buf = scratch_read (_, V7FS_SUPERBLOCK_SECTOR))) return EIO; disksb = (struct v7fs_superblock *)buf; v7fs_superblock_endian_convert (_, disksb, memsb); if (!_->io.write (_->io.cookie, buf, V7FS_SUPERBLOCK_SECTOR)) error = EIO; scratch_free (_, buf); memsb->modified = 0; DPRINTF ("done. %d\n", error); return error; } // Check endian mismatch. int v7fs_superblock_sanity (struct v7fs_self *_) { const struct v7fs_superblock *sb = &_->superblock; void *buf = 0; if ((sb->volume_size < 128) || // smaller than 64KB. 
(sb->datablock_start_sector > sb->volume_size) || (sb->nfreeinode > V7FS_MAX_FREEINODE) || (sb->nfreeblock > V7FS_MAX_FREEBLOCK) || (sb->update_time < 0) || (sb->total_freeblock > sb->volume_size) || ((sb->nfreeinode == 0) && (sb->nfreeblock == 0) && (sb->total_freeblock == 0) && (sb->total_freeinode == 0)) || (!(buf = scratch_read (_, sb->volume_size - 1)))) { DPRINTF ("invalid super block.\n"); return EINVAL; } if (buf) scratch_free (_, buf); return 0; } // Fill free block to superblock cache. int v7fs_freeblock_update (struct v7fs_self *_, v7fs_daddr_t blk) { // Assume superblock is locked by caller. struct v7fs_superblock *sb = &_->superblock; struct v7fs_freeblock *fb; void *buf; int error; // Read next freeblock table from disk. if (!datablock_number_sanity (_, blk) || !(buf = scratch_read (_, blk))) return EIO; // Update in-core superblock freelist. fb = (struct v7fs_freeblock *)buf; if ((error = v7fs_freeblock_endian_convert (_, fb))) { scratch_free (_, buf); return error; } DPRINTF ("freeblock table#%d, nfree=%d\n", blk, fb->nfreeblock); memcpy (sb->freeblock, fb->freeblock, sizeof (v7fs_daddr_t) * fb->nfreeblock); sb->nfreeblock = fb->nfreeblock; sb->modified = true; scratch_free (_, buf); return 0; } int v7fs_freeblock_endian_convert (struct v7fs_self *_ __attribute__((unused)), struct v7fs_freeblock *fb __attribute__((unused))) { #ifdef V7FS_EI int i; int16_t nfree; nfree = V7FS_VAL16 (_, fb->nfreeblock); if (nfree <= 0 || nfree > V7FS_MAX_FREEBLOCK) { DPRINTF ("invalid freeblock list. %d (max=%d)\n", nfree, V7FS_MAX_FREEBLOCK); return ENOSPC; } fb->nfreeblock = nfree; for (i = 0; i < nfree; i++) { fb->freeblock[i] = V7FS_VAL32 (_, fb->freeblock[i]); } #endif // V7FS_EI return 0; } // Fill free inode to superblock cache. int v7fs_freeinode_update (struct v7fs_self *_) { // Assume superblock is locked by caller. struct v7fs_superblock *sb = &_->superblock; v7fs_ino_t *freeinode = sb->freeinode; size_t i, j, k; v7fs_ino_t ino; // Loop over all inode list. for (i = V7FS_ILIST_SECTOR, ino = 1/* inode start from 1*/, k = 0; i < sb->datablock_start_sector; i++) { struct v7fs_inode_diskimage *di; void *buf; if (!(buf = scratch_read (_, i))) { DPRINTF ("block %ld I/O error.\n", (long)i); ino += V7FS_INODE_PER_BLOCK; continue; } di = (struct v7fs_inode_diskimage *)buf; for (j = 0; (j < V7FS_INODE_PER_BLOCK) && (k < V7FS_MAX_FREEINODE); j++, di++, ino++) { if (v7fs_inode_allocated (di)) continue; DPRINTF ("free inode%d\n", ino); freeinode[k++] = ino; } scratch_free (_, buf); } sb->nfreeinode = k; return 0; } void v7fs_superblock_endian_convert (struct v7fs_self *_ __attribute__((unused)), struct v7fs_superblock *to, struct v7fs_superblock *from) { #ifdef V7FS_EI #define _16(x, m) (to->m = V7FS_VAL16 (_, from->m)) #define _32(x, m) (to->m = V7FS_VAL32 (_, from->m)) int i; _16 (_, datablock_start_sector); _32 (_, volume_size); _16 (_, nfreeblock); v7fs_daddr_t *dfrom = from->freeblock; v7fs_daddr_t *dto = to->freeblock; for (i = 0; i < V7FS_MAX_FREEBLOCK; i++, dfrom++, dto++) *dto = V7FS_VAL32 (_, *dfrom); _16 (_, nfreeinode); v7fs_ino_t *ifrom = from->freeinode; v7fs_ino_t *ito = to->freeinode; for (i = 0; i < V7FS_MAX_FREEINODE; i++, ifrom++, ito++) *ito = V7FS_VAL16 (_, *ifrom); _32 (_, update_time); _32 (_, total_freeblock); _16 (_, total_freeinode); #undef _16 #undef _32 #else // V7FS_EI memcpy (to, from , sizeof (struct v7fs_superblock)); #endif // V7FS_EI }
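The superblock and freeblock conversions above go through V7FS_VAL16/V7FS_VAL32, which are defined in v7fs_endian.h and not shown here. As a rough sketch of what such macros amount to (my own illustration, not the NetBSD implementation), a 16-bit value read from disk is either passed through or byte-swapped, depending on whether the filesystem's on-disk byte order matches the host's:

#include <stdint.h>

/* Hypothetical stand-in for a V7FS_VAL16-style conversion; the real
 * macros take the v7fs_self handle and select the swap at mount time. */
static inline uint16_t example_val16(int foreign_endian, uint16_t v)
{
	if (!foreign_endian)
		return v;	/* on-disk order == host order */
	return (uint16_t)((v << 8) | (v >> 8));	/* byte swap */
}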
135573.c
/* recur.c -- recursion illustration */
#include <stdio.h>

void up_and_down(int);

int main(void)
{
    up_and_down(1);
    return 0;
}

void up_and_down(int n)
{
    printf("Level %d: n location %p\n", n, &n);   // 1
    if (n < 4)
        up_and_down(n + 1);
    printf("LEVEL %d: n location %p\n", n, &n);   // 2
}
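For reference, running recur.c nests four calls and then unwinds them in reverse order; the printed addresses vary by platform, so the trace below (my own illustration, not output taken from the source) shows them only as placeholders.

/*
 * Level 1: n location 0x....   <- call chain going down
 * Level 2: n location 0x....
 * Level 3: n location 0x....
 * Level 4: n location 0x....
 * LEVEL 4: n location 0x....   <- recursion unwinding
 * LEVEL 3: n location 0x....
 * LEVEL 2: n location 0x....
 * LEVEL 1: n location 0x....
 */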
592691.c
/* * TCP networking functions * * Copyright (C) 2006-2007 Christophe Devine * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "xyssl/config.h" #if defined(XYSSL_NET_C) #include "xyssl/net.h" #if defined(WIN32) || defined(_WIN32_WCE) #include <winsock2.h> #include <windows.h> #if defined(_WIN32_WCE) #pragma comment( lib, "ws2.lib" ) #else #pragma comment( lib, "ws2_32.lib" ) #endif #define read(fd,buf,len) recv(fd,buf,len,0) #define write(fd,buf,len) send(fd,buf,len,0) #define close(fd) closesocket(fd) static int wsa_init_done = 0; #else #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <sys/time.h> #include <unistd.h> #include <signal.h> #include <fcntl.h> #include <netdb.h> #include <errno.h> #endif #include <string.h> #include <stdlib.h> #include <stdio.h> #include <time.h> /* * htons() is not always available */ static unsigned short net_htons( int port ) { unsigned char buf[4]; buf[0] = (unsigned char)( port >> 8 ); buf[1] = (unsigned char)( port ); buf[2] = buf[3] = 0; return( *(unsigned short *) buf ); } /* * Initiate a TCP connection with host:port */ int net_connect( int *fd, char *host, int port ) { struct sockaddr_in server_addr; struct hostent *server_host; #if defined(WIN32) || defined(_WIN32_WCE) WSADATA wsaData; if( wsa_init_done == 0 ) { if( WSAStartup( MAKEWORD(2,0), &wsaData ) == SOCKET_ERROR ) return( XYSSL_ERR_NET_SOCKET_FAILED ); wsa_init_done = 1; } #else signal( SIGPIPE, SIG_IGN ); #endif if( ( server_host = gethostbyname( host ) ) == NULL ) return( XYSSL_ERR_NET_UNKNOWN_HOST ); if( ( *fd = socket( AF_INET, SOCK_STREAM, IPPROTO_IP ) ) < 0 ) return( XYSSL_ERR_NET_SOCKET_FAILED ); memcpy( (void *) &server_addr.sin_addr, (void *) server_host->h_addr, server_host->h_length ); server_addr.sin_family = AF_INET; server_addr.sin_port = net_htons( port ); if( connect( *fd, (struct sockaddr *) &server_addr, sizeof( server_addr ) ) < 0 ) { close( *fd ); return( XYSSL_ERR_NET_CONNECT_FAILED ); } return( 0 ); } /* * Create a listening socket on bind_ip:port */ int net_bind( int *fd, char *bind_ip, int port ) { int n, c[4]; struct sockaddr_in server_addr; #if defined(WIN32) || defined(_WIN32_WCE) WSADATA wsaData; if( wsa_init_done == 0 ) { if( WSAStartup( MAKEWORD(2,0), &wsaData ) == SOCKET_ERROR ) return( XYSSL_ERR_NET_SOCKET_FAILED ); wsa_init_done = 1; } #else signal( SIGPIPE, SIG_IGN ); #endif if( ( *fd = socket( AF_INET, SOCK_STREAM, IPPROTO_IP ) ) < 0 ) return( XYSSL_ERR_NET_SOCKET_FAILED ); n = 1; setsockopt( *fd, SOL_SOCKET, SO_REUSEADDR, (const char *) &n, sizeof( n ) ); server_addr.sin_addr.s_addr = INADDR_ANY; server_addr.sin_family = AF_INET; server_addr.sin_port = net_htons( port ); if( bind_ip != NULL ) { memset( c, 0, sizeof( c ) ); sscanf( bind_ip, "%d.%d.%d.%d", &c[0], &c[1], &c[2], &c[3] ); for( n = 0; n < 4; n++ ) if( c[n] < 0 || c[n] > 255 ) break; if( n 
== 4 ) server_addr.sin_addr.s_addr = ( c[0] << 24 ) | ( c[1] << 16 ) | ( c[2] << 8 ) | ( c[3] ); } if( bind( *fd, (struct sockaddr *) &server_addr, sizeof( server_addr ) ) < 0 ) { close( *fd ); return( XYSSL_ERR_NET_BIND_FAILED ); } if( listen( *fd, 10 ) != 0 ) { close( *fd ); return( XYSSL_ERR_NET_LISTEN_FAILED ); } return( 0 ); } /* * Check if the current operation is blocking */ static int net_is_blocking( void ) { #if defined(WIN32) || defined(_WIN32_WCE) return( WSAGetLastError() == WSAEWOULDBLOCK ); #else switch( errno ) { #if defined EAGAIN case EAGAIN: #endif #if defined EWOULDBLOCK && EWOULDBLOCK != EAGAIN case EWOULDBLOCK: #endif return( 1 ); } return( 0 ); #endif } /* * Accept a connection from a remote client */ int net_accept( int bind_fd, int *client_fd, void *client_ip ) { struct sockaddr_in client_addr; #if defined(__socklen_t_defined) socklen_t n = (socklen_t) sizeof( client_addr ); #else int n = (int) sizeof( client_addr ); #endif *client_fd = accept( bind_fd, (struct sockaddr *) &client_addr, &n ); if( *client_fd < 0 ) { if( net_is_blocking() != 0 ) return( XYSSL_ERR_NET_TRY_AGAIN ); return( XYSSL_ERR_NET_ACCEPT_FAILED ); } if( client_ip != NULL ) memcpy( client_ip, &client_addr.sin_addr.s_addr, sizeof( client_addr.sin_addr.s_addr ) ); return( 0 ); } /* * Set the socket blocking or non-blocking */ int net_set_block( int fd ) { #if defined(WIN32) || defined(_WIN32_WCE) long n = 0; return( ioctlsocket( fd, FIONBIO, &n ) ); #else return( fcntl( fd, F_SETFL, fcntl( fd, F_GETFL ) & ~O_NONBLOCK ) ); #endif } int net_set_nonblock( int fd ) { #if defined(WIN32) || defined(_WIN32_WCE) long n = 1; return( ioctlsocket( fd, FIONBIO, &n ) ); #else return( fcntl( fd, F_SETFL, fcntl( fd, F_GETFL ) | O_NONBLOCK ) ); #endif } /* * Portable usleep helper */ void net_usleep( unsigned long usec ) { struct timeval tv; tv.tv_sec = 0; tv.tv_usec = usec; select( 0, NULL, NULL, NULL, &tv ); } /* * Read at most 'len' characters */ int net_recv( void *ctx, unsigned char *buf, int len ) { int ret = read( *((int *) ctx), buf, len ); if( len > 0 && ret == 0 ) return( XYSSL_ERR_NET_CONN_RESET ); if( ret < 0 ) { if( net_is_blocking() != 0 ) return( XYSSL_ERR_NET_TRY_AGAIN ); #if defined(WIN32) || defined(_WIN32_WCE) if( WSAGetLastError() == WSAECONNRESET ) return( XYSSL_ERR_NET_CONN_RESET ); #else if( errno == EPIPE || errno == ECONNRESET ) return( XYSSL_ERR_NET_CONN_RESET ); if( errno == EINTR ) return( XYSSL_ERR_NET_TRY_AGAIN ); #endif return( XYSSL_ERR_NET_RECV_FAILED ); } return( ret ); } /* * Write at most 'len' characters */ int net_send( void *ctx, unsigned char *buf, int len ) { int ret = write( *((int *) ctx), buf, len ); if( ret < 0 ) { if( net_is_blocking() != 0 ) return( XYSSL_ERR_NET_TRY_AGAIN ); #if defined(WIN32) || defined(_WIN32_WCE) if( WSAGetLastError() == WSAECONNRESET ) return( XYSSL_ERR_NET_CONN_RESET ); #else if( errno == EPIPE || errno == ECONNRESET ) return( XYSSL_ERR_NET_CONN_RESET ); if( errno == EINTR ) return( XYSSL_ERR_NET_TRY_AGAIN ); #endif return( XYSSL_ERR_NET_SEND_FAILED ); } return( ret ); } /* * Gracefully close the connection */ void net_close( int fd ) { shutdown( fd, 2 ); close( fd ); } #endif
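A minimal usage sketch for the functions above (my own example, not part of the xyssl sources); the host name, port, and request string are placeholders, and error handling is reduced to bailing out:

#include <string.h>
#include <stdio.h>

#include "xyssl/net.h"

int example_fetch( void )
{
    int ret, fd;
    unsigned char buf[1024];
    char req[] = "HEAD / HTTP/1.0\r\n\r\n";

    if( ( ret = net_connect( &fd, "example.com", 80 ) ) != 0 )
        return( ret );

    if( ( ret = net_send( &fd, (unsigned char *) req,
                          (int) strlen( req ) ) ) < 0 )
    {
        net_close( fd );
        return( ret );
    }

    /* read one chunk of the reply; a real client would loop here */
    if( ( ret = net_recv( &fd, buf, (int) sizeof( buf ) - 1 ) ) > 0 )
    {
        buf[ret] = '\0';
        printf( "%s\n", (char *) buf );
    }

    net_close( fd );
    return( 0 );
}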
23769.c
#include <stdio.h>

#define MAXN 100
#define ORDER 4

int main(int argc, char const *argv[])
{
    float ax[MAXN+1], ay[MAXN+1], diff[MAXN+1][ORDER+1], nr = 1.0, dr = 1.0, x, p, h, yp;
    int n, i, j, k;

    printf("Enter the value of n\n");
    scanf("%d", &n);

    printf("Enter the values in the form of x,y \n");
    for (i = 0; i <= n; i++) {
        scanf("%f %f", &ax[i], &ay[i]);
    }

    printf("Enter the value of x for which the value of y is wanted \n");
    scanf("%f", &x);

    h = ax[1] - ax[0];

    /* build the difference table: first-order differences go in column 1 */
    for (i = 0; i <= n-1; i++)
        diff[i][1] = ay[i+1] - ay[i];

    /* second- and higher-order differences */
    for (j = 2; j <= ORDER; j++) {
        for (i = 0; i <= n-j; i++) {
            diff[i][j] = diff[i+1][j-1] - diff[i][j-1];
        }
    }

    /* locate x0: the last tabulated point not greater than x */
    i = 0;
    while (!(ax[i] > x))
        i++;
    i--;

    p = (x - ax[i]) / h;
    yp = ay[i];

    /* accumulate the Newton forward-difference terms */
    for (k = 1; k <= ORDER; k++) {
        nr *= p - k + 1;
        dr *= k;
        yp += (nr / dr) * diff[i][k];
    }

    printf("when x= %6.1f , y = %6.2f .\n ", x, yp);
    return 0;
}
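A quick sanity check of the forward-difference scheme above (the numbers are my own illustration, not from the source):

/*
 * x : 0  1  2  3  4    (n = 4, h = 1)
 * y : 0  1  4  9  16   (y = x*x)
 * 1st differences: 1 3 5 7
 * 2nd differences: 2 2 2
 * 3rd differences: 0 0
 * 4th differences: 0
 * For x = 1.5 the search loop picks i = 1, so p = (1.5 - 1)/1 = 0.5 and
 * yp = 1 + (0.5)(3) + (0.5)(0.5 - 1)/2 * 2 + 0 + 0
 *    = 1 + 1.5 - 0.25 = 2.25, which is indeed 1.5 squared.
 */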
554719.c
/******************************************************************************* * * Module Name: dmresrcl.c - "Large" Resource Descriptor disassembly * $Revision: 1.29 $ * ******************************************************************************/ /****************************************************************************** * * 1. Copyright Notice * * Some or all of this work - Copyright (c) 1999 - 2005, Intel Corp. * All rights reserved. * * 2. License * * 2.1. This is your license from Intel Corp. under its intellectual property * rights. You may have additional license terms from the party that provided * you this software, covering your right to use that party's intellectual * property rights. * * 2.2. Intel grants, free of charge, to any person ("Licensee") obtaining a * copy of the source code appearing in this file ("Covered Code") an * irrevocable, perpetual, worldwide license under Intel's copyrights in the * base code distributed originally by Intel ("Original Intel Code") to copy, * make derivatives, distribute, use and display any portion of the Covered * Code in any form, with the right to sublicense such rights; and * * 2.3. Intel grants Licensee a non-exclusive and non-transferable patent * license (with the right to sublicense), under only those claims of Intel * patents that are infringed by the Original Intel Code, to make, use, sell, * offer to sell, and import the Covered Code and derivative works thereof * solely to the minimum extent necessary to exercise the above copyright * license, and in no event shall the patent license extend to any additions * to or modifications of the Original Intel Code. No other license or right * is granted directly or by implication, estoppel or otherwise; * * The above copyright and patent license is granted only if the following * conditions are met: * * 3. Conditions * * 3.1. Redistribution of Source with Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification with rights to further distribute source must include * the above Copyright Notice, the above License, this list of Conditions, * and the following Disclaimer and Export Compliance provision. In addition, * Licensee must cause all Covered Code to which Licensee contributes to * contain a file documenting the changes Licensee made to create that Covered * Code and the date of any change. Licensee must include in that file the * documentation of any changes made by any predecessor Licensee. Licensee * must include a prominent statement that the modification is derived, * directly or indirectly, from Original Intel Code. * * 3.2. Redistribution of Source with no Rights to Further Distribute Source. * Redistribution of source code of any substantial portion of the Covered * Code or modification without rights to further distribute source must * include the following Disclaimer and Export Compliance provision in the * documentation and/or other materials provided with distribution. In * addition, Licensee may not authorize further sublicense of source of any * portion of the Covered Code, and must include terms to the effect that the * license from Licensee to its licensee is limited to the intellectual * property embodied in the software Licensee provides to its licensee, and * not to intellectual property embodied in modifications its licensee may * make. * * 3.3. Redistribution of Executable. 
Redistribution in executable form of any * substantial portion of the Covered Code or modification must reproduce the * above Copyright Notice, and the following Disclaimer and Export Compliance * provision in the documentation and/or other materials provided with the * distribution. * * 3.4. Intel retains all right, title, and interest in and to the Original * Intel Code. * * 3.5. Neither the name Intel nor any other trademark owned or controlled by * Intel shall be used in advertising or otherwise to promote the sale, use or * other dealings in products derived from or relating to the Covered Code * without prior written authorization from Intel. * * 4. Disclaimer and Export Compliance * * 4.1. INTEL MAKES NO WARRANTY OF ANY KIND REGARDING ANY SOFTWARE PROVIDED * HERE. ANY SOFTWARE ORIGINATING FROM INTEL OR DERIVED FROM INTEL SOFTWARE * IS PROVIDED "AS IS," AND INTEL WILL NOT PROVIDE ANY SUPPORT, ASSISTANCE, * INSTALLATION, TRAINING OR OTHER SERVICES. INTEL WILL NOT PROVIDE ANY * UPDATES, ENHANCEMENTS OR EXTENSIONS. INTEL SPECIFICALLY DISCLAIMS ANY * IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT AND FITNESS FOR A * PARTICULAR PURPOSE. * * 4.2. IN NO EVENT SHALL INTEL HAVE ANY LIABILITY TO LICENSEE, ITS LICENSEES * OR ANY OTHER THIRD PARTY, FOR ANY LOST PROFITS, LOST DATA, LOSS OF USE OR * COSTS OF PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY INDIRECT, * SPECIAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THIS AGREEMENT, UNDER ANY * CAUSE OF ACTION OR THEORY OF LIABILITY, AND IRRESPECTIVE OF WHETHER INTEL * HAS ADVANCE NOTICE OF THE POSSIBILITY OF SUCH DAMAGES. THESE LIMITATIONS * SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE ESSENTIAL PURPOSE OF ANY * LIMITED REMEDY. * * 4.3. Licensee shall not export, either directly or indirectly, any of this * software or system incorporating such software without first obtaining any * required license or other approval from the U. S. Department of Commerce or * any other agency or department of the United States Government. In the * event Licensee exports any such software from the United States or * re-exports any such software from a foreign destination, Licensee shall * ensure that the distribution and export/re-export of the software is in * compliance with all laws, regulations, orders, or other restrictions of the * U.S. Export Administration Regulations. Licensee agrees that neither it nor * any of its subsidiaries will export/re-export any technical data, process, * software, or service, directly or indirectly, to any country for which the * United States government or any agency thereof requires an export license, * other governmental approval, or letter of assurance, without first obtaining * such license, approval or letter. 
* *****************************************************************************/ #include <contrib/dev/acpica/acpi.h> #include <contrib/dev/acpica/acdisasm.h> #ifdef ACPI_DISASSEMBLER #define _COMPONENT ACPI_CA_DEBUGGER ACPI_MODULE_NAME ("dbresrcl") /* Common names for address and memory descriptors */ static char *AcpiDmAddressNames[] = { "Address Space Granularity", "Address Range Minimum", "Address Range Maximum", "Address Translation Offset", "Address Length" }; static char *AcpiDmMemoryNames[] = { "Address Range Minimum", "Address Range Maximum", "Address Alignment", "Address Length" }; /* Local prototypes */ static void AcpiDmSpaceFlags ( UINT8 Flags); static void AcpiDmIoFlags ( UINT8 Flags); static void AcpiDmIoFlags2 ( UINT8 SpecificFlags); static void AcpiDmMemoryFlags ( UINT8 Flags, UINT8 SpecificFlags); static void AcpiDmMemoryFlags2 ( UINT8 SpecificFlags); static void AcpiDmResourceSource ( AML_RESOURCE *Resource, ACPI_SIZE MinimumLength, UINT32 Length); static void AcpiDmAddressFields ( void *Source, UINT8 Type, UINT32 Level); static void AcpiDmAddressPrefix ( UINT8 Type); static void AcpiDmAddressCommon ( AML_RESOURCE *Resource, UINT8 Type, UINT32 Level); static void AcpiDmAddressFlags ( AML_RESOURCE *Resource); /******************************************************************************* * * FUNCTION: AcpiDmMemoryFields * * PARAMETERS: Source - Pointer to the contiguous data fields * Type - 16 or 32 (bit) * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode fields common to Memory24 and Memory32 descriptors * ******************************************************************************/ static void AcpiDmMemoryFields ( void *Source, UINT8 Type, UINT32 Level) { ACPI_NATIVE_UINT i; for (i = 0; i < 4; i++) { AcpiDmIndent (Level + 1); switch (Type) { case 16: AcpiDmDumpInteger16 (((UINT16 *) Source)[i], AcpiDmMemoryNames[i]); break; case 32: AcpiDmDumpInteger32 (((UINT32 *) Source)[i], AcpiDmMemoryNames[i]); break; default: return; } } } /******************************************************************************* * * FUNCTION: AcpiDm * * PARAMETERS: Source - Pointer to the contiguous data fields * Type - 16, 32, or 64 (bit) * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode fields common to address descriptors * ******************************************************************************/ static void AcpiDmAddressFields ( void *Source, UINT8 Type, UINT32 Level) { ACPI_NATIVE_UINT i; AcpiOsPrintf ("\n"); for (i = 0; i < 5; i++) { AcpiDmIndent (Level + 1); switch (Type) { case 16: AcpiDmDumpInteger16 (((UINT16 *) Source)[i], AcpiDmAddressNames[i]); break; case 32: AcpiDmDumpInteger32 (((UINT32 *) Source)[i], AcpiDmAddressNames[i]); break; case 64: AcpiDmDumpInteger64 (((UINT64 *) Source)[i], AcpiDmAddressNames[i]); break; default: return; } } } /******************************************************************************* * * FUNCTION: AcpiDmAddressPrefix * * PARAMETERS: Type - Descriptor type * * RETURN: None * * DESCRIPTION: Emit name prefix representing the address descriptor type * ******************************************************************************/ static void AcpiDmAddressPrefix ( UINT8 Type) { switch (Type) { case ACPI_RESOURCE_TYPE_ADDRESS16: AcpiOsPrintf ("Word"); break; case ACPI_RESOURCE_TYPE_ADDRESS32: AcpiOsPrintf ("DWord"); break; case ACPI_RESOURCE_TYPE_ADDRESS64: AcpiOsPrintf ("QWord"); break; case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64: AcpiOsPrintf ("Extended"); 
break; default: return; } } /******************************************************************************* * * FUNCTION: AcpiDmAddressCommon * * PARAMETERS: Resource - Raw AML descriptor * Type - Descriptor type * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Emit common name and flag fields common to address descriptors * ******************************************************************************/ static void AcpiDmAddressCommon ( AML_RESOURCE *Resource, UINT8 Type, UINT32 Level) { UINT8 ResourceType; UINT8 SpecificFlags; UINT8 Flags; ResourceType = Resource->Address.ResourceType; SpecificFlags = Resource->Address.SpecificFlags; Flags = Resource->Address.Flags; AcpiDmIndent (Level); /* Validate ResourceType */ if ((ResourceType > 2) && (ResourceType < 0xC0)) { AcpiOsPrintf ("/**** Invalid Resource Type: 0x%X ****/", ResourceType); return; } /* Prefix is either Word, DWord, QWord, or Extended */ AcpiDmAddressPrefix (Type); /* Resource Types above 0xC0 are vendor-defined */ if (ResourceType > 2) { AcpiOsPrintf ("Space (0x%2.2X, ", ResourceType); AcpiDmSpaceFlags (Flags); AcpiOsPrintf (" 0x%2.2X,", SpecificFlags); return; } /* This is either a Memory, IO, or BusNumber descriptor (0,1,2) */ AcpiOsPrintf ("%s (", AcpiGbl_WordDecode [ResourceType]); /* Decode the general and type-specific flags */ if (ResourceType == ACPI_MEMORY_RANGE) { AcpiDmMemoryFlags (Flags, SpecificFlags); } else /* IO range or BusNumberRange */ { AcpiDmIoFlags (Flags); if (ResourceType == ACPI_IO_RANGE) { AcpiOsPrintf (" %s,", AcpiGbl_RNGDecode [SpecificFlags & 0x3]); } } } /******************************************************************************* * * FUNCTION: AcpiDmAddressFlags * * PARAMETERS: Resource - Raw AML descriptor * * RETURN: None * * DESCRIPTION: Emit flags common to address descriptors * ******************************************************************************/ static void AcpiDmAddressFlags ( AML_RESOURCE *Resource) { if (Resource->Address.ResourceType == ACPI_IO_RANGE) { AcpiDmIoFlags2 (Resource->Address.SpecificFlags); } else if (Resource->Address.ResourceType == ACPI_MEMORY_RANGE) { AcpiDmMemoryFlags2 (Resource->Address.SpecificFlags); } } /******************************************************************************* * * FUNCTION: AcpiDmSpaceFlags * * PARAMETERS: Flags - Flag byte to be decoded * * RETURN: None * * DESCRIPTION: Decode the flags specific to Space Address space descriptors * ******************************************************************************/ static void AcpiDmSpaceFlags ( UINT8 Flags) { AcpiOsPrintf ("%s, %s, %s, %s,", AcpiGbl_ConsumeDecode [(Flags & 1)], AcpiGbl_DECDecode [(Flags & 0x2) >> 1], AcpiGbl_MinDecode [(Flags & 0x4) >> 2], AcpiGbl_MaxDecode [(Flags & 0x8) >> 3]); } /******************************************************************************* * * FUNCTION: AcpiDmIoFlags * * PARAMETERS: Flags - Flag byte to be decoded * * RETURN: None * * DESCRIPTION: Decode the flags specific to IO Address space descriptors * ******************************************************************************/ static void AcpiDmIoFlags ( UINT8 Flags) { AcpiOsPrintf ("%s, %s, %s, %s,", AcpiGbl_ConsumeDecode [(Flags & 1)], AcpiGbl_MinDecode [(Flags & 0x4) >> 2], AcpiGbl_MaxDecode [(Flags & 0x8) >> 3], AcpiGbl_DECDecode [(Flags & 0x2) >> 1]); } /******************************************************************************* * * FUNCTION: AcpiDmIoFlags2 * * PARAMETERS: SpecificFlags - "Specific" flag byte to be decoded * * RETURN: None * * 
DESCRIPTION: Decode the flags specific to IO Address space descriptors * ******************************************************************************/ static void AcpiDmIoFlags2 ( UINT8 SpecificFlags) { AcpiOsPrintf (", %s", AcpiGbl_TTPDecode [(SpecificFlags & 0x10) >> 4]); /* TRS is only used if TTP is TypeTranslation */ if (SpecificFlags & 0x10) { AcpiOsPrintf (", %s", AcpiGbl_TRSDecode [(SpecificFlags & 0x20) >> 5]); } } /******************************************************************************* * * FUNCTION: AcpiDmMemoryFlags * * PARAMETERS: Flags - Flag byte to be decoded * SpecificFlags - "Specific" flag byte to be decoded * * RETURN: None * * DESCRIPTION: Decode flags specific to Memory Address Space descriptors * ******************************************************************************/ static void AcpiDmMemoryFlags ( UINT8 Flags, UINT8 SpecificFlags) { AcpiOsPrintf ("%s, %s, %s, %s, %s, %s,", AcpiGbl_ConsumeDecode [(Flags & 1)], AcpiGbl_DECDecode [(Flags & 0x2) >> 1], AcpiGbl_MinDecode [(Flags & 0x4) >> 2], AcpiGbl_MaxDecode [(Flags & 0x8) >> 3], AcpiGbl_MEMDecode [(SpecificFlags & 0x6) >> 1], AcpiGbl_RWDecode [(SpecificFlags & 0x1)]); } /******************************************************************************* * * FUNCTION: AcpiDmMemoryFlags2 * * PARAMETERS: SpecificFlags - "Specific" flag byte to be decoded * * RETURN: None * * DESCRIPTION: Decode flags specific to Memory Address Space descriptors * ******************************************************************************/ static void AcpiDmMemoryFlags2 ( UINT8 SpecificFlags) { AcpiOsPrintf (", %s, %s", AcpiGbl_MTPDecode [(SpecificFlags & 0x18) >> 3], AcpiGbl_TTPDecode [(SpecificFlags & 0x20) >> 5]); } /******************************************************************************* * * FUNCTION: AcpiDmResourceSource * * PARAMETERS: Resource - Raw AML descriptor * MinimumLength - descriptor length without optional fields * ResourceLength * * RETURN: None * * DESCRIPTION: Dump optional ResourceSource fields of an address descriptor * ******************************************************************************/ static void AcpiDmResourceSource ( AML_RESOURCE *Resource, ACPI_SIZE MinimumTotalLength, UINT32 ResourceLength) { UINT8 *AmlResourceSource; UINT32 TotalLength; TotalLength = ResourceLength + sizeof (AML_RESOURCE_LARGE_HEADER); /* Check if the optional ResourceSource fields are present */ if (TotalLength <= MinimumTotalLength) { /* The two optional fields are not used */ AcpiOsPrintf (",,"); return; } /* Get a pointer to the ResourceSource */ AmlResourceSource = ((UINT8 *) Resource) + MinimumTotalLength; /* * Always emit the ResourceSourceIndex (Byte) * * NOTE: Some ASL compilers always create a 0 byte (in the AML) for the * Index even if the String does not exist. Although this is in violation * of the ACPI specification, it is very important to emit ASL code that * can be compiled back to the identical AML. There may be fields and/or * indexes into the resource template buffer that are compiled to absolute * offsets, and these will be broken if the AML length is changed. 
*/ AcpiOsPrintf ("0x%2.2X,", (UINT32) AmlResourceSource[0]); /* Make sure that the ResourceSource string exists before dumping it */ if (TotalLength > (MinimumTotalLength + 1)) { AcpiOsPrintf (" "); AcpiUtPrintString ((char *) &AmlResourceSource[1], ACPI_UINT8_MAX); } AcpiOsPrintf (","); } /******************************************************************************* * * FUNCTION: AcpiDmWordDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Word Address Space descriptor * ******************************************************************************/ void AcpiDmWordDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump resource name and flags */ AcpiDmAddressCommon (Resource, ACPI_RESOURCE_TYPE_ADDRESS16, Level); /* Dump the 5 contiguous WORD values */ AcpiDmAddressFields (&Resource->Address16.Granularity, 16, Level); /* The ResourceSource fields are optional */ AcpiDmIndent (Level + 1); AcpiDmResourceSource (Resource, sizeof (AML_RESOURCE_ADDRESS16), Length); /* Type-specific flags */ AcpiDmAddressFlags (Resource); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmDwordDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a DWord Address Space descriptor * ******************************************************************************/ void AcpiDmDwordDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump resource name and flags */ AcpiDmAddressCommon (Resource, ACPI_RESOURCE_TYPE_ADDRESS32, Level); /* Dump the 5 contiguous DWORD values */ AcpiDmAddressFields (&Resource->Address32.Granularity, 32, Level); /* The ResourceSource fields are optional */ AcpiDmIndent (Level + 1); AcpiDmResourceSource (Resource, sizeof (AML_RESOURCE_ADDRESS32), Length); /* Type-specific flags */ AcpiDmAddressFlags (Resource); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmQwordDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a QWord Address Space descriptor * ******************************************************************************/ void AcpiDmQwordDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump resource name and flags */ AcpiDmAddressCommon (Resource, ACPI_RESOURCE_TYPE_ADDRESS64, Level); /* Dump the 5 contiguous QWORD values */ AcpiDmAddressFields (&Resource->Address64.Granularity, 64, Level); /* The ResourceSource fields are optional */ AcpiDmIndent (Level + 1); AcpiDmResourceSource (Resource, sizeof (AML_RESOURCE_ADDRESS64), Length); /* Type-specific flags */ AcpiDmAddressFlags (Resource); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmExtendedDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Extended Address Space descriptor * 
******************************************************************************/ void AcpiDmExtendedDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump resource name and flags */ AcpiDmAddressCommon (Resource, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64, Level); /* Dump the 5 contiguous QWORD values */ AcpiDmAddressFields (&Resource->ExtAddress64.Granularity, 64, Level); /* Extra field for this descriptor only */ AcpiDmIndent (Level + 1); AcpiDmDumpInteger64 (Resource->ExtAddress64.TypeSpecific, "Type-Specific Attributes"); /* Type-specific flags */ AcpiDmIndent (Level + 1); AcpiDmAddressFlags (Resource); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmMemory24Descriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Memory24 descriptor * ******************************************************************************/ void AcpiDmMemory24Descriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump name and read/write flag */ AcpiDmIndent (Level); AcpiOsPrintf ("Memory24 (%s,\n", AcpiGbl_RWDecode [Resource->Memory24.Flags & 1]); /* Dump the 4 contiguous WORD values */ AcpiDmMemoryFields (&Resource->Memory24.Minimum, 16, Level); AcpiDmIndent (Level + 1); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmMemory32Descriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Memory32 descriptor * ******************************************************************************/ void AcpiDmMemory32Descriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump name and read/write flag */ AcpiDmIndent (Level); AcpiOsPrintf ("Memory32 (%s,\n", AcpiGbl_RWDecode [Resource->Memory32.Flags & 1]); /* Dump the 4 contiguous DWORD values */ AcpiDmMemoryFields (&Resource->Memory32.Minimum, 32, Level); AcpiDmIndent (Level + 1); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmFixedMemory32Descriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Fixed Memory32 descriptor * ******************************************************************************/ void AcpiDmFixedMemory32Descriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { /* Dump name and read/write flag */ AcpiDmIndent (Level); AcpiOsPrintf ("Memory32Fixed (%s,\n", AcpiGbl_RWDecode [Resource->FixedMemory32.Flags & 1]); AcpiDmIndent (Level + 1); AcpiDmDumpInteger32 (Resource->FixedMemory32.Address, "Address Base"); AcpiDmIndent (Level + 1); AcpiDmDumpInteger32 (Resource->FixedMemory32.AddressLength, "Address Length"); AcpiDmIndent (Level + 1); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmGenericRegisterDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Generic Register descriptor * 
******************************************************************************/ void AcpiDmGenericRegisterDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { AcpiDmIndent (Level); AcpiOsPrintf ("Register ("); AcpiDmAddressSpace (Resource->GenericReg.AddressSpaceId); AcpiOsPrintf ("\n"); AcpiDmIndent (Level + 1); AcpiDmDumpInteger8 (Resource->GenericReg.BitWidth, "Register Bit Width"); AcpiDmIndent (Level + 1); AcpiDmDumpInteger8 (Resource->GenericReg.BitOffset, "Register Bit Offset"); AcpiDmIndent (Level + 1); AcpiDmDumpInteger64 (Resource->GenericReg.Address, "Register Address"); /* Optional field for ACPI 3.0 */ if (Resource->GenericReg.AccessSize) { AcpiDmIndent (Level + 1); AcpiOsPrintf ("0x%2.2X // %s\n", Resource->GenericReg.AccessSize, "Access Size"); } AcpiDmIndent (Level + 1); AcpiOsPrintf (")\n"); } /******************************************************************************* * * FUNCTION: AcpiDmInterruptDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a extended Interrupt descriptor * ******************************************************************************/ void AcpiDmInterruptDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { UINT32 i; AcpiDmIndent (Level); AcpiOsPrintf ("Interrupt (%s, %s, %s, %s, ", AcpiGbl_ConsumeDecode [(Resource->ExtendedIrq.Flags & 1)], AcpiGbl_HEDecode [(Resource->ExtendedIrq.Flags >> 1) & 1], AcpiGbl_LLDecode [(Resource->ExtendedIrq.Flags >> 2) & 1], AcpiGbl_SHRDecode [(Resource->ExtendedIrq.Flags >> 3) & 1]); /* * The ResourceSource fields are optional and appear after the interrupt * list. Must compute length based on length of the list. 
First xrupt * is included in the struct (reason for -1 below) */ AcpiDmResourceSource (Resource, sizeof (AML_RESOURCE_EXTENDED_IRQ) + (Resource->ExtendedIrq.InterruptCount - 1) * sizeof (UINT32), Resource->ExtendedIrq.ResourceLength); /* Dump the interrupt list */ AcpiOsPrintf (")\n"); AcpiDmIndent (Level); AcpiOsPrintf ("{\n"); for (i = 0; i < Resource->ExtendedIrq.InterruptCount; i++) { AcpiDmIndent (Level + 1); AcpiOsPrintf ("0x%8.8X,\n", (UINT32) Resource->ExtendedIrq.Interrupts[i]); } AcpiDmIndent (Level); AcpiOsPrintf ("}\n"); } /******************************************************************************* * * FUNCTION: AcpiDmVendorCommon * * PARAMETERS: Name - Descriptor name suffix * ByteData - Pointer to the vendor byte data * Length - Length of the byte data * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Vendor descriptor, both Large and Small * ******************************************************************************/ void AcpiDmVendorCommon ( char *Name, UINT8 *ByteData, UINT32 Length, UINT32 Level) { /* Dump descriptor name */ AcpiDmIndent (Level); AcpiOsPrintf ("Vendor%s // Length = 0x%.2X\n", Name, Length); /* Dump the vendor bytes */ AcpiDmIndent (Level); AcpiOsPrintf ("{\n"); AcpiDmDisasmByteList (Level + 1, ByteData, Length); AcpiDmIndent (Level); AcpiOsPrintf ("}\n"); } /******************************************************************************* * * FUNCTION: AcpiDmVendorLargeDescriptor * * PARAMETERS: Resource - Pointer to the resource descriptor * Length - Length of the descriptor in bytes * Level - Current source code indentation level * * RETURN: None * * DESCRIPTION: Decode a Vendor Large descriptor * ******************************************************************************/ void AcpiDmVendorLargeDescriptor ( AML_RESOURCE *Resource, UINT32 Length, UINT32 Level) { AcpiDmVendorCommon ("Long () ", ((UINT8 *) Resource) + sizeof (AML_RESOURCE_LARGE_HEADER), Length, Level); } #endif
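The flag decoders above all slice individual bits out of the general and type-specific flag bytes of an address descriptor. A standalone sketch of the same bit layout used by AcpiDmSpaceFlags (my own helper, not ACPICA code):

#include <stdio.h>

/* bit 0: resource consumer/producer, bit 1: decode type (DEC),
 * bit 2: minimum address fixed (MIF), bit 3: maximum address fixed (MAF) */
static void
ExampleDumpSpaceFlags (
    unsigned char           Flags)
{
    printf ("Consumer/Producer=%u, DEC=%u, MIF=%u, MAF=%u\n",
        Flags & 1,
        (Flags & 0x02) >> 1,
        (Flags & 0x04) >> 2,
        (Flags & 0x08) >> 3);
}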
485420.c
/* ======================================================================== * Copyright 1988-2006 University of Washington * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * * ======================================================================== */ /* * Program: Standard login for very old UNIX systems * * Author: Mark Crispin * Networks and Distributed Computing * Computing & Communications * University of Washington * Administration Building, AG-44 * Seattle, WA 98195 * Internet: [email protected] * * Date: 1 August 1988 * Last Edited: 30 August 2006 */ /* Log in * Accepts: login passwd struct * argument count * argument vector * Returns: T if success, NIL otherwise */ long loginpw (struct passwd *pw,int argc,char *argv[]) { return !(setgid (pw->pw_gid) || setuid (pw->pw_uid)); }
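One detail worth noting about loginpw() above: the group ID is changed before the user ID, because once setuid() has dropped root the process generally no longer has the privilege to change its group. A slightly more explicit sketch of the same sequence (my own elaboration, not part of the original):

#include <pwd.h>
#include <unistd.h>

/* Returns T (1) on success, NIL (0) otherwise, like loginpw() */
static long example_drop_privileges (struct passwd *pw)
{
  if (setgid (pw->pw_gid)) return 0;	/* must happen while still root */
  if (setuid (pw->pw_uid)) return 0;	/* drop user privileges last */
  return 1;
}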
910058.c
/* * (C) Copyright 2013 Kurento (http://kurento.org/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include <gst/check/gstcheck.h> #include <gst/gst.h> #include <glib.h> #include <valgrind/valgrind.h> #include <commons/kmsuriendpointstate.h> #define SINK_VIDEO_STREAM "sink_video_default" #define SINK_AUDIO_STREAM "sink_audio_default" #define KMS_ELEMENT_PAD_TYPE_AUDIO 1 #define KMS_ELEMENT_PAD_TYPE_VIDEO 2 gboolean set_state_start (gpointer *); gboolean set_state_pause (gpointer *); gboolean set_state_stop (gpointer *); static GstElement *recorder = NULL; static guint number_of_transitions; static gboolean expected_warnings; static guint test_number; static guint state; typedef struct _RequestPadData { gint n; gint count; gchar **pads; } RequestPadData; struct state_controller { KmsUriEndpointState state; guint seconds; }; static const struct state_controller trasnsitions0[] = { {KMS_URI_ENDPOINT_STATE_START, 2}, {KMS_URI_ENDPOINT_STATE_PAUSE, 1}, {KMS_URI_ENDPOINT_STATE_START, 2}, {KMS_URI_ENDPOINT_STATE_PAUSE, 1}, {KMS_URI_ENDPOINT_STATE_START, 2}, {KMS_URI_ENDPOINT_STATE_STOP, 1} }; static const struct state_controller trasnsitions1[] = { {KMS_URI_ENDPOINT_STATE_START, 2}, {KMS_URI_ENDPOINT_STATE_PAUSE, 1}, {KMS_URI_ENDPOINT_STATE_START, 1} }; static const struct state_controller * get_transtions () { switch (test_number) { case 0: return trasnsitions0; case 1: return trasnsitions1; default: fail ("Undefined transitions for test %d.", test_number); return NULL; } } static const gchar * state2string (KmsUriEndpointState state) { switch (state) { case KMS_URI_ENDPOINT_STATE_STOP: return "STOP"; case KMS_URI_ENDPOINT_STATE_START: return "START"; case KMS_URI_ENDPOINT_STATE_PAUSE: return "PAUSE"; default: return "Invalid state"; } } static void change_state (KmsUriEndpointState state) { GstElement *testsrc; GstElement *testsink; GST_DEBUG ("Setting recorder to state %s", state2string (state)); g_object_set (G_OBJECT (recorder), "state", state, NULL); /* Add more element to the pipeline to check that this does not affect to the timestamps */ testsrc = gst_element_factory_make ("videotestsrc", NULL); testsink = gst_element_factory_make ("fakesink", NULL); g_object_set (testsink, "async", FALSE, "sync", FALSE, NULL); g_object_set (testsrc, "is-live", TRUE, NULL); GST_DEBUG_OBJECT (recorder, "Adding more elements"); gst_bin_add_many (GST_BIN (GST_OBJECT_PARENT (recorder)), testsrc, testsink, NULL); gst_element_link (testsrc, testsink); gst_element_sync_state_with_parent (testsink); gst_element_sync_state_with_parent (testsrc); } static void bus_msg (GstBus * bus, GstMessage * msg, gpointer pipe) { switch (GST_MESSAGE_TYPE (msg)) { case GST_MESSAGE_ERROR:{ GError *err = NULL; gchar *dbg_info = NULL; gchar *err_str; GST_ERROR ("Error: %" GST_PTR_FORMAT, msg); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipe), GST_DEBUG_GRAPH_SHOW_ALL, "bus_error"); gst_message_parse_error (msg, &err, &dbg_info); err_str = g_strdup_printf ("Error received on bus: %s: %s", err->message, dbg_info); GST_ERROR ("%s", 
err_str); g_error_free (err); g_free (dbg_info); fail (err_str); g_free (err_str); break; } case GST_MESSAGE_WARNING:{ GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipe), GST_DEBUG_GRAPH_SHOW_ALL, "warning"); if (expected_warnings) GST_INFO ("Do not worry. Warning expected"); else fail ("Warnings not expected"); break; } case GST_MESSAGE_STATE_CHANGED:{ GST_TRACE ("Event: %" GST_PTR_FORMAT, msg); break; } default: break; } } static void transite (gpointer loop) { const struct state_controller *transitions = get_transtions (); if (state < number_of_transitions) { change_state (transitions[state].state); } else { GST_DEBUG ("All transitions done. Finishing recorder test suite"); g_main_loop_quit (loop); } } static gboolean transite_cb (gpointer loop) { state++; transite (loop); return FALSE; } static void state_changed_cb (GstElement * recorder, KmsUriEndpointState newState, gpointer loop) { const struct state_controller *transitions = get_transtions (); guint seconds = transitions[state].seconds; GST_DEBUG ("State changed %s. Time %d seconds.", state2string (newState), seconds); if (RUNNING_ON_VALGRIND) { g_timeout_add (seconds * 10000, transite_cb, loop); } else { g_timeout_add (seconds * 1000, transite_cb, loop); } } static void remove_on_unlinked (GstPad * pad, GstPad * peer, gpointer data) { GstElement *parent = gst_pad_get_parent_element (pad); if (parent != NULL) { gst_element_release_request_pad (parent, pad); g_object_unref (parent); } } static void connect_pads_and_remove_on_unlinked (GstElement * agnosticbin, GstElement * elem, const gchar * sink_name) { GstPad *src; src = gst_element_get_request_pad (agnosticbin, "src_%u"); g_assert (src != NULL); g_signal_connect (src, "unlinked", G_CALLBACK (remove_on_unlinked), NULL); gst_element_link_pads (agnosticbin, GST_OBJECT_NAME (src), elem, sink_name); g_object_unref (src); } typedef struct _KmsConnectData { GstElement *src; const gchar *pad_name; gulong id; } KmsConnectData; static void connect_sink (GstElement * element, GstPad * pad, gpointer user_data) { KmsConnectData *data = user_data; GST_DEBUG_OBJECT (pad, "New pad %" GST_PTR_FORMAT, element); if (g_strcmp0 (GST_OBJECT_NAME (pad), data->pad_name)) { return; } connect_pads_and_remove_on_unlinked (data->src, element, data->pad_name); GST_INFO_OBJECT (pad, "Linking %s", data->pad_name); } static void kms_connect_data_destroy (gpointer data) { g_slice_free (KmsConnectData, data); } static void connect_sink_async (GstElement * recorder, GstElement * src, const gchar * pad_name) { KmsConnectData *data = g_slice_new (KmsConnectData); data->src = src; data->pad_name = pad_name; data->id = g_signal_connect_data (recorder, "pad-added", G_CALLBACK (connect_sink), data, (GClosureNotify) kms_connect_data_destroy, 0); } static void link_to_recorder (GstElement * recorder, GstElement * src, GstElement * pipe, const gchar * pad_name) { GstPad *sink; GstElement *agnosticbin = gst_element_factory_make ("agnosticbin", NULL); gst_bin_add (GST_BIN (pipe), agnosticbin); gst_element_link (src, agnosticbin); gst_element_sync_state_with_parent (agnosticbin); connect_sink_async (recorder, agnosticbin, pad_name); sink = gst_element_get_static_pad (recorder, pad_name); if (sink != NULL) { connect_pads_and_remove_on_unlinked (agnosticbin, recorder, pad_name); g_object_unref (sink); } } GST_START_TEST (check_states_pipeline) { GstElement *pipeline, *videotestsrc, *vencoder, *aencoder, *audiotestsrc, *timeoverlay; guint bus_watch_id; GstBus *bus; GMainLoop *loop = g_main_loop_new (NULL, FALSE); 
number_of_transitions = 6; expected_warnings = FALSE; test_number = 0; state = 0; /* Create gstreamer elements */ pipeline = gst_pipeline_new ("recorderendpoint0-test"); videotestsrc = gst_element_factory_make ("videotestsrc", NULL); fail_unless (videotestsrc != NULL); vencoder = gst_element_factory_make ("vp8enc", NULL); fail_unless (vencoder != NULL); aencoder = gst_element_factory_make ("vorbisenc", NULL); fail_unless (aencoder != NULL); timeoverlay = gst_element_factory_make ("timeoverlay", NULL); fail_unless (timeoverlay != NULL); audiotestsrc = gst_element_factory_make ("audiotestsrc", NULL); fail_unless (audiotestsrc != NULL); recorder = gst_element_factory_make ("recorderendpoint", NULL); fail_unless (recorder != NULL); g_object_set (G_OBJECT (recorder), "uri", "file:///tmp/state_recorder.webm", "profile", 0 /* WEBM */ , NULL); bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); bus_watch_id = gst_bus_add_watch (bus, gst_bus_async_signal_func, NULL); g_signal_connect (bus, "message", G_CALLBACK (bus_msg), pipeline); g_object_unref (bus); gst_bin_add_many (GST_BIN (pipeline), audiotestsrc, videotestsrc, vencoder, aencoder, recorder, timeoverlay, NULL); gst_element_link (videotestsrc, timeoverlay); gst_element_link (timeoverlay, vencoder); gst_element_link (audiotestsrc, aencoder); link_to_recorder (recorder, vencoder, pipeline, SINK_VIDEO_STREAM); link_to_recorder (recorder, aencoder, pipeline, SINK_AUDIO_STREAM); g_signal_connect (recorder, "state-changed", G_CALLBACK (state_changed_cb), loop); g_object_set (G_OBJECT (videotestsrc), "is-live", TRUE, "do-timestamp", TRUE, "pattern", 18, NULL); g_object_set (G_OBJECT (audiotestsrc), "is-live", TRUE, "do-timestamp", TRUE, "wave", 8, NULL); g_object_set (G_OBJECT (timeoverlay), "font-desc", "Sans 28", NULL); gst_element_set_state (pipeline, GST_STATE_PLAYING); transite (loop); g_main_loop_run (loop); GST_DEBUG ("Stop executed"); gst_element_set_state (pipeline, GST_STATE_NULL); gst_object_unref (GST_OBJECT (pipeline)); GST_DEBUG ("Pipe released"); g_source_remove (bus_watch_id); g_main_loop_unref (loop); } GST_END_TEST GST_START_TEST (warning_pipeline) { GstElement *pipeline, *videotestsrc, *vencoder, *aencoder, *audiotestsrc, *timeoverlay; guint bus_watch_id; GstBus *bus; GMainLoop *loop = g_main_loop_new (NULL, FALSE); number_of_transitions = 3; expected_warnings = TRUE; test_number = 1; state = 0; /* Create gstreamer elements */ pipeline = gst_pipeline_new ("recorderendpoint0-test"); videotestsrc = gst_element_factory_make ("videotestsrc", NULL); fail_unless (videotestsrc != NULL); vencoder = gst_element_factory_make ("vp8enc", NULL); fail_unless (vencoder != NULL); aencoder = gst_element_factory_make ("vorbisenc", NULL); fail_unless (aencoder != NULL); timeoverlay = gst_element_factory_make ("timeoverlay", NULL); fail_unless (timeoverlay != NULL); audiotestsrc = gst_element_factory_make ("audiotestsrc", NULL); fail_unless (audiotestsrc != NULL); recorder = gst_element_factory_make ("recorderendpoint", NULL); fail_unless (recorder != NULL); g_object_set (G_OBJECT (recorder), "uri", "file:///tmp/warning_pipeline.webm", "profile", 0 /* WEBM */ , NULL); bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); bus_watch_id = gst_bus_add_watch (bus, gst_bus_async_signal_func, NULL); g_signal_connect (bus, "message", G_CALLBACK (bus_msg), pipeline); g_object_unref (bus); gst_bin_add_many (GST_BIN (pipeline), audiotestsrc, videotestsrc, vencoder, aencoder, recorder, timeoverlay, NULL); gst_element_link (videotestsrc, timeoverlay); 
gst_element_link (timeoverlay, vencoder); gst_element_link (audiotestsrc, aencoder); link_to_recorder (recorder, vencoder, pipeline, SINK_VIDEO_STREAM); link_to_recorder (recorder, aencoder, pipeline, SINK_AUDIO_STREAM); g_signal_connect (recorder, "state-changed", G_CALLBACK (state_changed_cb), loop); g_object_set (G_OBJECT (videotestsrc), "is-live", TRUE, "do-timestamp", TRUE, "pattern", 18, NULL); g_object_set (G_OBJECT (audiotestsrc), "is-live", TRUE, "do-timestamp", TRUE, "wave", 8, NULL); g_object_set (G_OBJECT (timeoverlay), "font-desc", "Sans 28", NULL); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "entering_main_loop"); gst_element_set_state (pipeline, GST_STATE_PLAYING); transite (loop); g_main_loop_run (loop); GST_DEBUG ("Stop executed"); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "after_main_loop"); gst_element_set_state (pipeline, GST_STATE_NULL); gst_object_unref (GST_OBJECT (pipeline)); GST_DEBUG ("Pipe released"); g_source_remove (bus_watch_id); g_main_loop_unref (loop); } GST_END_TEST static gboolean quit_main_loop_idle (gpointer data) { GMainLoop *loop = data; GST_DEBUG ("Test finished exiting main loop"); g_main_loop_quit (loop); return FALSE; } static gboolean stop_recorder (gpointer data) { GST_DEBUG ("Setting recorder to STOP"); g_object_set (G_OBJECT (recorder), "state", KMS_URI_ENDPOINT_STATE_STOP, NULL); return FALSE; } static void state_changed_cb3 (GstElement * recorder, KmsUriEndpointState newState, gpointer loop) { GST_DEBUG ("State changed %s.", state2string (newState)); if (newState == KMS_URI_ENDPOINT_STATE_START) { if (RUNNING_ON_VALGRIND) { g_timeout_add (15000, stop_recorder, NULL); } else { g_timeout_add (3000, stop_recorder, NULL); } } else if (newState == KMS_URI_ENDPOINT_STATE_STOP) { g_idle_add (quit_main_loop_idle, loop); } } GST_START_TEST (check_video_only) { GstElement *pipeline, *videotestsrc, *vencoder, *timeoverlay; guint bus_watch_id; GstBus *bus; GMainLoop *loop = g_main_loop_new (NULL, FALSE); expected_warnings = FALSE; /* Create gstreamer elements */ pipeline = gst_pipeline_new ("recorderendpoint0-test"); videotestsrc = gst_element_factory_make ("videotestsrc", NULL); fail_unless (videotestsrc != NULL); vencoder = gst_element_factory_make ("vp8enc", NULL); fail_unless (vencoder != NULL); timeoverlay = gst_element_factory_make ("timeoverlay", NULL); fail_unless (timeoverlay != NULL); recorder = gst_element_factory_make ("recorderendpoint", NULL); fail_unless (recorder != NULL); g_object_set (G_OBJECT (recorder), "uri", "file:///tmp/check_video_only.webm", "profile", 2 /* WEBM_VIDEO_ONLY */ , NULL); bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); bus_watch_id = gst_bus_add_watch (bus, gst_bus_async_signal_func, NULL); g_signal_connect (bus, "message", G_CALLBACK (bus_msg), pipeline); g_object_unref (bus); gst_bin_add_many (GST_BIN (pipeline), videotestsrc, vencoder, recorder, timeoverlay, NULL); gst_element_link (videotestsrc, timeoverlay); gst_element_link (timeoverlay, vencoder); link_to_recorder (recorder, vencoder, pipeline, SINK_VIDEO_STREAM); g_signal_connect (recorder, "state-changed", G_CALLBACK (state_changed_cb3), loop); g_object_set (G_OBJECT (videotestsrc), "is-live", TRUE, "do-timestamp", TRUE, "pattern", 18, NULL); g_object_set (G_OBJECT (timeoverlay), "font-desc", "Sans 28", NULL); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "entering_main_loop"); g_object_set (G_OBJECT (recorder), "state", 
KMS_URI_ENDPOINT_STATE_START, NULL); gst_element_set_state (pipeline, GST_STATE_PLAYING); g_main_loop_run (loop); GST_DEBUG ("Stop executed"); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "after_main_loop"); gst_element_set_state (pipeline, GST_STATE_NULL); gst_object_unref (GST_OBJECT (pipeline)); GST_DEBUG ("Pipe released"); g_source_remove (bus_watch_id); g_main_loop_unref (loop); } GST_END_TEST; GST_START_TEST (check_audio_only) { GstElement *pipeline, *audiotestsrc, *encoder; guint bus_watch_id; GstBus *bus; GMainLoop *loop = g_main_loop_new (NULL, FALSE); expected_warnings = FALSE; /* Create gstreamer elements */ pipeline = gst_pipeline_new ("recorderendpoint0-test"); audiotestsrc = gst_element_factory_make ("audiotestsrc", NULL); encoder = gst_element_factory_make ("vorbisenc", NULL); recorder = gst_element_factory_make ("recorderendpoint", NULL); g_object_set (G_OBJECT (recorder), "uri", "file:///tmp/check_audio_only.webm", "profile", 3 /* WEBM_AUDIO_ONLY */ , NULL); bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); bus_watch_id = gst_bus_add_watch (bus, gst_bus_async_signal_func, NULL); g_signal_connect (bus, "message", G_CALLBACK (bus_msg), pipeline); g_object_unref (bus); gst_bin_add_many (GST_BIN (pipeline), audiotestsrc, encoder, recorder, NULL); gst_element_link (audiotestsrc, encoder); link_to_recorder (recorder, encoder, pipeline, SINK_AUDIO_STREAM); g_signal_connect (recorder, "state-changed", G_CALLBACK (state_changed_cb3), loop); g_object_set (G_OBJECT (audiotestsrc), "is-live", TRUE, "do-timestamp", TRUE, NULL); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "entering_main_loop"); g_object_set (G_OBJECT (recorder), "state", KMS_URI_ENDPOINT_STATE_START, NULL); gst_element_set_state (pipeline, GST_STATE_PLAYING); g_main_loop_run (loop); GST_DEBUG ("Stop executed"); GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS (GST_BIN (pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "after_main_loop"); gst_element_set_state (pipeline, GST_STATE_NULL); gst_object_unref (GST_OBJECT (pipeline)); GST_DEBUG ("Pipe released"); g_source_remove (bus_watch_id); g_main_loop_unref (loop); } GST_END_TEST static gboolean check_support_for_ksr () { GstPlugin *plugin = NULL; gboolean supported; plugin = gst_plugin_load_by_name ("kmsrecorder"); supported = plugin != NULL; g_clear_object (&plugin); return supported; } static void state_changed_ksr (GstElement * recorder, KmsUriEndpointState newState, gpointer loop) { GST_DEBUG ("State changed %s.", state2string (newState)); if (newState == KMS_URI_ENDPOINT_STATE_STOP) { g_idle_add (quit_main_loop_idle, loop); } } static gboolean is_pad_requested (GstPad * pad, gchar ** pads, gint n) { gboolean found = FALSE; gchar *padname; gint i; padname = gst_pad_get_name (pad); for (i = 0; i < n && !found; i++) { found = g_strcmp0 (padname, pads[i]) == 0; } g_free (padname); return found; } static void sink_pad_added (GstElement * element, GstPad * new_pad, gpointer user_data) { RequestPadData *data = (RequestPadData *) user_data; GST_INFO_OBJECT (element, "Added pad %" GST_PTR_FORMAT, new_pad); if (!is_pad_requested (new_pad, data->pads, data->n)) { return; } if (g_atomic_int_dec_and_test (&data->count)) { GST_DEBUG_OBJECT (element, "All sink pads created"); g_idle_add (stop_recorder, NULL); } } GST_START_TEST (check_ksm_sink_request) { GstElement *pipeline; guint bus_watch_id; RequestPadData data; GstBus *bus; GMainLoop *loop = g_main_loop_new (NULL, FALSE); guint i; data.count = data.n = 4; data.pads = 
      g_slice_alloc0 (sizeof (gchar *) * data.n);

  pipeline = gst_pipeline_new (__FUNCTION__);
  recorder = gst_element_factory_make ("recorderendpoint", NULL);
  g_object_set (G_OBJECT (recorder), "uri", "file:///tmp/output.ksr",
      "profile", 6 /* KMS_RECORDING_PROFILE_KSR */ , NULL);

  g_signal_connect (recorder, "state-changed",
      G_CALLBACK (state_changed_ksr), loop);
  g_signal_connect (recorder, "pad-added", G_CALLBACK (sink_pad_added), &data);

  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, gst_bus_async_signal_func, NULL);
  g_signal_connect (bus, "message", G_CALLBACK (bus_msg), pipeline);
  g_object_unref (bus);

  gst_bin_add (GST_BIN (pipeline), recorder);

  /* request sink pads using the action signal */
  for (i = 0; i < data.n; i++) {
    gchar *id = g_strdup_printf ("tag_%u", i);

    g_signal_emit_by_name (recorder, "request-new-pad",
        KMS_ELEMENT_PAD_TYPE_VIDEO, id, GST_PAD_SINK, &data.pads[i]);
    g_free (id);
  }

  g_object_set (G_OBJECT (recorder), "state", KMS_URI_ENDPOINT_STATE_START,
      NULL);

  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  g_main_loop_run (loop);
  GST_DEBUG ("Stop executed");

  for (i = 0; i < data.n; i++) {
    g_free (data.pads[i]);
  }
  g_slice_free1 (sizeof (gchar *) * data.n, data.pads);

  gst_element_set_state (pipeline, GST_STATE_NULL);
  gst_object_unref (GST_OBJECT (pipeline));
  GST_DEBUG ("Pipe released");

  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
}

GST_END_TEST
/*******************************/
/* RecorderEndpoint test suite */
/*******************************/
static Suite *
recorderendpoint_suite (void)
{
  Suite *s = suite_create ("recorderendpoint");
  TCase *tc_chain = tcase_create ("element");

  suite_add_tcase (s, tc_chain);

  /* Enable test when recorder is able to emit droppable buffers for the muxer */
  tcase_add_test (tc_chain, check_video_only);
  tcase_add_test (tc_chain, check_audio_only);
  tcase_add_test (tc_chain, check_states_pipeline);
  tcase_add_test (tc_chain, warning_pipeline);

  if (check_support_for_ksr ()) {
    tcase_add_test (tc_chain, check_ksm_sink_request);
  } else {
    GST_WARNING ("No ksr profile supported. Test skipped");
  }

  return s;
}

GST_CHECK_MAIN (recorderendpoint);
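/*
 * Illustrative aside (not part of the test file above): the tests end their
 * GLib main loop by arming a timeout once the recorder reports START and then
 * quitting from an idle source. Below is a minimal, standalone sketch of that
 * timeout-plus-idle pattern using plain GLib and hypothetical callback names;
 * compile it separately, e.g. gcc demo.c $(pkg-config --cflags --libs glib-2.0).
 */
#include <glib.h>

/* Quit the loop from the main context. */
static gboolean
quit_idle (gpointer data)
{
  g_main_loop_quit ((GMainLoop *) data);
  return G_SOURCE_REMOVE;
}

/* Fires once the scenario has run long enough; the real tests set the
 * recorder to STOP here before quitting. */
static gboolean
timeout_cb (gpointer data)
{
  g_idle_add (quit_idle, data);
  return G_SOURCE_REMOVE;
}

int
main (void)
{
  GMainLoop *loop = g_main_loop_new (NULL, FALSE);

  g_timeout_add (3000, timeout_cb, loop);
  g_main_loop_run (loop);
  g_main_loop_unref (loop);

  return 0;
}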
979992.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Xilinx Zynq GPIO device driver * * Copyright (C) 2009 - 2014 Xilinx, Inc. */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/of.h> #define DRIVER_NAME "zynq-gpio" /* Maximum banks */ #define ZYNQ_GPIO_MAX_BANK 4 #define ZYNQMP_GPIO_MAX_BANK 6 #define VERSAL_GPIO_MAX_BANK 4 #define PMC_GPIO_MAX_BANK 5 #define VERSAL_UNUSED_BANKS 2 #define ZYNQ_GPIO_BANK0_NGPIO 32 #define ZYNQ_GPIO_BANK1_NGPIO 22 #define ZYNQ_GPIO_BANK2_NGPIO 32 #define ZYNQ_GPIO_BANK3_NGPIO 32 #define ZYNQMP_GPIO_BANK0_NGPIO 26 #define ZYNQMP_GPIO_BANK1_NGPIO 26 #define ZYNQMP_GPIO_BANK2_NGPIO 26 #define ZYNQMP_GPIO_BANK3_NGPIO 32 #define ZYNQMP_GPIO_BANK4_NGPIO 32 #define ZYNQMP_GPIO_BANK5_NGPIO 32 #define ZYNQ_GPIO_NR_GPIOS 118 #define ZYNQMP_GPIO_NR_GPIOS 174 #define ZYNQ_GPIO_BANK0_PIN_MIN(str) 0 #define ZYNQ_GPIO_BANK0_PIN_MAX(str) (ZYNQ_GPIO_BANK0_PIN_MIN(str) + \ ZYNQ##str##_GPIO_BANK0_NGPIO - 1) #define ZYNQ_GPIO_BANK1_PIN_MIN(str) (ZYNQ_GPIO_BANK0_PIN_MAX(str) + 1) #define ZYNQ_GPIO_BANK1_PIN_MAX(str) (ZYNQ_GPIO_BANK1_PIN_MIN(str) + \ ZYNQ##str##_GPIO_BANK1_NGPIO - 1) #define ZYNQ_GPIO_BANK2_PIN_MIN(str) (ZYNQ_GPIO_BANK1_PIN_MAX(str) + 1) #define ZYNQ_GPIO_BANK2_PIN_MAX(str) (ZYNQ_GPIO_BANK2_PIN_MIN(str) + \ ZYNQ##str##_GPIO_BANK2_NGPIO - 1) #define ZYNQ_GPIO_BANK3_PIN_MIN(str) (ZYNQ_GPIO_BANK2_PIN_MAX(str) + 1) #define ZYNQ_GPIO_BANK3_PIN_MAX(str) (ZYNQ_GPIO_BANK3_PIN_MIN(str) + \ ZYNQ##str##_GPIO_BANK3_NGPIO - 1) #define ZYNQ_GPIO_BANK4_PIN_MIN(str) (ZYNQ_GPIO_BANK3_PIN_MAX(str) + 1) #define ZYNQ_GPIO_BANK4_PIN_MAX(str) (ZYNQ_GPIO_BANK4_PIN_MIN(str) + \ ZYNQ##str##_GPIO_BANK4_NGPIO - 1) #define ZYNQ_GPIO_BANK5_PIN_MIN(str) (ZYNQ_GPIO_BANK4_PIN_MAX(str) + 1) #define ZYNQ_GPIO_BANK5_PIN_MAX(str) (ZYNQ_GPIO_BANK5_PIN_MIN(str) + \ ZYNQ##str##_GPIO_BANK5_NGPIO - 1) /* Register offsets for the GPIO device */ /* LSW Mask & Data -WO */ #define ZYNQ_GPIO_DATA_LSW_OFFSET(BANK) (0x000 + (8 * BANK)) /* MSW Mask & Data -WO */ #define ZYNQ_GPIO_DATA_MSW_OFFSET(BANK) (0x004 + (8 * BANK)) /* Data Register-RW */ #define ZYNQ_GPIO_DATA_OFFSET(BANK) (0x040 + (4 * BANK)) #define ZYNQ_GPIO_DATA_RO_OFFSET(BANK) (0x060 + (4 * BANK)) /* Direction mode reg-RW */ #define ZYNQ_GPIO_DIRM_OFFSET(BANK) (0x204 + (0x40 * BANK)) /* Output enable reg-RW */ #define ZYNQ_GPIO_OUTEN_OFFSET(BANK) (0x208 + (0x40 * BANK)) /* Interrupt mask reg-RO */ #define ZYNQ_GPIO_INTMASK_OFFSET(BANK) (0x20C + (0x40 * BANK)) /* Interrupt enable reg-WO */ #define ZYNQ_GPIO_INTEN_OFFSET(BANK) (0x210 + (0x40 * BANK)) /* Interrupt disable reg-WO */ #define ZYNQ_GPIO_INTDIS_OFFSET(BANK) (0x214 + (0x40 * BANK)) /* Interrupt status reg-RO */ #define ZYNQ_GPIO_INTSTS_OFFSET(BANK) (0x218 + (0x40 * BANK)) /* Interrupt type reg-RW */ #define ZYNQ_GPIO_INTTYPE_OFFSET(BANK) (0x21C + (0x40 * BANK)) /* Interrupt polarity reg-RW */ #define ZYNQ_GPIO_INTPOL_OFFSET(BANK) (0x220 + (0x40 * BANK)) /* Interrupt on any, reg-RW */ #define ZYNQ_GPIO_INTANY_OFFSET(BANK) (0x224 + (0x40 * BANK)) /* Disable all interrupts mask */ #define ZYNQ_GPIO_IXR_DISABLE_ALL 0xFFFFFFFF /* Mid pin number of a bank */ #define ZYNQ_GPIO_MID_PIN_NUM 16 /* GPIO upper 16 bit mask */ #define ZYNQ_GPIO_UPPER_MASK 0xFFFF0000 /* set to differentiate zynq from zynqmp, 0=zynqmp, 1=zynq */ #define ZYNQ_GPIO_QUIRK_IS_ZYNQ BIT(0) 
#define GPIO_QUIRK_DATA_RO_BUG BIT(1) #define GPIO_QUIRK_VERSAL BIT(2) struct gpio_regs { u32 datamsw[ZYNQMP_GPIO_MAX_BANK]; u32 datalsw[ZYNQMP_GPIO_MAX_BANK]; u32 dirm[ZYNQMP_GPIO_MAX_BANK]; u32 outen[ZYNQMP_GPIO_MAX_BANK]; u32 int_en[ZYNQMP_GPIO_MAX_BANK]; u32 int_dis[ZYNQMP_GPIO_MAX_BANK]; u32 int_type[ZYNQMP_GPIO_MAX_BANK]; u32 int_polarity[ZYNQMP_GPIO_MAX_BANK]; u32 int_any[ZYNQMP_GPIO_MAX_BANK]; }; /** * struct zynq_gpio - gpio device private data structure * @chip: instance of the gpio_chip * @base_addr: base address of the GPIO device * @clk: clock resource for this controller * @irq: interrupt for the GPIO device * @p_data: pointer to platform data * @context: context registers * @dirlock: lock used for direction in/out synchronization */ struct zynq_gpio { struct gpio_chip chip; void __iomem *base_addr; struct clk *clk; int irq; const struct zynq_platform_data *p_data; struct gpio_regs context; spinlock_t dirlock; /* lock */ }; /** * struct zynq_platform_data - zynq gpio platform data structure * @label: string to store in gpio->label * @quirks: Flags is used to identify the platform * @ngpio: max number of gpio pins * @max_bank: maximum number of gpio banks * @bank_min: this array represents bank's min pin * @bank_max: this array represents bank's max pin */ struct zynq_platform_data { const char *label; u32 quirks; u16 ngpio; int max_bank; int bank_min[ZYNQMP_GPIO_MAX_BANK]; int bank_max[ZYNQMP_GPIO_MAX_BANK]; }; static struct irq_chip zynq_gpio_level_irqchip; static struct irq_chip zynq_gpio_edge_irqchip; /** * zynq_gpio_is_zynq - test if HW is zynq or zynqmp * @gpio: Pointer to driver data struct * * Return: 0 if zynqmp, 1 if zynq. */ static int zynq_gpio_is_zynq(struct zynq_gpio *gpio) { return !!(gpio->p_data->quirks & ZYNQ_GPIO_QUIRK_IS_ZYNQ); } /** * gpio_data_ro_bug - test if HW bug exists or not * @gpio: Pointer to driver data struct * * Return: 0 if bug doesnot exist, 1 if bug exists. */ static int gpio_data_ro_bug(struct zynq_gpio *gpio) { return !!(gpio->p_data->quirks & GPIO_QUIRK_DATA_RO_BUG); } /** * zynq_gpio_get_bank_pin - Get the bank number and pin number within that bank * for a given pin in the GPIO device * @pin_num: gpio pin number within the device * @bank_num: an output parameter used to return the bank number of the gpio * pin * @bank_pin_num: an output parameter used to return pin number within a bank * for the given gpio pin * @gpio: gpio device data structure * * Returns the bank number and pin offset within the bank. */ static inline void zynq_gpio_get_bank_pin(unsigned int pin_num, unsigned int *bank_num, unsigned int *bank_pin_num, struct zynq_gpio *gpio) { int bank; for (bank = 0; bank < gpio->p_data->max_bank; bank++) { if ((pin_num >= gpio->p_data->bank_min[bank]) && (pin_num <= gpio->p_data->bank_max[bank])) { *bank_num = bank; *bank_pin_num = pin_num - gpio->p_data->bank_min[bank]; return; } if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL) bank = bank + VERSAL_UNUSED_BANKS; } /* default */ WARN(true, "invalid GPIO pin number: %u", pin_num); *bank_num = 0; *bank_pin_num = 0; } /** * zynq_gpio_get_value - Get the state of the specified pin of GPIO device * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * * This function reads the state of the specified pin of the GPIO device. * * Return: 0 if the pin is low, 1 if pin is high. 
*/ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin) { u32 data; unsigned int bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(chip); zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); if (gpio_data_ro_bug(gpio)) { if (zynq_gpio_is_zynq(gpio)) { if (bank_num <= 1) { data = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_RO_OFFSET(bank_num)); } else { data = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_OFFSET(bank_num)); } } else { if (bank_num <= 2) { data = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_RO_OFFSET(bank_num)); } else { data = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_OFFSET(bank_num)); } } } else { data = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_RO_OFFSET(bank_num)); } return (data >> bank_pin_num) & 1; } /** * zynq_gpio_set_value - Modify the state of the pin with specified value * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * @state: value used to modify the state of the specified pin * * This function calculates the register offset (i.e to lower 16 bits or * upper 16 bits) based on the given pin number and sets the state of a * gpio pin to the specified value. The state is either 0 or non-zero. */ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin, int state) { unsigned int reg_offset, bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(chip); zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); if (bank_pin_num >= ZYNQ_GPIO_MID_PIN_NUM) { /* only 16 data bits in bit maskable reg */ bank_pin_num -= ZYNQ_GPIO_MID_PIN_NUM; reg_offset = ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num); } else { reg_offset = ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num); } /* * get the 32 bit value to be written to the mask/data register where * the upper 16 bits is the mask and lower 16 bits is the data */ state = !!state; state = ~(1 << (bank_pin_num + ZYNQ_GPIO_MID_PIN_NUM)) & ((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK); writel_relaxed(state, gpio->base_addr + reg_offset); } /** * zynq_gpio_dir_in - Set the direction of the specified GPIO pin as input * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * * This function uses the read-modify-write sequence to set the direction of * the gpio pin as input. * * Return: 0 always */ static int zynq_gpio_dir_in(struct gpio_chip *chip, unsigned int pin) { u32 reg; unsigned int bank_num, bank_pin_num; unsigned long flags; struct zynq_gpio *gpio = gpiochip_get_data(chip); zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); /* * On zynq bank 0 pins 7 and 8 are special and cannot be used * as inputs. */ if (zynq_gpio_is_zynq(gpio) && bank_num == 0 && (bank_pin_num == 7 || bank_pin_num == 8)) return -EINVAL; /* clear the bit in direction mode reg to set the pin as input */ spin_lock_irqsave(&gpio->dirlock, flags); reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); reg &= ~BIT(bank_pin_num); writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); spin_unlock_irqrestore(&gpio->dirlock, flags); return 0; } /** * zynq_gpio_dir_out - Set the direction of the specified GPIO pin as output * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * @state: value to be written to specified pin * * This function sets the direction of specified GPIO pin as output, configures * the Output Enable register for the pin and uses zynq_gpio_set to set * the state of the pin to the value specified. 
* * Return: 0 always */ static int zynq_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int state) { u32 reg; unsigned int bank_num, bank_pin_num; unsigned long flags; struct zynq_gpio *gpio = gpiochip_get_data(chip); zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); /* set the GPIO pin as output */ spin_lock_irqsave(&gpio->dirlock, flags); reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); reg |= BIT(bank_pin_num); writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); /* configure the output enable reg for the pin */ reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num)); reg |= BIT(bank_pin_num); writel_relaxed(reg, gpio->base_addr + ZYNQ_GPIO_OUTEN_OFFSET(bank_num)); spin_unlock_irqrestore(&gpio->dirlock, flags); /* set the state of the pin */ zynq_gpio_set_value(chip, pin, state); return 0; } /** * zynq_gpio_get_direction - Read the direction of the specified GPIO pin * @chip: gpio_chip instance to be worked on * @pin: gpio pin number within the device * * This function returns the direction of the specified GPIO. * * Return: GPIO_LINE_DIRECTION_OUT or GPIO_LINE_DIRECTION_IN */ static int zynq_gpio_get_direction(struct gpio_chip *chip, unsigned int pin) { u32 reg; unsigned int bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(chip); zynq_gpio_get_bank_pin(pin, &bank_num, &bank_pin_num, gpio); reg = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); if (reg & BIT(bank_pin_num)) return GPIO_LINE_DIRECTION_OUT; return GPIO_LINE_DIRECTION_IN; } /** * zynq_gpio_irq_mask - Disable the interrupts for a gpio pin * @irq_data: per irq and chip data passed down to chip functions * * This function calculates gpio pin number from irq number and sets the * bit in the Interrupt Disable register of the corresponding bank to disable * interrupts for that pin. */ static void zynq_gpio_irq_mask(struct irq_data *irq_data) { unsigned int device_pin_num, bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(irq_data_get_irq_chip_data(irq_data)); device_pin_num = irq_data->hwirq; zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); writel_relaxed(BIT(bank_pin_num), gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); } /** * zynq_gpio_irq_unmask - Enable the interrupts for a gpio pin * @irq_data: irq data containing irq number of gpio pin for the interrupt * to enable * * This function calculates the gpio pin number from irq number and sets the * bit in the Interrupt Enable register of the corresponding bank to enable * interrupts for that pin. */ static void zynq_gpio_irq_unmask(struct irq_data *irq_data) { unsigned int device_pin_num, bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(irq_data_get_irq_chip_data(irq_data)); device_pin_num = irq_data->hwirq; zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); writel_relaxed(BIT(bank_pin_num), gpio->base_addr + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); } /** * zynq_gpio_irq_ack - Acknowledge the interrupt of a gpio pin * @irq_data: irq data containing irq number of gpio pin for the interrupt * to ack * * This function calculates gpio pin number from irq number and sets the bit * in the Interrupt Status Register of the corresponding bank, to ACK the irq. 
*/ static void zynq_gpio_irq_ack(struct irq_data *irq_data) { unsigned int device_pin_num, bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(irq_data_get_irq_chip_data(irq_data)); device_pin_num = irq_data->hwirq; zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); writel_relaxed(BIT(bank_pin_num), gpio->base_addr + ZYNQ_GPIO_INTSTS_OFFSET(bank_num)); } /** * zynq_gpio_irq_enable - Enable the interrupts for a gpio pin * @irq_data: irq data containing irq number of gpio pin for the interrupt * to enable * * Clears the INTSTS bit and unmasks the given interrupt. */ static void zynq_gpio_irq_enable(struct irq_data *irq_data) { /* * The Zynq GPIO controller does not disable interrupt detection when * the interrupt is masked and only disables the propagation of the * interrupt. This means when the controller detects an interrupt * condition while the interrupt is logically disabled it will propagate * that interrupt event once the interrupt is enabled. This will cause * the interrupt consumer to see spurious interrupts to prevent this * first make sure that the interrupt is not asserted and then enable * it. */ zynq_gpio_irq_ack(irq_data); zynq_gpio_irq_unmask(irq_data); } /** * zynq_gpio_set_irq_type - Set the irq type for a gpio pin * @irq_data: irq data containing irq number of gpio pin * @type: interrupt type that is to be set for the gpio pin * * This function gets the gpio pin number and its bank from the gpio pin number * and configures the INT_TYPE, INT_POLARITY and INT_ANY registers. * * Return: 0, negative error otherwise. * TYPE-EDGE_RISING, INT_TYPE - 1, INT_POLARITY - 1, INT_ANY - 0; * TYPE-EDGE_FALLING, INT_TYPE - 1, INT_POLARITY - 0, INT_ANY - 0; * TYPE-EDGE_BOTH, INT_TYPE - 1, INT_POLARITY - NA, INT_ANY - 1; * TYPE-LEVEL_HIGH, INT_TYPE - 0, INT_POLARITY - 1, INT_ANY - NA; * TYPE-LEVEL_LOW, INT_TYPE - 0, INT_POLARITY - 0, INT_ANY - NA */ static int zynq_gpio_set_irq_type(struct irq_data *irq_data, unsigned int type) { u32 int_type, int_pol, int_any; unsigned int device_pin_num, bank_num, bank_pin_num; struct zynq_gpio *gpio = gpiochip_get_data(irq_data_get_irq_chip_data(irq_data)); device_pin_num = irq_data->hwirq; zynq_gpio_get_bank_pin(device_pin_num, &bank_num, &bank_pin_num, gpio); int_type = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); int_pol = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); int_any = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); /* * based on the type requested, configure the INT_TYPE, INT_POLARITY * and INT_ANY registers */ switch (type) { case IRQ_TYPE_EDGE_RISING: int_type |= BIT(bank_pin_num); int_pol |= BIT(bank_pin_num); int_any &= ~BIT(bank_pin_num); break; case IRQ_TYPE_EDGE_FALLING: int_type |= BIT(bank_pin_num); int_pol &= ~BIT(bank_pin_num); int_any &= ~BIT(bank_pin_num); break; case IRQ_TYPE_EDGE_BOTH: int_type |= BIT(bank_pin_num); int_any |= BIT(bank_pin_num); break; case IRQ_TYPE_LEVEL_HIGH: int_type &= ~BIT(bank_pin_num); int_pol |= BIT(bank_pin_num); break; case IRQ_TYPE_LEVEL_LOW: int_type &= ~BIT(bank_pin_num); int_pol &= ~BIT(bank_pin_num); break; default: return -EINVAL; } writel_relaxed(int_type, gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); writel_relaxed(int_pol, gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); writel_relaxed(int_any, gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); if (type & IRQ_TYPE_LEVEL_MASK) irq_set_chip_handler_name_locked(irq_data, &zynq_gpio_level_irqchip, handle_fasteoi_irq, NULL); else 
irq_set_chip_handler_name_locked(irq_data, &zynq_gpio_edge_irqchip, handle_level_irq, NULL); return 0; } static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on) { struct zynq_gpio *gpio = gpiochip_get_data(irq_data_get_irq_chip_data(data)); irq_set_irq_wake(gpio->irq, on); return 0; } static int zynq_gpio_irq_reqres(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); int ret; ret = pm_runtime_resume_and_get(chip->parent); if (ret < 0) return ret; return gpiochip_reqres_irq(chip, d->hwirq); } static void zynq_gpio_irq_relres(struct irq_data *d) { struct gpio_chip *chip = irq_data_get_irq_chip_data(d); gpiochip_relres_irq(chip, d->hwirq); pm_runtime_put(chip->parent); } /* irq chip descriptor */ static struct irq_chip zynq_gpio_level_irqchip = { .name = DRIVER_NAME, .irq_enable = zynq_gpio_irq_enable, .irq_eoi = zynq_gpio_irq_ack, .irq_mask = zynq_gpio_irq_mask, .irq_unmask = zynq_gpio_irq_unmask, .irq_set_type = zynq_gpio_set_irq_type, .irq_set_wake = zynq_gpio_set_wake, .irq_request_resources = zynq_gpio_irq_reqres, .irq_release_resources = zynq_gpio_irq_relres, .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | IRQCHIP_MASK_ON_SUSPEND, }; static struct irq_chip zynq_gpio_edge_irqchip = { .name = DRIVER_NAME, .irq_enable = zynq_gpio_irq_enable, .irq_ack = zynq_gpio_irq_ack, .irq_mask = zynq_gpio_irq_mask, .irq_unmask = zynq_gpio_irq_unmask, .irq_set_type = zynq_gpio_set_irq_type, .irq_set_wake = zynq_gpio_set_wake, .irq_request_resources = zynq_gpio_irq_reqres, .irq_release_resources = zynq_gpio_irq_relres, .flags = IRQCHIP_MASK_ON_SUSPEND, }; static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio, unsigned int bank_num, unsigned long pending) { unsigned int bank_offset = gpio->p_data->bank_min[bank_num]; struct irq_domain *irqdomain = gpio->chip.irq.domain; int offset; if (!pending) return; for_each_set_bit(offset, &pending, 32) { unsigned int gpio_irq; gpio_irq = irq_find_mapping(irqdomain, offset + bank_offset); generic_handle_irq(gpio_irq); } } /** * zynq_gpio_irqhandler - IRQ handler for the gpio banks of a gpio device * @desc: irq descriptor instance of the 'irq' * * This function reads the Interrupt Status Register of each bank to get the * gpio pin number which has triggered an interrupt. It then acks the triggered * interrupt and calls the pin specific handler set by the higher layer * application for that pin. * Note: A bug is reported if no handler is set for the gpio pin. 
*/ static void zynq_gpio_irqhandler(struct irq_desc *desc) { u32 int_sts, int_enb; unsigned int bank_num; struct zynq_gpio *gpio = gpiochip_get_data(irq_desc_get_handler_data(desc)); struct irq_chip *irqchip = irq_desc_get_chip(desc); chained_irq_enter(irqchip, desc); for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { int_sts = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTSTS_OFFSET(bank_num)); int_enb = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTMASK_OFFSET(bank_num)); zynq_gpio_handle_bank_irq(gpio, bank_num, int_sts & ~int_enb); if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL) bank_num = bank_num + VERSAL_UNUSED_BANKS; } chained_irq_exit(irqchip, desc); } static void zynq_gpio_save_context(struct zynq_gpio *gpio) { unsigned int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { gpio->context.datalsw[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); gpio->context.datamsw[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num)); gpio->context.dirm[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); gpio->context.int_en[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTMASK_OFFSET(bank_num)); gpio->context.int_type[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); gpio->context.int_polarity[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); gpio->context.int_any[bank_num] = readl_relaxed(gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL) bank_num = bank_num + VERSAL_UNUSED_BANKS; } } static void zynq_gpio_restore_context(struct zynq_gpio *gpio) { unsigned int bank_num; for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); writel_relaxed(gpio->context.datalsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); writel_relaxed(gpio->context.datamsw[bank_num], gpio->base_addr + ZYNQ_GPIO_DATA_MSW_OFFSET(bank_num)); writel_relaxed(gpio->context.dirm[bank_num], gpio->base_addr + ZYNQ_GPIO_DIRM_OFFSET(bank_num)); writel_relaxed(gpio->context.int_type[bank_num], gpio->base_addr + ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); writel_relaxed(gpio->context.int_polarity[bank_num], gpio->base_addr + ZYNQ_GPIO_INTPOL_OFFSET(bank_num)); writel_relaxed(gpio->context.int_any[bank_num], gpio->base_addr + ZYNQ_GPIO_INTANY_OFFSET(bank_num)); writel_relaxed(~(gpio->context.int_en[bank_num]), gpio->base_addr + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL) bank_num = bank_num + VERSAL_UNUSED_BANKS; } } static int __maybe_unused zynq_gpio_suspend(struct device *dev) { struct zynq_gpio *gpio = dev_get_drvdata(dev); struct irq_data *data = irq_get_irq_data(gpio->irq); if (!device_may_wakeup(dev)) disable_irq(gpio->irq); if (!irqd_is_wakeup_set(data)) { zynq_gpio_save_context(gpio); return pm_runtime_force_suspend(dev); } return 0; } static int __maybe_unused zynq_gpio_resume(struct device *dev) { struct zynq_gpio *gpio = dev_get_drvdata(dev); struct irq_data *data = irq_get_irq_data(gpio->irq); int ret; if (!device_may_wakeup(dev)) enable_irq(gpio->irq); if (!irqd_is_wakeup_set(data)) { ret = pm_runtime_force_resume(dev); zynq_gpio_restore_context(gpio); return ret; } return 0; } static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev) { struct zynq_gpio *gpio = dev_get_drvdata(dev); 
clk_disable_unprepare(gpio->clk); return 0; } static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev) { struct zynq_gpio *gpio = dev_get_drvdata(dev); return clk_prepare_enable(gpio->clk); } static int zynq_gpio_request(struct gpio_chip *chip, unsigned int offset) { int ret; ret = pm_runtime_get_sync(chip->parent); /* * If the device is already active pm_runtime_get() will return 1 on * success, but gpio_request still needs to return 0. */ return ret < 0 ? ret : 0; } static void zynq_gpio_free(struct gpio_chip *chip, unsigned int offset) { pm_runtime_put(chip->parent); } static const struct dev_pm_ops zynq_gpio_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume) SET_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend, zynq_gpio_runtime_resume, NULL) }; static const struct zynq_platform_data versal_gpio_def = { .label = "versal_gpio", .quirks = GPIO_QUIRK_VERSAL, .ngpio = 58, .max_bank = VERSAL_GPIO_MAX_BANK, .bank_min[0] = 0, .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */ .bank_min[3] = 26, .bank_max[3] = 57, /* Bank 3 is connected to FMIOs (32 pins) */ }; static const struct zynq_platform_data pmc_gpio_def = { .label = "pmc_gpio", .ngpio = 116, .max_bank = PMC_GPIO_MAX_BANK, .bank_min[0] = 0, .bank_max[0] = 25, /* 0 to 25 are connected to MIOs (26 pins) */ .bank_min[1] = 26, .bank_max[1] = 51, /* Bank 1 are connected to MIOs (26 pins) */ .bank_min[3] = 52, .bank_max[3] = 83, /* Bank 3 is connected to EMIOs (32 pins) */ .bank_min[4] = 84, .bank_max[4] = 115, /* Bank 4 is connected to EMIOs (32 pins) */ }; static const struct zynq_platform_data zynqmp_gpio_def = { .label = "zynqmp_gpio", .quirks = GPIO_QUIRK_DATA_RO_BUG, .ngpio = ZYNQMP_GPIO_NR_GPIOS, .max_bank = ZYNQMP_GPIO_MAX_BANK, .bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(MP), .bank_max[0] = ZYNQ_GPIO_BANK0_PIN_MAX(MP), .bank_min[1] = ZYNQ_GPIO_BANK1_PIN_MIN(MP), .bank_max[1] = ZYNQ_GPIO_BANK1_PIN_MAX(MP), .bank_min[2] = ZYNQ_GPIO_BANK2_PIN_MIN(MP), .bank_max[2] = ZYNQ_GPIO_BANK2_PIN_MAX(MP), .bank_min[3] = ZYNQ_GPIO_BANK3_PIN_MIN(MP), .bank_max[3] = ZYNQ_GPIO_BANK3_PIN_MAX(MP), .bank_min[4] = ZYNQ_GPIO_BANK4_PIN_MIN(MP), .bank_max[4] = ZYNQ_GPIO_BANK4_PIN_MAX(MP), .bank_min[5] = ZYNQ_GPIO_BANK5_PIN_MIN(MP), .bank_max[5] = ZYNQ_GPIO_BANK5_PIN_MAX(MP), }; static const struct zynq_platform_data zynq_gpio_def = { .label = "zynq_gpio", .quirks = ZYNQ_GPIO_QUIRK_IS_ZYNQ | GPIO_QUIRK_DATA_RO_BUG, .ngpio = ZYNQ_GPIO_NR_GPIOS, .max_bank = ZYNQ_GPIO_MAX_BANK, .bank_min[0] = ZYNQ_GPIO_BANK0_PIN_MIN(), .bank_max[0] = ZYNQ_GPIO_BANK0_PIN_MAX(), .bank_min[1] = ZYNQ_GPIO_BANK1_PIN_MIN(), .bank_max[1] = ZYNQ_GPIO_BANK1_PIN_MAX(), .bank_min[2] = ZYNQ_GPIO_BANK2_PIN_MIN(), .bank_max[2] = ZYNQ_GPIO_BANK2_PIN_MAX(), .bank_min[3] = ZYNQ_GPIO_BANK3_PIN_MIN(), .bank_max[3] = ZYNQ_GPIO_BANK3_PIN_MAX(), }; static const struct of_device_id zynq_gpio_of_match[] = { { .compatible = "xlnx,zynq-gpio-1.0", .data = &zynq_gpio_def }, { .compatible = "xlnx,zynqmp-gpio-1.0", .data = &zynqmp_gpio_def }, { .compatible = "xlnx,versal-gpio-1.0", .data = &versal_gpio_def }, { .compatible = "xlnx,pmc-gpio-1.0", .data = &pmc_gpio_def }, { /* end of table */ } }; MODULE_DEVICE_TABLE(of, zynq_gpio_of_match); /** * zynq_gpio_probe - Initialization method for a zynq_gpio device * @pdev: platform device instance * * This function allocates memory resources for the gpio device and registers * all the banks of the device. It will also set up interrupts for the gpio * pins. 
* Note: Interrupts are disabled for all the banks during initialization. * * Return: 0 on success, negative error otherwise. */ static int zynq_gpio_probe(struct platform_device *pdev) { int ret, bank_num; struct zynq_gpio *gpio; struct gpio_chip *chip; struct gpio_irq_chip *girq; const struct of_device_id *match; gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL); if (!gpio) return -ENOMEM; match = of_match_node(zynq_gpio_of_match, pdev->dev.of_node); if (!match) { dev_err(&pdev->dev, "of_match_node() failed\n"); return -EINVAL; } gpio->p_data = match->data; platform_set_drvdata(pdev, gpio); gpio->base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(gpio->base_addr)) return PTR_ERR(gpio->base_addr); gpio->irq = platform_get_irq(pdev, 0); if (gpio->irq < 0) return gpio->irq; /* configure the gpio chip */ chip = &gpio->chip; chip->label = gpio->p_data->label; chip->owner = THIS_MODULE; chip->parent = &pdev->dev; chip->get = zynq_gpio_get_value; chip->set = zynq_gpio_set_value; chip->request = zynq_gpio_request; chip->free = zynq_gpio_free; chip->direction_input = zynq_gpio_dir_in; chip->direction_output = zynq_gpio_dir_out; chip->get_direction = zynq_gpio_get_direction; chip->base = of_alias_get_id(pdev->dev.of_node, "gpio"); chip->ngpio = gpio->p_data->ngpio; /* Retrieve GPIO clock */ gpio->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(gpio->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(gpio->clk), "input clock not found.\n"); ret = clk_prepare_enable(gpio->clk); if (ret) { dev_err(&pdev->dev, "Unable to enable clock.\n"); return ret; } spin_lock_init(&gpio->dirlock); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) goto err_pm_dis; /* disable interrupts for all banks */ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); if (gpio->p_data->quirks & GPIO_QUIRK_VERSAL) bank_num = bank_num + VERSAL_UNUSED_BANKS; } /* Set up the GPIO irqchip */ girq = &chip->irq; girq->chip = &zynq_gpio_edge_irqchip; girq->parent_handler = zynq_gpio_irqhandler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { ret = -ENOMEM; goto err_pm_put; } girq->parents[0] = gpio->irq; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; /* report a bug if gpio chip registration fails */ ret = gpiochip_add_data(chip, gpio); if (ret) { dev_err(&pdev->dev, "Failed to add gpio chip\n"); goto err_pm_put; } irq_set_status_flags(gpio->irq, IRQ_DISABLE_UNLAZY); device_init_wakeup(&pdev->dev, 1); pm_runtime_put(&pdev->dev); return 0; err_pm_put: pm_runtime_put(&pdev->dev); err_pm_dis: pm_runtime_disable(&pdev->dev); clk_disable_unprepare(gpio->clk); return ret; } /** * zynq_gpio_remove - Driver removal function * @pdev: platform device instance * * Return: 0 always */ static int zynq_gpio_remove(struct platform_device *pdev) { struct zynq_gpio *gpio = platform_get_drvdata(pdev); pm_runtime_get_sync(&pdev->dev); gpiochip_remove(&gpio->chip); clk_disable_unprepare(gpio->clk); device_set_wakeup_capable(&pdev->dev, 0); pm_runtime_disable(&pdev->dev); return 0; } static struct platform_driver zynq_gpio_driver = { .driver = { .name = DRIVER_NAME, .pm = &zynq_gpio_dev_pm_ops, .of_match_table = zynq_gpio_of_match, }, .probe = zynq_gpio_probe, .remove = zynq_gpio_remove, }; /** * zynq_gpio_init - Initial driver registration call * * Return: 
value from platform_driver_register */ static int __init zynq_gpio_init(void) { return platform_driver_register(&zynq_gpio_driver); } postcore_initcall(zynq_gpio_init); static void __exit zynq_gpio_exit(void) { platform_driver_unregister(&zynq_gpio_driver); } module_exit(zynq_gpio_exit); MODULE_AUTHOR("Xilinx Inc."); MODULE_DESCRIPTION("Zynq GPIO driver"); MODULE_LICENSE("GPL");
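/*
 * Illustrative aside (not driver code): zynq_gpio_set_value() packs a write
 * mask and the data into a single MASK_DATA_{LSW,MSW} write. The upper 16
 * bits are the mask (a cleared bit selects the data bit that gets updated),
 * the lower 16 bits carry the data. Below is a standalone sketch of that
 * computation with the relevant constants mirrored from the driver; compile
 * it separately.
 */
#include <stdint.h>
#include <stdio.h>

#define MID_PIN_NUM	16		/* mirrors ZYNQ_GPIO_MID_PIN_NUM */
#define UPPER_MASK	0xFFFF0000u	/* mirrors ZYNQ_GPIO_UPPER_MASK */

static uint32_t mask_data_value(unsigned int bank_pin, int state)
{
	state = !!state;
	return ~(1u << (bank_pin + MID_PIN_NUM)) &
	       (((uint32_t)state << bank_pin) | UPPER_MASK);
}

int main(void)
{
	/*
	 * Drive pin 3 of the lower half-word high: mask bit 19 cleared,
	 * data bit 3 set -> prints 0xfff70008.
	 */
	printf("0x%08x\n", (unsigned)mask_data_value(3, 1));
	return 0;
}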
744903.c
/* * PSA persistent key storage */ /* Copyright (C) 2018, ARM Limited, All Rights Reserved * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This file is part of mbed TLS (https://tls.mbed.org) */ #if defined(MBEDTLS_CONFIG_FILE) #include MBEDTLS_CONFIG_FILE #else #include "mbedtls/config.h" #endif #if defined(MBEDTLS_PSA_CRYPTO_STORAGE_C) #include <stdlib.h> #include <string.h> #include "psa/crypto.h" #include "psa_crypto_storage.h" #include "psa_crypto_storage_backend.h" #include "mbedtls/platform_util.h" #if defined(MBEDTLS_PLATFORM_C) #include "mbedtls/platform.h" #else #define mbedtls_calloc calloc #define mbedtls_free free #endif /* * 32-bit integer manipulation macros (little endian) */ #ifndef GET_UINT32_LE #define GET_UINT32_LE(n,b,i) \ { \ (n) = ( (uint32_t) (b)[(i) ] ) \ | ( (uint32_t) (b)[(i) + 1] << 8 ) \ | ( (uint32_t) (b)[(i) + 2] << 16 ) \ | ( (uint32_t) (b)[(i) + 3] << 24 ); \ } #endif #ifndef PUT_UINT32_LE #define PUT_UINT32_LE(n,b,i) \ { \ (b)[(i) ] = (unsigned char) ( ( (n) ) & 0xFF ); \ (b)[(i) + 1] = (unsigned char) ( ( (n) >> 8 ) & 0xFF ); \ (b)[(i) + 2] = (unsigned char) ( ( (n) >> 16 ) & 0xFF ); \ (b)[(i) + 3] = (unsigned char) ( ( (n) >> 24 ) & 0xFF ); \ } #endif /** * Persistent key storage magic header. 
*/ #define PSA_KEY_STORAGE_MAGIC_HEADER "PSA\0KEY" #define PSA_KEY_STORAGE_MAGIC_HEADER_LENGTH ( sizeof( PSA_KEY_STORAGE_MAGIC_HEADER ) ) typedef struct { uint8_t magic[PSA_KEY_STORAGE_MAGIC_HEADER_LENGTH]; uint8_t version[4]; uint8_t type[sizeof( psa_key_type_t )]; uint8_t policy[sizeof( psa_key_policy_t )]; uint8_t data_len[4]; uint8_t key_data[]; } psa_persistent_key_storage_format; void psa_format_key_data_for_storage( const uint8_t *data, const size_t data_length, const psa_key_type_t type, const psa_key_policy_t *policy, uint8_t *storage_data ) { psa_persistent_key_storage_format *storage_format = (psa_persistent_key_storage_format *) storage_data; memcpy( storage_format->magic, PSA_KEY_STORAGE_MAGIC_HEADER, PSA_KEY_STORAGE_MAGIC_HEADER_LENGTH ); PUT_UINT32_LE(0, storage_format->version, 0); PUT_UINT32_LE(type, storage_format->type, 0); PUT_UINT32_LE(policy->usage, storage_format->policy, 0); PUT_UINT32_LE(policy->alg, storage_format->policy, sizeof( uint32_t )); PUT_UINT32_LE(data_length, storage_format->data_len, 0); memcpy( storage_format->key_data, data, data_length ); } static psa_status_t check_magic_header( const uint8_t *data ) { if( memcmp( data, PSA_KEY_STORAGE_MAGIC_HEADER, PSA_KEY_STORAGE_MAGIC_HEADER_LENGTH ) != 0 ) return( PSA_ERROR_STORAGE_FAILURE ); return( PSA_SUCCESS ); } psa_status_t psa_parse_key_data_from_storage( const uint8_t *storage_data, size_t storage_data_length, uint8_t **key_data, size_t *key_data_length, psa_key_type_t *type, psa_key_policy_t *policy ) { psa_status_t status; const psa_persistent_key_storage_format *storage_format = (const psa_persistent_key_storage_format *)storage_data; uint32_t version; if( storage_data_length < sizeof(*storage_format) ) return( PSA_ERROR_STORAGE_FAILURE ); status = check_magic_header( storage_data ); if( status != PSA_SUCCESS ) return( status ); GET_UINT32_LE(version, storage_format->version, 0); if( version != 0 ) return( PSA_ERROR_STORAGE_FAILURE ); GET_UINT32_LE(*key_data_length, storage_format->data_len, 0); if( *key_data_length > ( storage_data_length - sizeof(*storage_format) ) || *key_data_length > PSA_CRYPTO_MAX_STORAGE_SIZE ) return( PSA_ERROR_STORAGE_FAILURE ); *key_data = mbedtls_calloc( 1, *key_data_length ); if( *key_data == NULL ) return( PSA_ERROR_INSUFFICIENT_MEMORY ); GET_UINT32_LE(*type, storage_format->type, 0); GET_UINT32_LE(policy->usage, storage_format->policy, 0); GET_UINT32_LE(policy->alg, storage_format->policy, sizeof( uint32_t )); memcpy( *key_data, storage_format->key_data, *key_data_length ); return( PSA_SUCCESS ); } psa_status_t psa_save_persistent_key( const psa_key_id_t key, const psa_key_type_t type, const psa_key_policy_t *policy, const uint8_t *data, const size_t data_length ) { size_t storage_data_length; uint8_t *storage_data; psa_status_t status; if( data_length > PSA_CRYPTO_MAX_STORAGE_SIZE ) return PSA_ERROR_INSUFFICIENT_STORAGE; storage_data_length = data_length + sizeof( psa_persistent_key_storage_format ); storage_data = mbedtls_calloc( 1, storage_data_length ); if( storage_data == NULL ) return( PSA_ERROR_INSUFFICIENT_MEMORY ); psa_format_key_data_for_storage( data, data_length, type, policy, storage_data ); status = psa_crypto_storage_store( key, storage_data, storage_data_length ); mbedtls_free( storage_data ); return( status ); } void psa_free_persistent_key_data( uint8_t *key_data, size_t key_data_length ) { if( key_data != NULL ) { mbedtls_platform_zeroize( key_data, key_data_length ); } mbedtls_free( key_data ); } psa_status_t psa_load_persistent_key( psa_key_id_t 
key, psa_key_type_t *type, psa_key_policy_t *policy, uint8_t **data, size_t *data_length ) { psa_status_t status = PSA_SUCCESS; uint8_t *loaded_data; size_t storage_data_length = 0; status = psa_crypto_storage_get_data_length( key, &storage_data_length ); if( status != PSA_SUCCESS ) return( status ); loaded_data = mbedtls_calloc( 1, storage_data_length ); if( loaded_data == NULL ) return( PSA_ERROR_INSUFFICIENT_MEMORY ); status = psa_crypto_storage_load( key, loaded_data, storage_data_length ); if( status != PSA_SUCCESS ) goto exit; status = psa_parse_key_data_from_storage( loaded_data, storage_data_length, data, data_length, type, policy ); exit: mbedtls_free( loaded_data ); return( status ); } #endif /* MBEDTLS_PSA_CRYPTO_STORAGE_C */
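/*
 * Illustrative aside (not library code): the storage format above serialises
 * every multi-byte field with the little-endian GET/PUT_UINT32_LE macros so
 * the stored blob has a fixed byte order regardless of the host. Below is a
 * tiny standalone equivalent of the two macros, written as functions for
 * clarity; compile it separately.
 */
#include <stdint.h>
#include <stdio.h>

static void put_u32_le( uint32_t n, uint8_t *b )
{
    b[0] = (uint8_t)( n & 0xFF );
    b[1] = (uint8_t)( ( n >> 8 ) & 0xFF );
    b[2] = (uint8_t)( ( n >> 16 ) & 0xFF );
    b[3] = (uint8_t)( ( n >> 24 ) & 0xFF );
}

static uint32_t get_u32_le( const uint8_t *b )
{
    return( (uint32_t) b[0]
          | ( (uint32_t) b[1] << 8 )
          | ( (uint32_t) b[2] << 16 )
          | ( (uint32_t) b[3] << 24 ) );
}

int main( void )
{
    uint8_t buf[4];

    put_u32_le( 0x11223344u, buf );
    /* prints "44 33 22 11 -> 0x11223344" */
    printf( "%02x %02x %02x %02x -> 0x%08x\n",
            buf[0], buf[1], buf[2], buf[3], (unsigned) get_u32_le( buf ) );
    return( 0 );
}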
262590.c
/* * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ #include "native_thread.c" #include "nsk_tools.c" #include "jni_tools.c" #include "jvmti_tools.c" #include "agent_tools.c" #include "jvmti_FollowRefObjects.c" #include "Injector.c" #include "JVMTITools.c" #include "agent_common.c" #include "hs302t010.c"
465166.c
/* * data_pipe.c * * Created on: 3 Oct 2019 * Author: David */ #include "cetris.h" int init_data_pipe(struct data_pipe_t *data_pipe) { int error; // Save pipe filename strncpy(data_pipe->pipeName, "cetris.pipe", MAX_PIPE_NAME_LEN); // Test if the file exists if (access(data_pipe->pipeName, F_OK) == 0) { if (data_pipe_close(data_pipe) != 0) { return -1; } } // Create the pipe error = mkfifo(data_pipe->pipeName, S_IRWXU); if (error != 0) { log_error("Unable to create the pipe: %s", strerror(errno)); return -1; } return 0; } int data_pipe_close(struct data_pipe_t *data_pipe) { // Close and delete the pipe if (unlink(data_pipe->pipeName) != 0) { log_error("Unable to unlink the pipe: %s", strerror(errno)); return -1; } return 0; } int data_pipe_write(struct game_t *game) { struct board_t *board = &game->board; // Shortcut struct data_pipe_t *data_pipe = &game->data_pipe; // Shortcut struct tetri_t *tetri = &game->tetri_comming[ON_BOARD]; // Shortcut uint8_t y, x; uint8_t tetriY, tetriX; uint8_t color; uint16_t boardSize; uint8_t headers[2]; // Open the pipe int pipeFd = open(data_pipe->pipeName, O_WRONLY | O_NONBLOCK); if (pipeFd < 0) { // There is no consummer for the data, quit return -1; } // Copy the whole board table boardSize = board->w * board->h; memcpy(data_pipe->tableCpy, board->table, boardSize); for (y = 0; y < tetri->h; y++) { for (x = 0; x < tetri->w; x++) { color = tetri->shape[y * tetri->w + x]; if (color != FREE) { tetriX = (tetri->x + x) % game->board.w; tetriY = tetri->y + y; data_pipe->tableCpy[tetriY * board->w + tetriX] = color; } } } headers[0] = board->w; headers[1] = board->h; write(pipeFd, headers, sizeof(headers)); // Write in pipe write(pipeFd, data_pipe->tableCpy, boardSize); // Close the pipe close(pipeFd); return 0; }
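/*
 * Illustrative aside (not part of data_pipe.c): data_pipe_write() frames each
 * update as two one-byte headers (board width, board height) followed by
 * width * height colour bytes. A hypothetical consumer for that framing is
 * sketched below; compile it separately. Short reads and partial frames are
 * not handled in this sketch.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t hdr[2];
	static uint8_t board[256 * 256];
	size_t size;
	int fd;

	// Opening the FIFO for reading blocks until a writer appears
	fd = open("cetris.pipe", O_RDONLY);
	if (fd < 0) {
		return 1;
	}

	if (read(fd, hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) {
		close(fd);
		return 1;
	}

	size = (size_t)hdr[0] * hdr[1];
	if (read(fd, board, size) != (ssize_t)size) {
		close(fd);
		return 1;
	}

	printf("board %ux%u received\n", (unsigned)hdr[0], (unsigned)hdr[1]);

	close(fd);
	return 0;
}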
442620.c
/* emits an optimized version of LTC_SAFER+ ... only does encrypt so far... */ #include <stdio.h> #include <string.h> /* This is the "Armenian" Shuffle. It takes the input from b and stores it in b2 */ #define SHUF\ b2[0] = b[8]; b2[1] = b[11]; b2[2] = b[12]; b2[3] = b[15]; \ b2[4] = b[2]; b2[5] = b[1]; b2[6] = b[6]; b2[7] = b[5]; \ b2[8] = b[10]; b2[9] = b[9]; b2[10] = b[14]; b2[11] = b[13]; \ b2[12] = b[0]; b2[13] = b[7]; b2[14] = b[4]; b2[15] = b[3]; memcpy(b, b2, sizeof(b)); /* This is the inverse shuffle. It takes from b and gives to b2 */ #define iSHUF(b, b2) \ b2[0] = b[12]; b2[1] = b[5]; b2[2] = b[4]; b2[3] = b[15]; \ b2[4] = b[14]; b2[5] = b[7]; b2[6] = b[6]; b2[7] = b[13]; \ b2[8] = b[0]; b2[9] = b[9]; b2[10] = b[8]; b2[11] = b[1]; \ b2[12] = b[2]; b2[13] = b[11]; b2[14] = b[10]; b2[15] = b[3]; memcpy(b, b2, sizeof(b)); #define ROUND(b, i) \ b[0] = (safer_ebox[(b[0] ^ skey->saferp.K[i][0]) & 255] + skey->saferp.K[i+1][0]) & 255; \ b[1] = safer_lbox[(b[1] + skey->saferp.K[i][1]) & 255] ^ skey->saferp.K[i+1][1]; \ b[2] = safer_lbox[(b[2] + skey->saferp.K[i][2]) & 255] ^ skey->saferp.K[i+1][2]; \ b[3] = (safer_ebox[(b[3] ^ skey->saferp.K[i][3]) & 255] + skey->saferp.K[i+1][3]) & 255; \ b[4] = (safer_ebox[(b[4] ^ skey->saferp.K[i][4]) & 255] + skey->saferp.K[i+1][4]) & 255; \ b[5] = safer_lbox[(b[5] + skey->saferp.K[i][5]) & 255] ^ skey->saferp.K[i+1][5]; \ b[6] = safer_lbox[(b[6] + skey->saferp.K[i][6]) & 255] ^ skey->saferp.K[i+1][6]; \ b[7] = (safer_ebox[(b[7] ^ skey->saferp.K[i][7]) & 255] + skey->saferp.K[i+1][7]) & 255; \ b[8] = (safer_ebox[(b[8] ^ skey->saferp.K[i][8]) & 255] + skey->saferp.K[i+1][8]) & 255; \ b[9] = safer_lbox[(b[9] + skey->saferp.K[i][9]) & 255] ^ skey->saferp.K[i+1][9]; \ b[10] = safer_lbox[(b[10] + skey->saferp.K[i][10]) & 255] ^ skey->saferp.K[i+1][10]; \ b[11] = (safer_ebox[(b[11] ^ skey->saferp.K[i][11]) & 255] + skey->saferp.K[i+1][11]) & 255; \ b[12] = (safer_ebox[(b[12] ^ skey->saferp.K[i][12]) & 255] + skey->saferp.K[i+1][12]) & 255; \ b[13] = safer_lbox[(b[13] + skey->saferp.K[i][13]) & 255] ^ skey->saferp.K[i+1][13]; \ b[14] = safer_lbox[(b[14] + skey->saferp.K[i][14]) & 255] ^ skey->saferp.K[i+1][14]; \ b[15] = (safer_ebox[(b[15] ^ skey->saferp.K[i][15]) & 255] + skey->saferp.K[i+1][15]) & 255; int main(void) { int b[16], b2[16], x, y, z; /* -- ENCRYPT --- */ for (x = 0; x < 16; x++) b[x] = x; /* emit encrypt preabmle */ printf( "void saferp_ecb_encrypt(const unsigned char *pt, unsigned char *ct, symmetric_key *skey)\n" "{\n" " int x;\n" " unsigned char b[16];\n" "\n" " LTC_ARGCHK(pt != NULL);\n" " LTC_ARGCHK(ct != NULL);\n" " LTC_ARGCHK(skey != NULL);\n" "\n" " /* do eight rounds */\n" " for (x = 0; x < 16; x++) {\n" " b[x] = pt[x];\n" " }\n"); /* do 8 rounds of ROUND; LT; */ for (x = 0; x < 8; x++) { /* ROUND(..., x*2) */ for (y = 0; y < 16; y++) { printf("b[%d] = (safer_%cbox[(b[%d] %c skey->saferp.K[%d][%d]) & 255] %c skey->saferp.K[%d][%d]) & 255;\n", b[y], "elle"[y&3], b[y], "^++^"[y&3], x*2, y, "+^^+"[y&3], x*2+1, y); } /* LT */ for (y = 0; y < 4; y++) { printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[0], b[0], b[1], b[0], b[1]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[2], b[2], b[3], b[3], b[2]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[4], b[4], b[5], b[5], b[4]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[6], b[6], b[7], b[7], b[6]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[8], b[8], b[9], 
b[9], b[8]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[10], b[10], b[11], b[11], b[10]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[12], b[12], b[13], b[13], b[12]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[14], b[14], b[15], b[15], b[14]); if (y < 3) { SHUF; } } } printf( " if (skey->saferp.rounds <= 8) {\n"); /* finish */ for (x = 0; x < 16; x++) { printf( " ct[%d] = (b[%d] %c skey->saferp.K[skey->saferp.rounds*2][%d]) & 255;\n", x, b[x], "^++^"[x&3], x); } printf(" return;\n }\n"); /* 192-bit keys */ printf( " /* 192-bit key? */\n" " if (skey->saferp.rounds > 8) {\n"); /* do 4 rounds of ROUND; LT; */ for (x = 8; x < 12; x++) { /* ROUND(..., x*2) */ for (y = 0; y < 16; y++) { printf("b[%d] = (safer_%cbox[(b[%d] %c skey->saferp.K[%d][%d]) & 255] %c skey->saferp.K[%d][%d]) & 255;\n", b[y], "elle"[y&3], b[y], "^++^"[y&3], x*2, y, "+^^+"[y&3], x*2+1, y); } /* LT */ for (y = 0; y < 4; y++) { printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[0], b[0], b[1], b[0], b[1]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[2], b[2], b[3], b[3], b[2]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[4], b[4], b[5], b[5], b[4]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[6], b[6], b[7], b[7], b[6]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[8], b[8], b[9], b[9], b[8]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[10], b[10], b[11], b[11], b[10]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[12], b[12], b[13], b[13], b[12]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[14], b[14], b[15], b[15], b[14]); if (y < 3) { SHUF; } } } printf("}\n"); printf( " if (skey->saferp.rounds <= 12) {\n"); /* finish */ for (x = 0; x < 16; x++) { printf( " ct[%d] = (b[%d] %c skey->saferp.K[skey->saferp.rounds*2][%d]) & 255;\n", x, b[x], "^++^"[x&3], x); } printf(" return;\n }\n"); /* 256-bit keys */ printf( " /* 256-bit key? 
*/\n" " if (skey->saferp.rounds > 12) {\n"); /* do 4 rounds of ROUND; LT; */ for (x = 12; x < 16; x++) { /* ROUND(..., x*2) */ for (y = 0; y < 16; y++) { printf("b[%d] = (safer_%cbox[(b[%d] %c skey->saferp.K[%d][%d]) & 255] %c skey->saferp.K[%d][%d]) & 255;\n", b[y], "elle"[y&3], b[y], "^++^"[y&3], x*2, y, "+^^+"[y&3], x*2+1, y); } /* LT */ for (y = 0; y < 4; y++) { printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[0], b[0], b[1], b[0], b[1]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[2], b[2], b[3], b[3], b[2]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[4], b[4], b[5], b[5], b[4]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[6], b[6], b[7], b[7], b[6]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[8], b[8], b[9], b[9], b[8]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[10], b[10], b[11], b[11], b[10]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[12], b[12], b[13], b[13], b[12]); printf(" b[%d] = (b[%d] + (b[%d] = (b[%d] + b[%d]) & 255)) & 255;\n", b[14], b[14], b[15], b[15], b[14]); if (y < 3) { SHUF; } } } /* finish */ for (x = 0; x < 16; x++) { printf( " ct[%d] = (b[%d] %c skey->saferp.K[skey->saferp.rounds*2][%d]) & 255;\n", x, b[x], "^++^"[x&3], x); } printf(" return;\n"); printf(" }\n}\n\n"); return 0; } /* ref: tag: v1.18.2, master */ /* git commit: 7e7eb695d581782f04b24dc444cbfde86af59853 */ /* commit time: 2018-07-01 22:49:01 +0200 */
602472.c
/** @file Play beep. Copyright (c) 2020, vit9696. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. **/ #include <Uefi.h> #include <Library/BaseMemoryLib.h> #include <Library/OcDebugLogLib.h> #include <Library/MemoryAllocationLib.h> #include <Library/OcConsoleLib.h> #include <Library/OcMiscLib.h> #include <Library/UefiApplicationEntryPoint.h> #include <Library/UefiBootServicesTableLib.h> #include <Library/UefiLib.h> #include <Protocol/DevicePath.h> #include <Protocol/AppleHda.h> #include <Protocol/AppleBeepGen.h> #include <Protocol/ShellParameters.h> STATIC EFI_STATUS GetArguments ( OUT UINTN *Argc, OUT CHAR16 ***Argv ) { EFI_STATUS Status; EFI_SHELL_PARAMETERS_PROTOCOL *ShellParameters; Status = gBS->HandleProtocol ( gImageHandle, &gEfiShellParametersProtocolGuid, (VOID**) &ShellParameters ); if (EFI_ERROR (Status)) { return Status; } *Argc = ShellParameters->Argc; *Argv = ShellParameters->Argv; return EFI_SUCCESS; } EFI_STATUS EFIAPI UefiMain ( IN EFI_HANDLE ImageHandle, IN EFI_SYSTEM_TABLE *SystemTable ) { EFI_STATUS Status; UINTN Argc; CHAR16 **Argv; APPLE_HIGH_DEFINITION_AUDIO_PROTOCOL *HdaProtocol; APPLE_BEEP_GEN_PROTOCOL *BeepGenProtocol; UINTN Count; UINTN Signal; UINTN Silence; UINTN Frequency; gBS->SetWatchdogTimer (0, 0, 0, NULL); OcProvideConsoleGop (FALSE); OcConsoleControlSetMode (EfiConsoleControlScreenText); OcSetConsoleResolution (0, 0, 0); Status = GetArguments (&Argc, &Argv); if (EFI_ERROR (Status) || Argc < 5) { Print (L"Usage: ChipTune <any|hda|beep> <count> <signal> <silence> [<frequency>]\n"); return EFI_SUCCESS; } Status = StrDecimalToUintnS (Argv[2], NULL, &Count); if (EFI_ERROR (Status)) { Print (L"Invalid count value - %r\n", Status); return EFI_SUCCESS; } Status = StrDecimalToUintnS (Argv[3], NULL, &Signal); if (EFI_ERROR (Status)) { Print (L"Invalid signal length value - %r\n", Status); return EFI_SUCCESS; } Status = StrDecimalToUintnS (Argv[4], NULL, &Silence); if (EFI_ERROR (Status)) { Print (L"Invalid silence length value - %r\n", Status); return EFI_SUCCESS; } if (Argc >= 6) { Status = StrDecimalToUintnS (Argv[5], NULL, &Frequency); if (EFI_ERROR (Status)) { Print (L"Invalid frequency value - %r\n", Status); return EFI_SUCCESS; } } else { Frequency = 0; } HdaProtocol = NULL; BeepGenProtocol = NULL; if (StrCmp (Argv[1], L"any") == 0 || StrCmp (Argv[1], L"beep") == 0) { Status = gBS->LocateProtocol ( &gAppleBeepGenProtocolGuid, NULL, (VOID **) &BeepGenProtocol ); if (EFI_ERROR (Status) || BeepGenProtocol->GenBeep == NULL) { Print (L"Beep protocol is unusable - %r\n", Status); BeepGenProtocol = NULL; } } if (BeepGenProtocol == NULL && (StrCmp (Argv[1], L"any") == 0 || StrCmp (Argv[1], L"hda") == 0)) { Status = gBS->LocateProtocol ( &gAppleHighDefinitionAudioProtocolGuid, NULL, (VOID **) &HdaProtocol ); if (EFI_ERROR (Status) || HdaProtocol->PlayTone == NULL) { Print (L"HDA protocol is unusable - %r\n", Status); HdaProtocol = NULL; } } Print ( L"Trying playback %u %Lu %Lu %d\n", (UINT32) Count, (UINT64) Signal, (UINT64) Silence, (UINT64) Frequency ); if (BeepGenProtocol != NULL) { Status = BeepGenProtocol->GenBeep ( Count, Signal, Silence ); } else if (HdaProtocol != NULL) { Status = 
HdaProtocol->PlayTone ( HdaProtocol, (UINT32) Count, Signal, Silence, Frequency ); } else { Status = EFI_UNSUPPORTED; } if (EFI_ERROR (Status)) { Print (L"Playback failure - %r\n", Status); } return EFI_SUCCESS; }
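/*
 * Illustrative aside (plain C, not UEFI code): the tool above prefers the
 * BeepGen-style backend when "beep" or "any" is requested and falls back to
 * the HDA-style backend only if the first one is unusable. Below is a
 * standalone sketch of that selection logic with hypothetical stand-in
 * backends; compile it separately.
 */
#include <stdio.h>
#include <string.h>

typedef int (*PLAY_FN)(unsigned Count, unsigned Signal, unsigned Silence);

static int BeepPlay(unsigned Count, unsigned Signal, unsigned Silence)
{
  printf("beep x%u (%u on / %u off)\n", Count, Signal, Silence);
  return 0;
}

static int HdaPlay(unsigned Count, unsigned Signal, unsigned Silence)
{
  printf("hda x%u (%u on / %u off)\n", Count, Signal, Silence);
  return 0;
}

static PLAY_FN PickBackend(const char *Mode, int BeepUsable, int HdaUsable)
{
  PLAY_FN Fn = NULL;

  if ((strcmp(Mode, "any") == 0 || strcmp(Mode, "beep") == 0) && BeepUsable) {
    Fn = BeepPlay;
  }
  if (Fn == NULL && (strcmp(Mode, "any") == 0 || strcmp(Mode, "hda") == 0) && HdaUsable) {
    Fn = HdaPlay;
  }
  return Fn;
}

int main(void)
{
  /* BeepGen missing, HDA present: "any" falls back to the HDA backend. */
  PLAY_FN Fn = PickBackend("any", 0, 1);

  return (Fn != NULL) ? Fn(3, 10, 10) : 1;
}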
936496.c
/* * main.c * * this is an example of button debouncing in an rtos * * author: Alex Shenfield * date: 04/02/2019 * purpose: 55-604481 embedded computer networks */ // include the basic headers for the hal drivers and the rtos library #include "stm32f7xx_hal.h" #include "cmsis_os.h" // include the shu bsp libraries for the stm32f7 discovery board #include "pinmappings.h" #include "clock.h" #include "gpio.h" // include the itm debugging #include "itm_debug.h" // BUTTON // define the button gpio_pin_t pb1 = {PB_8, GPIOB, GPIO_PIN_8}; // declare a timer callback and a timer void test_for_button_press(void const *arg); osTimerDef(button, test_for_button_press); // lets use an led as a message indicator gpio_pin_t led = {PI_3, GPIOI, GPIO_PIN_3}; // OVERRIDE HAL DELAY // make HAL_Delay point to osDelay (otherwise any use of HAL_Delay breaks things) void HAL_Delay(__IO uint32_t Delay) { osDelay(Delay); } // CODE // this is the main method int main() { // initialise the real time kernel osKernelInitialize(); // we need to initialise the hal library and set up the SystemCoreClock // properly HAL_Init(); init_sysclk_216MHz(); // note also that we need to set the correct core clock in the rtx_conf_cm.c // file (OS_CLOCK) which we can do using the configuration wizard // initialise our button init_gpio(pb1, INPUT); init_gpio(led, OUTPUT); // start our timer for button debouncing osTimerId timer_1 = osTimerCreate(osTimer(button), osTimerPeriodic, NULL); osTimerStart(timer_1, 5); // start everything running osKernelStart(); } // BUTTON // button debouncer (implemented as a timer callback) void test_for_button_press(void const *args) { // 8 bits of button history static uint8_t button_history = 0xFF; // every time this timer callback is called we shift the button history // across and update the state button_history = button_history << 1; uint8_t val = read_gpio(pb1); button_history = button_history | val; // use some simple pattern matching to see if the button has been pressed - // if so, reset the button history and send a message ... if((button_history & 0xC7) == 0x07) { // toggle the led to indicate whats going on toggle_gpio(led); // reset button history button_history = 0xFF; print_debug("button pressed", 14); } }
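/*
 * Illustrative aside (host-side, no GPIO): the debouncer above matches the
 * bit pattern (history & 0b11000111) == 0b00000111 - the two oldest samples
 * must be 0, the three newest must be 1, and the bouncy middle samples are
 * ignored, so a debounced 0-to-1 edge is reported exactly once. Below is a
 * standalone simulation of that logic with a hypothetical sample stream;
 * compile it separately.
 */
#include <stdint.h>
#include <stdio.h>

// 8 bits of button history, seeded the same way as the firmware
static uint8_t button_history = 0xFF;

// shift in one raw sample; return 1 when a debounced press is detected
static int update_history(uint8_t sample)
{
  button_history = (uint8_t)((button_history << 1) | (sample & 1));

  if((button_history & 0xC7) == 0x07)
  {
    // reset button history so the press is only reported once
    button_history = 0xFF;
    return 1;
  }
  return 0;
}

int main(void)
{
  // released, a bouncy transition, then a steady press - the match fires on
  // the final sample ("press detected at sample 7")
  const uint8_t samples[] = {0, 0, 1, 0, 1, 1, 1, 1};
  unsigned i;

  for(i = 0; i < sizeof(samples); i++)
  {
    if(update_history(samples[i]))
    {
      printf("press detected at sample %u\n", i);
    }
  }
  return 0;
}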
625334.c
#include "crypto_sign/sphincs256/ref/api.h" #include "randombytes.h" int crypto_sign_sphincs_keypair (unsigned char *pk, unsigned char *sk); int crypto_sign_sphincs ( unsigned char *sm, unsigned long long *smlen, const unsigned char *m, unsigned long long mlen, const unsigned char *sk ); int crypto_sign_sphincs_open( unsigned char *m, unsigned long long *mlen, const unsigned char *sm, unsigned long long smlen, const unsigned char *pk ); void sphincsjs_init () { randombytes_stir(); } long sphincsjs_public_key_bytes () { return CRYPTO_PUBLICKEYBYTES; } long sphincsjs_secret_key_bytes () { return CRYPTO_SECRETKEYBYTES; } long sphincsjs_signature_bytes () { return CRYPTO_BYTES; } long sphincsjs_keypair ( uint8_t* public_key, uint8_t* private_key ) { return crypto_sign_sphincs_keypair(public_key, private_key); } long sphincsjs_open ( uint8_t *m, unsigned long long *mlen, const uint8_t *sm, unsigned long smlen, const uint8_t *pk ) { return crypto_sign_sphincs_open(m, mlen, sm, smlen, pk); } long sphincsjs_sign ( uint8_t *sm, unsigned long long *smlen, const uint8_t *m, unsigned long mlen, const uint8_t *sk ) { return crypto_sign_sphincs(sm, smlen, m, mlen, sk); }
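/*
 * Illustrative sketch (not part of the original wrapper): how the exported
 * sphincsjs_* calls above compose into a keypair / sign / verify round trip.
 * It assumes the wrapper is linked against the SPHINCS-256 reference code as
 * above; buffer sizes come from the accessor functions, and the message and
 * function name here are arbitrary placeholders.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int sphincsjs_roundtrip_sketch(void)
{
  unsigned long long smlen = 0, mlen = 0;
  const unsigned char msg[] = "hello";
  int ok = -1;

  uint8_t *pk = malloc(sphincsjs_public_key_bytes());
  uint8_t *sk = malloc(sphincsjs_secret_key_bytes());
  uint8_t *sm = malloc(sphincsjs_signature_bytes() + sizeof(msg));
  uint8_t *m  = malloc(sphincsjs_signature_bytes() + sizeof(msg));

  if (pk && sk && sm && m) {
    sphincsjs_init();
    if (sphincsjs_keypair(pk, sk) == 0 &&
        sphincsjs_sign(sm, &smlen, msg, sizeof(msg), sk) == 0 &&
        sphincsjs_open(m, &mlen, sm, (unsigned long)smlen, pk) == 0 &&
        mlen == sizeof(msg) && memcmp(m, msg, sizeof(msg)) == 0) {
      ok = 0; /* signature verified and message recovered intact */
    }
  }

  free(pk); free(sk); free(sm); free(m);
  return ok;
}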
263618.c
/* Copyright 2006 - 2011 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef _WIN32_WCE #define _CRTDBG_MAP_ALLOC #include <math.h> #include <winerror.h> #include <stdlib.h> #include <stdio.h> #include <stddef.h> #include <string.h> #include <winsock.h> #include <wininet.h> #include <windows.h> #include <winioctl.h> #include <winbase.h> #elif WIN32 #define _CRTDBG_MAP_ALLOC #include <math.h> #include <winerror.h> #include <stdlib.h> #include <stdio.h> #include <stddef.h> #include <string.h> #ifdef WINSOCK2 #include <winsock2.h> #include <ws2tcpip.h> #else #include <winsock.h> #include <wininet.h> #endif #include <windows.h> #include <winioctl.h> #include <winbase.h> #include <crtdbg.h> #elif _POSIX #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <sys/time.h> #include <netdb.h> #include <string.h> #include <sys/ioctl.h> #include <net/if.h> #include <sys/utsname.h> #include <sys/socket.h> #include <netinet/in.h> #include <unistd.h> #include <fcntl.h> #include <errno.h> #endif #include "ILibParsers.h" #include "ILibAsyncSocket.h" #include "Utility.h" #ifdef _WIN32_WCE #define strncasecmp(x,y,z) _strnicmp(x,y,z) #define gettimeofday(x,y) (x)->tv_sec = GetTickCount()/1000 #define sem_t HANDLE #define sem_init(x,pshared,pvalue) *x=CreateSemaphore(NULL,pvalue,FD_SETSIZE,NULL) #define sem_destroy(x) (CloseHandle(*x)==0?1:0) #define sem_wait(x) WaitForSingleObject(*x,INFINITE) #define sem_trywait(x) ((WaitForSingleObject(*x,0)==WAIT_OBJECT_0)?0:1) #define sem_post(x) ReleaseSemaphore(*x,1,NULL) #elif WIN32 #include <errno.h> #define strncasecmp(x,y,z) _strnicmp(x,y,z) #define gettimeofday(x,y) (x)->tv_sec = GetTickCount()/1000 #define sem_t HANDLE #define sem_init(x,y,z) *x=CreateSemaphore(NULL,z,FD_SETSIZE,NULL) #define sem_destroy(x) (CloseHandle(*x)==0?1:0) #define sem_wait(x) WaitForSingleObject(*x,INFINITE) #define sem_trywait(x) ((WaitForSingleObject(*x,0)==WAIT_OBJECT_0)?0:1) #define sem_post(x) ReleaseSemaphore(*x,1,NULL) #elif _POSIX #include <errno.h> #include <semaphore.h> extern int errno; #endif #define DEBUGSTATEMENT(x) struct AsyncSocket_SendData { char* buffer; int bufferSize; int bytesSent; int UserFree; struct AsyncSocket_SendData *Next; }; struct AsyncSocketModule { void (*PreSelect)(void* object,fd_set *readset, fd_set *writeset, fd_set *errorset, int* blocktime); void (*PostSelect)(void* object,int slct, fd_set *readset, fd_set *writeset, fd_set *errorset); void (*Destroy)(void* object); void *Chain; unsigned int PendingBytesToSend; unsigned int TotalBytesSent; #ifdef _WIN32_WCE SOCKET internalSocket; #elif WIN32 SOCKET internalSocket; #elif _POSIX int internalSocket; #endif int RemoteIPAddress; int LocalIPAddress; void(*OnData)(void* socketModule,char* buffer,int *p_beginPointer, int endPointer,void (**InterruptPtr)(void *socketModule, void *user), void **user, int *PAUSE); void(*OnConnect)(void* socketModule, int OK, void *user); void(*OnDisconnect)(void* socketModule, void *user); 
void(*OnSendOK)(void *socketModule, void *user); void(*OnInterrupt)(void *socketModule, void *user); void *user; int IsFree; int PAUSE; int FinConnect; int BeginPointer; int EndPointer; char* buffer; int MallocSize; int InitialSize; struct AsyncSocket_SendData *PendingSend_Head; struct AsyncSocket_SendData *PendingSend_Tail; sem_t SendLock; }; void ILibAsyncSocket_PostSelect(void* object,int slct, fd_set *readset, fd_set *writeset, fd_set *errorset); void ILibAsyncSocket_PreSelect(void* object,fd_set *readset, fd_set *writeset, fd_set *errorset, int* blocktime); void ILibAsyncSocket_Destroy(void *socketModule) { struct AsyncSocketModule* module = (struct AsyncSocketModule*)socketModule; struct AsyncSocket_SendData *temp,*current; if(module->internalSocket!=~0) { #ifdef _WIN32_WCE closesocket(module->internalSocket); #elif WIN32 closesocket(module->internalSocket); #elif _POSIX close(module->internalSocket); #endif } if(module->IsFree==0) { if(module->OnInterrupt!=NULL) { module->OnInterrupt(module,module->user); } } if(module->buffer!=NULL) { FREE(module->buffer); module->buffer = NULL; module->MallocSize = 0; } temp=current=module->PendingSend_Head; while(current!=NULL) { temp = current->Next; if(current->UserFree==0) { FREE(current->buffer); } FREE(current); current = temp; } sem_destroy(&(module->SendLock)); } void* ILibCreateAsyncSocketModule(void *Chain, int initialBufferSize, void(*OnData)(void* socketModule,char* buffer,int *p_beginPointer, int endPointer, void (**InterruptPtr)(void *socketModule, void *user),void **user, int *PAUSE), void(*OnConnect)(void* socketModule, int Connected, void *user),void(*OnDisconnect)(void* socketModule, void *user),void(*OnSendOK)(void *socketModule, void *user)) { struct AsyncSocketModule *RetVal = (struct AsyncSocketModule*)MALLOC(sizeof(struct AsyncSocketModule)); memset(RetVal,0,sizeof(struct AsyncSocketModule)); RetVal->PreSelect = &ILibAsyncSocket_PreSelect; RetVal->PostSelect = &ILibAsyncSocket_PostSelect; RetVal->Destroy = &ILibAsyncSocket_Destroy; RetVal->IsFree = 1; RetVal->internalSocket = -1; RetVal->OnData = OnData; RetVal->OnConnect = OnConnect; RetVal->OnDisconnect = OnDisconnect; RetVal->OnSendOK = OnSendOK; RetVal->buffer = (char*)MALLOC(initialBufferSize); RetVal->InitialSize = initialBufferSize; RetVal->MallocSize = initialBufferSize; sem_init(&(RetVal->SendLock),0,1); RetVal->Chain = Chain; ILibAddToChain(Chain,RetVal); return((void*)RetVal); } void ILibAsyncSocket_ClearPendingSend(void *socketModule) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; struct AsyncSocket_SendData *data,*temp; data = module->PendingSend_Head; module->PendingSend_Tail = NULL; while(data!=NULL) { temp = data->Next; if(data->UserFree==0) { FREE(data->buffer); } FREE(data); data = temp; } module->PendingSend_Head = NULL; } int ILibAsyncSocket_Send(void* socketModule, char* buffer, int length, enum ILibAsyncSocket_MemoryOwnership UserFree) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; struct AsyncSocket_SendData *data = (struct AsyncSocket_SendData*)MALLOC(sizeof(struct AsyncSocket_SendData)); int unblock=0; int bytesSent; data->buffer = buffer; data->bufferSize = length; data->bytesSent = 0; data->UserFree = UserFree; data->Next = NULL; sem_wait(&(module->SendLock)); if(module->internalSocket==~0) { // Too Bad, the socket closed if(UserFree==0){FREE(buffer);} FREE(data); sem_post(&(module->SendLock)); return(ILibAsyncSocket_SEND_ON_CLOSED_SOCKET_ERROR); } module->PendingBytesToSend += length; 
if(module->PendingSend_Tail!=NULL) { module->PendingSend_Tail->Next = data; module->PendingSend_Tail = data; unblock=1; if(UserFree==ILibAsyncSocket_MemoryOwnership_USER) { data->buffer = (char*)MALLOC(data->bufferSize); memcpy(data->buffer,buffer,length); data->UserFree = ILibAsyncSocket_MemoryOwnership_CHAIN; } } else { module->PendingSend_Tail = data; module->PendingSend_Head = data; bytesSent = send(module->internalSocket,module->PendingSend_Head->buffer+module->PendingSend_Head->bytesSent,module->PendingSend_Head->bufferSize-module->PendingSend_Head->bytesSent,0); if(bytesSent>0) { module->PendingSend_Head->bytesSent+=bytesSent; module->PendingBytesToSend -= bytesSent; module->TotalBytesSent += bytesSent; } if(bytesSent==-1) { // Send Failed #ifdef _WIN32_WCE bytesSent = WSAGetLastError(); if(bytesSent!=WSAEWOULDBLOCK) #elif WIN32 bytesSent = WSAGetLastError(); if(bytesSent!=WSAEWOULDBLOCK) #else if(errno!=EWOULDBLOCK) #endif { if(UserFree==0){FREE(buffer);} module->PendingSend_Head = module->PendingSend_Tail = NULL; FREE(data); sem_post(&(module->SendLock)); ILibAsyncSocket_Disconnect(socketModule); return(ILibAsyncSocket_SEND_ON_CLOSED_SOCKET_ERROR); } } if(module->PendingSend_Head->bytesSent==module->PendingSend_Head->bufferSize) { if(UserFree==0){FREE(module->PendingSend_Head->buffer);} module->PendingSend_Tail = NULL; FREE(module->PendingSend_Head); module->PendingSend_Head = NULL; } else { if(UserFree==ILibAsyncSocket_MemoryOwnership_USER) { data->buffer = (char*)MALLOC(data->bufferSize); memcpy(data->buffer,buffer,length); data->UserFree = ILibAsyncSocket_MemoryOwnership_CHAIN; } unblock = 1; } } sem_post(&(module->SendLock)); if(unblock!=0) {ILibForceUnBlockChain(module->Chain);} return(unblock); } void ILibAsyncSocket_Disconnect(void* socketModule) { #ifdef _WIN32_WCE SOCKET s; #elif WIN32 SOCKET s; #elif _POSIX int s; #endif struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; module->IsFree = 1; module->PAUSE = 1; s = module->internalSocket; module->internalSocket = ~0; if(s!=-1) { #ifdef _WIN32_WCE closesocket(s); #elif WIN32 closesocket(s); #elif _POSIX close(s); #endif } sem_wait(&(module->SendLock)); ILibAsyncSocket_ClearPendingSend(socketModule); sem_post(&(module->SendLock)); if(module->OnDisconnect!=NULL) { module->OnDisconnect(module,module->user); } } void ILibAsyncSocket_ConnectTo(void* socketModule, int localInterface, int remoteInterface, int remotePortNumber, void (*InterruptPtr)(void *socketModule, void *user),void *user) { int flags; struct sockaddr_in addr; struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; module->PendingBytesToSend = 0; module->TotalBytesSent = 0; module->IsFree = 0; module->PAUSE = 0; module->user = user; module->OnInterrupt = InterruptPtr; module->buffer = (char*)realloc(module->buffer,module->InitialSize); module->MallocSize = module->InitialSize; memset((char *)&addr, 0,sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = remoteInterface; #ifdef _WIN32_WCE addr.sin_port = htons((unsigned short)remotePortNumber); #elif WIN32 addr.sin_port = htons(remotePortNumber); #elif _POSIX addr.sin_port = htons(remotePortNumber); #endif if(module->internalSocket==-1) { #ifdef WINSOCK2 ILibGetStreamSocket(localInterface,0,(HANDLE*)&(module->internalSocket)); #else ILibGetStreamSocket(localInterface,0,&(module->internalSocket)); #endif } module->FinConnect = 0; module->BeginPointer = 0; module->EndPointer = 0; #ifdef _WIN32_WCE flags = 1; ioctlsocket(module->internalSocket,FIONBIO,&flags); 
#elif WIN32 flags = 1; ioctlsocket(module->internalSocket,FIONBIO,&flags); #elif _POSIX flags = fcntl(module->internalSocket,F_GETFL,0); fcntl(module->internalSocket,F_SETFL,O_NONBLOCK|flags); #endif connect(module->internalSocket,(struct sockaddr*)&addr,sizeof(addr)); ILibForceUnBlockChain(module->Chain); } void ILibProcessAsyncSocket(struct AsyncSocketModule *Reader) { int bytesReceived; while(Reader->PAUSE==0 && Reader->BeginPointer!=Reader->EndPointer && Reader->BeginPointer!=0) { memcpy(Reader->buffer,Reader->buffer+Reader->BeginPointer,Reader->EndPointer-Reader->BeginPointer); Reader->EndPointer = Reader->EndPointer-Reader->BeginPointer; Reader->BeginPointer = 0; if(Reader->OnData!=NULL) { Reader->OnData(Reader,Reader->buffer,&(Reader->BeginPointer),Reader->EndPointer,&(Reader->OnInterrupt),&(Reader->user),&(Reader->PAUSE)); } } if(Reader->PAUSE!=0) { return; } /* Reading Body Only */ if(Reader->BeginPointer == Reader->EndPointer) { Reader->BeginPointer = 0; Reader->EndPointer = 0; } else { if(Reader->BeginPointer!=0) { Reader->EndPointer = Reader->BeginPointer; } } bytesReceived = recv(Reader->internalSocket,Reader->buffer+Reader->EndPointer,Reader->MallocSize-Reader->EndPointer,0); Reader->EndPointer += bytesReceived; if(bytesReceived<=0) { Reader->IsFree = 1; sem_wait(&(Reader->SendLock)); ILibAsyncSocket_ClearPendingSend(Reader); sem_post(&(Reader->SendLock)); #ifdef _WIN32_WCE closesocket(Reader->internalSocket); #elif WIN32 closesocket(Reader->internalSocket); #elif _POSIX close(Reader->internalSocket); #endif Reader->internalSocket = ~0; Reader->IsFree = 1; if(Reader->OnDisconnect!=NULL) { Reader->OnDisconnect(Reader,Reader->user); } if(Reader->IsFree!=0 && Reader->buffer!=NULL) { FREE(Reader->buffer); Reader->buffer = NULL; Reader->MallocSize = 0; } } else { if(Reader->OnData!=NULL) { Reader->OnData(Reader,Reader->buffer,&(Reader->BeginPointer),Reader->EndPointer,&(Reader->OnInterrupt),&(Reader->user),&(Reader->PAUSE)); } while(Reader->PAUSE==0 && Reader->BeginPointer!=Reader->EndPointer && Reader->BeginPointer!=0) { memcpy(Reader->buffer,Reader->buffer+Reader->BeginPointer,Reader->EndPointer-Reader->BeginPointer); Reader->EndPointer = Reader->EndPointer-Reader->BeginPointer; Reader->BeginPointer = 0; if(Reader->OnData!=NULL) { Reader->OnData(Reader,Reader->buffer,&(Reader->BeginPointer),Reader->EndPointer,&(Reader->OnInterrupt),&(Reader->user),&(Reader->PAUSE)); } } if(Reader->BeginPointer==Reader->EndPointer) { Reader->BeginPointer = 0; Reader->EndPointer = 0; } if(Reader->MallocSize - Reader->EndPointer <1024) { Reader->MallocSize += 4096; Reader->buffer = (char*)realloc(Reader->buffer,Reader->MallocSize); } } } void ILibAsyncSocket_PreSelect(void* socketModule,fd_set *readset, fd_set *writeset, fd_set *errorset, int* blocktime) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; if(module->internalSocket!=-1) { if(module->FinConnect==0) { /* Not Connected Yet */ FD_SET(module->internalSocket,writeset); FD_SET(module->internalSocket,errorset); } else { if(module->PAUSE==0) { /* Already Connected, just needs reading */ FD_SET(module->internalSocket,readset); FD_SET(module->internalSocket,errorset); } } } sem_wait(&(module->SendLock)); if(module->PendingSend_Head!=NULL) { FD_SET(module->internalSocket,writeset); } sem_post(&(module->SendLock)); } void ILibAsyncSocket_PostSelect(void* socketModule,int slct, fd_set *readset, fd_set *writeset, fd_set *errorset) { int TriggerSendOK = 0; struct AsyncSocket_SendData *temp; int bytesSent=0; int flags; 
struct sockaddr_in receivingAddress; int receivingAddressLength = sizeof(struct sockaddr_in); struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; int TRY_TO_SEND = 1; // Write Handling if(module->FinConnect!=0 && module->internalSocket!=~0 && FD_ISSET(module->internalSocket,writeset)!=0) { sem_wait(&(module->SendLock)); while(TRY_TO_SEND!=0) { bytesSent = send(module->internalSocket,module->PendingSend_Head->buffer+module->PendingSend_Head->bytesSent,module->PendingSend_Head->bufferSize-module->PendingSend_Head->bytesSent,0); if(bytesSent>0) { module->PendingBytesToSend -= bytesSent; module->TotalBytesSent += bytesSent; module->PendingSend_Head->bytesSent+=bytesSent; if(module->PendingSend_Head->bytesSent==module->PendingSend_Head->bufferSize) { // Finished Sending this block if(module->PendingSend_Head==module->PendingSend_Tail) { module->PendingSend_Tail = NULL; } if(module->PendingSend_Head->UserFree==0) { FREE(module->PendingSend_Head->buffer); } temp = module->PendingSend_Head->Next; FREE(module->PendingSend_Head); module->PendingSend_Head = temp; if(module->PendingSend_Head==NULL) {TRY_TO_SEND=0;} } else { TRY_TO_SEND = 1; } } if(bytesSent==-1) { // Error, clean up everything #ifdef _WIN32_WCE bytesSent = WSAGetLastError(); if(bytesSent!=WSAEWOULDBLOCK) #elif WIN32 bytesSent = WSAGetLastError(); if(bytesSent!=WSAEWOULDBLOCK) #else if(errno!=EWOULDBLOCK) #endif { ILibAsyncSocket_ClearPendingSend(socketModule); TRY_TO_SEND = 0; } } } if(module->PendingSend_Head==NULL && bytesSent!=-1) {TriggerSendOK=1;} sem_post(&(module->SendLock)); if(TriggerSendOK!=0) { module->OnSendOK(module,module->user); } } // Connection Handling / Read Handling if(module->internalSocket!=~0) { if(module->FinConnect==0) { /* Not Connected Yet */ if(FD_ISSET(module->internalSocket,writeset)!=0) { /* Connected */ getsockname(module->internalSocket,(struct sockaddr*)&receivingAddress,&receivingAddressLength); module->LocalIPAddress = receivingAddress.sin_addr.s_addr; module->FinConnect = 1; module->PAUSE = 0; #ifdef _WIN32_WCE flags = 1; ioctlsocket(module->internalSocket,FIONBIO,&flags); #elif WIN32 flags = 1; ioctlsocket(module->internalSocket,FIONBIO,&flags); #elif _POSIX flags = fcntl(module->internalSocket,F_GETFL,0); fcntl(module->internalSocket,F_SETFL,O_NONBLOCK|flags); #endif /* Connection Complete */ if(module->OnConnect!=NULL) { module->OnConnect(module,-1,module->user); } } if(FD_ISSET(module->internalSocket,errorset)!=0) { /* Connection Failed */ #ifdef _WIN32_WCE closesocket(module->internalSocket); #elif WIN32 closesocket(module->internalSocket); #elif _POSIX close(module->internalSocket); #endif module->internalSocket = ~0; if(module->OnConnect!=NULL) { module->OnConnect(module,0,module->user); } } } else { /* Check if PeerReset */ if(FD_ISSET(module->internalSocket,errorset)!=0) { /* Socket Closed */ #ifdef _WIN32_WCE closesocket(module->internalSocket); #elif WIN32 closesocket(module->internalSocket); #elif _POSIX close(module->internalSocket); #endif module->internalSocket = ~0; module->IsFree=1; module->PAUSE = 1; sem_wait(&(module->SendLock)); ILibAsyncSocket_ClearPendingSend(socketModule); sem_post(&(module->SendLock)); if(module->OnDisconnect!=NULL) { module->OnDisconnect(module,module->user); } } /* Already Connected, just needs reading */ if(FD_ISSET(module->internalSocket,readset)!=0) { /* Data Available */ ILibProcessAsyncSocket(module); } } } } int ILibAsyncSocket_IsFree(void *socketModule) { struct AsyncSocketModule *module = (struct 
AsyncSocketModule*)socketModule; return(module->IsFree); } unsigned int ILibAsyncSocket_GetPendingBytesToSend(void *socketModule) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; return(module->PendingBytesToSend); } unsigned int ILibAsyncSocket_GetTotalBytesSent(void *socketModule) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; return(module->TotalBytesSent); } void ILibAsyncSocket_ResetTotalBytesSent(void *socketModule) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; module->TotalBytesSent = 0; } void ILibAsyncSocket_GetBuffer(void *socketModule, char **buffer, int *BeginPointer, int *EndPointer) { struct AsyncSocketModule* module = (struct AsyncSocketModule*)socketModule; *buffer = module->buffer; *BeginPointer = module->BeginPointer; *EndPointer = module->EndPointer; } void ILibAsyncSocket_SetRemoteAddress(void *socketModule,int RemoteAddress) { struct AsyncSocketModule* module = (struct AsyncSocketModule*)socketModule; module->RemoteIPAddress = RemoteAddress; } void ILibAsyncSocket_UseThisSocket(void *socketModule,void* UseThisSocket,void (*InterruptPtr)(void *socketModule, void *user),void *user) { #ifdef _WIN32_WCE SOCKET TheSocket = *((SOCKET*)UseThisSocket); #elif WIN32 SOCKET TheSocket = *((SOCKET*)UseThisSocket); #elif _POSIX int TheSocket = *((int*)UseThisSocket); #endif int flags; struct AsyncSocketModule* module = (struct AsyncSocketModule*)socketModule; module->PendingBytesToSend = 0; module->TotalBytesSent = 0; module->internalSocket = TheSocket; module->IsFree = 0; module->OnInterrupt = InterruptPtr; module->user = user; module->FinConnect = 1; module->PAUSE = 0; module->buffer = (char*)realloc(module->buffer,module->InitialSize); module->MallocSize = module->InitialSize; module->FinConnect = 1; module->BeginPointer = 0; module->EndPointer = 0; #ifdef _WIN32_WCE flags = 1; ioctlsocket(module->internalSocket,FIONBIO,&flags); #elif WIN32 flags = 1; ioctlsocket(module->internalSocket,FIONBIO,&flags); #elif _POSIX flags = fcntl(module->internalSocket,F_GETFL,0); fcntl(module->internalSocket,F_SETFL,O_NONBLOCK|flags); #endif } int ILibAsyncSocket_GetRemoteInterface(void *socketModule) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; return(module->RemoteIPAddress); } int ILibAsyncSocket_GetLocalInterface(void *socketModule) { struct AsyncSocketModule *module = (struct AsyncSocketModule*)socketModule; struct sockaddr_in receivingAddress; int receivingAddressLength = sizeof(struct sockaddr_in); getsockname(module->internalSocket,(struct sockaddr*)&receivingAddress,&receivingAddressLength); return(receivingAddress.sin_addr.s_addr); } void ILibAsyncSocket_Resume(void *socketModule) { struct AsyncSocketModule *sm = (struct AsyncSocketModule*)socketModule; if(sm->PAUSE!=0) { sm->PAUSE=0; ILibForceUnBlockChain(sm->Chain); } }
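/*
 * Illustrative sketch (not part of the original module): minimal client-side
 * usage of the API above. The chain helpers ILibCreateChain()/ILibStartChain()
 * from ILibParsers.h are assumed to be available as in the rest of the ILib
 * stack, and the remote address, port, and payload are placeholders.
 */
void SketchOnData(void* socketModule, char* buffer, int *p_beginPointer, int endPointer, void (**InterruptPtr)(void *socketModule, void *user), void **user, int *PAUSE)
{
	/* Consume everything that was buffered; leaving *p_beginPointer short of
	   endPointer asks the module to keep the remainder for the next pass. */
	*p_beginPointer = endPointer;
}
void SketchOnConnect(void* socketModule, int Connected, void *user)
{
	if(Connected!=0)
	{
		/* USER ownership: the module copies the data if it has to queue it,
		   so a string literal is safe to pass here. */
		ILibAsyncSocket_Send(socketModule, "PING\r\n", 6, ILibAsyncSocket_MemoryOwnership_USER);
	}
}
void SketchOnDisconnect(void* socketModule, void *user) { }
void SketchOnSendOK(void *socketModule, void *user) { }
void SketchClient(void)
{
	void *chain = ILibCreateChain();
	void *sock = ILibCreateAsyncSocketModule(chain, 4096, &SketchOnData, &SketchOnConnect, &SketchOnDisconnect, &SketchOnSendOK);
	/* localInterface 0 = any; connect to loopback port 80 as a placeholder */
	ILibAsyncSocket_ConnectTo(sock, 0, (int)inet_addr("127.0.0.1"), 80, NULL, NULL);
	ILibStartChain(chain); /* runs the select loop until the chain is stopped */
}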
128827.c
/* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ /************************************************************************/ /* */ /* PROJECT : exFAT & FAT12/16/32 File System */ /* FILE : fatent.c */ /* PURPOSE : sdFAT FAT entry manager */ /* */ /*----------------------------------------------------------------------*/ /* NOTES */ /* */ /* */ /************************************************************************/ #include <asm/unaligned.h> #include "sdfat.h" #include "core.h" /*----------------------------------------------------------------------*/ /* Global Variable Definitions */ /*----------------------------------------------------------------------*/ /* All buffer structures are protected w/ fsi->v_sem */ /*----------------------------------------------------------------------*/ /* Static functions */ /*----------------------------------------------------------------------*/ /*======================================================================*/ /* FAT Read/Write Functions */ /*======================================================================*/ /* in : sb, loc * out: content * returns 0 on success, -1 on error */ static s32 exfat_ent_get(struct super_block *sb, u32 loc, u32 *content) { u32 off, _content; u64 sec; u8 *fat_sector; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); /* fsi->vol_type == EXFAT */ sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2)); off = (loc << 2) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; _content = le32_to_cpu(*(__le32 *)(&fat_sector[off])); /* remap reserved clusters to simplify code */ if (_content >= CLUSTER_32(0xFFFFFFF8)) _content = CLUS_EOF; *content = CLUSTER_32(_content); return 0; } static s32 exfat_ent_set(struct super_block *sb, u32 loc, u32 content) { u32 off; u64 sec; u8 *fat_sector; __le32 *fat_entry; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2)); off = (loc << 2) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; fat_entry = (__le32 *)&(fat_sector[off]); *fat_entry = cpu_to_le32(content); return fcache_modify(sb, sec); } #define FATENT_FAT32_VALID_MASK (0x0FFFFFFFU) #define FATENT_FAT32_IGNORE_MASK (0xF0000000U) static s32 fat32_ent_get(struct super_block *sb, u32 loc, u32 *content) { u32 off, _content; u64 sec; u8 *fat_sector; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2)); off = (loc << 2) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; _content = le32_to_cpu(*(__le32 *)(&fat_sector[off])); _content &= FATENT_FAT32_VALID_MASK; /* remap reserved clusters to simplify code */ if (_content == CLUSTER_32(0x0FFFFFF7U)) _content = CLUS_BAD; else if (_content >= CLUSTER_32(0x0FFFFFF8U)) _content = CLUS_EOF; *content = CLUSTER_32(_content); 
return 0; } static s32 fat32_ent_set(struct super_block *sb, u32 loc, u32 content) { u32 off; u64 sec; u8 *fat_sector; __le32 *fat_entry; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); content &= FATENT_FAT32_VALID_MASK; sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-2)); off = (loc << 2) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; fat_entry = (__le32 *)&(fat_sector[off]); content |= (le32_to_cpu(*fat_entry) & FATENT_FAT32_IGNORE_MASK); *fat_entry = cpu_to_le32(content); return fcache_modify(sb, sec); } #define FATENT_FAT16_VALID_MASK (0x0000FFFFU) static s32 fat16_ent_get(struct super_block *sb, u32 loc, u32 *content) { u32 off, _content; u64 sec; u8 *fat_sector; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-1)); off = (loc << 1) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; _content = (u32)le16_to_cpu(*(__le16 *)(&fat_sector[off])); _content &= FATENT_FAT16_VALID_MASK; /* remap reserved clusters to simplify code */ if (_content == CLUSTER_16(0xFFF7U)) _content = CLUS_BAD; else if (_content >= CLUSTER_16(0xFFF8U)) _content = CLUS_EOF; *content = CLUSTER_32(_content); return 0; } static s32 fat16_ent_set(struct super_block *sb, u32 loc, u32 content) { u32 off; u64 sec; u8 *fat_sector; __le16 *fat_entry; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); content &= FATENT_FAT16_VALID_MASK; sec = fsi->FAT1_start_sector + (loc >> (sb->s_blocksize_bits-1)); off = (loc << 1) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; fat_entry = (__le16 *)&(fat_sector[off]); *fat_entry = cpu_to_le16(content); return fcache_modify(sb, sec); } #define FATENT_FAT12_VALID_MASK (0x00000FFFU) static s32 fat12_ent_get(struct super_block *sb, u32 loc, u32 *content) { u32 off, _content; u64 sec; u8 *fat_sector; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); sec = fsi->FAT1_start_sector + ((loc + (loc >> 1)) >> sb->s_blocksize_bits); off = (loc + (loc >> 1)) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; if (off == (u32)(sb->s_blocksize - 1)) { _content = (u32) fat_sector[off]; fat_sector = fcache_getblk(sb, ++sec); if (!fat_sector) return -EIO; _content |= (u32) fat_sector[0] << 8; } else { _content = get_unaligned_le16(&fat_sector[off]); } if (loc & 1) _content >>= 4; _content &= FATENT_FAT12_VALID_MASK; /* remap reserved clusters to simplify code */ if (_content == CLUSTER_16(0x0FF7U)) _content = CLUS_BAD; else if (_content >= CLUSTER_16(0x0FF8U)) _content = CLUS_EOF; *content = CLUSTER_32(_content); return 0; } static s32 fat12_ent_set(struct super_block *sb, u32 loc, u32 content) { u32 off; u64 sec; u8 *fat_sector, *fat_entry; FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); content &= FATENT_FAT12_VALID_MASK; sec = fsi->FAT1_start_sector + ((loc + (loc >> 1)) >> sb->s_blocksize_bits); off = (loc + (loc >> 1)) & (u32)(sb->s_blocksize - 1); fat_sector = fcache_getblk(sb, sec); if (!fat_sector) return -EIO; if (loc & 1) { /* odd */ content <<= 4; if (off == (u32)(sb->s_blocksize-1)) { fat_sector[off] = (u8)(content | (fat_sector[off] & 0x0F)); if (fcache_modify(sb, sec)) return -EIO; fat_sector = fcache_getblk(sb, ++sec); if (!fat_sector) return -EIO; fat_sector[0] = (u8)(content >> 8); } else { fat_entry = &(fat_sector[off]); content |= 0x000F & get_unaligned_le16(fat_entry); put_unaligned_le16(content, fat_entry); } } else { /* even */ fat_sector[off] = (u8)(content); if (off == 
(u32)(sb->s_blocksize-1)) { fat_sector[off] = (u8)(content); if (fcache_modify(sb, sec)) return -EIO; fat_sector = fcache_getblk(sb, ++sec); if (!fat_sector) return -EIO; fat_sector[0] = (u8)((fat_sector[0] & 0xF0) | (content >> 8)); } else { fat_entry = &(fat_sector[off]); content |= 0xF000 & get_unaligned_le16(fat_entry); put_unaligned_le16(content, fat_entry); } } return fcache_modify(sb, sec); } static FATENT_OPS_T fat12_ent_ops = { fat12_ent_get, fat12_ent_set }; static FATENT_OPS_T fat16_ent_ops = { fat16_ent_get, fat16_ent_set }; static FATENT_OPS_T fat32_ent_ops = { fat32_ent_get, fat32_ent_set }; static FATENT_OPS_T exfat_ent_ops = { exfat_ent_get, exfat_ent_set }; s32 fat_ent_ops_init(struct super_block *sb) { FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); switch (fsi->vol_type) { case EXFAT: fsi->fatent_ops = &exfat_ent_ops; break; case FAT32: fsi->fatent_ops = &fat32_ent_ops; break; case FAT16: fsi->fatent_ops = &fat16_ent_ops; break; case FAT12: fsi->fatent_ops = &fat12_ent_ops; break; default: fsi->fatent_ops = NULL; EMSG("Unknown volume type : %d", (int)fsi->vol_type); return -ENOTSUPP; } return 0; } static inline bool is_reserved_clus(u32 clus) { if (IS_CLUS_FREE(clus)) return true; if (IS_CLUS_EOF(clus)) return true; if (IS_CLUS_BAD(clus)) return true; return false; } static inline bool is_valid_clus(FS_INFO_T *fsi, u32 clus) { if (clus < CLUS_BASE || fsi->num_clusters <= clus) return false; return true; } s32 fat_ent_get(struct super_block *sb, u32 loc, u32 *content) { FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); s32 err; if (!is_valid_clus(fsi, loc)) { sdfat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", loc); return -EIO; } err = fsi->fatent_ops->ent_get(sb, loc, content); if (err) { sdfat_fs_error(sb, "failed to access to FAT " "(entry 0x%08x, err:%d)", loc, err); return err; } if (!is_reserved_clus(*content) && !is_valid_clus(fsi, *content)) { sdfat_fs_error(sb, "invalid access to FAT (entry 0x%08x) " "bogus content (0x%08x)", loc, *content); return -EIO; } return 0; } s32 fat_ent_set(struct super_block *sb, u32 loc, u32 content) { FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi); return fsi->fatent_ops->ent_set(sb, loc, content); } s32 fat_ent_get_safe(struct super_block *sb, u32 loc, u32 *content) { s32 err = fat_ent_get(sb, loc, content); if (err) return err; if (IS_CLUS_FREE(*content)) { sdfat_fs_error(sb, "invalid access to FAT free cluster " "(entry 0x%08x)", loc); return -EIO; } if (IS_CLUS_BAD(*content)) { sdfat_fs_error(sb, "invalid access to FAT bad cluster " "(entry 0x%08x)", loc); return -EIO; } return 0; } /* end of fatent.c */
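/*
 * Illustrative sketch (not part of the driver): the FAT12 index arithmetic
 * used by fat12_ent_get/set above, shown in isolation. Unlike the driver, it
 * assumes the whole FAT is contiguous in memory, so the sector-boundary
 * special case handled above does not apply; the function name is hypothetical.
 */
#include <stdint.h>

/* returns the 12-bit FAT entry stored at index 'loc' in an in-memory FAT copy */
static uint32_t fat12_peek(const uint8_t *fat, uint32_t loc)
{
	/* each entry is 1.5 bytes, so entry 'loc' starts at byte loc + loc/2 */
	uint32_t off = loc + (loc >> 1);
	uint32_t raw = fat[off] | ((uint32_t)fat[off + 1] << 8);

	if (loc & 1)
		raw >>= 4;       /* odd entry: value lives in the upper 12 bits */
	return raw & 0x0FFF;     /* even entry: value lives in the lower 12 bits */
}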
187065.c
/* * If not stated otherwise in this file or this component's Licenses.txt file the * following copyright and licenses apply: * * Copyright 2015 RDK Management * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /********************************************************************** Copyright [2014] [Cisco Systems, Inc.] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. **********************************************************************/ /********************************************************************** module: bbhm_diagip_sink_operation.c For Broadband Home Manager Model Implementation (BBHM), BroadWay Service Delivery System --------------------------------------------------------------- description: This module implements the advanced functions of the Ping Sink Object. * BbhmDiagipSinkGetRecvBuffer * BbhmDiagipSinkAccept * BbhmDiagipSinkRecv * BbhmDiagipSinkClose * BbhmDiagipSinkAbort --------------------------------------------------------------- environment: platform independent --------------------------------------------------------------- author: Li Shi --------------------------------------------------------------- revision: 08/08/09 initial revision. **********************************************************************/ #include "bbhm_diagip_global.h" /********************************************************************** caller: owner of this object prototype: PVOID BbhmDiagipSinkGetRecvBuffer ( ANSC_HANDLE hThisObject, PANSC_HANDLE phRecvHandle, PULONG pulSize ); description: This function is called by the receive task to retrieve buffer from the socket owner to hold received data. argument: ANSC_HANDLE hThisObject This handle is actually the pointer of this object itself. PANSC_HANDLE phRecvHandle Specifies a context which is associated with the returned buffer. PULONG pulSize This parameter returns the buffer size. return: buffer pointer. 
**********************************************************************/ ANSC_STATUS BbhmDiagitSinkRemove ( ANSC_HANDLE hThisObject ); PVOID BbhmDiagipSinkGetRecvBuffer ( ANSC_HANDLE hThisObject, PANSC_HANDLE phRecvHandle, PULONG pulSize ) { PBBHM_IP_PING_SINK_OBJECT pSink = (PBBHM_IP_PING_SINK_OBJECT )hThisObject; ULONG ulRestSize = pSink->MaxMessageSize; *phRecvHandle = (ANSC_HANDLE)NULL; *pulSize = ulRestSize; return pSink->RecvBuffer; } /********************************************************************** caller: owner of this object prototype: ANSC_STATUS BbhmDiagipSinkAccept ( ANSC_HANDLE hThisObject, ANSC_HANDLE hNewSocket ); description: This function notifies the socket owner when network data arrives at the socket or socket status has changed. argument: ANSC_HANDLE hThisObject This handle is actually the pointer of this object itself. ANSC_HANDLE hNewSocket Specifies the socket object we have just created. return: status of operation. **********************************************************************/ ANSC_STATUS BbhmDiagipSinkAccept ( ANSC_HANDLE hThisObject, ANSC_HANDLE hNewSocket ) { PBBHM_IP_PING_SINK_OBJECT pSink = (PBBHM_IP_PING_SINK_OBJECT )hThisObject; PBBHM_IP_PING_SINK_OBJECT pNewSink = (PBBHM_IP_PING_SINK_OBJECT )BbhmDiagipSinkCreate(pSink->hOwnerContext); /*RDKB-7450, CID-33258; free unused memeory allocated*/ if(pNewSink) { BbhmDiagitSinkRemove(pNewSink); } return ANSC_STATUS_UNAPPLICABLE; } /********************************************************************** caller: owner of this object prototype: ANSC_STATUS BbhmDiagipSinkRecv ( ANSC_HANDLE hThisObject, ANSC_HANDLE hRecvHandle, PVOID buffer, ULONG ulSize ); description: This function notifies the socket owner when network data arrives at the socket or socket status has changed. argument: ANSC_HANDLE hThisObject This handle is actually the pointer of this object itself. ANSC_HANDLE hRecvHandle Specifies the context returned by get_recv_buffer(). PVOID buffer Specifies the buffer holding the received data. ULONG ulSize Specifies the size of the data buffer. return: status of operation. **********************************************************************/ ANSC_STATUS BbhmDiagipSinkRecv ( ANSC_HANDLE hThisObject, ANSC_HANDLE hRecvHandle, PVOID buffer, ULONG ulSize ) { PBBHM_IP_PING_SINK_OBJECT pSink = (PBBHM_IP_PING_SINK_OBJECT )hThisObject; PBBHM_DIAG_IP_PING_OBJECT pBbhmDiagip = (PBBHM_DIAG_IP_PING_OBJECT)pSink->hOwnerContext; pBbhmDiagip->Recv ( (ANSC_HANDLE)pBbhmDiagip, (ANSC_HANDLE)pSink, buffer, ulSize ); return ANSC_STATUS_SUCCESS; } /********************************************************************** caller: owner of this object prototype: ANSC_STATUS BbhmDiagipSinkClose ( ANSC_HANDLE hThisObject, BOOL bByPeer ); description: This function notifies the socket owner when network data arrives at the socket or socket status has changed. argument: ANSC_HANDLE hThisObject This handle is actually the pointer of this object itself. BOOL bByPeer Specifies whether the host or the peer closed the pingection. return: status of operation. 
**********************************************************************/ ANSC_STATUS BbhmDiagipSinkClose ( ANSC_HANDLE hThisObject, BOOL bByPeer ) { PBBHM_IP_PING_SINK_OBJECT pSink = (PBBHM_IP_PING_SINK_OBJECT)hThisObject; pSink->Reset((ANSC_HANDLE)pSink); return ANSC_STATUS_SUCCESS; } /********************************************************************** caller: owner of this object prototype: ANSC_STATUS BbhmDiagipSinkAbort ( ANSC_HANDLE hThisObject ); description: This function notifies the socket owner when critical network failure is detected. argument: ANSC_HANDLE hThisObject This handle is actually the pointer of this object itself. return: status of operation. **********************************************************************/ ANSC_STATUS BbhmDiagipSinkAbort ( ANSC_HANDLE hThisObject ) { PBBHM_IP_PING_SINK_OBJECT pSink = (PBBHM_IP_PING_SINK_OBJECT )hThisObject; pSink->Reset((ANSC_HANDLE)pSink); return ANSC_STATUS_SUCCESS; }
873677.c
/* * Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Test kTLS * * Implements a simple TLS server * Optionally enable kernel TLS mode * * Requires openssl 1.0 (i.e., not 1.1+) for direct struct access */ #define _GNU_SOURCE #include <arpa/inet.h> #include <byteswap.h> #include <error.h> #include <errno.h> #include <fcntl.h> #include <linux/tcp.h> #include <linux/tls.h> #include <stdbool.h> #include <stdio.h> #include <stdint.h> #include <stdlib.h> #include <string.h> #include <sys/socket.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <openssl/ssl.h> #include <openssl/err.h> #include <openssl/aes.h> #include <openssl/modes.h> static const char *cfg_certfile = "cert.pem"; static const char *cfg_ciphers = "AES128-GCM-SHA256"; static const char *cfg_keyfile = "key.pem"; static bool cfg_do_ktls; static bool cfg_do_splice; static void error_ssl(void) { ERR_print_errors_fp(stderr); exit(1); } static int setup_tcp(void) { struct sockaddr_in6 addr = {0}; int fd, one = 1; fd = socket(PF_INET6, SOCK_STREAM, 0); if (fd == -1) error(1, errno, "socket"); if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one))) error(1, errno, "setsockopt reuseaddr"); addr.sin6_family = AF_INET6; addr.sin6_port = htons(443); addr.sin6_addr = in6addr_any; if (bind(fd, (void *)&addr, sizeof(addr))) error(1, errno, "bind"); if (listen(fd, 1)) error(1, errno, "listen"); return fd; } static SSL_CTX * setup_tls(void) { SSL_CTX *ctx; SSL_library_init(); SSL_load_error_strings(); ctx = SSL_CTX_new(TLSv1_2_method()); if (!ctx) error_ssl(); if (SSL_CTX_set_cipher_list(ctx, cfg_ciphers) != 1) error_ssl(); if (SSL_CTX_use_certificate_file(ctx, cfg_certfile, SSL_FILETYPE_PEM) != 1) error_ssl(); if (SSL_CTX_use_PrivateKey_file(ctx, cfg_keyfile, SSL_FILETYPE_PEM) != 1) error_ssl(); return ctx; } static void readwrite_tls(SSL *ssl) { char msg; if (SSL_read(ssl, &msg, sizeof(msg)) != 1) error_ssl(); printf("recv: %c (SSL_read)\n", msg); if (SSL_write(ssl, &msg, sizeof(msg)) != 1) error_ssl(); printf("sent: %c (SSL_write)\n", msg); } #ifdef OPENSSL_IS_BORINGSSL /* BoringSSL SSL_generate_key_block generates a concatenation of * digest + encryption secrets and optional IV. * * The exact layout is cipher specific. GCM does not have independent * digest secrets, for instance. * * Note: fields follow a client/server layout, as described in RFC 5246 6.3 * this does NOT map 1:1 onto kTLS TLS_TX/TLS_RX. 
* * on the server (SSL_accept), map key_server onto TLS_TX * on the client (SSL_connect), do the opposite */ struct boringssl_aesgcm128_keyblock { unsigned char key_client[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; unsigned char key_server[TLS_CIPHER_AES_GCM_128_KEY_SIZE]; unsigned char salt_client[TLS_CIPHER_AES_GCM_128_SALT_SIZE]; unsigned char salt_server[TLS_CIPHER_AES_GCM_128_SALT_SIZE]; } __attribute__((packed)); static void setup_kernel_tls(SSL *ssl, int fd, bool is_tx) { struct tls12_crypto_info_aes_gcm_128 ci = {0}; struct boringssl_aesgcm128_keyblock kb; unsigned char *key, *salt; const SSL_CIPHER *cipher; uint64_t seq; int optname; cipher = SSL_get_current_cipher(ssl); if (!cipher) error(1, 0, "error at SSL_get_current_cipher"); if (SSL_CIPHER_get_cipher_nid(cipher) != NID_aes_128_gcm) error(1, 0, "unexpected cipher"); if (SSL_get_key_block_len(ssl) != sizeof(kb)) error(1, 0, "unexpected keyblock length"); if (SSL_generate_key_block(ssl, (void *) &kb, sizeof(kb)) != 1) error(1, 0, "error at generate keyblock"); if (is_tx) { seq = SSL_get_write_sequence(ssl); key = kb.key_server; salt = kb.salt_server; optname = TLS_TX; } else { seq = SSL_get_read_sequence(ssl); key = kb.key_client; salt = kb.salt_client; optname = TLS_RX; } seq = bswap_64(seq); ci.info.version = TLS_1_2_VERSION; ci.info.cipher_type = TLS_CIPHER_AES_GCM_128; memcpy(ci.rec_seq, &seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE); /* Explicit portion of the nonce. Can be anything. Passed along in * the TLS record XOR-ed with a counter increasing on each record. */ *((uint64_t *)ci.iv) = bswap_64(0xDEADBEEF); if (setsockopt(fd, SOL_TLS, optname, &ci, sizeof(ci))) error(1, errno, "setsockopt tls %cx", is_tx ? 't' : 'r'); } #else /* define some openssl internals */ #ifndef EVP_AES_GCM_CTX typedef struct { uint64_t val[2]; } uint128_t; struct gcm128_context { uint128_t Yi,EKi,EK0,len,Xi,H; uint128_t Htable[16]; void *gmult; void *ghash; unsigned int mres, ares; void *block; void *key; }; typedef struct { union { double align; /* essential, see with pahole */ AES_KEY ks; } ks; int key_set; int iv_set; GCM128_CONTEXT gcm; unsigned char *iv; int ivlen; int taglen; int iv_gen; int tls_aad_len; ctr128_f ctr; } EVP_AES_GCM_CTX; #endif static void setup_kernel_tls(SSL *ssl, int fd, bool is_tx) { struct tls12_crypto_info_aes_gcm_128 ci = {0}; struct ssl_st *_ssl = (void *) ssl; EVP_AES_GCM_CTX *ctx; unsigned char *seq; int optname; if (is_tx) { ctx = (void *) _ssl->enc_write_ctx->cipher_data; seq = _ssl->s3->write_sequence; optname = TLS_TX; } else { ctx = (void *) _ssl->enc_read_ctx->cipher_data; seq = _ssl->s3->read_sequence; optname = TLS_RX; } ci.info.version = TLS_1_2_VERSION; ci.info.cipher_type = TLS_CIPHER_AES_GCM_128; memcpy(ci.rec_seq, seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); memcpy(ci.key, ctx->gcm.key, TLS_CIPHER_AES_GCM_128_KEY_SIZE); memcpy(ci.salt, ctx->iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE); memcpy(ci.iv, ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, TLS_CIPHER_AES_GCM_128_IV_SIZE); if (setsockopt(fd, SOL_TLS, optname, &ci, sizeof(ci))) error(1, errno, "setsockopt tls %cx", is_tx ? 
't' : 'r'); } #endif static void __setup_kernel_tls(SSL *ssl, int fd) { if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"))) error(1, errno, "setsockopt upper layer protocol"); setup_kernel_tls(ssl, fd, true); setup_kernel_tls(ssl, fd, false); } static void readwrite_kernel_tls(SSL *ssl, int fd) { char msg[100]; int ret; __setup_kernel_tls(ssl, fd); ret = read(fd, &msg, sizeof(msg)); if (ret == -1) error(1, errno, "read"); if (ret == 0) error(1, 0, "read: EOF"); printf("recv: %c (kTLS)\n", msg[0]); if (write(fd, &msg, ret) != ret) error(1, errno, "write"); printf("sent: %c (kTLS)\n", msg[0]); } /* use splice instead of read() plus write() * cannot splice from fd to itself, so use intermediate pipe */ static void splice_kernel_tls(SSL *ssl, int fd) { int ret, pipes[2]; __setup_kernel_tls(ssl, fd); if (pipe(pipes)) error(1, errno, "pipe"); ret = splice(fd, NULL, pipes[1], NULL, 100, 0); if (ret == -1) error(1, errno, "splice from"); if (ret == 0) error(1, errno, "splice from: no data"); printf("splice: %dB (kTLS) (from)\n", ret); ret = splice(pipes[0], NULL, fd, NULL, ret, 0); if (ret == -1) error(1, errno, "splice to"); if (ret == 0) error(1, errno, "splice to: no data"); printf("splice: %dB (kTLS) (to)\n", ret); } static void usage(const char *filepath) { error(1, 0, "usage: %s [-c cipher] [-C certfile] [-k] [-K keyfile] [-s]\n", filepath); } static void parse_opts(int argc, char **argv) { int c; while ((c = getopt(argc, argv, "c:C:kK:s")) != -1) { switch (c) { case 'c': cfg_ciphers = optarg; break; case 'C': cfg_certfile = optarg; break; case 'k': cfg_do_ktls = true; break; case 'K': cfg_keyfile = optarg; break; case 's': cfg_do_splice = true; break; default: usage(argv[0]); } } if (cfg_do_splice && !cfg_do_ktls) error(1, 0, "splice requires ktls"); } int main(int argc, char **argv) { SSL_CTX *ctx; SSL *ssl; int fd, fd_listen; parse_opts(argc, argv); ctx = setup_tls(); fd_listen = setup_tcp(); fd = accept(fd_listen, NULL, 0); if (fd == -1) error(1, errno, "accept"); ssl = SSL_new(ctx); if (!ssl) error_ssl(); if (SSL_set_fd(ssl, fd) != 1) error_ssl(); if (SSL_accept(ssl) != 1) error_ssl(); if (cfg_do_splice) splice_kernel_tls(ssl, fd); else if (cfg_do_ktls) readwrite_kernel_tls(ssl, fd); else readwrite_tls(ssl); SSL_free(ssl); if (close(fd)) error(1, errno, "close connection"); if (close(fd_listen)) error(1, errno, "close listen"); SSL_CTX_free(ctx); return 0; }
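/*
 * Illustrative sketch (not part of the test): enabling kernel TLS transmit
 * offload on an established TCP socket once the AES-128-GCM key material is
 * known, independent of how the TLS handshake produced it. It mirrors the
 * setsockopt sequence used above; the key/salt/iv/sequence arguments are
 * assumed to come from the caller's TLS library, and the function name is
 * hypothetical.
 */
static int enable_ktls_tx_sketch(int fd,
				 const unsigned char key[TLS_CIPHER_AES_GCM_128_KEY_SIZE],
				 const unsigned char salt[TLS_CIPHER_AES_GCM_128_SALT_SIZE],
				 const unsigned char iv[TLS_CIPHER_AES_GCM_128_IV_SIZE],
				 const unsigned char seq[TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE])
{
	struct tls12_crypto_info_aes_gcm_128 ci = {0};

	/* attach the "tls" upper layer protocol, then hand it the TX keys */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	memcpy(ci.key, key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
	memcpy(ci.salt, salt, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
	memcpy(ci.iv, iv, TLS_CIPHER_AES_GCM_128_IV_SIZE);
	memcpy(ci.rec_seq, seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);

	if (setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci)))
		return -1;

	/* from here on, plain write()/send() on fd emits encrypted TLS records */
	return 0;
}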
747693.c
/* * NASA Docket No. GSC-18,370-1, and identified as "Operating System Abstraction Layer" * * Copyright (c) 2019 United States Government as represented by * the Administrator of the National Aeronautics and Space Administration. * All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Filename: select-test.c * * Purpose: This file contains functional tests for "osapi-select" * Single select test will create a server and client to stream data between them and the select watches that stream. * Multi select test will setup a second server and client also streaming data between them so that it can watch * multiple streams. * */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include "common_types.h" #include "osapi.h" #include "utassert.h" #include "uttest.h" #include "utbsp.h" #define MAX_BUFFER_LOOP 1000000 /* * Timeouts for various socket ops in the test cases * * Note that the act of calling any "assert" routine causes console output, which * can easily take tens or even hundreds of milliseconds to execute on platforms * where the console is on a slow serial port. Therefore this timeout must * not be too short. */ #define UT_TIMEOUT 500 osal_id_t s1_task_id; osal_id_t s2_task_id; osal_id_t s1_socket_id; osal_id_t s2_socket_id; osal_id_t c1_socket_id; osal_id_t c2_socket_id; osal_id_t bin_sem_id; bool networkImplemented = true; char filldata[16834]; /* *************************************** MAIN ************************************** */ void BinSemSetup(void) { OS_bin_sem_prop_t bin_sem_prop; /* * Create the binary semaphore * BinSem1 is used to control when the server can accept connections */ UtAssert_INT32_EQ(OS_BinSemCreate(&bin_sem_id, "BinSem1", 0, 0), OS_SUCCESS); UtAssert_True(OS_ObjectIdDefined(bin_sem_id), "bin_sem_id (%lu) != UNDEFINED", OS_ObjectIdToInteger(bin_sem_id)); UtAssert_INT32_EQ(OS_BinSemGetInfo(bin_sem_id, &bin_sem_prop), OS_SUCCESS); UtPrintf("BinSem1 value=%d", (int)bin_sem_prop.value); } /* * Helper function to open a server socket, bind server to a port, * creates a server task, and create a client socket. * Then finally connects the client to the server and leave sockets open. 
*/ void Setup_SocketPair(osal_id_t *server_sockid_ptr, osal_id_t *client_sockid_ptr, uint16 portnum, osal_id_t *taskid_ptr, const char *SrvName, void (*SrvFunc)(void)) { OS_SockAddr_t addr; int sock_status; /* Open a server socket, if OS_ERR_NOT_IMPLEMENTED then give up now */ sock_status = OS_SocketOpen(server_sockid_ptr, OS_SocketDomain_INET, OS_SocketType_STREAM); if (sock_status == OS_ERR_NOT_IMPLEMENTED) { networkImplemented = false; return; } UtAssert_True(sock_status == OS_SUCCESS, "OS_SocketOpen(s1_socketid_ptr, OS_SocketDomain_INET, OS_SocketType_STREAM) (%d) == OS_SUCCESS", (int)sock_status); /* Open a client socket */ UtAssert_INT32_EQ(OS_SocketOpen(client_sockid_ptr, OS_SocketDomain_INET, OS_SocketType_STREAM), OS_SUCCESS); /* Initialize server address */ UtAssert_INT32_EQ(OS_SocketAddrInit(&addr, OS_SocketDomain_INET), OS_SUCCESS); /* Set server port */ UtAssert_INT32_EQ(OS_SocketAddrSetPort(&addr, portnum), OS_SUCCESS); /* Set server address */ UtAssert_INT32_EQ(OS_SocketAddrFromString(&addr, "127.0.0.1"), OS_SUCCESS); /* Bind server socket to server address */ UtAssert_INT32_EQ(OS_SocketBind(*server_sockid_ptr, &addr), OS_SUCCESS); /* Print the sockets for informational purposes - should both be valid/defined */ UtAssert_True(OS_ObjectIdDefined(*server_sockid_ptr), "s1_socket_id (%lu) != UNDEFINED (port %u)", OS_ObjectIdToInteger(*server_sockid_ptr), (unsigned int)portnum); UtAssert_True(OS_ObjectIdDefined(*client_sockid_ptr), "c1_socket_id (%lu) != UNDEFINED", OS_ObjectIdToInteger(*client_sockid_ptr)); /* * Create a server thread, and connect client from * this thread to server thread */ /* Create a server task/thread */ UtAssert_INT32_EQ(OS_TaskCreate(taskid_ptr, SrvName, SrvFunc, OSAL_TASK_STACK_ALLOCATE, OSAL_SIZE_C(16384), OSAL_PRIORITY_C(50), 0), OS_SUCCESS); /* Connect to a server */ UtAssert_INT32_EQ(OS_SocketConnect(*client_sockid_ptr, &addr, UT_TIMEOUT), OS_SUCCESS); } void Delete_SocketPair(osal_id_t server_sockid, osal_id_t client_sockid, osal_id_t task_id) { OS_task_prop_t task_prop; while (OS_TaskGetInfo(task_id, &task_prop) == OS_SUCCESS) { OS_TaskDelay(10); } UtAssert_INT32_EQ(OS_close(server_sockid), OS_SUCCESS); UtAssert_INT32_EQ(OS_close(client_sockid), OS_SUCCESS); } bool FillOutputBuffer(osal_id_t conn_id) { uint32 count; UtMemFill(filldata, sizeof(filldata)); for (count = 0; count < MAX_BUFFER_LOOP; ++count) { if (OS_TimedWrite(conn_id, filldata, sizeof(filldata), UT_TIMEOUT) == OS_ERROR_TIMEOUT) { break; } } return (count < MAX_BUFFER_LOOP); } void Server_Fn(void) { osal_id_t connsock_id; OS_SockAddr_t addr; /* Accept incoming connections */ UtAssert_INT32_EQ(OS_SocketAccept(s1_socket_id, &connsock_id, &addr, OS_PEND), OS_SUCCESS); UtAssert_INT32_EQ(OS_BinSemTake(bin_sem_id), OS_SUCCESS); UtAssert_INT32_EQ(OS_close(connsock_id), OS_SUCCESS); } /* end Server_Fn */ void Server_Fn2(void) { osal_id_t connsock_id; OS_SockAddr_t addr; /* Accept incoming connections */ UtAssert_INT32_EQ(OS_SocketAccept(s2_socket_id, &connsock_id, &addr, OS_PEND), OS_SUCCESS); UtAssert_INT32_EQ(OS_close(connsock_id), OS_SUCCESS); } /* end Server_Fn */ void Setup_Single(void) { BinSemSetup(); Setup_SocketPair(&s1_socket_id, &c1_socket_id, 9994, &s1_task_id, "Server1", Server_Fn); } void Setup_Multi(void) { Setup_Single(); if (networkImplemented) { Setup_SocketPair(&s2_socket_id, &c2_socket_id, 9995, &s2_task_id, "Server2", Server_Fn2); } } void Teardown_Single(void) { if (networkImplemented) { Delete_SocketPair(s1_socket_id, c1_socket_id, s1_task_id); } 
UtAssert_INT32_EQ(OS_BinSemDelete(bin_sem_id), OS_SUCCESS); } void Teardown_Multi(void) { if (networkImplemented) { Delete_SocketPair(s2_socket_id, c2_socket_id, s2_task_id); } Teardown_Single(); } void TestSelectSingleRead(void) { uint32 StateFlags; if (!networkImplemented) { UtAssert_NA("Network API not implemented"); return; } StateFlags = OS_STREAM_STATE_READABLE; UtAssert_INT32_EQ(OS_SelectSingle(c1_socket_id, &StateFlags, UT_TIMEOUT), OS_ERROR_TIMEOUT); /* Verify Outputs */ UtAssert_True(StateFlags == 0, "StateFlags after OS_SelectSingle (0x%x) == None", (unsigned int)StateFlags); UtAssert_INT32_EQ(OS_BinSemGive(bin_sem_id), OS_SUCCESS); OS_TaskDelay(10); /* Give server time to run and close the socket */ StateFlags = OS_STREAM_STATE_READABLE; UtAssert_INT32_EQ(OS_SelectSingle(c1_socket_id, &StateFlags, UT_TIMEOUT), OS_SUCCESS); /* Verify Outputs */ UtAssert_True(StateFlags == OS_STREAM_STATE_READABLE, "StateFlags after OS_SelectSingle() (0x%x) == OS_STREAM_STATE_READABLE", (unsigned int)StateFlags); } void TestSelectMultipleRead(void) { /* * Test Case For: * int32 OS_SelectMultiple(OS_FdSet *ReadSet, OS_FdSet *WriteSet, int32 msecs); */ OS_FdSet ReadSet; if (!networkImplemented) { UtAssert_NA("Network API not implemented"); return; } OS_SelectFdZero(&ReadSet); OS_SelectFdAdd(&ReadSet, c1_socket_id); /* * Check for readability on socket 1 should time out, as server1 is waiting on Sem */ UtAssert_INT32_EQ(OS_SelectMultiple(&ReadSet, NULL, UT_TIMEOUT), OS_ERROR_TIMEOUT); /* * NOTE: NOT checking sets, because after OS_SelectMultiple fails, sets are not defined. * (because it timed out, by definition it means all sets are considered empty) */ OS_SelectFdZero(&ReadSet); OS_SelectFdAdd(&ReadSet, c1_socket_id); OS_SelectFdAdd(&ReadSet, c2_socket_id); UtAssert_INT32_EQ(OS_SelectMultiple(&ReadSet, NULL, UT_TIMEOUT), OS_SUCCESS); UtAssert_INT32_EQ(OS_SelectFdIsSet(&ReadSet, c1_socket_id), false); UtAssert_INT32_EQ(OS_SelectFdIsSet(&ReadSet, c2_socket_id), true); UtAssert_INT32_EQ(OS_BinSemGive(bin_sem_id), OS_SUCCESS); OS_TaskDelay(10); /* Give server time to run and close the socket */ OS_SelectFdZero(&ReadSet); OS_SelectFdAdd(&ReadSet, c1_socket_id); OS_SelectFdAdd(&ReadSet, c2_socket_id); UtAssert_INT32_EQ(OS_SelectMultiple(&ReadSet, NULL, UT_TIMEOUT), OS_SUCCESS); UtAssert_INT32_EQ(OS_SelectFdIsSet(&ReadSet, c1_socket_id), true); UtAssert_INT32_EQ(OS_SelectFdIsSet(&ReadSet, c2_socket_id), true); } void TestSelectSingleWrite(void) { /* * Test Case For: * int32 OS_SelectSingle(uint32 objid, uint32 *StateFlags, int32 msecs); */ uint32 StateFlags; if (!networkImplemented) { UtAssert_NA("Network API not implemented"); return; } /* * In order to get the "write" to block, data must be written to the socket * until the OS buffer fills. Note the server function is waiting on a sem, * and not actually reading this data, so writes here will accumulate. 
*/ StateFlags = OS_STREAM_STATE_WRITABLE; UtAssert_INT32_EQ(OS_SelectSingle(c1_socket_id, &StateFlags, UT_TIMEOUT), OS_SUCCESS); UtAssert_True(StateFlags == OS_STREAM_STATE_WRITABLE, "StateFlags after OS_SelectSingle() (0x%x) == OS_STREAM_STATE_WRITABLE", (unsigned int)StateFlags); if (!FillOutputBuffer(c1_socket_id)) { UtAssertEx(false, UTASSERT_CASETYPE_MIR, __FILE__, __LINE__, "%s", "Unable to fill buffer with large looped writes, skipping verification"); } else { UtAssert_INT32_EQ(OS_SelectSingle(c1_socket_id, &StateFlags, UT_TIMEOUT), OS_ERROR_TIMEOUT); /* Verify Outputs */ UtAssert_True(StateFlags == 0, "StateFlags after OS_SelectSingle() (0x%x) == None", (unsigned int)StateFlags); } /* * Giving the sem should cause the server to close the socket, * which will discard all written data. The OS should then consider * it writable again, due to EOF condition. */ UtAssert_INT32_EQ(OS_BinSemGive(bin_sem_id), OS_SUCCESS); OS_TaskDelay(10); /* Give server time to run and close the socket */ UtAssert_INT32_EQ(OS_SelectSingle(c1_socket_id, &StateFlags, UT_TIMEOUT), OS_SUCCESS); /* Verify Outputs */ UtAssert_True(StateFlags == 0, "StateFlags after OS_SelectSingle() (0x%x) == OS_STREAM_STATE_WRITABLE", (unsigned int)StateFlags); } void TestSelectMultipleWrite(void) { /* * Test Case For: * int32 OS_SelectSingle(uint32 objid, uint32 *StateFlags, int32 msecs); */ OS_FdSet WriteSet; if (!networkImplemented) { return; } /* * Create a server thread, and connect client from * this thread to server thread and verify connection */ OS_SelectFdZero(&WriteSet); OS_SelectFdAdd(&WriteSet, c1_socket_id); OS_SelectFdAdd(&WriteSet, c2_socket_id); UtAssert_INT32_EQ(OS_SelectMultiple(NULL, &WriteSet, UT_TIMEOUT), OS_SUCCESS); /* * Both sockets should initially be writable */ UtAssert_INT32_EQ(OS_SelectFdIsSet(&WriteSet, c1_socket_id), true); UtAssert_INT32_EQ(OS_SelectFdIsSet(&WriteSet, c2_socket_id), true); if (!FillOutputBuffer(c1_socket_id)) { UtAssertEx(false, UTASSERT_CASETYPE_MIR, __FILE__, __LINE__, "%s", "Unable to fill buffer with large looped writes, skipping verification"); } else { /* only add the first socket, to get a timeout */ OS_SelectFdZero(&WriteSet); OS_SelectFdAdd(&WriteSet, c1_socket_id); UtAssert_INT32_EQ(OS_SelectMultiple(NULL, &WriteSet, UT_TIMEOUT), OS_ERROR_TIMEOUT); /* * NOTE: NOT checking sets, because after OS_SelectMultiple fails, sets are not defined. * (because it timed out, by definition it means all sets are considered empty) */ } /* * Giving the sem should cause the server to close the socket, * which will discard all written data. The OS should then consider * it writable again, due to EOF condition. 
*/ UtAssert_INT32_EQ(OS_BinSemGive(bin_sem_id), OS_SUCCESS); OS_TaskDelay(10); /* Give server time to run and close the socket */ OS_SelectFdZero(&WriteSet); OS_SelectFdAdd(&WriteSet, c1_socket_id); OS_SelectFdAdd(&WriteSet, c2_socket_id); UtAssert_INT32_EQ(OS_SelectMultiple(NULL, &WriteSet, UT_TIMEOUT), OS_SUCCESS); UtAssert_INT32_EQ(OS_SelectFdIsSet(&WriteSet, c1_socket_id), true); UtAssert_INT32_EQ(OS_SelectFdIsSet(&WriteSet, c2_socket_id), true); } void UtTest_Setup(void) { if (OS_API_Init() != OS_SUCCESS) { UtAssert_Abort("OS_API_Init() failed"); } /* the test should call OS_API_Teardown() before exiting */ UtTest_AddTeardown(OS_API_Teardown, "Cleanup"); /* * Register the test setup and check routines in UT assert */ UtTest_Add(TestSelectSingleRead, Setup_Single, Teardown_Single, "TestSelectSingleRead"); UtTest_Add(TestSelectMultipleRead, Setup_Multi, Teardown_Multi, "TestSelectMultipleRead"); UtTest_Add(TestSelectSingleWrite, Setup_Single, Teardown_Single, "TestSelectSingleWrite"); UtTest_Add(TestSelectMultipleWrite, Setup_Multi, Teardown_Multi, "TestSelectMultipleWrite"); }
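/*
 * Illustrative sketch only: the write tests above depend on a FillOutputBuffer()
 * helper that is defined earlier in this test file and therefore not shown here.
 * A minimal version could look like the following; it assumes OS_TimedWrite() and
 * a bounded retry loop, and the chunk size / loop limit below are arbitrary
 * illustration values, not necessarily those used by the real helper.
 */
#define EXAMPLE_FILL_CHUNK     1024
#define EXAMPLE_FILL_MAX_LOOPS 1000

static bool Example_FillOutputBuffer(osal_id_t conn_id)
{
    static char fill_data[EXAMPLE_FILL_CHUNK];
    uint32      count;

    memset(fill_data, 0xAA, sizeof(fill_data));

    /* Keep writing until the OS-side buffer fills up and the write times out */
    for (count = 0; count < EXAMPLE_FILL_MAX_LOOPS; ++count)
    {
        if (OS_TimedWrite(conn_id, fill_data, sizeof(fill_data), UT_TIMEOUT) == OS_ERROR_TIMEOUT)
        {
            /* Buffer is full; a subsequent select-for-write should now time out */
            return true;
        }
    }

    /* Could not fill the buffer within the loop limit */
    return false;
}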
854131.c
#include "config.h" #include "AL/alc.h" #include "AL/al.h" #include "AL/alext.h" #include "alMain.h" #include "alError.h" #include "ringbuffer.h" static int EventThread(void *arg) { ALCcontext *context = arg; /* Clear all pending posts on the semaphore. */ while(alsem_trywait(&context->EventSem) == althrd_success) { } while(1) { ALbitfieldSOFT enabledevts; AsyncEvent evt; if(ll_ringbuffer_read(context->AsyncEvents, (char*)&evt, 1) == 0) { alsem_wait(&context->EventSem); continue; } if(!evt.EnumType) break; almtx_lock(&context->EventCbLock); enabledevts = ATOMIC_LOAD(&context->EnabledEvts, almemory_order_acquire); if(context->EventCb && (enabledevts&evt.EnumType) == evt.EnumType) context->EventCb(evt.Type, evt.ObjectId, evt.Param, (ALsizei)strlen(evt.Message), evt.Message, context->EventParam); almtx_unlock(&context->EventCbLock); } return 0; } AL_API void AL_APIENTRY alEventControlSOFT(ALsizei count, const ALenum *types, ALboolean enable) { ALCcontext *context; ALbitfieldSOFT enabledevts; ALbitfieldSOFT flags = 0; bool isrunning; ALsizei i; context = GetContextRef(); if(!context) return; if(count < 0) SETERR_GOTO(context, AL_INVALID_VALUE, done, "Controlling %d events", count); if(count == 0) goto done; if(!types) SETERR_GOTO(context, AL_INVALID_VALUE, done, "NULL pointer"); for(i = 0;i < count;i++) { if(types[i] == AL_EVENT_TYPE_BUFFER_COMPLETED_SOFT) flags |= EventType_BufferCompleted; else if(types[i] == AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT) flags |= EventType_SourceStateChange; else if(types[i] == AL_EVENT_TYPE_ERROR_SOFT) flags |= EventType_Error; else if(types[i] == AL_EVENT_TYPE_PERFORMANCE_SOFT) flags |= EventType_Performance; else if(types[i] == AL_EVENT_TYPE_DEPRECATED_SOFT) flags |= EventType_Deprecated; else if(types[i] == AL_EVENT_TYPE_DISCONNECTED_SOFT) flags |= EventType_Disconnected; else SETERR_GOTO(context, AL_INVALID_ENUM, done, "Invalid event type 0x%04x", types[i]); } almtx_lock(&context->EventThrdLock); if(enable) { if(!context->AsyncEvents) context->AsyncEvents = ll_ringbuffer_create(63, sizeof(AsyncEvent), false); enabledevts = ATOMIC_LOAD(&context->EnabledEvts, almemory_order_relaxed); isrunning = !!enabledevts; while(ATOMIC_COMPARE_EXCHANGE_WEAK(&context->EnabledEvts, &enabledevts, enabledevts|flags, almemory_order_acq_rel, almemory_order_acquire) == 0) { /* enabledevts is (re-)filled with the current value on failure, so * just try again. */ } if(!isrunning && flags) althrd_create(&context->EventThread, EventThread, context); } else { enabledevts = ATOMIC_LOAD(&context->EnabledEvts, almemory_order_relaxed); isrunning = !!enabledevts; while(ATOMIC_COMPARE_EXCHANGE_WEAK(&context->EnabledEvts, &enabledevts, enabledevts&~flags, almemory_order_acq_rel, almemory_order_acquire) == 0) { } if(isrunning && !(enabledevts&~flags)) { static const AsyncEvent kill_evt = { 0 }; while(ll_ringbuffer_write(context->AsyncEvents, (const char*)&kill_evt, 1) == 0) althrd_yield(); alsem_post(&context->EventSem); althrd_join(context->EventThread, NULL); } else { /* Wait to ensure the event handler sees the changed flags before * returning. 
*/ almtx_lock(&context->EventCbLock); almtx_unlock(&context->EventCbLock); } } almtx_unlock(&context->EventThrdLock); done: ALCcontext_DecRef(context); } AL_API void AL_APIENTRY alEventCallbackSOFT(ALEVENTPROCSOFT callback, void *userParam) { ALCcontext *context; context = GetContextRef(); if(!context) return; almtx_lock(&context->PropLock); almtx_lock(&context->EventCbLock); context->EventCb = callback; context->EventParam = userParam; almtx_unlock(&context->EventCbLock); almtx_unlock(&context->PropLock); ALCcontext_DecRef(context); }
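/*
 * Illustrative usage sketch (not part of this file): how an application might
 * drive the AL_SOFT_events entry points implemented above. A real client should
 * verify alIsExtensionPresent("AL_SOFT_events") and obtain the functions via
 * alGetProcAddress(); they are called directly here for brevity, and the callback
 * simply prints the event message. The callback parameter order matches the
 * EventCb invocation in EventThread() above.
 */
#include <stdio.h>

static void AL_APIENTRY example_event_cb(ALenum eventType, ALuint object, ALuint param,
                                         ALsizei length, const ALchar *message,
                                         void *userParam)
{
    (void)object; (void)param; (void)userParam;
    /* The callback may run on the library's event thread; keep the work minimal. */
    printf("AL event 0x%04x: %.*s\n", (unsigned int)eventType, (int)length, message);
}

static void example_enable_events(void)
{
    const ALenum types[2] = {
        AL_EVENT_TYPE_SOURCE_STATE_CHANGED_SOFT,
        AL_EVENT_TYPE_DISCONNECTED_SOFT
    };

    /* Register the callback first, then enable the desired event types */
    alEventCallbackSOFT(example_event_cb, NULL);
    alEventControlSOFT(2, types, AL_TRUE);
}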
520387.c
/* * Copyright (c) 2009-2020, Salvatore Sanfilippo <antirez at gmail dot com> * Copyright (c) 2020, Redis Labs, Inc * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "server.h" #include "util.h" #include "sha1.h" /* SHA1 is used for DEBUG DIGEST */ #include "crc64.h" #include "bio.h" #include "quicklist.h" #include <arpa/inet.h> #include <signal.h> #include <dlfcn.h> #include <fcntl.h> #include <sys/mman.h> #include <unistd.h> #ifdef HAVE_BACKTRACE #include <execinfo.h> #ifndef __OpenBSD__ #include <ucontext.h> #else typedef ucontext_t sigcontext_t; #endif #endif /* HAVE_BACKTRACE */ #ifdef __CYGWIN__ #ifndef SA_ONSTACK #define SA_ONSTACK 0x08000000 #endif #endif #if defined(__APPLE__) && defined(__arm64__) #include <mach/mach.h> #endif /* Globals */ static int bug_report_start = 0; /* True if bug report header was already logged. */ static pthread_mutex_t bug_report_start_mutex = PTHREAD_MUTEX_INITIALIZER; /* Forward declarations */ void bugReportStart(void); void printCrashReport(void); void bugReportEnd(int killViaSignal, int sig); void logStackTrace(void *eip, int uplevel); /* ================================= Debugging ============================== */ /* Compute the sha1 of string at 's' with 'len' bytes long. * The SHA1 is then xored against the string pointed by digest. * Since xor is commutative, this operation is used in order to * "add" digests relative to unordered elements. * * So digest(a,b,c,d) will be the same of digest(b,a,c,d) */ void xorDigest(unsigned char *digest, void *ptr, size_t len) { SHA1_CTX ctx; unsigned char hash[20], *s = ptr; int j; SHA1Init(&ctx); SHA1Update(&ctx,s,len); SHA1Final(hash,&ctx); for (j = 0; j < 20; j++) digest[j] ^= hash[j]; } void xorStringObjectDigest(unsigned char *digest, robj *o) { o = getDecodedObject(o); xorDigest(digest,o->ptr,sdslen(o->ptr)); decrRefCount(o); } /* This function instead of just computing the SHA1 and xoring it * against digest, also perform the digest of "digest" itself and * replace the old value with the new one. 
* * So the final digest will be: * * digest = SHA1(digest xor SHA1(data)) * * This function is used every time we want to preserve the order so * that digest(a,b,c,d) will be different than digest(b,c,d,a) * * Also note that mixdigest("foo") followed by mixdigest("bar") * will lead to a different digest compared to "fo", "obar". */ void mixDigest(unsigned char *digest, void *ptr, size_t len) { SHA1_CTX ctx; char *s = ptr; xorDigest(digest,s,len); SHA1Init(&ctx); SHA1Update(&ctx,digest,20); SHA1Final(digest,&ctx); } void mixStringObjectDigest(unsigned char *digest, robj *o) { o = getDecodedObject(o); mixDigest(digest,o->ptr,sdslen(o->ptr)); decrRefCount(o); } /* This function computes the digest of a data structure stored in the * object 'o'. It is the core of the DEBUG DIGEST command: when taking the * digest of a whole dataset, we take the digest of the key and the value * pair, and xor all those together. * * Note that this function does not reset the initial 'digest' passed, it * will continue mixing this object digest to anything that was already * present. */ void xorObjectDigest(redisDb *db, robj *keyobj, unsigned char *digest, robj *o) { uint32_t aux = htonl(o->type); mixDigest(digest,&aux,sizeof(aux)); long long expiretime = getExpire(db,keyobj); char buf[128]; /* Save the key and associated value */ if (o->type == OBJ_STRING) { mixStringObjectDigest(digest,o); } else if (o->type == OBJ_LIST) { listTypeIterator *li = listTypeInitIterator(o,0,LIST_TAIL); listTypeEntry entry; while(listTypeNext(li,&entry)) { robj *eleobj = listTypeGet(&entry); mixStringObjectDigest(digest,eleobj); decrRefCount(eleobj); } listTypeReleaseIterator(li); } else if (o->type == OBJ_SET) { setTypeIterator *si = setTypeInitIterator(o); sds sdsele; while((sdsele = setTypeNextObject(si)) != NULL) { xorDigest(digest,sdsele,sdslen(sdsele)); sdsfree(sdsele); } setTypeReleaseIterator(si); } else if (o->type == OBJ_ZSET) { unsigned char eledigest[20]; if (o->encoding == OBJ_ENCODING_LISTPACK) { unsigned char *zl = o->ptr; unsigned char *eptr, *sptr; unsigned char *vstr; unsigned int vlen; long long vll; double score; eptr = lpSeek(zl,0); serverAssert(eptr != NULL); sptr = lpNext(zl,eptr); serverAssert(sptr != NULL); while (eptr != NULL) { vstr = lpGetValue(eptr,&vlen,&vll); score = zzlGetScore(sptr); memset(eledigest,0,20); if (vstr != NULL) { mixDigest(eledigest,vstr,vlen); } else { ll2string(buf,sizeof(buf),vll); mixDigest(eledigest,buf,strlen(buf)); } snprintf(buf,sizeof(buf),"%.17g",score); mixDigest(eledigest,buf,strlen(buf)); xorDigest(digest,eledigest,20); zzlNext(zl,&eptr,&sptr); } } else if (o->encoding == OBJ_ENCODING_SKIPLIST) { zset *zs = o->ptr; dictIterator *di = dictGetIterator(zs->dict); dictEntry *de; while((de = dictNext(di)) != NULL) { sds sdsele = dictGetKey(de); double *score = dictGetVal(de); snprintf(buf,sizeof(buf),"%.17g",*score); memset(eledigest,0,20); mixDigest(eledigest,sdsele,sdslen(sdsele)); mixDigest(eledigest,buf,strlen(buf)); xorDigest(digest,eledigest,20); } dictReleaseIterator(di); } else { serverPanic("Unknown sorted set encoding"); } } else if (o->type == OBJ_HASH) { hashTypeIterator *hi = hashTypeInitIterator(o); while (hashTypeNext(hi) != C_ERR) { unsigned char eledigest[20]; sds sdsele; memset(eledigest,0,20); sdsele = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_KEY); mixDigest(eledigest,sdsele,sdslen(sdsele)); sdsfree(sdsele); sdsele = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_VALUE); mixDigest(eledigest,sdsele,sdslen(sdsele)); sdsfree(sdsele); xorDigest(digest,eledigest,20); } 
hashTypeReleaseIterator(hi); } else if (o->type == OBJ_STREAM) { streamIterator si; streamIteratorStart(&si,o->ptr,NULL,NULL,0); streamID id; int64_t numfields; while(streamIteratorGetID(&si,&id,&numfields)) { sds itemid = sdscatfmt(sdsempty(),"%U.%U",id.ms,id.seq); mixDigest(digest,itemid,sdslen(itemid)); sdsfree(itemid); while(numfields--) { unsigned char *field, *value; int64_t field_len, value_len; streamIteratorGetField(&si,&field,&value, &field_len,&value_len); mixDigest(digest,field,field_len); mixDigest(digest,value,value_len); } } streamIteratorStop(&si); } else if (o->type == OBJ_MODULE) { RedisModuleDigest md = {{0},{0},keyobj,db->id}; moduleValue *mv = o->ptr; moduleType *mt = mv->type; moduleInitDigestContext(md); if (mt->digest) { mt->digest(&md,mv->value); xorDigest(digest,md.x,sizeof(md.x)); } } else { serverPanic("Unknown object type"); } /* If the key has an expire, add it to the mix */ if (expiretime != -1) xorDigest(digest,"!!expire!!",10); } /* Compute the dataset digest. Since keys, sets elements, hashes elements * are not ordered, we use a trick: every aggregate digest is the xor * of the digests of their elements. This way the order will not change * the result. For list instead we use a feedback entering the output digest * as input in order to ensure that a different ordered list will result in * a different digest. */ void computeDatasetDigest(unsigned char *final) { unsigned char digest[20]; dictIterator *di = NULL; dictEntry *de; int j; uint32_t aux; memset(final,0,20); /* Start with a clean result */ for (j = 0; j < server.dbnum; j++) { redisDb *db = server.db+j; if (dictSize(db->dict) == 0) continue; di = dictGetSafeIterator(db->dict); /* hash the DB id, so the same dataset moved in a different * DB will lead to a different digest */ aux = htonl(j); mixDigest(final,&aux,sizeof(aux)); /* Iterate this DB writing every entry */ while((de = dictNext(di)) != NULL) { sds key; robj *keyobj, *o; memset(digest,0,20); /* This key-val digest */ key = dictGetKey(de); keyobj = createStringObject(key,sdslen(key)); mixDigest(digest,key,sdslen(key)); o = dictGetVal(de); xorObjectDigest(db,keyobj,digest,o); /* We can finally xor the key-val digest to the final digest */ xorDigest(final,digest,20); decrRefCount(keyobj); } dictReleaseIterator(di); } } #ifdef USE_JEMALLOC void mallctl_int(client *c, robj **argv, int argc) { int ret; /* start with the biggest size (int64), and if that fails, try smaller sizes (int32, bool) */ int64_t old = 0, val; if (argc > 1) { long long ll; if (getLongLongFromObjectOrReply(c, argv[1], &ll, NULL) != C_OK) return; val = ll; } size_t sz = sizeof(old); while (sz > 0) { if ((ret=je_mallctl(argv[0]->ptr, &old, &sz, argc > 1? &val: NULL, argc > 1?sz: 0))) { if (ret == EPERM && argc > 1) { /* if this option is write only, try just writing to it. */ if (!(ret=je_mallctl(argv[0]->ptr, NULL, 0, &val, sz))) { addReply(c, shared.ok); return; } } if (ret==EINVAL) { /* size might be wrong, try a smaller one */ sz /= 2; #if BYTE_ORDER == BIG_ENDIAN val <<= 8*sz; #endif continue; } addReplyErrorFormat(c,"%s", strerror(ret)); return; } else { #if BYTE_ORDER == BIG_ENDIAN old >>= 64 - 8*sz; #endif addReplyLongLong(c, old); return; } } addReplyErrorFormat(c,"%s", strerror(EINVAL)); } void mallctl_string(client *c, robj **argv, int argc) { int rret, wret; char *old; size_t sz = sizeof(old); /* for strings, it seems we need to first get the old value, before overriding it. 
*/ if ((rret=je_mallctl(argv[0]->ptr, &old, &sz, NULL, 0))) { /* return error unless this option is write only. */ if (!(rret == EPERM && argc > 1)) { addReplyErrorFormat(c,"%s", strerror(rret)); return; } } if(argc > 1) { char *val = argv[1]->ptr; char **valref = &val; if ((!strcmp(val,"VOID"))) valref = NULL, sz = 0; wret = je_mallctl(argv[0]->ptr, NULL, 0, valref, sz); } if (!rret) addReplyBulkCString(c, old); else if (wret) addReplyErrorFormat(c,"%s", strerror(wret)); else addReply(c, shared.ok); } #endif void debugCommand(client *c) { if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) { const char *help[] = { "AOF-FLUSH-SLEEP <microsec>", " Server will sleep before flushing the AOF, this is used for testing.", "ASSERT", " Crash by assertion failed.", "CHANGE-REPL-ID", " Change the replication IDs of the instance.", " Dangerous: should be used only for testing the replication subsystem.", "CONFIG-REWRITE-FORCE-ALL", " Like CONFIG REWRITE but writes all configuration options, including", " keywords not listed in original configuration file or default values.", "CRASH-AND-RECOVER [<milliseconds>]", " Hard crash and restart after a <milliseconds> delay (default 0).", "DIGEST", " Output a hex signature representing the current DB content.", "DIGEST-VALUE <key> [<key> ...]", " Output a hex signature of the values of all the specified keys.", "ERROR <string>", " Return a Redis protocol error with <string> as message. Useful for clients", " unit tests to simulate Redis errors.", "LEAK <string>", " Create a memory leak of the input string.", "LOG <message>", " Write <message> to the server log.", "HTSTATS <dbid>", " Return hash table statistics of the specified Redis database.", "HTSTATS-KEY <key>", " Like HTSTATS but for the hash table stored at <key>'s value.", "LOADAOF", " Flush the AOF buffers on disk and reload the AOF in memory.", #ifdef USE_JEMALLOC "MALLCTL <key> [<val>]", " Get or set a malloc tuning integer.", "MALLCTL-STR <key> [<val>]", " Get or set a malloc tuning string.", #endif "OBJECT <key>", " Show low level info about `key` and associated value.", "OOM", " Crash the server simulating an out-of-memory error.", "PANIC", " Crash the server simulating a panic.", "POPULATE <count> [<prefix>] [<size>]", " Create <count> string keys named key:<num>. If <prefix> is specified then", " it is used instead of the 'key' prefix. These are not propagated to", " replicas. Cluster slots are not respected so keys not belonging to the", " current node can be created in cluster mode.", "PROTOCOL <type>", " Reply with a test value of the specified type. <type> can be: string,", " integer, double, bignum, null, array, set, map, attrib, push, verbatim,", " true, false.", "RELOAD [option ...]", " Save the RDB on disk and reload it back to memory. 
Valid <option> values:", " * MERGE: conflicting keys will be loaded from RDB.", " * NOFLUSH: the existing database will not be removed before load, but", " conflicting keys will generate an exception and kill the server.", " * NOSAVE: the database will be loaded from an existing RDB file.", " Examples:", " * DEBUG RELOAD: verify that the server is able to persist, flush and reload", " the database.", " * DEBUG RELOAD NOSAVE: replace the current database with the contents of an", " existing RDB file.", " * DEBUG RELOAD NOSAVE NOFLUSH MERGE: add the contents of an existing RDB", " file to the database.", "RESTART [<milliseconds>]", " Graceful restart: save config, db, restart after a <milliseconds> delay (default 0).", "SDSLEN <key>", " Show low level SDS string info representing `key` and value.", "SEGFAULT", " Crash the server with sigsegv.", "SET-ACTIVE-EXPIRE <0|1>", " Setting it to 0 disables expiring keys in background when they are not", " accessed (otherwise the Redis behavior). Setting it to 1 reenables back the", " default.", "QUICKLIST-PACKED-THRESHOLD <size>", " Sets the threshold for elements to be inserted as plain vs packed nodes", " Default value is 1GB, allows values up to 4GB", "SET-SKIP-CHECKSUM-VALIDATION <0|1>", " Enables or disables checksum checks for RDB files and RESTORE's payload.", "SLEEP <seconds>", " Stop the server for <seconds>. Decimals allowed.", "STRINGMATCH-TEST", " Run a fuzz tester against the stringmatchlen() function.", "STRUCTSIZE", " Return the size of different Redis core C structures.", "LISTPACK <key>", " Show low level info about the listpack encoding of <key>.", "QUICKLIST <key> [<0|1>]", " Show low level info about the quicklist encoding of <key>.", " The optional argument (0 by default) sets the level of detail", "CLIENT-EVICTION", " Show low level client eviction pools info (maxmemory-clients).", "PAUSE-CRON <0|1>", " Stop periodic cron job processing.", NULL }; addReplyHelp(c, help); } else if (!strcasecmp(c->argv[1]->ptr,"segfault")) { /* Compiler gives warnings about writing to a random address * e.g "*((char*)-1) = 'x';". As a workaround, we map a read-only area * and try to write there to trigger segmentation fault. */ char* p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANON, -1, 0); *p = 'x'; } else if (!strcasecmp(c->argv[1]->ptr,"panic")) { serverPanic("DEBUG PANIC called at Unix time %lld", (long long)time(NULL)); } else if (!strcasecmp(c->argv[1]->ptr,"restart") || !strcasecmp(c->argv[1]->ptr,"crash-and-recover")) { long long delay = 0; if (c->argc >= 3) { if (getLongLongFromObjectOrReply(c, c->argv[2], &delay, NULL) != C_OK) return; if (delay < 0) delay = 0; } int flags = !strcasecmp(c->argv[1]->ptr,"restart") ? (RESTART_SERVER_GRACEFULLY|RESTART_SERVER_CONFIG_REWRITE) : RESTART_SERVER_NONE; restartServer(flags,delay); addReplyError(c,"failed to restart the server. Check server logs."); } else if (!strcasecmp(c->argv[1]->ptr,"oom")) { void *ptr = zmalloc(ULONG_MAX); /* Should trigger an out of memory. 
*/ zfree(ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"assert")) { serverAssertWithInfo(c,c->argv[0],1 == 2); } else if (!strcasecmp(c->argv[1]->ptr,"log") && c->argc == 3) { serverLog(LL_WARNING, "DEBUG LOG: %s", (char*)c->argv[2]->ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"leak") && c->argc == 3) { sdsdup(c->argv[2]->ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"reload")) { int flush = 1, save = 1; int flags = RDBFLAGS_NONE; /* Parse the additional options that modify the RELOAD * behavior. */ for (int j = 2; j < c->argc; j++) { char *opt = c->argv[j]->ptr; if (!strcasecmp(opt,"MERGE")) { flags |= RDBFLAGS_ALLOW_DUP; } else if (!strcasecmp(opt,"NOFLUSH")) { flush = 0; } else if (!strcasecmp(opt,"NOSAVE")) { save = 0; } else { addReplyError(c,"DEBUG RELOAD only supports the " "MERGE, NOFLUSH and NOSAVE options."); return; } } /* The default behavior is to save the RDB file before loading * it back. */ if (save) { rdbSaveInfo rsi, *rsiptr; rsiptr = rdbPopulateSaveInfo(&rsi); if (rdbSave(server.rdb_filename,rsiptr) != C_OK) { addReplyErrorObject(c,shared.err); return; } } /* The default behavior is to remove the current dataset from * memory before loading the RDB file, however when MERGE is * used together with NOFLUSH, we are able to merge two datasets. */ if (flush) emptyData(-1,EMPTYDB_NO_FLAGS,NULL); protectClient(c); int ret = rdbLoad(server.rdb_filename,NULL,flags); unprotectClient(c); if (ret != C_OK) { addReplyError(c,"Error trying to load the RDB dump"); return; } serverLog(LL_WARNING,"DB reloaded by DEBUG RELOAD"); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"loadaof")) { if (server.aof_state != AOF_OFF) flushAppendOnlyFile(1); emptyData(-1,EMPTYDB_NO_FLAGS,NULL); protectClient(c); int ret = loadAppendOnlyFile(server.aof_filename); if (ret != AOF_OK && ret != AOF_EMPTY) exit(1); unprotectClient(c); server.dirty = 0; /* Prevent AOF / replication */ serverLog(LL_WARNING,"Append Only File loaded by DEBUG LOADAOF"); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"object") && c->argc == 3) { dictEntry *de; robj *val; char *strenc; if ((de = dictFind(c->db->dict,c->argv[2]->ptr)) == NULL) { addReplyErrorObject(c,shared.nokeyerr); return; } val = dictGetVal(de); strenc = strEncoding(val->encoding); char extra[138] = {0}; if (val->encoding == OBJ_ENCODING_QUICKLIST) { char *nextra = extra; int remaining = sizeof(extra); quicklist *ql = val->ptr; /* Add number of quicklist nodes */ int used = snprintf(nextra, remaining, " ql_nodes:%lu", ql->len); nextra += used; remaining -= used; /* Add average quicklist fill factor */ double avg = (double)ql->count/ql->len; used = snprintf(nextra, remaining, " ql_avg_node:%.2f", avg); nextra += used; remaining -= used; /* Add quicklist fill level / max listpack size */ used = snprintf(nextra, remaining, " ql_listpack_max:%d", ql->fill); nextra += used; remaining -= used; /* Add isCompressed? 
*/ int compressed = ql->compress != 0; used = snprintf(nextra, remaining, " ql_compressed:%d", compressed); nextra += used; remaining -= used; /* Add total uncompressed size */ unsigned long sz = 0; for (quicklistNode *node = ql->head; node; node = node->next) { sz += node->sz; } used = snprintf(nextra, remaining, " ql_uncompressed_size:%lu", sz); nextra += used; remaining -= used; } addReplyStatusFormat(c, "Value at:%p refcount:%d " "encoding:%s serializedlength:%zu " "lru:%d lru_seconds_idle:%llu%s", (void*)val, val->refcount, strenc, rdbSavedObjectLen(val, c->argv[2], c->db->id), val->lru, estimateObjectIdleTime(val)/1000, extra); } else if (!strcasecmp(c->argv[1]->ptr,"sdslen") && c->argc == 3) { dictEntry *de; robj *val; sds key; if ((de = dictFind(c->db->dict,c->argv[2]->ptr)) == NULL) { addReplyErrorObject(c,shared.nokeyerr); return; } val = dictGetVal(de); key = dictGetKey(de); if (val->type != OBJ_STRING || !sdsEncodedObject(val)) { addReplyError(c,"Not an sds encoded string."); } else { addReplyStatusFormat(c, "key_sds_len:%lld, key_sds_avail:%lld, key_zmalloc: %lld, " "val_sds_len:%lld, val_sds_avail:%lld, val_zmalloc: %lld", (long long) sdslen(key), (long long) sdsavail(key), (long long) sdsZmallocSize(key), (long long) sdslen(val->ptr), (long long) sdsavail(val->ptr), (long long) getStringObjectSdsUsedMemory(val)); } } else if (!strcasecmp(c->argv[1]->ptr,"listpack") && c->argc == 3) { robj *o; if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.nokeyerr)) == NULL) return; if (o->encoding != OBJ_ENCODING_LISTPACK) { addReplyError(c,"Not a listpack encoded object."); } else { lpRepr(o->ptr); addReplyStatus(c,"Listpack structure printed on stdout"); } } else if (!strcasecmp(c->argv[1]->ptr,"quicklist") && (c->argc == 3 || c->argc == 4)) { robj *o; if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.nokeyerr)) == NULL) return; int full = 0; if (c->argc == 4) full = atoi(c->argv[3]->ptr); if (o->encoding != OBJ_ENCODING_QUICKLIST) { addReplyError(c,"Not a quicklist encoded object."); } else { quicklistRepr(o->ptr, full); addReplyStatus(c,"Quicklist structure printed on stdout"); } } else if (!strcasecmp(c->argv[1]->ptr,"populate") && c->argc >= 3 && c->argc <= 5) { long keys, j; robj *key, *val; char buf[128]; if (getPositiveLongFromObjectOrReply(c, c->argv[2], &keys, NULL) != C_OK) return; dictExpand(c->db->dict,keys); long valsize = 0; if ( c->argc == 5 && getPositiveLongFromObjectOrReply(c, c->argv[4], &valsize, NULL) != C_OK ) return; for (j = 0; j < keys; j++) { snprintf(buf,sizeof(buf),"%s:%lu", (c->argc == 3) ? "key" : (char*)c->argv[3]->ptr, j); key = createStringObject(buf,strlen(buf)); if (lookupKeyWrite(c->db,key) != NULL) { decrRefCount(key); continue; } snprintf(buf,sizeof(buf),"value:%lu",j); if (valsize==0) val = createStringObject(buf,strlen(buf)); else { int buflen = strlen(buf); val = createStringObject(NULL,valsize); memcpy(val->ptr, buf, valsize<=buflen? valsize: buflen); } dbAdd(c->db,key,val); signalModifiedKey(c,c->db,key); decrRefCount(key); } addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"digest") && c->argc == 2) { /* DEBUG DIGEST (form without keys specified) */ unsigned char digest[20]; sds d = sdsempty(); computeDatasetDigest(digest); for (int i = 0; i < 20; i++) d = sdscatprintf(d, "%02x",digest[i]); addReplyStatus(c,d); sdsfree(d); } else if (!strcasecmp(c->argv[1]->ptr,"digest-value") && c->argc >= 2) { /* DEBUG DIGEST-VALUE key key key ... key. 
*/ addReplyArrayLen(c,c->argc-2); for (int j = 2; j < c->argc; j++) { unsigned char digest[20]; memset(digest,0,20); /* Start with a clean result */ /* We don't use lookupKey because a debug command should * work on logically expired keys */ dictEntry *de; robj *o = ((de = dictFind(c->db->dict,c->argv[j]->ptr)) == NULL) ? NULL : dictGetVal(de); if (o) xorObjectDigest(c->db,c->argv[j],digest,o); sds d = sdsempty(); for (int i = 0; i < 20; i++) d = sdscatprintf(d, "%02x",digest[i]); addReplyStatus(c,d); sdsfree(d); } } else if (!strcasecmp(c->argv[1]->ptr,"protocol") && c->argc == 3) { /* DEBUG PROTOCOL [string|integer|double|bignum|null|array|set|map| * attrib|push|verbatim|true|false] */ char *name = c->argv[2]->ptr; if (!strcasecmp(name,"string")) { addReplyBulkCString(c,"Hello World"); } else if (!strcasecmp(name,"integer")) { addReplyLongLong(c,12345); } else if (!strcasecmp(name,"double")) { addReplyDouble(c,3.141); } else if (!strcasecmp(name,"bignum")) { addReplyBigNum(c,"1234567999999999999999999999999999999",37); } else if (!strcasecmp(name,"null")) { addReplyNull(c); } else if (!strcasecmp(name,"array")) { addReplyArrayLen(c,3); for (int j = 0; j < 3; j++) addReplyLongLong(c,j); } else if (!strcasecmp(name,"set")) { addReplySetLen(c,3); for (int j = 0; j < 3; j++) addReplyLongLong(c,j); } else if (!strcasecmp(name,"map")) { addReplyMapLen(c,3); for (int j = 0; j < 3; j++) { addReplyLongLong(c,j); addReplyBool(c, j == 1); } } else if (!strcasecmp(name,"attrib")) { if (c->resp >= 3) { addReplyAttributeLen(c,1); addReplyBulkCString(c,"key-popularity"); addReplyArrayLen(c,2); addReplyBulkCString(c,"key:123"); addReplyLongLong(c,90); } /* Attributes are not real replies, so a well formed reply should * also have a normal reply type after the attribute. */ addReplyBulkCString(c,"Some real reply following the attribute"); } else if (!strcasecmp(name,"push")) { addReplyPushLen(c,2); addReplyBulkCString(c,"server-cpu-usage"); addReplyLongLong(c,42); /* Push replies are not synchronous replies, so we emit also a * normal reply in order for blocking clients just discarding the * push reply, to actually consume the reply and continue. */ addReplyBulkCString(c,"Some real reply following the push reply"); } else if (!strcasecmp(name,"true")) { addReplyBool(c,1); } else if (!strcasecmp(name,"false")) { addReplyBool(c,0); } else if (!strcasecmp(name,"verbatim")) { addReplyVerbatim(c,"This is a verbatim\nstring",25,"txt"); } else { addReplyError(c,"Wrong protocol type name. 
Please use one of the following: string|integer|double|bignum|null|array|set|map|attrib|push|verbatim|true|false"); } } else if (!strcasecmp(c->argv[1]->ptr,"sleep") && c->argc == 3) { double dtime = strtod(c->argv[2]->ptr,NULL); long long utime = dtime*1000000; struct timespec tv; tv.tv_sec = utime / 1000000; tv.tv_nsec = (utime % 1000000) * 1000; nanosleep(&tv, NULL); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"set-active-expire") && c->argc == 3) { server.active_expire_enabled = atoi(c->argv[2]->ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"quicklist-packed-threshold") && c->argc == 3) { int memerr; unsigned long long sz = memtoull((const char *)c->argv[2]->ptr, &memerr); if (memerr || !quicklistisSetPackedThreshold(sz)) { addReplyError(c, "argument must be a memory value bigger then 1 and smaller than 4gb"); } else { addReply(c,shared.ok); } } else if (!strcasecmp(c->argv[1]->ptr,"set-skip-checksum-validation") && c->argc == 3) { server.skip_checksum_validation = atoi(c->argv[2]->ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"aof-flush-sleep") && c->argc == 3) { server.aof_flush_sleep = atoi(c->argv[2]->ptr); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"error") && c->argc == 3) { sds errstr = sdsnewlen("-",1); errstr = sdscatsds(errstr,c->argv[2]->ptr); errstr = sdsmapchars(errstr,"\n\r"," ",2); /* no newlines in errors. */ errstr = sdscatlen(errstr,"\r\n",2); addReplySds(c,errstr); } else if (!strcasecmp(c->argv[1]->ptr,"structsize") && c->argc == 2) { sds sizes = sdsempty(); sizes = sdscatprintf(sizes,"bits:%d ",(sizeof(void*) == 8)?64:32); sizes = sdscatprintf(sizes,"robj:%d ",(int)sizeof(robj)); sizes = sdscatprintf(sizes,"dictentry:%d ",(int)sizeof(dictEntry)); sizes = sdscatprintf(sizes,"sdshdr5:%d ",(int)sizeof(struct sdshdr5)); sizes = sdscatprintf(sizes,"sdshdr8:%d ",(int)sizeof(struct sdshdr8)); sizes = sdscatprintf(sizes,"sdshdr16:%d ",(int)sizeof(struct sdshdr16)); sizes = sdscatprintf(sizes,"sdshdr32:%d ",(int)sizeof(struct sdshdr32)); sizes = sdscatprintf(sizes,"sdshdr64:%d ",(int)sizeof(struct sdshdr64)); addReplyBulkSds(c,sizes); } else if (!strcasecmp(c->argv[1]->ptr,"htstats") && c->argc == 3) { long dbid; sds stats = sdsempty(); char buf[4096]; if (getLongFromObjectOrReply(c, c->argv[2], &dbid, NULL) != C_OK) { sdsfree(stats); return; } if (dbid < 0 || dbid >= server.dbnum) { sdsfree(stats); addReplyError(c,"Out of range database"); return; } stats = sdscatprintf(stats,"[Dictionary HT]\n"); dictGetStats(buf,sizeof(buf),server.db[dbid].dict); stats = sdscat(stats,buf); stats = sdscatprintf(stats,"[Expires HT]\n"); dictGetStats(buf,sizeof(buf),server.db[dbid].expires); stats = sdscat(stats,buf); addReplyVerbatim(c,stats,sdslen(stats),"txt"); sdsfree(stats); } else if (!strcasecmp(c->argv[1]->ptr,"htstats-key") && c->argc == 3) { robj *o; dict *ht = NULL; if ((o = objectCommandLookupOrReply(c,c->argv[2],shared.nokeyerr)) == NULL) return; /* Get the hash table reference from the object, if possible. 
*/ switch (o->encoding) { case OBJ_ENCODING_SKIPLIST: { zset *zs = o->ptr; ht = zs->dict; } break; case OBJ_ENCODING_HT: ht = o->ptr; break; } if (ht == NULL) { addReplyError(c,"The value stored at the specified key is not " "represented using an hash table"); } else { char buf[4096]; dictGetStats(buf,sizeof(buf),ht); addReplyVerbatim(c,buf,strlen(buf),"txt"); } } else if (!strcasecmp(c->argv[1]->ptr,"change-repl-id") && c->argc == 2) { serverLog(LL_WARNING,"Changing replication IDs after receiving DEBUG change-repl-id"); changeReplicationId(); clearReplicationId2(); addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"stringmatch-test") && c->argc == 2) { stringmatchlen_fuzz_test(); addReplyStatus(c,"Apparently Redis did not crash: test passed"); } else if (!strcasecmp(c->argv[1]->ptr,"set-disable-deny-scripts") && c->argc == 3) { server.script_disable_deny_script = atoi(c->argv[2]->ptr);; addReply(c,shared.ok); } else if (!strcasecmp(c->argv[1]->ptr,"config-rewrite-force-all") && c->argc == 2) { if (rewriteConfig(server.configfile, 1) == -1) addReplyError(c, "CONFIG-REWRITE-FORCE-ALL failed"); else addReply(c, shared.ok); } else if(!strcasecmp(c->argv[1]->ptr,"client-eviction") && c->argc == 2) { sds bucket_info = sdsempty(); for (int j = 0; j < CLIENT_MEM_USAGE_BUCKETS; j++) { if (j == 0) bucket_info = sdscatprintf(bucket_info, "bucket 0"); else bucket_info = sdscatprintf(bucket_info, "bucket %10zu", (size_t)1<<(j-1+CLIENT_MEM_USAGE_BUCKET_MIN_LOG)); if (j == CLIENT_MEM_USAGE_BUCKETS-1) bucket_info = sdscatprintf(bucket_info, "+ : "); else bucket_info = sdscatprintf(bucket_info, " - %10zu: ", ((size_t)1<<(j+CLIENT_MEM_USAGE_BUCKET_MIN_LOG))-1); bucket_info = sdscatprintf(bucket_info, "tot-mem: %10zu, clients: %lu\n", server.client_mem_usage_buckets[j].mem_usage_sum, server.client_mem_usage_buckets[j].clients->len); } addReplyVerbatim(c,bucket_info,sdslen(bucket_info),"txt"); sdsfree(bucket_info); #ifdef USE_JEMALLOC } else if(!strcasecmp(c->argv[1]->ptr,"mallctl") && c->argc >= 3) { mallctl_int(c, c->argv+2, c->argc-2); return; } else if(!strcasecmp(c->argv[1]->ptr,"mallctl-str") && c->argc >= 3) { mallctl_string(c, c->argv+2, c->argc-2); return; #endif } else if (!strcasecmp(c->argv[1]->ptr,"pause-cron") && c->argc == 3) { server.pause_cron = atoi(c->argv[2]->ptr); addReply(c,shared.ok); } else { addReplySubcommandSyntaxError(c); return; } } /* =========================== Crash handling ============================== */ void _serverAssert(const char *estr, const char *file, int line) { bugReportStart(); serverLog(LL_WARNING,"=== ASSERTION FAILED ==="); serverLog(LL_WARNING,"==> %s:%d '%s' is not true",file,line,estr); if (server.crashlog_enabled) { #ifdef HAVE_BACKTRACE logStackTrace(NULL, 1); #endif printCrashReport(); } // remove the signal handler so on abort() we will output the crash report. 
removeSignalHandlers(); bugReportEnd(0, 0); } void _serverAssertPrintClientInfo(const client *c) { int j; char conninfo[CONN_INFO_LEN]; bugReportStart(); serverLog(LL_WARNING,"=== ASSERTION FAILED CLIENT CONTEXT ==="); serverLog(LL_WARNING,"client->flags = %llu", (unsigned long long) c->flags); serverLog(LL_WARNING,"client->conn = %s", connGetInfo(c->conn, conninfo, sizeof(conninfo))); serverLog(LL_WARNING,"client->argc = %d", c->argc); for (j=0; j < c->argc; j++) { char buf[128]; char *arg; if (c->argv[j]->type == OBJ_STRING && sdsEncodedObject(c->argv[j])) { arg = (char*) c->argv[j]->ptr; } else { snprintf(buf,sizeof(buf),"Object type: %u, encoding: %u", c->argv[j]->type, c->argv[j]->encoding); arg = buf; } serverLog(LL_WARNING,"client->argv[%d] = \"%s\" (refcount: %d)", j, arg, c->argv[j]->refcount); } } void serverLogObjectDebugInfo(const robj *o) { serverLog(LL_WARNING,"Object type: %u", o->type); serverLog(LL_WARNING,"Object encoding: %u", o->encoding); serverLog(LL_WARNING,"Object refcount: %d", o->refcount); #if UNSAFE_CRASH_REPORT /* This code is now disabled. o->ptr may be unreliable to print. in some * cases a ziplist could have already been freed by realloc, but not yet * updated to o->ptr. in other cases the call to ziplistLen may need to * iterate on all the items in the list (and possibly crash again). * For some cases it may be ok to crash here again, but these could cause * invalid memory access which will bother valgrind and also possibly cause * random memory portion to be "leaked" into the logfile. */ if (o->type == OBJ_STRING && sdsEncodedObject(o)) { serverLog(LL_WARNING,"Object raw string len: %zu", sdslen(o->ptr)); if (sdslen(o->ptr) < 4096) { sds repr = sdscatrepr(sdsempty(),o->ptr,sdslen(o->ptr)); serverLog(LL_WARNING,"Object raw string content: %s", repr); sdsfree(repr); } } else if (o->type == OBJ_LIST) { serverLog(LL_WARNING,"List length: %d", (int) listTypeLength(o)); } else if (o->type == OBJ_SET) { serverLog(LL_WARNING,"Set size: %d", (int) setTypeSize(o)); } else if (o->type == OBJ_HASH) { serverLog(LL_WARNING,"Hash size: %d", (int) hashTypeLength(o)); } else if (o->type == OBJ_ZSET) { serverLog(LL_WARNING,"Sorted set size: %d", (int) zsetLength(o)); if (o->encoding == OBJ_ENCODING_SKIPLIST) serverLog(LL_WARNING,"Skiplist level: %d", (int) ((const zset*)o->ptr)->zsl->level); } else if (o->type == OBJ_STREAM) { serverLog(LL_WARNING,"Stream size: %d", (int) streamLength(o)); } #endif } void _serverAssertPrintObject(const robj *o) { bugReportStart(); serverLog(LL_WARNING,"=== ASSERTION FAILED OBJECT CONTEXT ==="); serverLogObjectDebugInfo(o); } void _serverAssertWithInfo(const client *c, const robj *o, const char *estr, const char *file, int line) { if (c) _serverAssertPrintClientInfo(c); if (o) _serverAssertPrintObject(o); _serverAssert(estr,file,line); } void _serverPanic(const char *file, int line, const char *msg, ...) { va_list ap; va_start(ap,msg); char fmtmsg[256]; vsnprintf(fmtmsg,sizeof(fmtmsg),msg,ap); va_end(ap); bugReportStart(); serverLog(LL_WARNING,"------------------------------------------------"); serverLog(LL_WARNING,"!!! Software Failure. Press left mouse button to continue"); serverLog(LL_WARNING,"Guru Meditation: %s #%s:%d",fmtmsg,file,line); if (server.crashlog_enabled) { #ifdef HAVE_BACKTRACE logStackTrace(NULL, 1); #endif printCrashReport(); } // remove the signal handler so on abort() we will output the crash report. 
removeSignalHandlers(); bugReportEnd(0, 0); } void bugReportStart(void) { pthread_mutex_lock(&bug_report_start_mutex); if (bug_report_start == 0) { serverLogRaw(LL_WARNING|LL_RAW, "\n\n=== REDIS BUG REPORT START: Cut & paste starting from here ===\n"); bug_report_start = 1; } pthread_mutex_unlock(&bug_report_start_mutex); } #ifdef HAVE_BACKTRACE static void *getMcontextEip(ucontext_t *uc) { #define NOT_SUPPORTED() do {\ UNUSED(uc);\ return NULL;\ } while(0) #if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) /* OSX < 10.6 */ #if defined(__x86_64__) return (void*) uc->uc_mcontext->__ss.__rip; #elif defined(__i386__) return (void*) uc->uc_mcontext->__ss.__eip; #else return (void*) uc->uc_mcontext->__ss.__srr0; #endif #elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) /* OSX >= 10.6 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) return (void*) uc->uc_mcontext->__ss.__rip; #elif defined(__i386__) return (void*) uc->uc_mcontext->__ss.__eip; #else /* OSX ARM64 */ return (void*) arm_thread_state64_get_pc(uc->uc_mcontext->__ss); #endif #elif defined(__linux__) /* Linux */ #if defined(__i386__) || ((defined(__X86_64__) || defined(__x86_64__)) && defined(__ILP32__)) return (void*) uc->uc_mcontext.gregs[14]; /* Linux 32 */ #elif defined(__X86_64__) || defined(__x86_64__) return (void*) uc->uc_mcontext.gregs[16]; /* Linux 64 */ #elif defined(__ia64__) /* Linux IA64 */ return (void*) uc->uc_mcontext.sc_ip; #elif defined(__arm__) /* Linux ARM */ return (void*) uc->uc_mcontext.arm_pc; #elif defined(__aarch64__) /* Linux AArch64 */ return (void*) uc->uc_mcontext.pc; #else NOT_SUPPORTED(); #endif #elif defined(__FreeBSD__) /* FreeBSD */ #if defined(__i386__) return (void*) uc->uc_mcontext.mc_eip; #elif defined(__x86_64__) return (void*) uc->uc_mcontext.mc_rip; #else NOT_SUPPORTED(); #endif #elif defined(__OpenBSD__) /* OpenBSD */ #if defined(__i386__) return (void*) uc->sc_eip; #elif defined(__x86_64__) return (void*) uc->sc_rip; #else NOT_SUPPORTED(); #endif #elif defined(__NetBSD__) #if defined(__i386__) return (void*) uc->uc_mcontext.__gregs[_REG_EIP]; #elif defined(__x86_64__) return (void*) uc->uc_mcontext.__gregs[_REG_RIP]; #else NOT_SUPPORTED(); #endif #elif defined(__DragonFly__) return (void*) uc->uc_mcontext.mc_rip; #else NOT_SUPPORTED(); #endif #undef NOT_SUPPORTED } REDIS_NO_SANITIZE("address") void logStackContent(void **sp) { int i; for (i = 15; i >= 0; i--) { unsigned long addr = (unsigned long) sp+i; unsigned long val = (unsigned long) sp[i]; if (sizeof(long) == 4) serverLog(LL_WARNING, "(%08lx) -> %08lx", addr, val); else serverLog(LL_WARNING, "(%016lx) -> %016lx", addr, val); } } /* Log dump of processor registers */ void logRegisters(ucontext_t *uc) { serverLog(LL_WARNING|LL_RAW, "\n------ REGISTERS ------\n"); #define NOT_SUPPORTED() do {\ UNUSED(uc);\ serverLog(LL_WARNING,\ " Dumping of registers not supported for this OS/arch");\ } while(0) /* OSX */ #if defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) /* OSX AMD64 */ #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCS :%016lx FS:%016lx GS:%016lx", (unsigned long) uc->uc_mcontext->__ss.__rax, (unsigned long) uc->uc_mcontext->__ss.__rbx, (unsigned long) uc->uc_mcontext->__ss.__rcx, (unsigned long) uc->uc_mcontext->__ss.__rdx, (unsigned long) 
uc->uc_mcontext->__ss.__rdi, (unsigned long) uc->uc_mcontext->__ss.__rsi, (unsigned long) uc->uc_mcontext->__ss.__rbp, (unsigned long) uc->uc_mcontext->__ss.__rsp, (unsigned long) uc->uc_mcontext->__ss.__r8, (unsigned long) uc->uc_mcontext->__ss.__r9, (unsigned long) uc->uc_mcontext->__ss.__r10, (unsigned long) uc->uc_mcontext->__ss.__r11, (unsigned long) uc->uc_mcontext->__ss.__r12, (unsigned long) uc->uc_mcontext->__ss.__r13, (unsigned long) uc->uc_mcontext->__ss.__r14, (unsigned long) uc->uc_mcontext->__ss.__r15, (unsigned long) uc->uc_mcontext->__ss.__rip, (unsigned long) uc->uc_mcontext->__ss.__rflags, (unsigned long) uc->uc_mcontext->__ss.__cs, (unsigned long) uc->uc_mcontext->__ss.__fs, (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__rsp); #elif defined(__i386__) /* OSX x86 */ serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS:%08lx EFL:%08lx EIP:%08lx CS :%08lx\n" "DS:%08lx ES:%08lx FS :%08lx GS :%08lx", (unsigned long) uc->uc_mcontext->__ss.__eax, (unsigned long) uc->uc_mcontext->__ss.__ebx, (unsigned long) uc->uc_mcontext->__ss.__ecx, (unsigned long) uc->uc_mcontext->__ss.__edx, (unsigned long) uc->uc_mcontext->__ss.__edi, (unsigned long) uc->uc_mcontext->__ss.__esi, (unsigned long) uc->uc_mcontext->__ss.__ebp, (unsigned long) uc->uc_mcontext->__ss.__esp, (unsigned long) uc->uc_mcontext->__ss.__ss, (unsigned long) uc->uc_mcontext->__ss.__eflags, (unsigned long) uc->uc_mcontext->__ss.__eip, (unsigned long) uc->uc_mcontext->__ss.__cs, (unsigned long) uc->uc_mcontext->__ss.__ds, (unsigned long) uc->uc_mcontext->__ss.__es, (unsigned long) uc->uc_mcontext->__ss.__fs, (unsigned long) uc->uc_mcontext->__ss.__gs ); logStackContent((void**)uc->uc_mcontext->__ss.__esp); #else /* OSX ARM64 */ serverLog(LL_WARNING, "\n" "x0:%016lx x1:%016lx x2:%016lx x3:%016lx\n" "x4:%016lx x5:%016lx x6:%016lx x7:%016lx\n" "x8:%016lx x9:%016lx x10:%016lx x11:%016lx\n" "x12:%016lx x13:%016lx x14:%016lx x15:%016lx\n" "x16:%016lx x17:%016lx x18:%016lx x19:%016lx\n" "x20:%016lx x21:%016lx x22:%016lx x23:%016lx\n" "x24:%016lx x25:%016lx x26:%016lx x27:%016lx\n" "x28:%016lx fp:%016lx lr:%016lx\n" "sp:%016lx pc:%016lx cpsr:%08lx\n", (unsigned long) uc->uc_mcontext->__ss.__x[0], (unsigned long) uc->uc_mcontext->__ss.__x[1], (unsigned long) uc->uc_mcontext->__ss.__x[2], (unsigned long) uc->uc_mcontext->__ss.__x[3], (unsigned long) uc->uc_mcontext->__ss.__x[4], (unsigned long) uc->uc_mcontext->__ss.__x[5], (unsigned long) uc->uc_mcontext->__ss.__x[6], (unsigned long) uc->uc_mcontext->__ss.__x[7], (unsigned long) uc->uc_mcontext->__ss.__x[8], (unsigned long) uc->uc_mcontext->__ss.__x[9], (unsigned long) uc->uc_mcontext->__ss.__x[10], (unsigned long) uc->uc_mcontext->__ss.__x[11], (unsigned long) uc->uc_mcontext->__ss.__x[12], (unsigned long) uc->uc_mcontext->__ss.__x[13], (unsigned long) uc->uc_mcontext->__ss.__x[14], (unsigned long) uc->uc_mcontext->__ss.__x[15], (unsigned long) uc->uc_mcontext->__ss.__x[16], (unsigned long) uc->uc_mcontext->__ss.__x[17], (unsigned long) uc->uc_mcontext->__ss.__x[18], (unsigned long) uc->uc_mcontext->__ss.__x[19], (unsigned long) uc->uc_mcontext->__ss.__x[20], (unsigned long) uc->uc_mcontext->__ss.__x[21], (unsigned long) uc->uc_mcontext->__ss.__x[22], (unsigned long) uc->uc_mcontext->__ss.__x[23], (unsigned long) uc->uc_mcontext->__ss.__x[24], (unsigned long) uc->uc_mcontext->__ss.__x[25], (unsigned long) uc->uc_mcontext->__ss.__x[26], (unsigned long) 
uc->uc_mcontext->__ss.__x[27], (unsigned long) uc->uc_mcontext->__ss.__x[28], (unsigned long) arm_thread_state64_get_fp(uc->uc_mcontext->__ss), (unsigned long) arm_thread_state64_get_lr(uc->uc_mcontext->__ss), (unsigned long) arm_thread_state64_get_sp(uc->uc_mcontext->__ss), (unsigned long) arm_thread_state64_get_pc(uc->uc_mcontext->__ss), (unsigned long) uc->uc_mcontext->__ss.__cpsr ); logStackContent((void**) arm_thread_state64_get_sp(uc->uc_mcontext->__ss)); #endif /* Linux */ #elif defined(__linux__) /* Linux x86 */ #if defined(__i386__) || ((defined(__X86_64__) || defined(__x86_64__)) && defined(__ILP32__)) serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", (unsigned long) uc->uc_mcontext.gregs[11], (unsigned long) uc->uc_mcontext.gregs[8], (unsigned long) uc->uc_mcontext.gregs[10], (unsigned long) uc->uc_mcontext.gregs[9], (unsigned long) uc->uc_mcontext.gregs[4], (unsigned long) uc->uc_mcontext.gregs[5], (unsigned long) uc->uc_mcontext.gregs[6], (unsigned long) uc->uc_mcontext.gregs[7], (unsigned long) uc->uc_mcontext.gregs[18], (unsigned long) uc->uc_mcontext.gregs[17], (unsigned long) uc->uc_mcontext.gregs[14], (unsigned long) uc->uc_mcontext.gregs[15], (unsigned long) uc->uc_mcontext.gregs[3], (unsigned long) uc->uc_mcontext.gregs[2], (unsigned long) uc->uc_mcontext.gregs[1], (unsigned long) uc->uc_mcontext.gregs[0] ); logStackContent((void**)uc->uc_mcontext.gregs[7]); #elif defined(__X86_64__) || defined(__x86_64__) /* Linux AMD64 */ serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", (unsigned long) uc->uc_mcontext.gregs[13], (unsigned long) uc->uc_mcontext.gregs[11], (unsigned long) uc->uc_mcontext.gregs[14], (unsigned long) uc->uc_mcontext.gregs[12], (unsigned long) uc->uc_mcontext.gregs[8], (unsigned long) uc->uc_mcontext.gregs[9], (unsigned long) uc->uc_mcontext.gregs[10], (unsigned long) uc->uc_mcontext.gregs[15], (unsigned long) uc->uc_mcontext.gregs[0], (unsigned long) uc->uc_mcontext.gregs[1], (unsigned long) uc->uc_mcontext.gregs[2], (unsigned long) uc->uc_mcontext.gregs[3], (unsigned long) uc->uc_mcontext.gregs[4], (unsigned long) uc->uc_mcontext.gregs[5], (unsigned long) uc->uc_mcontext.gregs[6], (unsigned long) uc->uc_mcontext.gregs[7], (unsigned long) uc->uc_mcontext.gregs[16], (unsigned long) uc->uc_mcontext.gregs[17], (unsigned long) uc->uc_mcontext.gregs[18] ); logStackContent((void**)uc->uc_mcontext.gregs[15]); #elif defined(__aarch64__) /* Linux AArch64 */ serverLog(LL_WARNING, "\n" "X18:%016lx X19:%016lx\nX20:%016lx X21:%016lx\n" "X22:%016lx X23:%016lx\nX24:%016lx X25:%016lx\n" "X26:%016lx X27:%016lx\nX28:%016lx X29:%016lx\n" "X30:%016lx\n" "pc:%016lx sp:%016lx\npstate:%016lx fault_address:%016lx\n", (unsigned long) uc->uc_mcontext.regs[18], (unsigned long) uc->uc_mcontext.regs[19], (unsigned long) uc->uc_mcontext.regs[20], (unsigned long) uc->uc_mcontext.regs[21], (unsigned long) uc->uc_mcontext.regs[22], (unsigned long) uc->uc_mcontext.regs[23], (unsigned long) uc->uc_mcontext.regs[24], (unsigned long) uc->uc_mcontext.regs[25], (unsigned long) uc->uc_mcontext.regs[26], (unsigned long) uc->uc_mcontext.regs[27], (unsigned long) uc->uc_mcontext.regs[28], (unsigned long) uc->uc_mcontext.regs[29], (unsigned 
long) uc->uc_mcontext.regs[30], (unsigned long) uc->uc_mcontext.pc, (unsigned long) uc->uc_mcontext.sp, (unsigned long) uc->uc_mcontext.pstate, (unsigned long) uc->uc_mcontext.fault_address ); logStackContent((void**)uc->uc_mcontext.sp); #elif defined(__arm__) /* Linux ARM */ serverLog(LL_WARNING, "\n" "R10:%016lx R9 :%016lx\nR8 :%016lx R7 :%016lx\n" "R6 :%016lx R5 :%016lx\nR4 :%016lx R3 :%016lx\n" "R2 :%016lx R1 :%016lx\nR0 :%016lx EC :%016lx\n" "fp: %016lx ip:%016lx\n" "pc:%016lx sp:%016lx\ncpsr:%016lx fault_address:%016lx\n", (unsigned long) uc->uc_mcontext.arm_r10, (unsigned long) uc->uc_mcontext.arm_r9, (unsigned long) uc->uc_mcontext.arm_r8, (unsigned long) uc->uc_mcontext.arm_r7, (unsigned long) uc->uc_mcontext.arm_r6, (unsigned long) uc->uc_mcontext.arm_r5, (unsigned long) uc->uc_mcontext.arm_r4, (unsigned long) uc->uc_mcontext.arm_r3, (unsigned long) uc->uc_mcontext.arm_r2, (unsigned long) uc->uc_mcontext.arm_r1, (unsigned long) uc->uc_mcontext.arm_r0, (unsigned long) uc->uc_mcontext.error_code, (unsigned long) uc->uc_mcontext.arm_fp, (unsigned long) uc->uc_mcontext.arm_ip, (unsigned long) uc->uc_mcontext.arm_pc, (unsigned long) uc->uc_mcontext.arm_sp, (unsigned long) uc->uc_mcontext.arm_cpsr, (unsigned long) uc->uc_mcontext.fault_address ); logStackContent((void**)uc->uc_mcontext.arm_sp); #else NOT_SUPPORTED(); #endif #elif defined(__FreeBSD__) #if defined(__x86_64__) serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", (unsigned long) uc->uc_mcontext.mc_rax, (unsigned long) uc->uc_mcontext.mc_rbx, (unsigned long) uc->uc_mcontext.mc_rcx, (unsigned long) uc->uc_mcontext.mc_rdx, (unsigned long) uc->uc_mcontext.mc_rdi, (unsigned long) uc->uc_mcontext.mc_rsi, (unsigned long) uc->uc_mcontext.mc_rbp, (unsigned long) uc->uc_mcontext.mc_rsp, (unsigned long) uc->uc_mcontext.mc_r8, (unsigned long) uc->uc_mcontext.mc_r9, (unsigned long) uc->uc_mcontext.mc_r10, (unsigned long) uc->uc_mcontext.mc_r11, (unsigned long) uc->uc_mcontext.mc_r12, (unsigned long) uc->uc_mcontext.mc_r13, (unsigned long) uc->uc_mcontext.mc_r14, (unsigned long) uc->uc_mcontext.mc_r15, (unsigned long) uc->uc_mcontext.mc_rip, (unsigned long) uc->uc_mcontext.mc_rflags, (unsigned long) uc->uc_mcontext.mc_cs ); logStackContent((void**)uc->uc_mcontext.mc_rsp); #elif defined(__i386__) serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", (unsigned long) uc->uc_mcontext.mc_eax, (unsigned long) uc->uc_mcontext.mc_ebx, (unsigned long) uc->uc_mcontext.mc_ebx, (unsigned long) uc->uc_mcontext.mc_edx, (unsigned long) uc->uc_mcontext.mc_edi, (unsigned long) uc->uc_mcontext.mc_esi, (unsigned long) uc->uc_mcontext.mc_ebp, (unsigned long) uc->uc_mcontext.mc_esp, (unsigned long) uc->uc_mcontext.mc_ss, (unsigned long) uc->uc_mcontext.mc_eflags, (unsigned long) uc->uc_mcontext.mc_eip, (unsigned long) uc->uc_mcontext.mc_cs, (unsigned long) uc->uc_mcontext.mc_es, (unsigned long) uc->uc_mcontext.mc_fs, (unsigned long) uc->uc_mcontext.mc_gs ); logStackContent((void**)uc->uc_mcontext.mc_esp); #else NOT_SUPPORTED(); #endif #elif defined(__OpenBSD__) #if defined(__x86_64__) serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" 
"R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", (unsigned long) uc->sc_rax, (unsigned long) uc->sc_rbx, (unsigned long) uc->sc_rcx, (unsigned long) uc->sc_rdx, (unsigned long) uc->sc_rdi, (unsigned long) uc->sc_rsi, (unsigned long) uc->sc_rbp, (unsigned long) uc->sc_rsp, (unsigned long) uc->sc_r8, (unsigned long) uc->sc_r9, (unsigned long) uc->sc_r10, (unsigned long) uc->sc_r11, (unsigned long) uc->sc_r12, (unsigned long) uc->sc_r13, (unsigned long) uc->sc_r14, (unsigned long) uc->sc_r15, (unsigned long) uc->sc_rip, (unsigned long) uc->sc_rflags, (unsigned long) uc->sc_cs ); logStackContent((void**)uc->sc_rsp); #elif defined(__i386__) serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", (unsigned long) uc->sc_eax, (unsigned long) uc->sc_ebx, (unsigned long) uc->sc_ebx, (unsigned long) uc->sc_edx, (unsigned long) uc->sc_edi, (unsigned long) uc->sc_esi, (unsigned long) uc->sc_ebp, (unsigned long) uc->sc_esp, (unsigned long) uc->sc_ss, (unsigned long) uc->sc_eflags, (unsigned long) uc->sc_eip, (unsigned long) uc->sc_cs, (unsigned long) uc->sc_es, (unsigned long) uc->sc_fs, (unsigned long) uc->sc_gs ); logStackContent((void**)uc->sc_esp); #else NOT_SUPPORTED(); #endif #elif defined(__NetBSD__) #if defined(__x86_64__) serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", (unsigned long) uc->uc_mcontext.__gregs[_REG_RAX], (unsigned long) uc->uc_mcontext.__gregs[_REG_RBX], (unsigned long) uc->uc_mcontext.__gregs[_REG_RCX], (unsigned long) uc->uc_mcontext.__gregs[_REG_RDX], (unsigned long) uc->uc_mcontext.__gregs[_REG_RDI], (unsigned long) uc->uc_mcontext.__gregs[_REG_RSI], (unsigned long) uc->uc_mcontext.__gregs[_REG_RBP], (unsigned long) uc->uc_mcontext.__gregs[_REG_RSP], (unsigned long) uc->uc_mcontext.__gregs[_REG_R8], (unsigned long) uc->uc_mcontext.__gregs[_REG_R9], (unsigned long) uc->uc_mcontext.__gregs[_REG_R10], (unsigned long) uc->uc_mcontext.__gregs[_REG_R11], (unsigned long) uc->uc_mcontext.__gregs[_REG_R12], (unsigned long) uc->uc_mcontext.__gregs[_REG_R13], (unsigned long) uc->uc_mcontext.__gregs[_REG_R14], (unsigned long) uc->uc_mcontext.__gregs[_REG_R15], (unsigned long) uc->uc_mcontext.__gregs[_REG_RIP], (unsigned long) uc->uc_mcontext.__gregs[_REG_RFLAGS], (unsigned long) uc->uc_mcontext.__gregs[_REG_CS] ); logStackContent((void**)uc->uc_mcontext.__gregs[_REG_RSP]); #elif defined(__i386__) serverLog(LL_WARNING, "\n" "EAX:%08lx EBX:%08lx ECX:%08lx EDX:%08lx\n" "EDI:%08lx ESI:%08lx EBP:%08lx ESP:%08lx\n" "SS :%08lx EFL:%08lx EIP:%08lx CS:%08lx\n" "DS :%08lx ES :%08lx FS :%08lx GS:%08lx", (unsigned long) uc->uc_mcontext.__gregs[_REG_EAX], (unsigned long) uc->uc_mcontext.__gregs[_REG_EBX], (unsigned long) uc->uc_mcontext.__gregs[_REG_EDX], (unsigned long) uc->uc_mcontext.__gregs[_REG_EDI], (unsigned long) uc->uc_mcontext.__gregs[_REG_ESI], (unsigned long) uc->uc_mcontext.__gregs[_REG_EBP], (unsigned long) uc->uc_mcontext.__gregs[_REG_ESP], (unsigned long) uc->uc_mcontext.__gregs[_REG_SS], (unsigned long) uc->uc_mcontext.__gregs[_REG_EFLAGS], (unsigned long) uc->uc_mcontext.__gregs[_REG_EIP], (unsigned long) uc->uc_mcontext.__gregs[_REG_CS], 
(unsigned long) uc->uc_mcontext.__gregs[_REG_ES], (unsigned long) uc->uc_mcontext.__gregs[_REG_FS], (unsigned long) uc->uc_mcontext.__gregs[_REG_GS] ); #else NOT_SUPPORTED(); #endif #elif defined(__DragonFly__) serverLog(LL_WARNING, "\n" "RAX:%016lx RBX:%016lx\nRCX:%016lx RDX:%016lx\n" "RDI:%016lx RSI:%016lx\nRBP:%016lx RSP:%016lx\n" "R8 :%016lx R9 :%016lx\nR10:%016lx R11:%016lx\n" "R12:%016lx R13:%016lx\nR14:%016lx R15:%016lx\n" "RIP:%016lx EFL:%016lx\nCSGSFS:%016lx", (unsigned long) uc->uc_mcontext.mc_rax, (unsigned long) uc->uc_mcontext.mc_rbx, (unsigned long) uc->uc_mcontext.mc_rcx, (unsigned long) uc->uc_mcontext.mc_rdx, (unsigned long) uc->uc_mcontext.mc_rdi, (unsigned long) uc->uc_mcontext.mc_rsi, (unsigned long) uc->uc_mcontext.mc_rbp, (unsigned long) uc->uc_mcontext.mc_rsp, (unsigned long) uc->uc_mcontext.mc_r8, (unsigned long) uc->uc_mcontext.mc_r9, (unsigned long) uc->uc_mcontext.mc_r10, (unsigned long) uc->uc_mcontext.mc_r11, (unsigned long) uc->uc_mcontext.mc_r12, (unsigned long) uc->uc_mcontext.mc_r13, (unsigned long) uc->uc_mcontext.mc_r14, (unsigned long) uc->uc_mcontext.mc_r15, (unsigned long) uc->uc_mcontext.mc_rip, (unsigned long) uc->uc_mcontext.mc_rflags, (unsigned long) uc->uc_mcontext.mc_cs ); logStackContent((void**)uc->uc_mcontext.mc_rsp); #else NOT_SUPPORTED(); #endif #undef NOT_SUPPORTED } #endif /* HAVE_BACKTRACE */ /* Return a file descriptor to write directly to the Redis log with the * write(2) syscall, that can be used in critical sections of the code * where the rest of Redis can't be trusted (for example during the memory * test) or when an API call requires a raw fd. * * Close it with closeDirectLogFiledes(). */ int openDirectLogFiledes(void) { int log_to_stdout = server.logfile[0] == '\0'; int fd = log_to_stdout ? STDOUT_FILENO : open(server.logfile, O_APPEND|O_CREAT|O_WRONLY, 0644); return fd; } /* Used to close what openDirectLogFiledes() returns. */ void closeDirectLogFiledes(int fd) { int log_to_stdout = server.logfile[0] == '\0'; if (!log_to_stdout) close(fd); } #ifdef HAVE_BACKTRACE /* Logs the stack trace using the backtrace() call. This function is designed * to be called from signal handlers safely. * The eip argument is optional (can take NULL). * The uplevel argument indicates how many of the calling functions to skip. */ void logStackTrace(void *eip, int uplevel) { void *trace[100]; int trace_size = 0, fd = openDirectLogFiledes(); char *msg; uplevel++; /* skip this function */ if (fd == -1) return; /* If we can't log there is nothing to do. */ /* Get the stack trace first! */ trace_size = backtrace(trace, 100); msg = "\n------ STACK TRACE ------\n"; if (write(fd,msg,strlen(msg)) == -1) {/* Avoid warning. */}; if (eip) { /* Write EIP to the log file */ msg = "EIP:\n"; if (write(fd,msg,strlen(msg)) == -1) {/* Avoid warning. */}; backtrace_symbols_fd(&eip, 1, fd); } /* Write symbols to log file */ msg = "\nBacktrace:\n"; if (write(fd,msg,strlen(msg)) == -1) {/* Avoid warning. 
*/}; backtrace_symbols_fd(trace+uplevel, trace_size-uplevel, fd); /* Cleanup */ closeDirectLogFiledes(fd); } #endif /* HAVE_BACKTRACE */ /* Log global server info */ void logServerInfo(void) { sds infostring, clients; serverLogRaw(LL_WARNING|LL_RAW, "\n------ INFO OUTPUT ------\n"); infostring = genRedisInfoString("all"); serverLogRaw(LL_WARNING|LL_RAW, infostring); serverLogRaw(LL_WARNING|LL_RAW, "\n------ CLIENT LIST OUTPUT ------\n"); clients = getAllClientsInfoString(-1); serverLogRaw(LL_WARNING|LL_RAW, clients); sdsfree(infostring); sdsfree(clients); } /* Log certain config values, which can be used for debugging */ void logConfigDebugInfo(void) { sds configstring; configstring = getConfigDebugInfo(); serverLogRaw(LL_WARNING|LL_RAW, "\n------ CONFIG DEBUG OUTPUT ------\n"); serverLogRaw(LL_WARNING|LL_RAW, configstring); sdsfree(configstring); } /* Log modules info. Something we wanna do last since we fear it may crash. */ void logModulesInfo(void) { serverLogRaw(LL_WARNING|LL_RAW, "\n------ MODULES INFO OUTPUT ------\n"); sds infostring = modulesCollectInfo(sdsempty(), NULL, 1, 0); serverLogRaw(LL_WARNING|LL_RAW, infostring); sdsfree(infostring); } /* Log information about the "current" client, that is, the client that is * currently being served by Redis. May be NULL if Redis is not serving a * client right now. */ void logCurrentClient(void) { if (server.current_client == NULL) return; client *cc = server.current_client; sds client; int j; serverLogRaw(LL_WARNING|LL_RAW, "\n------ CURRENT CLIENT INFO ------\n"); client = catClientInfoString(sdsempty(),cc); serverLog(LL_WARNING|LL_RAW,"%s\n", client); sdsfree(client); for (j = 0; j < cc->argc; j++) { robj *decoded; decoded = getDecodedObject(cc->argv[j]); serverLog(LL_WARNING|LL_RAW,"argv[%d]: '%s'\n", j, (char*)decoded->ptr); decrRefCount(decoded); } /* Check if the first argument, usually a key, is found inside the * selected DB, and if so print info about the associated object. */ if (cc->argc > 1) { robj *val, *key; dictEntry *de; key = getDecodedObject(cc->argv[1]); de = dictFind(cc->db->dict, key->ptr); if (de) { val = dictGetVal(de); serverLog(LL_WARNING,"key '%s' found in DB containing the following object:", (char*)key->ptr); serverLogObjectDebugInfo(val); } decrRefCount(key); } } #if defined(HAVE_PROC_MAPS) #define MEMTEST_MAX_REGIONS 128 /* A non-destructive memory test executed during segfault. */ int memtest_test_linux_anonymous_maps(void) { FILE *fp; char line[1024]; char logbuf[1024]; size_t start_addr, end_addr, size; size_t start_vect[MEMTEST_MAX_REGIONS]; size_t size_vect[MEMTEST_MAX_REGIONS]; int regions = 0, j; int fd = openDirectLogFiledes(); if (!fd) return 0; fp = fopen("/proc/self/maps","r"); if (!fp) return 0; while(fgets(line,sizeof(line),fp) != NULL) { char *start, *end, *p = line; start = p; p = strchr(p,'-'); if (!p) continue; *p++ = '\0'; end = p; p = strchr(p,' '); if (!p) continue; *p++ = '\0'; if (strstr(p,"stack") || strstr(p,"vdso") || strstr(p,"vsyscall")) continue; if (!strstr(p,"00:00")) continue; if (!strstr(p,"rw")) continue; start_addr = strtoul(start,NULL,16); end_addr = strtoul(end,NULL,16); size = end_addr-start_addr; start_vect[regions] = start_addr; size_vect[regions] = size; snprintf(logbuf,sizeof(logbuf), "*** Preparing to test memory region %lx (%lu bytes)\n", (unsigned long) start_vect[regions], (unsigned long) size_vect[regions]); if (write(fd,logbuf,strlen(logbuf)) == -1) { /* Nothing to do. 
*/ } regions++; } int errors = 0; for (j = 0; j < regions; j++) { if (write(fd,".",1) == -1) { /* Nothing to do. */ } errors += memtest_preserving_test((void*)start_vect[j],size_vect[j],1); if (write(fd, errors ? "E" : "O",1) == -1) { /* Nothing to do. */ } } if (write(fd,"\n",1) == -1) { /* Nothing to do. */ } /* NOTE: It is very important to close the file descriptor only now * because closing it before may result into unmapping of some memory * region that we are testing. */ fclose(fp); closeDirectLogFiledes(fd); return errors; } #endif /* HAVE_PROC_MAPS */ static void killMainThread(void) { int err; if (pthread_self() != server.main_thread_id && pthread_cancel(server.main_thread_id) == 0) { if ((err = pthread_join(server.main_thread_id,NULL)) != 0) { serverLog(LL_WARNING, "main thread can not be joined: %s", strerror(err)); } else { serverLog(LL_WARNING, "main thread terminated"); } } } /* Kill the running threads (other than current) in an unclean way. This function * should be used only when it's critical to stop the threads for some reason. * Currently Redis does this only on crash (for instance on SIGSEGV) in order * to perform a fast memory check without other threads messing with memory. */ void killThreads(void) { killMainThread(); bioKillThreads(); killIOThreads(); } void doFastMemoryTest(void) { #if defined(HAVE_PROC_MAPS) if (server.memcheck_enabled) { /* Test memory */ serverLogRaw(LL_WARNING|LL_RAW, "\n------ FAST MEMORY TEST ------\n"); killThreads(); if (memtest_test_linux_anonymous_maps()) { serverLogRaw(LL_WARNING|LL_RAW, "!!! MEMORY ERROR DETECTED! Check your memory ASAP !!!\n"); } else { serverLogRaw(LL_WARNING|LL_RAW, "Fast memory test PASSED, however your memory can still be broken. Please run a memory test for several hours if possible.\n"); } } #endif /* HAVE_PROC_MAPS */ } /* Scans the (assumed) x86 code starting at addr, for a max of `len` * bytes, searching for E8 (callq) opcodes, and dumping the symbols * and the call offset if they appear to be valid. */ void dumpX86Calls(void *addr, size_t len) { size_t j; unsigned char *p = addr; Dl_info info; /* Hash table to best-effort avoid printing the same symbol * multiple times. */ unsigned long ht[256] = {0}; if (len < 5) return; for (j = 0; j < len-4; j++) { if (p[j] != 0xE8) continue; /* Not an E8 CALL opcode. */ unsigned long target = (unsigned long)addr+j+5; uint32_t tmp; memcpy(&tmp, p+j+1, sizeof(tmp)); target += tmp; if (dladdr((void*)target, &info) != 0 && info.dli_sname != NULL) { if (ht[target&0xff] != target) { printf("Function at 0x%lx is %s\n",target,info.dli_sname); ht[target&0xff] = target; } j += 4; /* Skip the 32 bit immediate. */ } } } void dumpCodeAroundEIP(void *eip) { Dl_info info; if (dladdr(eip, &info) != 0) { serverLog(LL_WARNING|LL_RAW, "\n------ DUMPING CODE AROUND EIP ------\n" "Symbol: %s (base: %p)\n" "Module: %s (base %p)\n" "$ xxd -r -p /tmp/dump.hex /tmp/dump.bin\n" "$ objdump --adjust-vma=%p -D -b binary -m i386:x86-64 /tmp/dump.bin\n" "------\n", info.dli_sname, info.dli_saddr, info.dli_fname, info.dli_fbase, info.dli_saddr); size_t len = (long)eip - (long)info.dli_saddr; unsigned long sz = sysconf(_SC_PAGESIZE); if (len < 1<<13) { /* we don't have functions over 8k (verified) */ /* Find the address of the next page, which is our "safety" * limit when dumping. Then try to dump just 128 bytes more * than EIP if there is room, or stop sooner. 
*/ void *base = (void *)info.dli_saddr; unsigned long next = ((unsigned long)eip + sz) & ~(sz-1); unsigned long end = (unsigned long)eip + 128; if (end > next) end = next; len = end - (unsigned long)base; serverLogHexDump(LL_WARNING, "dump of function", base, len); dumpX86Calls(base, len); } } } void sigsegvHandler(int sig, siginfo_t *info, void *secret) { UNUSED(secret); UNUSED(info); bugReportStart(); serverLog(LL_WARNING, "Redis %s crashed by signal: %d, si_code: %d", REDIS_VERSION, sig, info->si_code); if (sig == SIGSEGV || sig == SIGBUS) { serverLog(LL_WARNING, "Accessing address: %p", (void*)info->si_addr); } if (info->si_code <= SI_USER && info->si_pid != -1) { serverLog(LL_WARNING, "Killed by PID: %ld, UID: %d", (long) info->si_pid, info->si_uid); } #ifdef HAVE_BACKTRACE ucontext_t *uc = (ucontext_t*) secret; void *eip = getMcontextEip(uc); if (eip != NULL) { serverLog(LL_WARNING, "Crashed running the instruction at: %p", eip); } logStackTrace(getMcontextEip(uc), 1); logRegisters(uc); #endif printCrashReport(); #ifdef HAVE_BACKTRACE if (eip != NULL) dumpCodeAroundEIP(eip); #endif bugReportEnd(1, sig); } void printCrashReport(void) { /* Log INFO and CLIENT LIST */ logServerInfo(); /* Log the current client */ logCurrentClient(); /* Log modules info. Something we wanna do last since we fear it may crash. */ logModulesInfo(); /* Log debug config information, i.e. some values that may be useful * for debugging crashes. */ logConfigDebugInfo(); /* Run memory test in case the crash was triggered by memory corruption. */ doFastMemoryTest(); } void bugReportEnd(int killViaSignal, int sig) { struct sigaction act; serverLogRaw(LL_WARNING|LL_RAW, "\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n" " Please report the crash by opening an issue on github:\n\n" " http://github.com/redis/redis/issues\n\n" " Suspect RAM error? Use redis-server --test-memory to verify it.\n\n" ); /* free(messages); Don't call free() with possibly corrupted memory. */ if (server.daemonize && server.supervised == 0 && server.pidfile) unlink(server.pidfile); if (!killViaSignal) { /* To avoid issues with valgrind, we may wanna exit rather than generate a signal */ if (server.use_exit_on_panic) { /* Using _exit to bypass false leak reports by gcc ASAN */ fflush(stdout); _exit(1); } abort(); } /* Make sure we exit with the right signal at the end. So for instance * the core will be dumped if enabled. 
*/ sigemptyset (&act.sa_mask); act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND; act.sa_handler = SIG_DFL; sigaction (sig, &act, NULL); kill(getpid(),sig); } /* ==================== Logging functions for debugging ===================== */ void serverLogHexDump(int level, char *descr, void *value, size_t len) { char buf[65], *b; unsigned char *v = value; char charset[] = "0123456789abcdef"; serverLog(level,"%s (hexdump of %zu bytes):", descr, len); b = buf; while(len) { b[0] = charset[(*v)>>4]; b[1] = charset[(*v)&0xf]; b[2] = '\0'; b += 2; len--; v++; if (b-buf == 64 || len == 0) { serverLogRaw(level|LL_RAW,buf); b = buf; } } serverLogRaw(level|LL_RAW,"\n"); } /* =========================== Software Watchdog ============================ */ #include <sys/time.h> void watchdogSignalHandler(int sig, siginfo_t *info, void *secret) { #ifdef HAVE_BACKTRACE ucontext_t *uc = (ucontext_t*) secret; #else (void)secret; #endif UNUSED(info); UNUSED(sig); serverLogFromHandler(LL_WARNING,"\n--- WATCHDOG TIMER EXPIRED ---"); #ifdef HAVE_BACKTRACE logStackTrace(getMcontextEip(uc), 1); #else serverLogFromHandler(LL_WARNING,"Sorry: no support for backtrace()."); #endif serverLogFromHandler(LL_WARNING,"--------\n"); } /* Schedule a SIGALRM delivery after the specified period in milliseconds. * If a timer is already scheduled, this function will re-schedule it to the * specified time. If period is 0 the current timer is disabled. */ void watchdogScheduleSignal(int period) { struct itimerval it; /* Will stop the timer if period is 0. */ it.it_value.tv_sec = period/1000; it.it_value.tv_usec = (period%1000)*1000; /* Don't automatically restart. */ it.it_interval.tv_sec = 0; it.it_interval.tv_usec = 0; setitimer(ITIMER_REAL, &it, NULL); } void applyWatchdogPeriod() { struct sigaction act; /* Disable watchdog when period is 0 */ if (server.watchdog_period == 0) { watchdogScheduleSignal(0); /* Stop the current timer. */ /* Set the signal handler to SIG_IGN, this will also remove pending * signals from the queue. */ sigemptyset(&act.sa_mask); act.sa_flags = 0; act.sa_handler = SIG_IGN; sigaction(SIGALRM, &act, NULL); } else { /* Setup the signal handler. */ sigemptyset(&act.sa_mask); act.sa_flags = SA_SIGINFO; act.sa_sigaction = watchdogSignalHandler; sigaction(SIGALRM, &act, NULL); /* If the configured period is smaller than twice the timer period, it is * too short for the software watchdog to work reliably. Fix it now * if needed. */ int min_period = (1000/server.hz)*2; if (server.watchdog_period < min_period) server.watchdog_period = min_period; watchdogScheduleSignal(server.watchdog_period); /* Adjust the current timer. */ } } /* Positive input is sleep time in microseconds. Negative input is fractions * of microseconds, i.e. -10 means 100 nanoseconds. */ void debugDelay(int usec) { /* Since even the shortest sleep results in context switch and system call, * the way we achieve short sleeps is by statistically sleeping less often. */ if (usec < 0) usec = (rand() % -usec) == 0 ? 1: 0; if (usec) usleep(usec); }
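/*
 * Illustrative sketch (standalone, not part of the file above): a minimal,
 * self-contained program showing the same one-shot setitimer(2) pattern that
 * watchdogScheduleSignal() relies on. The millisecond period is split into
 * it_value.tv_sec/tv_usec, it_interval stays zero so the timer does not
 * restart automatically, and a period of 0 disarms it. All names here
 * (example_schedule_ms, example_alarm_handler) are invented for the example;
 * this is a sketch of the pattern, not Redis code.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t fired = 0;

static void example_alarm_handler(int sig) {
    (void) sig;
    fired = 1; /* async-signal-safe: only set a flag */
}

static void example_schedule_ms(int period) {
    struct itimerval it;
    /* Same conversion watchdogScheduleSignal() performs: a period of 0
     * leaves it_value zeroed, which disarms the timer. */
    it.it_value.tv_sec = period/1000;
    it.it_value.tv_usec = (period%1000)*1000;
    it.it_interval.tv_sec = 0;  /* one-shot: no automatic restart */
    it.it_interval.tv_usec = 0;
    setitimer(ITIMER_REAL, &it, NULL);
}

int main(void) {
    struct sigaction act;
    sigset_t block, old;

    memset(&act, 0, sizeof(act));
    sigemptyset(&act.sa_mask);
    act.sa_handler = example_alarm_handler;
    sigaction(SIGALRM, &act, NULL);

    /* Block SIGALRM while arming the timer so the wait below is race-free. */
    sigemptyset(&block);
    sigaddset(&block, SIGALRM);
    sigprocmask(SIG_BLOCK, &block, &old);

    example_schedule_ms(200);        /* deliver SIGALRM in ~200 ms */
    while (!fired) sigsuspend(&old); /* atomically unblock and wait */

    printf("watchdog-style timer fired\n");
    return 0;
}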
668228.c
/* * Copyright (C) Yichun Zhang (agentzh) */ #ifndef DDEBUG #define DDEBUG 0 #endif #include "ddebug.h" #if (NGX_PCRE) #include "ngx_stream_lua_regex.h" #include "ngx_stream_lua_pcrefix.h" #include "ngx_stream_lua_script.h" #include "ngx_stream_lua_pcrefix.h" #include "ngx_stream_lua_util.h" #include <pcre.h> #if (PCRE_MAJOR > 8) || (PCRE_MAJOR == 8 && PCRE_MINOR >= 21) # define LUA_HAVE_PCRE_JIT 1 #else # define LUA_HAVE_PCRE_JIT 0 #endif #if (PCRE_MAJOR >= 6) # define LUA_HAVE_PCRE_DFA 1 #else # define LUA_HAVE_PCRE_DFA 0 #endif #define NGX_LUA_RE_COMPILE_ONCE (1<<0) #define NGX_LUA_RE_MODE_DFA (1<<1) #define NGX_LUA_RE_MODE_JIT (1<<2) #define NGX_LUA_RE_MODE_DUPNAMES (1<<3) #define NGX_LUA_RE_NO_UTF8_CHECK (1<<4) #define NGX_LUA_RE_DFA_MODE_WORKSPACE_COUNT (100) typedef struct { #ifndef NGX_LUA_NO_FFI_API ngx_pool_t *pool; u_char *name_table; int name_count; int name_entry_size; #endif int ncaptures; int *captures; pcre *regex; pcre_extra *regex_sd; ngx_stream_lua_complex_value_t *replace; #ifndef NGX_LUA_NO_FFI_API /* only for (stap) debugging, and may be an invalid pointer */ const u_char *pattern; #endif } ngx_stream_lua_regex_t; typedef struct { ngx_str_t pattern; ngx_pool_t *pool; ngx_int_t options; pcre *regex; int captures; ngx_str_t err; } ngx_stream_lua_regex_compile_t; typedef struct { ngx_pool_cleanup_pt *cleanup; ngx_stream_session_t *session; pcre *regex; pcre_extra *regex_sd; int ncaptures; int *captures; int captures_len; uint8_t flags; } ngx_stream_lua_regex_ctx_t; static int ngx_stream_lua_ngx_re_gmatch_iterator(lua_State *L); static ngx_uint_t ngx_stream_lua_ngx_re_parse_opts(lua_State *L, ngx_stream_lua_regex_compile_t *re, ngx_str_t *opts, int narg); static int ngx_stream_lua_ngx_re_sub_helper(lua_State *L, unsigned global); static int ngx_stream_lua_ngx_re_match_helper(lua_State *L, int wantcaps); static int ngx_stream_lua_ngx_re_find(lua_State *L); static int ngx_stream_lua_ngx_re_match(lua_State *L); static int ngx_stream_lua_ngx_re_gmatch(lua_State *L); static int ngx_stream_lua_ngx_re_sub(lua_State *L); static int ngx_stream_lua_ngx_re_gsub(lua_State *L); static void ngx_stream_lua_regex_free_study_data(ngx_pool_t *pool, pcre_extra *sd); static ngx_int_t ngx_stream_lua_regex_compile( ngx_stream_lua_regex_compile_t *rc); static void ngx_stream_lua_ngx_re_gmatch_cleanup(void *data); static int ngx_stream_lua_ngx_re_gmatch_gc(lua_State *L); static void ngx_stream_lua_re_collect_named_captures(lua_State *L, int res_tb_idx, u_char *name_table, int name_count, int name_entry_size, unsigned flags, ngx_str_t *subj); #define ngx_stream_lua_regex_exec(re, e, s, start, captures, size, opts) \ pcre_exec(re, e, (const char *) (s)->data, (s)->len, start, opts, \ captures, size) #define ngx_stream_lua_regex_dfa_exec(re, e, s, start, captures, size, ws, \ wscount, opts) \ pcre_dfa_exec(re, e, (const char *) (s)->data, (s)->len, start, opts, \ captures, size, ws, wscount) static int ngx_stream_lua_ngx_re_match(lua_State *L) { return ngx_stream_lua_ngx_re_match_helper(L, 1 /* want captures */); } static int ngx_stream_lua_ngx_re_find(lua_State *L) { return ngx_stream_lua_ngx_re_match_helper(L, 0 /* want captures */); } static int ngx_stream_lua_ngx_re_match_helper(lua_State *L, int wantcaps) { /* u_char *p; */ int res_tb_idx = 0; ngx_stream_session_t *s; ngx_str_t subj; ngx_str_t pat; ngx_str_t opts; ngx_stream_lua_regex_t *re; const char *msg; ngx_int_t rc; ngx_uint_t n; int i; ngx_int_t pos = 0; int nargs; int *cap = NULL; int ovecsize; ngx_uint_t flags; ngx_pool_t *pool, 
*old_pool; ngx_stream_lua_main_conf_t *lmcf; u_char errstr[NGX_MAX_CONF_ERRSTR + 1]; pcre_extra *sd = NULL; int name_entry_size = 0, name_count; u_char *name_table = NULL; int exec_opts; int group_id = 0; ngx_stream_lua_regex_compile_t re_comp; nargs = lua_gettop(L); if (nargs != 2 && nargs != 3 && nargs != 4 && nargs != 5) { return luaL_error(L, "expecting 2, 3, 4 or 5 arguments, " "but got %d", nargs); } s = ngx_stream_lua_get_session(L); if (s == NULL) { return luaL_error(L, "no session object found"); } subj.data = (u_char *) luaL_checklstring(L, 1, &subj.len); pat.data = (u_char *) luaL_checklstring(L, 2, &pat.len); ngx_memzero(&re_comp, sizeof(ngx_stream_lua_regex_compile_t)); if (nargs >= 3) { opts.data = (u_char *) luaL_checklstring(L, 3, &opts.len); if (nargs == 4) { luaL_checktype(L, 4, LUA_TTABLE); lua_getfield(L, 4, "pos"); if (lua_isnumber(L, -1)) { pos = (ngx_int_t) lua_tointeger(L, -1); if (pos <= 0) { pos = 0; } else { pos--; /* 1-based on the Lua land */ } } else if (lua_isnil(L, -1)) { pos = 0; } else { msg = lua_pushfstring(L, "bad pos field type in the ctx table " "argument: %s", luaL_typename(L, -1)); return luaL_argerror(L, 4, msg); } lua_pop(L, 1); } } else { opts.data = (u_char *) ""; opts.len = 0; } if (nargs == 5) { if (wantcaps) { luaL_checktype(L, 5, LUA_TTABLE); res_tb_idx = 5; #if 0 /* clear the Lua table */ lua_pushnil(L); while (lua_next(L, res_tb_idx) != 0) { lua_pop(L, 1); lua_pushvalue(L, -1); lua_pushnil(L); lua_rawset(L, res_tb_idx); } #endif } else { group_id = luaL_checkint(L, 5); if (group_id < 0) { group_id = 0; } } } re_comp.options = 0; flags = ngx_stream_lua_ngx_re_parse_opts(L, &re_comp, &opts, 3); lmcf = ngx_stream_get_module_main_conf(s, ngx_stream_lua_module); if (flags & NGX_LUA_RE_COMPILE_ONCE) { pool = lmcf->pool; dd("server pool %p", lmcf->pool); lua_pushlightuserdata(L, &ngx_stream_lua_regex_cache_key); lua_rawget(L, LUA_REGISTRYINDEX); /* table */ lua_pushliteral(L, "m"); lua_pushvalue(L, 2); /* table regex */ dd("options size: %d", (int) sizeof(re_comp.options)); lua_pushlstring(L, (char *) &re_comp.options, sizeof(re_comp.options)); /* table regex opts */ lua_concat(L, 3); /* table key */ lua_pushvalue(L, -1); /* table key key */ dd("regex cache key: %.*s", (int) (pat.len + sizeof(re_comp.options)), lua_tostring(L, -1)); lua_rawget(L, -3); /* table key re */ re = lua_touserdata(L, -1); lua_pop(L, 1); /* table key */ if (re) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua regex cache hit for match regex " "\"%s\" with options \"%s\"", pat.data, opts.data); lua_pop(L, 2); dd("restoring regex %p, ncaptures %d, captures %p", re->regex, re->ncaptures, re->captures); re_comp.regex = re->regex; sd = re->regex_sd; re_comp.captures = re->ncaptures; cap = re->captures; if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re->ncaptures + 1) * 3; } goto exec; } ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua regex cache miss for match regex \"%s\" " "with options \"%s\"", pat.data, opts.data); if (lmcf->regex_cache_entries >= lmcf->regex_cache_max_entries) { if (lmcf->regex_cache_entries == lmcf->regex_cache_max_entries) { ngx_log_error(NGX_LOG_WARN, s->connection->log, 0, "stream lua exceeding regex cache max " "entries (%i)", lmcf->regex_cache_max_entries); lmcf->regex_cache_entries++; } pool = s->connection->pool; flags &= ~NGX_LUA_RE_COMPILE_ONCE; } } else { pool = s->connection->pool; } dd("pool %p, s pool %p", pool, s->connection->pool); re_comp.pattern = pat; 
re_comp.err.len = NGX_MAX_CONF_ERRSTR; re_comp.err.data = errstr; re_comp.pool = pool; ngx_log_debug5(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua compiling match regex \"%s\" with " "options \"%s\" (compile once: %d) (dfa mode: %d) " "(jit mode: %d)", pat.data, opts.data, (flags & NGX_LUA_RE_COMPILE_ONCE) != 0, (flags & NGX_LUA_RE_MODE_DFA) != 0, (flags & NGX_LUA_RE_MODE_JIT) != 0); old_pool = ngx_stream_lua_pcre_malloc_init(pool); rc = ngx_stream_lua_regex_compile(&re_comp); ngx_stream_lua_pcre_malloc_done(old_pool); if (rc != NGX_OK) { dd("compile failed"); lua_pushnil(L); if (!wantcaps) { lua_pushnil(L); } lua_pushlstring(L, (char *) re_comp.err.data, re_comp.err.len); return wantcaps ? 2 : 3; } #if (LUA_HAVE_PCRE_JIT) if (flags & NGX_LUA_RE_MODE_JIT) { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, PCRE_STUDY_JIT_COMPILE, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) dd("sd = %p", sd); if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } if (sd != NULL) { int jitted; old_pool = ngx_stream_lua_pcre_malloc_init(pool); pcre_fullinfo(re_comp.regex, sd, PCRE_INFO_JIT, &jitted); ngx_stream_lua_pcre_malloc_done(old_pool); ngx_log_debug1(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre JIT compiling result: %d", jitted); } # endif /* !(NGX_DEBUG) */ } else { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, 0, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) dd("sd = %p", sd); if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre_study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } # endif /* NGX_DEBUG */ } #else /* !(LUA_HAVE_PCRE_JIT) */ if (flags & NGX_LUA_RE_MODE_JIT) { ngx_log_debug0(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "your pcre build does not have JIT support and " "the \"j\" regex option is ignored"); } #endif /* LUA_HAVE_PCRE_JIT */ if (sd && lmcf->regex_match_limit > 0) { sd->flags |= PCRE_EXTRA_MATCH_LIMIT; sd->match_limit = lmcf->regex_match_limit; } dd("compile done, captures %d", (int) re_comp.captures); if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re_comp.captures + 1) * 3; } dd("allocating cap with size: %d", (int) ovecsize); cap = ngx_palloc(pool, ovecsize * sizeof(int)); if (cap == NULL) { flags &= ~NGX_LUA_RE_COMPILE_ONCE; msg = "no memory"; goto error; } if (flags & NGX_LUA_RE_COMPILE_ONCE) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua saving compiled regex (%d captures) into " "the cache (entries %i)", re_comp.captures, lmcf->regex_cache_entries); re = ngx_palloc(pool, sizeof(ngx_stream_lua_regex_t)); if (re == NULL) { msg = "no memory"; goto error; } dd("saving regex %p, ncaptures %d, captures %p", re_comp.regex, re_comp.captures, cap); re->regex = re_comp.regex; re->regex_sd = sd; re->ncaptures = re_comp.captures; re->captures = cap; re->replace = NULL; lua_pushlightuserdata(L, re); /* table key value */ lua_rawset(L, -3); /* table */ lua_pop(L, 1); lmcf->regex_cache_entries++; } exec: if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMECOUNT, &name_count) != 0) { msg = "cannot acquire named subpattern count"; goto error; } if (name_count > 0) { if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMEENTRYSIZE, &name_entry_size) != 0) { msg = "cannot acquire named subpattern entry size"; goto error; } if (pcre_fullinfo(re_comp.regex, NULL, 
PCRE_INFO_NAMETABLE, &name_table) != 0) { msg = "cannot acquire named subpattern table"; goto error; } } if (flags & NGX_LUA_RE_NO_UTF8_CHECK) { exec_opts = PCRE_NO_UTF8_CHECK; } else { exec_opts = 0; } if (flags & NGX_LUA_RE_MODE_DFA) { #if LUA_HAVE_PCRE_DFA int ws[NGX_LUA_RE_DFA_MODE_WORKSPACE_COUNT]; rc = ngx_stream_lua_regex_dfa_exec(re_comp.regex, sd, &subj, (int) pos, cap, ovecsize, ws, sizeof(ws)/sizeof(ws[0]), exec_opts); #else /* LUA_HAVE_PCRE_DFA */ msg = "at least pcre 6.0 is required for the DFA mode"; goto error; #endif /* LUA_HAVE_PCRE_DFA */ } else { rc = ngx_stream_lua_regex_exec(re_comp.regex, sd, &subj, (int) pos, cap, ovecsize, exec_opts); } if (rc == NGX_REGEX_NO_MATCHED) { ngx_log_debug3(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "regex \"%V\" not matched on string \"%V\" starting " "from %i", &pat, &subj, pos); if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } ngx_pfree(pool, re_comp.regex); ngx_pfree(pool, cap); } lua_pushnil(L); return 1; } if (rc < 0) { msg = lua_pushfstring(L, ngx_regex_exec_n " failed: %d", (int) rc); goto error; } if (rc == 0) { if (flags & NGX_LUA_RE_MODE_DFA) { rc = 1; } else { msg = "capture size too small"; goto error; } } dd("rc = %d", (int) rc); if (nargs == 4) { /* having ctx table */ pos = cap[1]; lua_pushinteger(L, (lua_Integer) (pos + 1)); lua_setfield(L, 4, "pos"); } if (!wantcaps) { if (group_id > re_comp.captures) { lua_pushnil(L); lua_pushnil(L); lua_pushliteral(L, "nth out of bound"); return 3; } if (group_id >= rc) { lua_pushnil(L); lua_pushnil(L); return 2; } { int from, to; from = cap[group_id * 2] + 1; to = cap[group_id * 2 + 1]; if (from < 0 || to < 0) { lua_pushnil(L); lua_pushnil(L); return 2; } lua_pushinteger(L, from); lua_pushinteger(L, to); return 2; } } if (res_tb_idx == 0) { lua_createtable(L, rc /* narr */, 0 /* nrec */); res_tb_idx = lua_gettop(L); } for (i = 0, n = 0; i < rc; i++, n += 2) { dd("capture %d: %d %d", i, cap[n], cap[n + 1]); if (cap[n] < 0) { lua_pushnil(L); } else { lua_pushlstring(L, (char *) &subj.data[cap[n]], cap[n + 1] - cap[n]); dd("pushing capture %s at %d", lua_tostring(L, -1), (int) i); } lua_rawseti(L, res_tb_idx, (int) i); } if (name_count > 0) { ngx_stream_lua_re_collect_named_captures(L, res_tb_idx, name_table, name_count, name_entry_size, flags, &subj); } if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } ngx_pfree(pool, re_comp.regex); ngx_pfree(pool, cap); } return 1; error: if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } if (re_comp.regex) { ngx_pfree(pool, re_comp.regex); } if (cap) { ngx_pfree(pool, cap); } } lua_pushnil(L); if (!wantcaps) { lua_pushnil(L); } lua_pushstring(L, msg); return wantcaps ? 
2 : 3; } static int ngx_stream_lua_ngx_re_gmatch(lua_State *L) { ngx_stream_lua_main_conf_t *lmcf; ngx_stream_session_t *s; ngx_str_t subj; ngx_str_t pat; ngx_str_t opts; int ovecsize; ngx_stream_lua_regex_t *re; ngx_stream_lua_regex_ctx_t *ctx; const char *msg; int nargs; ngx_int_t flags; int *cap = NULL; ngx_int_t rc; ngx_pool_t *pool, *old_pool; u_char errstr[NGX_MAX_CONF_ERRSTR + 1]; pcre_extra *sd = NULL; ngx_stream_lua_cleanup_t *cln; ngx_stream_lua_regex_compile_t re_comp; nargs = lua_gettop(L); if (nargs != 2 && nargs != 3) { return luaL_error(L, "expecting two or three arguments, but got %d", nargs); } s = ngx_stream_lua_get_session(L); if (s == NULL) { return luaL_error(L, "no session object found"); } subj.data = (u_char *) luaL_checklstring(L, 1, &subj.len); pat.data = (u_char *) luaL_checklstring(L, 2, &pat.len); if (nargs == 3) { opts.data = (u_char *) luaL_checklstring(L, 3, &opts.len); lua_pop(L, 1); } else { opts.data = (u_char *) ""; opts.len = 0; } /* stack: subj regex */ re_comp.options = 0; flags = ngx_stream_lua_ngx_re_parse_opts(L, &re_comp, &opts, 3); lmcf = ngx_stream_get_module_main_conf(s, ngx_stream_lua_module); if (flags & NGX_LUA_RE_COMPILE_ONCE) { pool = lmcf->pool; dd("server pool %p", lmcf->pool); lua_pushlightuserdata(L, &ngx_stream_lua_regex_cache_key); lua_rawget(L, LUA_REGISTRYINDEX); /* table */ lua_pushliteral(L, "m"); lua_pushvalue(L, 2); /* table regex */ dd("options size: %d", (int) sizeof(re_comp.options)); lua_pushlstring(L, (char *) &re_comp.options, sizeof(re_comp.options)); /* table regex opts */ lua_concat(L, 3); /* table key */ lua_pushvalue(L, -1); /* table key key */ dd("regex cache key: %.*s", (int) (pat.len + sizeof(re_comp.options)), lua_tostring(L, -1)); lua_rawget(L, -3); /* table key re */ re = lua_touserdata(L, -1); lua_pop(L, 1); /* table key */ if (re) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua regex cache hit for match regex \"%s\" " "with options \"%s\"", pat.data, opts.data); lua_pop(L, 2); dd("restoring regex %p, ncaptures %d, captures %p", re->regex, re->ncaptures, re->captures); re_comp.regex = re->regex; sd = re->regex_sd; re_comp.captures = re->ncaptures; cap = re->captures; if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re->ncaptures + 1) * 3; } goto compiled; } ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua regex cache miss for match regex \"%s\" " "with options \"%s\"", pat.data, opts.data); if (lmcf->regex_cache_entries >= lmcf->regex_cache_max_entries) { if (lmcf->regex_cache_entries == lmcf->regex_cache_max_entries) { ngx_log_error(NGX_LOG_WARN, s->connection->log, 0, "stream lua exceeding regex cache max " "entries (%i)", lmcf->regex_cache_max_entries); lmcf->regex_cache_entries++; } pool = s->connection->pool; flags &= ~NGX_LUA_RE_COMPILE_ONCE; } } else { pool = s->connection->pool; } re_comp.pattern = pat; re_comp.err.len = NGX_MAX_CONF_ERRSTR; re_comp.err.data = errstr; re_comp.pool = pool; ngx_log_debug5(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua compiling gmatch regex \"%s\" with " "options \"%s\" (compile once: %d) (dfa mode: %d) " "(jit mode: %d)", pat.data, opts.data, (flags & NGX_LUA_RE_COMPILE_ONCE) != 0, (flags & NGX_LUA_RE_MODE_DFA) != 0, (flags & NGX_LUA_RE_MODE_JIT) != 0); old_pool = ngx_stream_lua_pcre_malloc_init(pool); rc = ngx_stream_lua_regex_compile(&re_comp); ngx_stream_lua_pcre_malloc_done(old_pool); if (rc != NGX_OK) { dd("compile failed"); lua_pushnil(L); lua_pushlstring(L, (char *) re_comp.err.data, 
re_comp.err.len); return 2; } #if LUA_HAVE_PCRE_JIT if (flags & NGX_LUA_RE_MODE_JIT) { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, PCRE_STUDY_JIT_COMPILE, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) dd("sd = %p", sd); if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre_study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } if (sd != NULL) { int jitted; old_pool = ngx_stream_lua_pcre_malloc_init(pool); pcre_fullinfo(re_comp.regex, sd, PCRE_INFO_JIT, &jitted); ngx_stream_lua_pcre_malloc_done(old_pool); ngx_log_debug1(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre JIT compiling result: %d", jitted); } # endif /* NGX_DEBUG */ } else { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, 0, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) dd("sd = %p", sd); if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } # endif /* NGX_DEBUG */ } #else /* LUA_HAVE_PCRE_JIT */ if (flags & NGX_LUA_RE_MODE_JIT) { ngx_log_debug0(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "your pcre build does not have JIT support and " "the \"j\" regex option is ignored"); } #endif /* LUA_HAVE_PCRE_JIT */ if (sd && lmcf->regex_match_limit > 0) { sd->flags |= PCRE_EXTRA_MATCH_LIMIT; sd->match_limit = lmcf->regex_match_limit; } dd("compile done, captures %d", re_comp.captures); if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re_comp.captures + 1) * 3; } cap = ngx_palloc(pool, ovecsize * sizeof(int)); if (cap == NULL) { flags &= ~NGX_LUA_RE_COMPILE_ONCE; msg = "no memory"; goto error; } if (flags & NGX_LUA_RE_COMPILE_ONCE) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua saving compiled regex (%d captures) into " "the cache (entries %i)", re_comp.captures, lmcf->regex_cache_entries); re = ngx_palloc(pool, sizeof(ngx_stream_lua_regex_t)); if (re == NULL) { msg = "no memory"; goto error; } dd("saving regex %p, ncaptures %d, captures %p", re_comp.regex, re_comp.captures, cap); re->regex = re_comp.regex; re->regex_sd = sd; re->ncaptures = re_comp.captures; re->captures = cap; re->replace = NULL; lua_pushlightuserdata(L, re); /* table key value */ lua_rawset(L, -3); /* table */ lua_pop(L, 1); lmcf->regex_cache_entries++; } compiled: lua_settop(L, 1); ctx = lua_newuserdata(L, sizeof(ngx_stream_lua_regex_ctx_t)); ctx->session = s; ctx->regex = re_comp.regex; ctx->regex_sd = sd; ctx->ncaptures = re_comp.captures; ctx->captures = cap; ctx->captures_len = ovecsize; ctx->flags = (uint8_t) flags; if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { lua_createtable(L, 0 /* narr */, 1 /* nrec */); /* metatable */ lua_pushcfunction(L, ngx_stream_lua_ngx_re_gmatch_gc); lua_setfield(L, -2, "__gc"); lua_setmetatable(L, -2); cln = ngx_stream_lua_cleanup_add(s, 0); if (cln == NULL) { msg = "no memory"; goto error; } cln->handler = ngx_stream_lua_ngx_re_gmatch_cleanup; cln->data = ctx; ctx->cleanup = &cln->handler; } else { ctx->cleanup = NULL; } lua_pushinteger(L, 0); /* upvalues in order: subj ctx offset */ lua_pushcclosure(L, ngx_stream_lua_ngx_re_gmatch_iterator, 3); return 1; error: if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } if (re_comp.regex) { ngx_pfree(pool, re_comp.regex); } if (cap) { ngx_pfree(pool, cap); } } lua_pushnil(L); lua_pushstring(L, msg); return 2; } static int 
ngx_stream_lua_ngx_re_gmatch_iterator(lua_State *L) { ngx_stream_lua_regex_ctx_t *ctx; ngx_stream_session_t *s; int *cap; ngx_int_t rc; ngx_uint_t n; int i; ngx_str_t subj; int offset; const char *msg = NULL; int name_entry_size = 0, name_count; u_char *name_table = NULL; int exec_opts; /* upvalues in order: subj ctx offset */ subj.data = (u_char *) lua_tolstring(L, lua_upvalueindex(1), &subj.len); ctx = (ngx_stream_lua_regex_ctx_t *) lua_touserdata(L, lua_upvalueindex(2)); offset = (int) lua_tointeger(L, lua_upvalueindex(3)); if (offset < 0) { lua_pushnil(L); return 1; } cap = ctx->captures; dd("offset %d, s %p, subj %s", (int) offset, ctx->session, subj.data); s = ngx_stream_lua_get_session(L); if (s == NULL) { return luaL_error(L, "no session object found"); } if (s != ctx->session || s->connection->pool != ctx->session->connection->pool) { return luaL_error(L, "attempt to use ngx.re.gmatch iterator in a " "session that did not create it"); } dd("regex exec..."); if (pcre_fullinfo(ctx->regex, NULL, PCRE_INFO_NAMECOUNT, &name_count) != 0) { msg = "cannot acquire named subpattern count"; goto error; } if (name_count > 0) { if (pcre_fullinfo(ctx->regex, NULL, PCRE_INFO_NAMEENTRYSIZE, &name_entry_size) != 0) { msg = "cannot acquire named subpattern entry size"; goto error; } if (pcre_fullinfo(ctx->regex, NULL, PCRE_INFO_NAMETABLE, &name_table) != 0) { msg = "cannot acquire named subpattern table"; goto error; } } if (ctx->flags & NGX_LUA_RE_NO_UTF8_CHECK) { exec_opts = PCRE_NO_UTF8_CHECK; } else { exec_opts = 0; } if (ctx->flags & NGX_LUA_RE_MODE_DFA) { #if LUA_HAVE_PCRE_DFA int ws[NGX_LUA_RE_DFA_MODE_WORKSPACE_COUNT]; rc = ngx_stream_lua_regex_dfa_exec(ctx->regex, ctx->regex_sd, &subj, offset, cap, ctx->captures_len, ws, sizeof(ws)/sizeof(ws[0]), exec_opts); #else /* LUA_HAVE_PCRE_DFA */ msg = "at least pcre 6.0 is required for the DFA mode"; goto error; #endif /* LUA_HAVE_PCRE_DFA */ } else { rc = ngx_stream_lua_regex_exec(ctx->regex, ctx->regex_sd, &subj, offset, cap, ctx->captures_len, exec_opts); } if (rc == NGX_REGEX_NO_MATCHED) { /* set upvalue "offset" to -1 */ lua_pushinteger(L, -1); lua_replace(L, lua_upvalueindex(3)); if (!(ctx->flags & NGX_LUA_RE_COMPILE_ONCE)) { if (ctx->regex_sd) { ngx_stream_lua_regex_free_study_data(s->connection->pool, ctx->regex_sd); ctx->regex_sd = NULL; } ngx_pfree(s->connection->pool, cap); } lua_pushnil(L); return 1; } if (rc < 0) { msg = lua_pushfstring(L, ngx_regex_exec_n " failed: %d", (int) rc); goto error; } if (rc == 0) { if (ctx->flags & NGX_LUA_RE_MODE_DFA) { rc = 1; } else { goto error; } } dd("rc = %d", (int) rc); lua_createtable(L, rc /* narr */, 0 /* nrec */); for (i = 0, n = 0; i < rc; i++, n += 2) { dd("capture %d: %d %d", i, cap[n], cap[n + 1]); if (cap[n] < 0) { lua_pushnil(L); } else { lua_pushlstring(L, (char *) &subj.data[cap[n]], cap[n + 1] - cap[n]); dd("pushing capture %s at %d", lua_tostring(L, -1), (int) i); } lua_rawseti(L, -2, (int) i); } if (name_count > 0) { ngx_stream_lua_re_collect_named_captures(L, lua_gettop(L), name_table, name_count, name_entry_size, ctx->flags, &subj); } offset = cap[1]; if (offset == cap[0]) { offset++; } if (offset > (ssize_t) subj.len) { offset = -1; if (!(ctx->flags & NGX_LUA_RE_COMPILE_ONCE)) { if (ctx->regex_sd) { ngx_stream_lua_regex_free_study_data(s->connection->pool, ctx->regex_sd); ctx->regex_sd = NULL; } ngx_pfree(s->connection->pool, cap); } } lua_pushinteger(L, offset); lua_replace(L, lua_upvalueindex(3)); return 1; error: lua_pushinteger(L, -1); lua_replace(L, lua_upvalueindex(3)); if 
(!(ctx->flags & NGX_LUA_RE_COMPILE_ONCE)) { if (ctx->regex_sd) { ngx_stream_lua_regex_free_study_data(s->connection->pool, ctx->regex_sd); ctx->regex_sd = NULL; } ngx_pfree(s->connection->pool, cap); } lua_pushnil(L); lua_pushstring(L, msg); return 2; } static ngx_uint_t ngx_stream_lua_ngx_re_parse_opts(lua_State *L, ngx_stream_lua_regex_compile_t *re, ngx_str_t *opts, int narg) { u_char *p; const char *msg; ngx_uint_t flags; flags = 0; p = opts->data; while (*p != '\0') { switch (*p) { case 'i': re->options |= NGX_REGEX_CASELESS; break; case 's': re->options |= PCRE_DOTALL; break; case 'm': re->options |= PCRE_MULTILINE; break; case 'u': re->options |= PCRE_UTF8; break; case 'U': re->options |= PCRE_UTF8; flags |= NGX_LUA_RE_NO_UTF8_CHECK; break; case 'x': re->options |= PCRE_EXTENDED; break; case 'o': flags |= NGX_LUA_RE_COMPILE_ONCE; break; case 'j': flags |= NGX_LUA_RE_MODE_JIT; break; case 'd': flags |= NGX_LUA_RE_MODE_DFA; break; case 'a': re->options |= PCRE_ANCHORED; break; #if (PCRE_MAJOR > 8) || (PCRE_MAJOR == 8 && PCRE_MINOR >= 12) case 'D': re->options |= PCRE_DUPNAMES; flags |= NGX_LUA_RE_MODE_DUPNAMES; break; case 'J': re->options |= PCRE_JAVASCRIPT_COMPAT; break; #endif default: msg = lua_pushfstring(L, "unknown flag \"%c\" (flags \"%s\")", *p, opts->data); return luaL_argerror(L, narg, msg); } p++; } /* pcre does not support JIT for DFA mode yet, * so if DFA mode is specified, we turn off JIT automatically * */ if ((flags & NGX_LUA_RE_MODE_JIT) && (flags & NGX_LUA_RE_MODE_DFA)) { flags &= ~NGX_LUA_RE_MODE_JIT; } return flags; } static int ngx_stream_lua_ngx_re_sub(lua_State *L) { return ngx_stream_lua_ngx_re_sub_helper(L, 0 /* global */); } static int ngx_stream_lua_ngx_re_gsub(lua_State *L) { return ngx_stream_lua_ngx_re_sub_helper(L, 1 /* global */); } static int ngx_stream_lua_ngx_re_sub_helper(lua_State *L, unsigned global) { ngx_stream_lua_regex_t *re; ngx_stream_session_t *s; ngx_str_t subj; ngx_str_t pat; ngx_str_t opts; ngx_str_t tpl; ngx_stream_lua_main_conf_t *lmcf; ngx_pool_t *pool, *old_pool; const char *msg; ngx_int_t rc; ngx_uint_t n; ngx_int_t i; int nargs; int *cap = NULL; int ovecsize; int type; unsigned func; int offset; int cp_offset; size_t count; luaL_Buffer luabuf; ngx_int_t flags; u_char *p; u_char errstr[NGX_MAX_CONF_ERRSTR + 1]; pcre_extra *sd = NULL; int name_entry_size = 0, name_count; u_char *name_table = NULL; int exec_opts; ngx_stream_lua_regex_compile_t re_comp; ngx_stream_lua_complex_value_t *ctpl = NULL; ngx_stream_lua_compile_complex_value_t ccv; nargs = lua_gettop(L); if (nargs != 3 && nargs != 4) { return luaL_error(L, "expecting three or four arguments, but got %d", nargs); } s = ngx_stream_lua_get_session(L); if (s == NULL) { return luaL_error(L, "no session object found"); } subj.data = (u_char *) luaL_checklstring(L, 1, &subj.len); pat.data = (u_char *) luaL_checklstring(L, 2, &pat.len); func = 0; type = lua_type(L, 3); switch (type) { case LUA_TFUNCTION: func = 1; tpl.len = 0; tpl.data = (u_char *) ""; break; case LUA_TNUMBER: case LUA_TSTRING: tpl.data = (u_char *) lua_tolstring(L, 3, &tpl.len); break; default: msg = lua_pushfstring(L, "string, number, or function expected, " "got %s", lua_typename(L, type)); return luaL_argerror(L, 3, msg); } ngx_memzero(&re_comp, sizeof(ngx_stream_lua_regex_compile_t)); if (nargs == 4) { opts.data = (u_char *) luaL_checklstring(L, 4, &opts.len); lua_pop(L, 1); } else { /* nargs == 3 */ opts.data = (u_char *) ""; opts.len = 0; } /* stack: subj regex repl */ re_comp.options = 0; flags = 
ngx_stream_lua_ngx_re_parse_opts(L, &re_comp, &opts, 4); lmcf = ngx_stream_get_module_main_conf(s, ngx_stream_lua_module); if (flags & NGX_LUA_RE_COMPILE_ONCE) { pool = lmcf->pool; dd("server pool %p", lmcf->pool); lua_pushlightuserdata(L, &ngx_stream_lua_regex_cache_key); lua_rawget(L, LUA_REGISTRYINDEX); /* table */ lua_pushliteral(L, "s"); lua_pushinteger(L, tpl.len); lua_pushliteral(L, ":"); lua_pushvalue(L, 2); if (tpl.len != 0) { lua_pushvalue(L, 3); } dd("options size: %d", (int) sizeof(re_comp.options)); lua_pushlstring(L, (char *) &re_comp.options, sizeof(re_comp.options)); /* table regex opts */ if (tpl.len == 0) { lua_concat(L, 5); /* table key */ } else { lua_concat(L, 6); /* table key */ } lua_pushvalue(L, -1); /* table key key */ dd("regex cache key: %.*s", (int) (pat.len + sizeof(re_comp.options)), lua_tostring(L, -1)); lua_rawget(L, -3); /* table key re */ re = lua_touserdata(L, -1); lua_pop(L, 1); /* table key */ if (re) { ngx_log_debug3(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua regex cache hit for sub regex \"%s\" " "with options \"%s\" and replace \"%s\"", pat.data, opts.data, func ? (u_char *) "<func>" : tpl.data); lua_pop(L, 2); dd("restoring regex %p, ncaptures %d, captures %p", re->regex, re->ncaptures, re->captures); re_comp.regex = re->regex; sd = re->regex_sd; re_comp.captures = re->ncaptures; cap = re->captures; ctpl = re->replace; if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re->ncaptures + 1) * 3; } goto exec; } ngx_log_debug4(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua regex cache miss for %ssub regex \"%s\" " "with options \"%s\" and replace \"%s\"", global ? "g" : "", pat.data, opts.data, func ? (u_char *) "<func>" : tpl.data); if (lmcf->regex_cache_entries >= lmcf->regex_cache_max_entries) { if (lmcf->regex_cache_entries == lmcf->regex_cache_max_entries) { ngx_log_error(NGX_LOG_WARN, s->connection->log, 0, "stream lua exceeding regex cache max " "entries (%i)", lmcf->regex_cache_max_entries); lmcf->regex_cache_entries++; } pool = s->connection->pool; flags &= ~NGX_LUA_RE_COMPILE_ONCE; } } else { pool = s->connection->pool; } re_comp.pattern = pat; re_comp.err.len = NGX_MAX_CONF_ERRSTR; re_comp.err.data = errstr; re_comp.pool = pool; dd("compiling regex"); ngx_log_debug6(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua compiling %ssub regex \"%s\" with options " "\"%s\" (compile once: %d) (dfa mode: %d) (jit mode: %d)", global ? 
"g" : "", pat.data, opts.data, (flags & NGX_LUA_RE_COMPILE_ONCE) != 0, (flags & NGX_LUA_RE_MODE_DFA) != 0, (flags & NGX_LUA_RE_MODE_JIT) != 0); old_pool = ngx_stream_lua_pcre_malloc_init(pool); rc = ngx_stream_lua_regex_compile(&re_comp); ngx_stream_lua_pcre_malloc_done(old_pool); if (rc != NGX_OK) { dd("compile failed"); lua_pushnil(L); lua_pushnil(L); lua_pushlstring(L, (char *) re_comp.err.data, re_comp.err.len); return 3; } #if LUA_HAVE_PCRE_JIT if (flags & NGX_LUA_RE_MODE_JIT) { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, PCRE_STUDY_JIT_COMPILE, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) dd("sd = %p", sd); if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } if (sd != NULL) { int jitted; old_pool = ngx_stream_lua_pcre_malloc_init(pool); pcre_fullinfo(re_comp.regex, sd, PCRE_INFO_JIT, &jitted); ngx_stream_lua_pcre_malloc_done(old_pool); ngx_log_debug1(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre JIT compiling result: %d", jitted); } # endif /* NGX_DEBUG */ } else { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, 0, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) dd("sd = %p", sd); if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "pcre_study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } # endif /* NGX_DEBUG */ } #else /* LUA_HAVE_PCRE_JIT */ if (flags & NGX_LUA_RE_MODE_JIT) { ngx_log_debug0(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "your pcre build does not have JIT support and " "the \"j\" regex option is ignored"); } #endif /* LUA_HAVE_PCRE_JIT */ if (sd && lmcf->regex_match_limit > 0) { sd->flags |= PCRE_EXTRA_MATCH_LIMIT; sd->match_limit = lmcf->regex_match_limit; } dd("compile done, captures %d", re_comp.captures); if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re_comp.captures + 1) * 3; } cap = ngx_palloc(pool, ovecsize * sizeof(int)); if (cap == NULL) { flags &= ~NGX_LUA_RE_COMPILE_ONCE; msg = "no memory"; goto error; } if (func) { ctpl = NULL; } else { ctpl = ngx_palloc(pool, sizeof(ngx_stream_lua_complex_value_t)); if (ctpl == NULL) { flags &= ~NGX_LUA_RE_COMPILE_ONCE; msg = "no memory"; goto error; } if ((flags & NGX_LUA_RE_COMPILE_ONCE) && tpl.len != 0) { /* copy the string buffer pointed to by tpl.data from Lua VM */ p = ngx_palloc(pool, tpl.len + 1); if (p == NULL) { flags &= ~NGX_LUA_RE_COMPILE_ONCE; msg = "no memory"; goto error; } ngx_memcpy(p, tpl.data, tpl.len); p[tpl.len] = '\0'; tpl.data = p; } ngx_memzero(&ccv, sizeof(ngx_stream_lua_compile_complex_value_t)); ccv.pool = pool; ccv.log = s->connection->log; ccv.value = &tpl; ccv.complex_value = ctpl; if (ngx_stream_lua_compile_complex_value(&ccv) != NGX_OK) { ngx_pfree(pool, cap); ngx_pfree(pool, ctpl); if ((flags & NGX_LUA_RE_COMPILE_ONCE) && tpl.len != 0) { ngx_pfree(pool, tpl.data); } if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } ngx_pfree(pool, re_comp.regex); lua_pushnil(L); lua_pushnil(L); lua_pushliteral(L, "failed to compile the replacement template"); return 3; } } if (flags & NGX_LUA_RE_COMPILE_ONCE) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, s->connection->log, 0, "stream lua saving compiled sub regex (%d captures) " "into the cache (entries %i)", re_comp.captures, lmcf->regex_cache_entries); re = ngx_palloc(pool, sizeof(ngx_stream_lua_regex_t)); if (re == NULL) { msg = "no memory"; goto error; } dd("saving regex 
%p, ncaptures %d, captures %p", re_comp.regex, re_comp.captures, cap); re->regex = re_comp.regex; re->regex_sd = sd; re->ncaptures = re_comp.captures; re->captures = cap; re->replace = ctpl; lua_pushlightuserdata(L, re); /* table key value */ lua_rawset(L, -3); /* table */ lua_pop(L, 1); lmcf->regex_cache_entries++; } exec: count = 0; offset = 0; cp_offset = 0; if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMECOUNT, &name_count) != 0) { msg = "cannot acquire named subpattern count"; goto error; } if (name_count > 0) { if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMEENTRYSIZE, &name_entry_size) != 0) { msg = "cannot acquire named subpattern entry size"; goto error; } if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMETABLE, &name_table) != 0) { msg = "cannot acquire named subpattern table"; goto error; } } if (flags & NGX_LUA_RE_NO_UTF8_CHECK) { exec_opts = PCRE_NO_UTF8_CHECK; } else { exec_opts = 0; } for (;;) { if (flags & NGX_LUA_RE_MODE_DFA) { #if LUA_HAVE_PCRE_DFA int ws[NGX_LUA_RE_DFA_MODE_WORKSPACE_COUNT]; rc = ngx_stream_lua_regex_dfa_exec(re_comp.regex, sd, &subj, offset, cap, ovecsize, ws, sizeof(ws)/sizeof(ws[0]), exec_opts); #else /* LUA_HAVE_PCRE_DFA */ msg = "at least pcre 6.0 is required for the DFA mode"; goto error; #endif /* LUA_HAVE_PCRE_DFA */ } else { rc = ngx_stream_lua_regex_exec(re_comp.regex, sd, &subj, offset, cap, ovecsize, exec_opts); } if (rc == NGX_REGEX_NO_MATCHED) { break; } if (rc < 0) { msg = lua_pushfstring(L, ngx_regex_exec_n " failed: %d", (int) rc); goto error; } if (rc == 0) { if (flags & NGX_LUA_RE_MODE_DFA) { rc = 1; } else { msg = "capture size too small"; goto error; } } dd("rc = %d", (int) rc); count++; if (count == 1) { luaL_buffinit(L, &luabuf); } if (func) { lua_pushvalue(L, 3); lua_createtable(L, rc - 1 /* narr */, 1 /* nrec */); for (i = 0, n = 0; i < rc; i++, n += 2) { dd("capture %d: %d %d", (int) i, cap[n], cap[n + 1]); if (cap[n] < 0) { lua_pushnil(L); } else { lua_pushlstring(L, (char *) &subj.data[cap[n]], cap[n + 1] - cap[n]); dd("pushing capture %s at %d", lua_tostring(L, -1), (int) i); } lua_rawseti(L, -2, (int) i); } if (name_count > 0) { ngx_stream_lua_re_collect_named_captures(L, lua_gettop(L), name_table, name_count, name_entry_size, flags, &subj); } dd("stack size at call: %d", lua_gettop(L)); lua_call(L, 1 /* nargs */, 1 /* nresults */); type = lua_type(L, -1); switch (type) { case LUA_TNUMBER: case LUA_TSTRING: tpl.data = (u_char *) lua_tolstring(L, -1, &tpl.len); break; default: msg = lua_pushfstring(L, "string or number expected to be " "returned by the replace " "function, got %s", lua_typename(L, type)); return luaL_argerror(L, 3, msg); } lua_insert(L, 1); luaL_addlstring(&luabuf, (char *) &subj.data[cp_offset], cap[0] - cp_offset); luaL_addlstring(&luabuf, (char *) tpl.data, tpl.len); lua_remove(L, 1); cp_offset = cap[1]; offset = cp_offset; if (offset == cap[0]) { offset++; if (offset > (ssize_t) subj.len) { break; } } if (global) { continue; } break; } rc = ngx_stream_lua_complex_value(s, &subj, cp_offset, rc, cap, ctpl, &luabuf); if (rc != NGX_OK) { msg = lua_pushfstring(L, "failed to eval the template for " "replacement: \"%s\"", tpl.data); goto error; } cp_offset = cap[1]; offset = cp_offset; if (offset == cap[0]) { offset++; if (offset > (ssize_t) subj.len) { break; } } if (global) { continue; } break; } if (count == 0) { dd("no match, just the original subject"); lua_settop(L, 1); } else { if (offset < (int) subj.len) { dd("adding trailer: %s (len %d)", &subj.data[cp_offset], (int) (subj.len - 
cp_offset)); luaL_addlstring(&luabuf, (char *) &subj.data[cp_offset], subj.len - cp_offset); } luaL_pushresult(&luabuf); dd("the dst string: %s", lua_tostring(L, -1)); } if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } if (re_comp.regex) { ngx_pfree(pool, re_comp.regex); } if (ctpl) { ngx_pfree(pool, ctpl); } if (cap) { ngx_pfree(pool, cap); } } lua_pushinteger(L, count); return 2; error: if (!(flags & NGX_LUA_RE_COMPILE_ONCE)) { if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } if (re_comp.regex) { ngx_pfree(pool, re_comp.regex); } if (ctpl) { ngx_pfree(pool, ctpl); } if (cap) { ngx_pfree(pool, cap); } } lua_pushnil(L); lua_pushnil(L); lua_pushstring(L, msg); return 3; } void ngx_stream_lua_inject_regex_api(lua_State *L) { /* ngx.re */ lua_createtable(L, 0, 5 /* nrec */); /* .re */ lua_pushcfunction(L, ngx_stream_lua_ngx_re_find); lua_setfield(L, -2, "find"); lua_pushcfunction(L, ngx_stream_lua_ngx_re_match); lua_setfield(L, -2, "match"); lua_pushcfunction(L, ngx_stream_lua_ngx_re_gmatch); lua_setfield(L, -2, "gmatch"); lua_pushcfunction(L, ngx_stream_lua_ngx_re_sub); lua_setfield(L, -2, "sub"); lua_pushcfunction(L, ngx_stream_lua_ngx_re_gsub); lua_setfield(L, -2, "gsub"); lua_setfield(L, -2, "re"); } static void ngx_stream_lua_regex_free_study_data(ngx_pool_t *pool, pcre_extra *sd) { ngx_pool_t *old_pool; old_pool = ngx_stream_lua_pcre_malloc_init(pool); #if LUA_HAVE_PCRE_JIT pcre_free_study(sd); #else pcre_free(sd); #endif ngx_stream_lua_pcre_malloc_done(old_pool); } static ngx_int_t ngx_stream_lua_regex_compile(ngx_stream_lua_regex_compile_t *rc) { int n, erroff; char *p; const char *errstr; pcre *re; ngx_pool_t *old_pool; old_pool = ngx_stream_lua_pcre_malloc_init(rc->pool); re = pcre_compile((const char *) rc->pattern.data, (int) rc->options, &errstr, &erroff, NULL); ngx_stream_lua_pcre_malloc_done(old_pool); if (re == NULL) { if ((size_t) erroff == rc->pattern.len) { rc->err.len = ngx_snprintf(rc->err.data, rc->err.len, "pcre_compile() failed: %s in \"%V\"", errstr, &rc->pattern) - rc->err.data; } else { rc->err.len = ngx_snprintf(rc->err.data, rc->err.len, "pcre_compile() failed: %s in \"%V\" " "at \"%s\"", errstr, &rc->pattern, rc->pattern.data + erroff) - rc->err.data; } return NGX_ERROR; } rc->regex = re; #if 1 n = pcre_fullinfo(re, NULL, PCRE_INFO_CAPTURECOUNT, &rc->captures); if (n < 0) { p = "pcre_fullinfo(\"%V\", PCRE_INFO_CAPTURECOUNT) failed: %d"; goto failed; } #endif return NGX_OK; failed: rc->err.len = ngx_snprintf(rc->err.data, rc->err.len, p, &rc->pattern, n) - rc->err.data; return NGX_OK; } static void ngx_stream_lua_ngx_re_gmatch_cleanup(void *data) { ngx_stream_lua_regex_ctx_t *ctx = data; if (ctx) { if (ctx->regex_sd) { ngx_stream_lua_regex_free_study_data(ctx->session->connection->pool, ctx->regex_sd); ctx->regex_sd = NULL; } if (ctx->cleanup) { *ctx->cleanup = NULL; ctx->cleanup = NULL; } ctx->session = NULL; } return; } static int ngx_stream_lua_ngx_re_gmatch_gc(lua_State *L) { ngx_stream_lua_regex_ctx_t *ctx; ctx = lua_touserdata(L, 1); if (ctx && ctx->cleanup) { ngx_stream_lua_ngx_re_gmatch_cleanup(ctx); } return 0; } static void ngx_stream_lua_re_collect_named_captures(lua_State *L, int res_tb_idx, u_char *name_table, int name_count, int name_entry_size, unsigned flags, ngx_str_t *subj) { int i, n; size_t len; u_char *name_entry; char *name; for (i = 0; i < name_count; i++) { dd("top: %d", lua_gettop(L)); name_entry = &name_table[i * name_entry_size]; n = (name_entry[0] << 8) | name_entry[1]; 
name = (char *) &name_entry[2]; lua_rawgeti(L, -1, n); if (lua_isnil(L, -1)) { lua_pop(L, 1); continue; } if (flags & NGX_LUA_RE_MODE_DUPNAMES) { lua_getfield(L, -2, name); /* big_tb cap small_tb */ if (lua_isnil(L, -1)) { lua_pop(L, 1); /* assuming named submatches are usually unique */ lua_createtable(L, 1 /* narr */, 0 /* nrec */); lua_pushstring(L, name); lua_pushvalue(L, -2); /* big_tb cap small_tb key small_tb */ lua_rawset(L, res_tb_idx); /* big_tb cap small_tb */ len = 0; } else { len = lua_objlen(L, -1); } lua_pushvalue(L, -2); /* big_tb cap small_tb cap */ lua_rawseti(L, -2, (int) len + 1); /* big_tb cap small_tb */ lua_pop(L, 2); } else { lua_pushstring(L, name); /* big_tb cap key */ lua_pushvalue(L, -2); /* big_tb cap key cap */ lua_rawset(L, res_tb_idx); /* big_tb cap */ lua_pop(L, 1); } dd("top 2: %d", lua_gettop(L)); } } #ifndef NGX_LUA_NO_FFI_API ngx_stream_lua_regex_t * ngx_stream_lua_ffi_compile_regex(const unsigned char *pat, size_t pat_len, int flags, int pcre_opts, u_char *errstr, size_t errstr_size) { int *cap = NULL, ovecsize; u_char *p; ngx_int_t rc; const char *msg; ngx_pool_t *pool, *old_pool; pcre_extra *sd = NULL; ngx_stream_lua_regex_t *re; ngx_stream_lua_main_conf_t *lmcf; ngx_stream_lua_regex_compile_t re_comp; pool = ngx_create_pool(512, ngx_cycle->log); if (pool == NULL) { msg = "no memory"; goto error; } re = ngx_palloc(pool, sizeof(ngx_stream_lua_regex_t)); if (re == NULL) { ngx_destroy_pool(pool); pool = NULL; msg = "no memory"; goto error; } re->pool = pool; re_comp.options = pcre_opts; re_comp.pattern.data = (u_char *) pat; re_comp.pattern.len = pat_len; re_comp.err.len = errstr_size - 1; re_comp.err.data = errstr; re_comp.pool = pool; old_pool = ngx_stream_lua_pcre_malloc_init(pool); rc = ngx_stream_lua_regex_compile(&re_comp); ngx_stream_lua_pcre_malloc_done(old_pool); if (rc != NGX_OK) { re_comp.err.data[re_comp.err.len] = '\0'; msg = (char *) re_comp.err.data; goto error; } #if (LUA_HAVE_PCRE_JIT) if (flags & NGX_LUA_RE_MODE_JIT) { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, PCRE_STUDY_JIT_COMPILE, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); # if (NGX_DEBUG) if (msg != NULL) { ngx_log_debug2(NGX_LOG_DEBUG_STREAM, ngx_cycle->log, 0, "pcre study failed with PCRE_STUDY_JIT_COMPILE: " "%s (%p)", msg, sd); } if (sd != NULL) { int jitted; old_pool = ngx_stream_lua_pcre_malloc_init(pool); pcre_fullinfo(re_comp.regex, sd, PCRE_INFO_JIT, &jitted); ngx_stream_lua_pcre_malloc_done(old_pool); ngx_log_debug1(NGX_LOG_DEBUG_STREAM, ngx_cycle->log, 0, "pcre JIT compiling result: %d", jitted); } # endif /* !(NGX_DEBUG) */ } else { old_pool = ngx_stream_lua_pcre_malloc_init(pool); sd = pcre_study(re_comp.regex, 0, &msg); ngx_stream_lua_pcre_malloc_done(old_pool); } #endif /* LUA_HAVE_PCRE_JIT */ lmcf = ngx_stream_cycle_get_module_main_conf(ngx_cycle, ngx_stream_lua_module); if (sd && lmcf && lmcf->regex_match_limit > 0) { sd->flags |= PCRE_EXTRA_MATCH_LIMIT; sd->match_limit = lmcf->regex_match_limit; } if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re_comp.captures + 1) * 3; } dd("allocating cap with size: %d", (int) ovecsize); cap = ngx_palloc(pool, ovecsize * sizeof(int)); if (cap == NULL) { msg = "no memory"; goto error; } if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMECOUNT, &re->name_count) != 0) { msg = "cannot acquire named subpattern count"; goto error; } if (re->name_count > 0) { if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMEENTRYSIZE, &re->name_entry_size) != 0) { msg = "cannot 
acquire named subpattern entry size"; goto error; } if (pcre_fullinfo(re_comp.regex, NULL, PCRE_INFO_NAMETABLE, &re->name_table) != 0) { msg = "cannot acquire named subpattern table"; goto error; } } re->regex = re_comp.regex; re->regex_sd = sd; re->ncaptures = re_comp.captures; re->captures = cap; re->replace = NULL; /* only for (stap) debugging, the pointer might be invalid when the * string is collected later on.... */ re->pattern = pat; return re; error: p = ngx_snprintf(errstr, errstr_size - 1, "%s", msg); *p = '\0'; if (sd) { ngx_stream_lua_regex_free_study_data(pool, sd); } if (pool) { ngx_destroy_pool(pool); } return NULL; } int ngx_stream_lua_ffi_exec_regex(ngx_stream_lua_regex_t *re, int flags, const u_char *s, size_t len, int pos) { int rc, ovecsize, exec_opts, *cap; ngx_str_t subj; pcre_extra *sd; cap = re->captures; sd = re->regex_sd; if (flags & NGX_LUA_RE_MODE_DFA) { ovecsize = 2; } else { ovecsize = (re->ncaptures + 1) * 3; } if (flags & NGX_LUA_RE_NO_UTF8_CHECK) { exec_opts = PCRE_NO_UTF8_CHECK; } else { exec_opts = 0; } subj.data = (u_char *) s; subj.len = len; if (flags & NGX_LUA_RE_MODE_DFA) { #if LUA_HAVE_PCRE_DFA int ws[NGX_LUA_RE_DFA_MODE_WORKSPACE_COUNT]; rc = ngx_stream_lua_regex_dfa_exec(re->regex, sd, &subj, (int) pos, cap, ovecsize, ws, sizeof(ws)/sizeof(ws[0]), exec_opts); #else return PCRE_ERROR_BADOPTION; #endif /* LUA_HAVE_PCRE_DFA */ } else { rc = ngx_stream_lua_regex_exec(re->regex, sd, &subj, (int) pos, cap, ovecsize, exec_opts); } return rc; } void ngx_stream_lua_ffi_destroy_regex(ngx_stream_lua_regex_t *re) { ngx_pool_t *old_pool; dd("destroy regex called"); if (re == NULL || re->pool == NULL) { return; } if (re->regex_sd) { old_pool = ngx_stream_lua_pcre_malloc_init(re->pool); #if LUA_HAVE_PCRE_JIT pcre_free_study(re->regex_sd); #else pcre_free(re->regex_sd); #endif ngx_stream_lua_pcre_malloc_done(old_pool); re->regex_sd = NULL; } ngx_destroy_pool(re->pool); } int ngx_stream_lua_ffi_compile_replace_template(ngx_stream_lua_regex_t *re, const u_char *replace_data, size_t replace_len) { ngx_int_t rc; ngx_str_t tpl; ngx_stream_lua_complex_value_t *ctpl; ngx_stream_lua_compile_complex_value_t ccv; ctpl = ngx_palloc(re->pool, sizeof(ngx_stream_lua_complex_value_t)); if (ctpl == NULL) { return NGX_ERROR; } if (replace_len != 0) { /* copy the string buffer pointed to by tpl.data from Lua VM */ tpl.data = ngx_palloc(re->pool, replace_len + 1); if (tpl.data == NULL) { return NGX_ERROR; } ngx_memcpy(tpl.data, replace_data, replace_len); tpl.data[replace_len] = '\0'; } else { tpl.data = (u_char *) replace_data; } tpl.len = replace_len; ngx_memzero(&ccv, sizeof(ngx_stream_lua_compile_complex_value_t)); ccv.pool = re->pool; ccv.log = ngx_cycle->log; ccv.value = &tpl; ccv.complex_value = ctpl; rc = ngx_stream_lua_compile_complex_value(&ccv); re->replace = ctpl; return rc; } ngx_stream_lua_script_engine_t * ngx_stream_lua_ffi_create_script_engine(void) { return ngx_calloc(sizeof(ngx_stream_lua_script_engine_t), ngx_cycle->log); } void ngx_stream_lua_ffi_init_script_engine(ngx_stream_lua_script_engine_t *e, const unsigned char *subj, ngx_stream_lua_regex_t *compiled, int count) { e->log = ngx_cycle->log; e->ncaptures = count * 2; e->captures = compiled->captures; e->captures_data = (u_char *) subj; } void ngx_stream_lua_ffi_destroy_script_engine(ngx_stream_lua_script_engine_t *e) { ngx_free(e); } size_t ngx_stream_lua_ffi_script_eval_len(ngx_stream_lua_script_engine_t *e, ngx_stream_lua_complex_value_t *val) { size_t len; ngx_stream_lua_script_len_code_pt lcode; e->ip 
= val->lengths; len = 0; while (*(uintptr_t *) e->ip) { lcode = *(ngx_stream_lua_script_len_code_pt *) e->ip; len += lcode(e); } return len; } void ngx_stream_lua_ffi_script_eval_data(ngx_stream_lua_script_engine_t *e, ngx_stream_lua_complex_value_t *val, u_char *dst) { ngx_stream_lua_script_code_pt code; e->ip = val->values; e->pos = dst; while (*(uintptr_t *) e->ip) { code = *(ngx_stream_lua_script_code_pt *) e->ip; code(e); } } uint32_t ngx_stream_lua_ffi_max_regex_cache_size(void) { ngx_stream_lua_main_conf_t *lmcf; lmcf = ngx_stream_cycle_get_module_main_conf(ngx_cycle, ngx_stream_lua_module); if (lmcf == NULL) { return 0; } return (uint32_t) lmcf->regex_cache_max_entries; } #endif /* NGX_LUA_NO_FFI_API */ #endif /* NGX_PCRE */ /* vi:set ft=c ts=4 sw=4 et fdm=marker: */
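The FFI helpers above are designed to be driven in a fixed compile/exec/destroy order. The sketch below illustrates that sequence under stated assumptions: it presumes an nginx worker context (so ngx_cycle is available), and the function name ffi_regex_demo, the sample pattern, and the zeroed flags/pcre_opts values are illustrative placeholders rather than part of the module.

/* Illustrative sketch only (not part of the original file): drive the FFI
 * regex helpers defined above in their expected compile/exec/destroy order.
 * Assumes an nginx worker context; names marked "demo" are hypothetical. */
static void
ffi_regex_demo(void)
{
    u_char                     errstr[256];
    ngx_stream_lua_regex_t    *re;
    int                        rc;
    const u_char              *subj = (const u_char *) "hello, 1234";

    /* compile with no extra compile-time options and no special flags */
    re = ngx_stream_lua_ffi_compile_regex((const unsigned char *) "\\d+",
                                          sizeof("\\d+") - 1,
                                          0 /* flags */, 0 /* pcre_opts */,
                                          errstr, sizeof(errstr));
    if (re == NULL) {
        /* errstr now holds the compile error message */
        return;
    }

    /* run the compiled regex from position 0; a non-negative rc generally
     * indicates a match and re->captures holds the ovector offsets */
    rc = ngx_stream_lua_ffi_exec_regex(re, 0 /* flags */, subj,
                                       ngx_strlen(subj), 0 /* pos */);
    (void) rc;

    /* release the regex, its study data and its pool */
    ngx_stream_lua_ffi_destroy_regex(re);
}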
167523.c
/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-20     Bernard      first version
 * 2014-04-03     Grissiom     many enhancements
 * 2018-11-22     Jesven       add rt_hw_ipi_send()
 *                             add rt_hw_ipi_handler_install()
 */

#include <rthw.h>
#include <rtthread.h>

#include "gic.h"
#include "cp15.h"

struct arm_gic
{
    rt_uint32_t offset;         /* the first interrupt index in the vector table */

    rt_uint32_t dist_hw_base;   /* the base address of the gic distributor */
    rt_uint32_t cpu_hw_base;    /* the base address of the gic cpu interface */
};

/* 'ARM_GIC_MAX_NR' is the number of cores */
static struct arm_gic _gic_table[ARM_GIC_MAX_NR];

#define GIC_CPU_CTRL(hw_base)               __REG32((hw_base) + 0x00)
#define GIC_CPU_PRIMASK(hw_base)            __REG32((hw_base) + 0x04)
#define GIC_CPU_BINPOINT(hw_base)           __REG32((hw_base) + 0x08)
#define GIC_CPU_INTACK(hw_base)             __REG32((hw_base) + 0x0c)
#define GIC_CPU_EOI(hw_base)                __REG32((hw_base) + 0x10)
#define GIC_CPU_RUNNINGPRI(hw_base)         __REG32((hw_base) + 0x14)
#define GIC_CPU_HIGHPRI(hw_base)            __REG32((hw_base) + 0x18)

#define GIC_DIST_CTRL(hw_base)              __REG32((hw_base) + 0x000)
#define GIC_DIST_TYPE(hw_base)              __REG32((hw_base) + 0x004)
#define GIC_DIST_IGROUP(hw_base, n)         __REG32((hw_base) + 0x080 + ((n)/32) * 4)
#define GIC_DIST_ENABLE_SET(hw_base, n)     __REG32((hw_base) + 0x100 + ((n)/32) * 4)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n)   __REG32((hw_base) + 0x180 + ((n)/32) * 4)
#define GIC_DIST_PENDING_SET(hw_base, n)    __REG32((hw_base) + 0x200 + ((n)/32) * 4)
#define GIC_DIST_PENDING_CLEAR(hw_base, n)  __REG32((hw_base) + 0x280 + ((n)/32) * 4)
#define GIC_DIST_ACTIVE_SET(hw_base, n)     __REG32((hw_base) + 0x300 + ((n)/32) * 4)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n)   __REG32((hw_base) + 0x380 + ((n)/32) * 4)
#define GIC_DIST_PRI(hw_base, n)            __REG32((hw_base) + 0x400 + ((n)/4) * 4)
#define GIC_DIST_TARGET(hw_base, n)         __REG32((hw_base) + 0x800 + ((n)/4) * 4)
#define GIC_DIST_CONFIG(hw_base, n)         __REG32((hw_base) + 0xc00 + ((n)/16) * 4)
#define GIC_DIST_SOFTINT(hw_base)           __REG32((hw_base) + 0xf00)
#define GIC_DIST_CPENDSGI(hw_base, n)       __REG32((hw_base) + 0xf10 + ((n)/4) * 4)
#define GIC_DIST_ICPIDR2(hw_base)           __REG32((hw_base) + 0xfe8)

static unsigned int _gic_max_irq;

int arm_gic_get_active_irq(rt_uint32_t index)
{
    int irq;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = GIC_CPU_INTACK(_gic_table[index].cpu_hw_base);
    irq += _gic_table[index].offset;
    return irq;
}

void arm_gic_ack(rt_uint32_t index, int irq)
{
    rt_uint32_t mask = 1 << (irq % 32);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
    GIC_CPU_EOI(_gic_table[index].cpu_hw_base) = irq;
}

void arm_gic_mask(rt_uint32_t index, int irq)
{
    rt_uint32_t mask = 1 << (irq % 32);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    GIC_DIST_ENABLE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}

void arm_gic_clear_pending(rt_uint32_t index, int irq)
{
    rt_uint32_t mask = 1 << (irq % 32);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    GIC_DIST_PENDING_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}

void arm_gic_clear_active(rt_uint32_t index, int irq)
{
    rt_uint32_t mask = 1 << (irq % 32);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    GIC_DIST_ACTIVE_CLEAR(_gic_table[index].dist_hw_base, irq) = mask;
}

/* Set up the cpu mask for the specific interrupt */
void arm_gic_set_cpu(rt_uint32_t index, int irq, unsigned int cpumask)
{
    rt_uint32_t old_tgt;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    old_tgt = GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq);

    old_tgt &= ~(0x0FFUL << ((irq % 4)*8));
    old_tgt |= cpumask << ((irq % 4)*8);

    GIC_DIST_TARGET(_gic_table[index].dist_hw_base, irq) = old_tgt;
}

void arm_gic_umask(rt_uint32_t index, int irq)
{
    rt_uint32_t mask = 1 << (irq % 32);

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    irq = irq - _gic_table[index].offset;
    RT_ASSERT(irq >= 0);

    GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, irq) = mask;
}

void arm_gic_dump_type(rt_uint32_t index)
{
    unsigned int gic_type;

    gic_type = GIC_DIST_TYPE(_gic_table[index].dist_hw_base);
    rt_kprintf("GICv%d on %p, max IRQs: %d, %s security extension(%08x)\n",
               (GIC_DIST_ICPIDR2(_gic_table[index].dist_hw_base) >> 4) & 0xf,
               _gic_table[index].dist_hw_base,
               _gic_max_irq,
               gic_type & (1 << 10) ? "has" : "no",
               gic_type);
}

void arm_gic_dump(rt_uint32_t index)
{
    unsigned int i, k;

    k = GIC_CPU_HIGHPRI(_gic_table[index].cpu_hw_base);
    rt_kprintf("--- high pending priority: %d(%08x)\n", k, k);
    rt_kprintf("--- hw mask ---\n");
    for (i = 0; i < _gic_max_irq / 32; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\n--- hw pending ---\n");
    for (i = 0; i < _gic_max_irq / 32; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\n--- hw active ---\n");
    for (i = 0; i < _gic_max_irq / 32; i++)
    {
        rt_kprintf("0x%08x, ",
                   GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, i * 32));
    }
    rt_kprintf("\n");
}

#ifdef RT_USING_FINSH
#include <finsh.h>
FINSH_FUNCTION_EXPORT_ALIAS(arm_gic_dump, gic, show gic status);
#endif

int arm_gic_dist_init(rt_uint32_t index, rt_uint32_t dist_base, int irq_start)
{
    unsigned int gic_type, i;
    rt_uint32_t cpumask = 1 << 0;

    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].dist_hw_base = dist_base;
    _gic_table[index].offset = irq_start;

    /* Find out how many interrupts are supported. */
    gic_type = GIC_DIST_TYPE(dist_base);
    _gic_max_irq = ((gic_type & 0x1f) + 1) * 32;

    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (_gic_max_irq > 1020)
        _gic_max_irq = 1020;
    if (_gic_max_irq > ARM_GIC_NR_IRQS) /* the platform maximum interrupts */
        _gic_max_irq = ARM_GIC_NR_IRQS;

    cpumask |= cpumask << 8;
    cpumask |= cpumask << 16;
    cpumask |= cpumask << 24;

    GIC_DIST_CTRL(dist_base) = 0x0;

    /* Set all global interrupts to be level triggered, active low. */
    for (i = 32; i < _gic_max_irq; i += 16)
        GIC_DIST_CONFIG(dist_base, i) = 0x0;

    /* Set all global interrupts to this CPU only. */
    for (i = 32; i < _gic_max_irq; i += 4)
        GIC_DIST_TARGET(dist_base, i) = cpumask;

    /* Set priority on all interrupts. */
    for (i = 0; i < _gic_max_irq; i += 4)
        GIC_DIST_PRI(dist_base, i) = 0xa0a0a0a0;

    /* Disable all interrupts. */
    for (i = 0; i < _gic_max_irq; i += 32)
        GIC_DIST_ENABLE_CLEAR(dist_base, i) = 0xffffffff;

#if 0
    /* All interrupts default to IGROUP1(IRQ). */
    for (i = 0; i < _gic_max_irq; i += 32)
        GIC_DIST_IGROUP(dist_base, i) = 0xffffffff;
#endif

    for (i = 0; i < _gic_max_irq; i += 32)
        GIC_DIST_IGROUP(dist_base, i) = 0;

    /* Enable group0 and group1 interrupt forwarding. */
    GIC_DIST_CTRL(dist_base) = 0x01;

    return 0;
}

int arm_gic_cpu_init(rt_uint32_t index, rt_uint32_t cpu_base)
{
    RT_ASSERT(index < ARM_GIC_MAX_NR);

    _gic_table[index].cpu_hw_base = cpu_base;

    GIC_CPU_PRIMASK(cpu_base) = 0xf0;
    GIC_CPU_BINPOINT(cpu_base) = 0x7;
    /* Enable CPU interrupt */
    GIC_CPU_CTRL(cpu_base) = 0x01;

    return 0;
}

void arm_gic_set_group(rt_uint32_t index, int vector, int group)
{
    /* As for GICv2, there are only group0 and group1. */
    RT_ASSERT(group <= 1);
    RT_ASSERT(vector < _gic_max_irq);

    if (group == 0)
    {
        GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, vector) &=
            ~(1 << (vector % 32));
    }
    else if (group == 1)
    {
        GIC_DIST_IGROUP(_gic_table[index].dist_hw_base, vector) |=
            (1 << (vector % 32));
    }
}

#ifdef RT_USING_SMP
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
    /* note: ipi_vector may differ from irq_vector */
    GIC_DIST_SOFTINT(_gic_table[0].dist_hw_base) = (cpu_mask << 16) | ipi_vector;
}
#endif

#ifdef RT_USING_SMP
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
{
    /* note: ipi_vector may differ from irq_vector */
    rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER");
}
#endif
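A GIC bring-up typically calls the functions above in a fixed order: distributor first, then the CPU interface of each core, then per-interrupt routing and unmasking. The sketch below shows that order; the *_DEMO base addresses, the IRQ number, and platform_gic_setup() are hypothetical placeholders, not values taken from any real board.

/* Illustrative bring-up sketch (not part of the original driver).
 * GIC_DIST_BASE_DEMO, GIC_CPU_BASE_DEMO and UART_IRQ_DEMO are hypothetical
 * placeholders for board-specific values. */
#define GIC_DIST_BASE_DEMO  0x00A01000UL
#define GIC_CPU_BASE_DEMO   0x00A02000UL
#define UART_IRQ_DEMO       58

static void platform_gic_setup(void)
{
    /* program the distributor first: targets, priorities, all IRQs disabled */
    arm_gic_dist_init(0, GIC_DIST_BASE_DEMO, 0 /* irq_start offset */);

    /* then enable this core's CPU interface (priority mask, binary point) */
    arm_gic_cpu_init(0, GIC_CPU_BASE_DEMO);

    /* route the interrupt to CPU0 and unmask it in the distributor */
    arm_gic_set_cpu(0, UART_IRQ_DEMO, 1 << 0);
    arm_gic_umask(0, UART_IRQ_DEMO);
}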
140265.c
/*
 * libEtPan! -- a mail stuff library
 *
 * Copyright (C) 2001, 2005 - DINH Viet Hoa
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the libEtPan! project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "mailimap_types.h"
#include "acl_types.h"
#include "mailimap_extension.h"

#include <stdlib.h>
#include <string.h>

void mailimap_acl_identifier_free(char * identifier)
{
  mailimap_astring_free(identifier);
}

void mailimap_acl_rights_free(char * rights)
{
  mailimap_astring_free(rights);
}

struct mailimap_acl_identifier_rights *
mailimap_acl_identifier_rights_new(char * identifier, char * rights)
{
  struct mailimap_acl_identifier_rights * id_rights;

  id_rights = malloc(sizeof(* id_rights));
  if (id_rights == NULL)
    return NULL;

  id_rights->identifer = identifier;
  id_rights->rights = rights;

  return id_rights;
}

void mailimap_acl_identifier_rights_free(
        struct mailimap_acl_identifier_rights * id_rights)
{
  mailimap_acl_identifier_free(id_rights->identifer);
  mailimap_acl_rights_free(id_rights->rights);
  free(id_rights);
}

struct mailimap_acl_acl_data *
mailimap_acl_acl_data_new(char * mailbox, clist * idrights_list)
{
  struct mailimap_acl_acl_data * acl_data;

  acl_data = malloc(sizeof(* acl_data));
  if (acl_data == NULL)
    return NULL;

  acl_data->mailbox = mailbox;
  acl_data->idrights_list = idrights_list;

  return acl_data;
}

LIBETPAN_EXPORT
void mailimap_acl_acl_data_free(struct mailimap_acl_acl_data * acl_data)
{
  mailimap_mailbox_free(acl_data->mailbox);
  clist_foreach(acl_data->idrights_list,
    (clist_func) mailimap_acl_identifier_rights_free, NULL);
  clist_free(acl_data->idrights_list);
  free(acl_data);
}

struct mailimap_acl_listrights_data *
mailimap_acl_listrights_data_new(char * mailbox, char * identifier,
        clist * rights_list)
{
  struct mailimap_acl_listrights_data * lr_data;

  lr_data = malloc(sizeof(* lr_data));
  if (lr_data == NULL)
    return NULL;

  lr_data->mailbox = mailbox;
  lr_data->identifier = identifier;
  lr_data->rights_list = rights_list;

  return lr_data;
}

LIBETPAN_EXPORT
void mailimap_acl_listrights_data_free(
        struct mailimap_acl_listrights_data * lr_data)
{
  mailimap_mailbox_free(lr_data->mailbox);
  mailimap_acl_identifier_free(lr_data->identifier);
  clist_foreach(lr_data->rights_list,
    (clist_func) mailimap_acl_rights_free, NULL);
  clist_free(lr_data->rights_list);
  free(lr_data);
}

struct mailimap_acl_myrights_data *
mailimap_acl_myrights_data_new(char * mailbox, char * rights)
{
  struct mailimap_acl_myrights_data * mr_data;

  mr_data = malloc(sizeof(* mr_data));
  if (mr_data == NULL)
    return NULL;

  mr_data->mailbox = mailbox;
  mr_data->rights = rights;

  return mr_data;
}

LIBETPAN_EXPORT
void mailimap_acl_myrights_data_free(struct mailimap_acl_myrights_data * mr_data)
{
  mailimap_mailbox_free(mr_data->mailbox);
  mailimap_acl_rights_free(mr_data->rights);
  free(mr_data);
}

void
mailimap_acl_free(struct mailimap_extension_data * ext_data)
{
  if (ext_data == NULL)
    return;

  switch (ext_data->ext_type)
  {
    case MAILIMAP_ACL_TYPE_ACL_DATA:
      mailimap_acl_acl_data_free(ext_data->ext_data);
      break;
    case MAILIMAP_ACL_TYPE_LISTRIGHTS_DATA:
      mailimap_acl_listrights_data_free(ext_data->ext_data);
      break;
    case MAILIMAP_ACL_TYPE_MYRIGHTS_DATA:
      mailimap_acl_myrights_data_free(ext_data->ext_data);
      break;
  }

  free (ext_data);
}
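The constructors above store the string pointers they are given, and the matching destructors release them through mailimap_astring_free()/mailimap_mailbox_free(), so ownership passes to the object on a successful *_new() call. A small sketch of that pairing follows; it assumes mailimap_astring_free() ultimately frees the string with free(), and the strdup()'d sample values and the acl_types_demo() name are hypothetical.

/* Illustrative sketch (not part of the original file): pair a *_new()
 * constructor with its *_free() destructor.  Assumes the string arguments
 * are heap-allocated and that the destructor releases them, so the caller
 * must not free them again after a successful _new() call. */
static void acl_types_demo(void)
{
  char * identifier;
  char * rights;
  struct mailimap_acl_identifier_rights * id_rights;

  identifier = strdup("anyone");
  rights = strdup("lrswipkxte");
  if (identifier == NULL || rights == NULL) {
    free(identifier);
    free(rights);
    return;
  }

  id_rights = mailimap_acl_identifier_rights_new(identifier, rights);
  if (id_rights == NULL) {
    /* constructor failed: ownership did not transfer, free the inputs */
    free(identifier);
    free(rights);
    return;
  }

  /* frees the identifier and rights strings as well */
  mailimap_acl_identifier_rights_free(id_rights);
}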
300037.c
/* -*- coding: utf-8 -*-
 * ----------------------------------------------------------------------
 * Copyright © 2012, RedJack, LLC.
 * All rights reserved.
 *
 * Please see the COPYING file in this distribution for license
 * details.
 * ----------------------------------------------------------------------
 */

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "libcork/core/attributes.h"
#include "libcork/core/error.h"
#include "libcork/core/types.h"
#include "libcork/ds/buffer.h"
#include "libcork/helpers/errors.h"
#include "libcork/helpers/posix.h"
#include "libcork/os/files.h"

static int
cork_walk_one_directory(struct cork_dir_walker *w, struct cork_buffer *path,
                        size_t root_path_size)
{
    DIR  *dir = NULL;
    struct dirent  *entry;
    size_t  dir_path_size;

    rip_check_posix(dir = opendir(path->buf));

    cork_buffer_append(path, "/", 1);
    dir_path_size = path->size;
    errno = 0;
    while ((entry = readdir(dir)) != NULL) {
        struct stat  info;

        /* Skip the "." and ".." entries */
        if (strcmp(entry->d_name, ".") == 0 ||
            strcmp(entry->d_name, "..") == 0) {
            continue;
        }

        /* Stat the directory entry */
        cork_buffer_append_string(path, entry->d_name);
        ei_check_posix(stat(path->buf, &info));

        /* If the entry is a subdirectory, recurse into it. */
        if (S_ISDIR(info.st_mode)) {
            int  rc = cork_dir_walker_enter_directory
                (w, path->buf, path->buf + root_path_size,
                 path->buf + dir_path_size);

            if (rc != CORK_SKIP_DIRECTORY) {
                ei_check(cork_walk_one_directory(w, path, root_path_size));
                ei_check(cork_dir_walker_leave_directory
                         (w, path->buf, path->buf + root_path_size,
                          path->buf + dir_path_size));
            }
        } else if (S_ISREG(info.st_mode)) {
            ei_check(cork_dir_walker_file
                     (w, path->buf, path->buf + root_path_size,
                      path->buf + dir_path_size));
        }

        /* Remove this entry name from the path buffer. */
        cork_buffer_truncate(path, dir_path_size);

        /* We have to reset errno to 0 because of the ambiguous way
         * readdir uses a return value of NULL.  Other functions may
         * return normally yet set errno to a non-zero value.  dlopen
         * on Mac OS X is an ogreish example.  Since an error readdir
         * is indicated by returning NULL and setting errno to indicate
         * the error, then we need to reset it to zero before each call.
         * We shall assume, perhaps to our great misery, that functions
         * within this loop do proper error checking and act
         * accordingly. */
        errno = 0;
    }

    /* Check errno immediately after the while loop terminates */
    if (CORK_UNLIKELY(errno != 0)) {
        cork_system_error_set();
        goto error;
    }

    /* Remove the trailing '/' from the path buffer. */
    cork_buffer_truncate(path, dir_path_size - 1);

    rii_check_posix(closedir(dir));
    return 0;

error:
    if (dir != NULL) {
        rii_check_posix(closedir(dir));
    }
    return -1;
}

int
cork_walk_directory(const char *path, struct cork_dir_walker *w)
{
    int  rc;
    char  *p;
    struct cork_buffer  buf = CORK_BUFFER_INIT();

    /* Seed the buffer with the directory's path, ensuring that there's no
     * trailing '/' */
    cork_buffer_append_string(&buf, path);
    p = buf.buf;
    while (p[buf.size-1] == '/') {
        buf.size--;
        p[buf.size] = '\0';
    }

    rc = cork_walk_one_directory(w, &buf, buf.size + 1);
    cork_buffer_done(&buf);
    return rc;
}
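cork_walk_directory() drives a caller-supplied struct cork_dir_walker, handing each callback the full path, the path relative to the walk root, and the entry's base name (the three pointers computed from root_path_size and dir_path_size above). The sketch below is a walker that prints every regular file; it assumes the public struct cork_dir_walker exposes enter_directory/file/leave_directory callback fields matching the cork_dir_walker_* invocations above, which is declared in a header not shown here.

/* Illustrative sketch (not part of the original file): a walker that prints
 * every regular file under a directory.  The enter_directory/file/
 * leave_directory field names are assumed from the cork_dir_walker_*
 * invocations above. */
#include <stdio.h>

static int
print_file(struct cork_dir_walker *walker, const char *full_path,
           const char *rel_path, const char *base_name)
{
    printf("%s (relative: %s, name: %s)\n", full_path, rel_path, base_name);
    return 0;
}

static int
enter_dir(struct cork_dir_walker *walker, const char *full_path,
          const char *rel_path, const char *base_name)
{
    /* return CORK_SKIP_DIRECTORY here to prune a subtree */
    return 0;
}

static int
leave_dir(struct cork_dir_walker *walker, const char *full_path,
          const char *rel_path, const char *base_name)
{
    return 0;
}

static struct cork_dir_walker  print_walker = {
    .enter_directory = enter_dir,
    .file = print_file,
    .leave_directory = leave_dir
};

/* usage: cork_walk_directory("/tmp", &print_walker); */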
327771.c
/* * Unit test suite for gdiplus regions * * Copyright (C) 2008 Huw Davies * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #include "windows.h" #include "gdiplus.h" #include "wingdi.h" #include "wine/test.h" #include <math.h> #define RGNDATA_RECT 0x10000000 #define RGNDATA_PATH 0x10000001 #define RGNDATA_EMPTY_RECT 0x10000002 #define RGNDATA_INFINITE_RECT 0x10000003 #define RGNDATA_MAGIC 0xdbc01001 #define RGNDATA_MAGIC2 0xdbc01002 #define expect(expected, got) ok(got == expected, "Expected %.8x, got %.8x\n", expected, got) #define expectf_(expected, got, precision) ok(fabs(expected - got) < precision, "Expected %.2f, got %.2f\n", expected, got) #define expectf(expected, got) expectf_(expected, got, 0.0001) #define expect_magic(value) ok(*value == RGNDATA_MAGIC || *value == RGNDATA_MAGIC2, "Expected a known magic value, got %8x\n", *value) #define expect_dword(value, expected) ok(*(value) == expected, "expected %08x got %08x\n", expected, *(value)) static inline void expect_float(DWORD *value, FLOAT expected) { FLOAT valuef = *(FLOAT*)value; ok(valuef == expected, "expected %f got %f\n", expected, valuef); } /* We get shorts back, not INTs like a GpPoint */ typedef struct RegionDataPoint { short X, Y; } RegionDataPoint; static void verify_region(HRGN hrgn, const RECT *rc) { union { RGNDATA data; char buf[sizeof(RGNDATAHEADER) + sizeof(RECT)]; } rgn; const RECT *rect; DWORD ret; ret = GetRegionData(hrgn, 0, NULL); if (IsRectEmpty(rc)) ok(ret == sizeof(rgn.data.rdh), "expected sizeof(rdh), got %u\n", ret); else ok(ret == sizeof(rgn.data.rdh) + sizeof(RECT), "expected sizeof(rgn), got %u\n", ret); if (!ret) return; ret = GetRegionData(hrgn, sizeof(rgn), &rgn.data); if (IsRectEmpty(rc)) ok(ret == sizeof(rgn.data.rdh), "expected sizeof(rdh), got %u\n", ret); else ok(ret == sizeof(rgn.data.rdh) + sizeof(RECT), "expected sizeof(rgn), got %u\n", ret); trace("size %u, type %u, count %u, rgn size %u, bound (%d,%d-%d,%d)\n", rgn.data.rdh.dwSize, rgn.data.rdh.iType, rgn.data.rdh.nCount, rgn.data.rdh.nRgnSize, rgn.data.rdh.rcBound.left, rgn.data.rdh.rcBound.top, rgn.data.rdh.rcBound.right, rgn.data.rdh.rcBound.bottom); if (rgn.data.rdh.nCount != 0) { rect = (const RECT *)rgn.data.Buffer; trace("rect (%d,%d-%d,%d)\n", rect->left, rect->top, rect->right, rect->bottom); ok(EqualRect(rect, rc), "expected (%d,%d)-(%d,%d), got (%d,%d)-(%d,%d)\n", rc->left, rc->top, rc->right, rc->bottom, rect->left, rect->top, rect->right, rect->bottom); } ok(rgn.data.rdh.dwSize == sizeof(rgn.data.rdh), "expected sizeof(rdh), got %u\n", rgn.data.rdh.dwSize); ok(rgn.data.rdh.iType == RDH_RECTANGLES, "expected RDH_RECTANGLES, got %u\n", rgn.data.rdh.iType); if (IsRectEmpty(rc)) { ok(rgn.data.rdh.nCount == 0, "expected 0, got %u\n", rgn.data.rdh.nCount); ok(rgn.data.rdh.nRgnSize == 0, "expected 0, got %u\n", rgn.data.rdh.nRgnSize); } else { 
ok(rgn.data.rdh.nCount == 1, "expected 1, got %u\n", rgn.data.rdh.nCount); ok(rgn.data.rdh.nRgnSize == sizeof(RECT), "expected sizeof(RECT), got %u\n", rgn.data.rdh.nRgnSize); } ok(EqualRect(&rgn.data.rdh.rcBound, rc), "expected (%d,%d)-(%d,%d), got (%d,%d)-(%d,%d)\n", rc->left, rc->top, rc->right, rc->bottom, rgn.data.rdh.rcBound.left, rgn.data.rdh.rcBound.top, rgn.data.rdh.rcBound.right, rgn.data.rdh.rcBound.bottom); } static void test_getregiondata(void) { GpStatus status; GpRegion *region, *region2; RegionDataPoint *point; UINT needed; DWORD buf[100]; GpRect rect; GpPath *path; GpMatrix *matrix; memset(buf, 0xee, sizeof(buf)); status = GdipCreateRegion(&region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(20, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(20, needed); expect_dword(buf, 12); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_INFINITE_RECT); status = GdipSetEmpty(region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(20, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(20, needed); expect_dword(buf, 12); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_EMPTY_RECT); status = GdipSetInfinite(region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(20, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(20, needed); expect_dword(buf, 12); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_INFINITE_RECT); status = GdipDeleteRegion(region); ok(status == Ok, "status %08x\n", status); rect.X = 10; rect.Y = 20; rect.Width = 100; rect.Height = 200; status = GdipCreateRegionRectI(&rect, &region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(36, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(36, needed); expect_dword(buf, 28); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_RECT); expect_float(buf + 5, 10.0); expect_float(buf + 6, 20.0); expect_float(buf + 7, 100.0); expect_float(buf + 8, 200.0); rect.X = 50; rect.Y = 30; rect.Width = 10; rect.Height = 20; status = GdipCombineRegionRectI(region, &rect, CombineModeIntersect); ok(status == Ok, "status %08x\n", status); rect.X = 100; rect.Y = 300; rect.Width = 30; rect.Height = 50; status = GdipCombineRegionRectI(region, &rect, CombineModeXor); ok(status == Ok, "status %08x\n", status); rect.X = 200; rect.Y = 100; rect.Width = 133; rect.Height = 266; status = GdipCreateRegionRectI(&rect, &region2); ok(status == Ok, "status %08x\n", status); rect.X = 20; rect.Y = 10; rect.Width = 40; rect.Height = 66; status = GdipCombineRegionRectI(region2, &rect, CombineModeUnion); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionRegion(region, 
region2, CombineModeComplement); ok(status == Ok, "status %08x\n", status); rect.X = 400; rect.Y = 500; rect.Width = 22; rect.Height = 55; status = GdipCombineRegionRectI(region, &rect, CombineModeExclude); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(156, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(156, needed); expect_dword(buf, 148); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 10); expect_dword(buf + 4, CombineModeExclude); expect_dword(buf + 5, CombineModeComplement); expect_dword(buf + 6, CombineModeXor); expect_dword(buf + 7, CombineModeIntersect); expect_dword(buf + 8, RGNDATA_RECT); expect_float(buf + 9, 10.0); expect_float(buf + 10, 20.0); expect_float(buf + 11, 100.0); expect_float(buf + 12, 200.0); expect_dword(buf + 13, RGNDATA_RECT); expect_float(buf + 14, 50.0); expect_float(buf + 15, 30.0); expect_float(buf + 16, 10.0); expect_float(buf + 17, 20.0); expect_dword(buf + 18, RGNDATA_RECT); expect_float(buf + 19, 100.0); expect_float(buf + 20, 300.0); expect_float(buf + 21, 30.0); expect_float(buf + 22, 50.0); expect_dword(buf + 23, CombineModeUnion); expect_dword(buf + 24, RGNDATA_RECT); expect_float(buf + 25, 200.0); expect_float(buf + 26, 100.0); expect_float(buf + 27, 133.0); expect_float(buf + 28, 266.0); expect_dword(buf + 29, RGNDATA_RECT); expect_float(buf + 30, 20.0); expect_float(buf + 31, 10.0); expect_float(buf + 32, 40.0); expect_float(buf + 33, 66.0); expect_dword(buf + 34, RGNDATA_RECT); expect_float(buf + 35, 400.0); expect_float(buf + 36, 500.0); expect_float(buf + 37, 22.0); expect_float(buf + 38, 55.0); status = GdipDeleteRegion(region2); ok(status == Ok, "status %08x\n", status); status = GdipDeleteRegion(region); ok(status == Ok, "status %08x\n", status); /* Try some paths */ status = GdipCreatePath(FillModeAlternate, &path); ok(status == Ok, "status %08x\n", status); GdipAddPathRectangle(path, 12.5, 13.0, 14.0, 15.0); status = GdipCreateRegionPath(path, &region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(72, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(72, needed); expect_dword(buf, 64); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_PATH); expect_dword(buf + 5, 0x00000030); expect_magic((DWORD*)(buf + 6)); expect_dword(buf + 7, 0x00000004); expect_dword(buf + 8, 0x00000000); expect_float(buf + 9, 12.5); expect_float(buf + 10, 13.0); expect_float(buf + 11, 26.5); expect_float(buf + 12, 13.0); expect_float(buf + 13, 26.5); expect_float(buf + 14, 28.0); expect_float(buf + 15, 12.5); expect_float(buf + 16, 28.0); expect_dword(buf + 17, 0x81010100); rect.X = 50; rect.Y = 30; rect.Width = 10; rect.Height = 20; status = GdipCombineRegionRectI(region, &rect, CombineModeIntersect); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionDataSize(region, &needed); ok(status == Ok, "status %08x\n", status); expect(96, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); ok(status == Ok, "status %08x\n", status); expect(96, needed); expect_dword(buf, 88); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 2); 
expect_dword(buf + 4, CombineModeIntersect); expect_dword(buf + 5, RGNDATA_PATH); expect_dword(buf + 6, 0x00000030); expect_magic((DWORD*)(buf + 7)); expect_dword(buf + 8, 0x00000004); expect_dword(buf + 9, 0x00000000); expect_float(buf + 10, 12.5); expect_float(buf + 11, 13.0); expect_float(buf + 12, 26.5); expect_float(buf + 13, 13.0); expect_float(buf + 14, 26.5); expect_float(buf + 15, 28.0); expect_float(buf + 16, 12.5); expect_float(buf + 17, 28.0); expect_dword(buf + 18, 0x81010100); expect_dword(buf + 19, RGNDATA_RECT); expect_float(buf + 20, 50.0); expect_float(buf + 21, 30.0); expect_float(buf + 22, 10.0); expect_float(buf + 23, 20.0); status = GdipDeleteRegion(region); ok(status == Ok, "status %08x\n", status); status = GdipDeletePath(path); ok(status == Ok, "status %08x\n", status); /* Test an empty path */ status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipCreateRegionPath(path, &region); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(36, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(36, needed); expect_dword(buf, 28); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_PATH); /* Second signature for pathdata */ expect_dword(buf + 5, 12); expect_magic((DWORD*)(buf + 6)); expect_dword(buf + 7, 0); /* flags 0x4000 means its a path of shorts instead of FLOAT */ ok((*(buf + 8) & (~ 0x00004000)) == 0x00000000, "expected 00000000 got %08x\n", *(buf + 8) & (~ 0x00004000)); /* Transform an empty region */ status = GdipCreateMatrix(&matrix); expect(Ok, status); status = GdipTransformRegion(region, matrix); expect(Ok, status); GdipDeleteMatrix(matrix); status = GdipDeleteRegion(region); expect(Ok, status); /* Test a simple triangle of INTs */ status = GdipAddPathLine(path, 5, 6, 7, 8); expect(Ok, status); status = GdipAddPathLine(path, 8, 1, 5, 6); expect(Ok, status); status = GdipClosePathFigure(path); expect(Ok, status); status = GdipCreateRegionPath(path, &region); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(56, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(56, needed); expect_dword(buf, 48); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3 , 0); expect_dword(buf + 4 , RGNDATA_PATH); expect_dword(buf + 5, 32); expect_magic((DWORD*)(buf + 6)); expect_dword(buf + 7, 4); expect_dword(buf + 8, 0x00004000); /* ?? 
*/ point = (RegionDataPoint*)buf + 9; expect(5, point[0].X); expect(6, point[0].Y); expect(7, point[1].X); /* buf + 10 */ expect(8, point[1].Y); expect(8, point[2].X); /* buf + 11 */ expect(1, point[2].Y); expect(5, point[3].X); /* buf + 12 */ expect(6, point[3].Y); expect_dword(buf + 13, 0x81010100); /* 0x01010100 if we don't close the path */ status = GdipDeletePath(path); expect(Ok, status); status = GdipDeleteRegion(region); expect(Ok, status); /* Test a floating-point triangle */ status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipAddPathLine(path, 5.6, 6.2, 7.2, 8.9); expect(Ok, status); status = GdipAddPathLine(path, 8.1, 1.6, 5.6, 6.2); expect(Ok, status); status = GdipCreateRegionPath(path, &region); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(72, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(72, needed); expect_dword(buf, 64); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_PATH); expect_dword(buf + 5, 48); expect_magic((DWORD*)(buf + 6)); expect_dword(buf + 7, 4); expect_dword(buf + 8, 0); expect_float(buf + 9, 5.6); expect_float(buf + 10, 6.2); expect_float(buf + 11, 7.2); expect_float(buf + 12, 8.9); expect_float(buf + 13, 8.1); expect_float(buf + 14, 1.6); expect_float(buf + 15, 5.6); expect_float(buf + 16, 6.2); status = GdipDeletePath(path); expect(Ok, status); status = GdipDeleteRegion(region); expect(Ok, status); /* Test for a path with > 4 points, and CombineRegionPath */ GdipCreatePath(FillModeAlternate, &path); status = GdipAddPathLine(path, 50, 70.2, 60, 102.8); expect(Ok, status); status = GdipAddPathLine(path, 55.4, 122.4, 40.4, 60.2); expect(Ok, status); status = GdipAddPathLine(path, 45.6, 20.2, 50, 70.2); expect(Ok, status); rect.X = 20; rect.Y = 25; rect.Width = 60; rect.Height = 120; status = GdipCreateRegionRectI(&rect, &region); expect(Ok, status); status = GdipCombineRegionPath(region, path, CombineModeUnion); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(116, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(116, needed); expect_dword(buf, 108); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 2); expect_dword(buf + 4, CombineModeUnion); expect_dword(buf + 5, RGNDATA_RECT); expect_float(buf + 6, 20); expect_float(buf + 7, 25); expect_float(buf + 8, 60); expect_float(buf + 9, 120); expect_dword(buf + 10, RGNDATA_PATH); expect_dword(buf + 11, 68); expect_magic((DWORD*)(buf + 12)); expect_dword(buf + 13, 6); expect_float(buf + 14, 0x0); expect_float(buf + 15, 50); expect_float(buf + 16, 70.2); expect_float(buf + 17, 60); expect_float(buf + 18, 102.8); expect_float(buf + 19, 55.4); expect_float(buf + 20, 122.4); expect_float(buf + 21, 40.4); expect_float(buf + 22, 60.2); expect_float(buf + 23, 45.6); expect_float(buf + 24, 20.2); expect_float(buf + 25, 50); expect_float(buf + 26, 70.2); expect_dword(buf + 27, 0x01010100); ok(*(buf + 28) == 0x00000101 || *(buf + 28) == 0x43050101 /* Win 7 */, "expected 00000101 or 43050101 got %08x\n", *(buf + 28)); status = GdipDeletePath(path); expect(Ok, status); status = GdipDeleteRegion(region); expect(Ok, status); } static void test_isinfinite(void) { GpStatus status; GpRegion *region; GpGraphics *graphics = NULL; GpMatrix *m; HDC hdc = GetDC(0); BOOL res; status = 
GdipCreateFromHDC(hdc, &graphics); expect(Ok, status); GdipCreateRegion(&region); GdipCreateMatrix2(3.0, 0.0, 0.0, 1.0, 20.0, 30.0, &m); /* NULL arguments */ status = GdipIsInfiniteRegion(NULL, NULL, NULL); expect(InvalidParameter, status); status = GdipIsInfiniteRegion(region, NULL, NULL); expect(InvalidParameter, status); status = GdipIsInfiniteRegion(NULL, graphics, NULL); expect(InvalidParameter, status); status = GdipIsInfiniteRegion(NULL, NULL, &res); expect(InvalidParameter, status); status = GdipIsInfiniteRegion(region, NULL, &res); expect(InvalidParameter, status); res = FALSE; status = GdipIsInfiniteRegion(region, graphics, &res); expect(Ok, status); expect(TRUE, res); /* after world transform */ status = GdipSetWorldTransform(graphics, m); expect(Ok, status); res = FALSE; status = GdipIsInfiniteRegion(region, graphics, &res); expect(Ok, status); expect(TRUE, res); GdipDeleteMatrix(m); GdipDeleteRegion(region); GdipDeleteGraphics(graphics); ReleaseDC(0, hdc); } static void test_isempty(void) { GpStatus status; GpRegion *region; GpGraphics *graphics = NULL; HDC hdc = GetDC(0); BOOL res; status = GdipCreateFromHDC(hdc, &graphics); expect(Ok, status); GdipCreateRegion(&region); /* NULL arguments */ status = GdipIsEmptyRegion(NULL, NULL, NULL); expect(InvalidParameter, status); status = GdipIsEmptyRegion(region, NULL, NULL); expect(InvalidParameter, status); status = GdipIsEmptyRegion(NULL, graphics, NULL); expect(InvalidParameter, status); status = GdipIsEmptyRegion(NULL, NULL, &res); expect(InvalidParameter, status); status = GdipIsEmptyRegion(region, NULL, &res); expect(InvalidParameter, status); /* default is infinite */ res = TRUE; status = GdipIsEmptyRegion(region, graphics, &res); expect(Ok, status); expect(FALSE, res); status = GdipSetEmpty(region); expect(Ok, status); res = FALSE; status = GdipIsEmptyRegion(region, graphics, &res); expect(Ok, status); expect(TRUE, res); GdipDeleteRegion(region); GdipDeleteGraphics(graphics); ReleaseDC(0, hdc); } static void test_combinereplace(void) { GpStatus status; GpRegion *region, *region2; GpPath *path; GpRectF rectf; UINT needed; DWORD buf[50]; rectf.X = rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCreateRegionRect(&rectf, &region); expect(Ok, status); /* replace with the same rectangle */ status = GdipCombineRegionRect(region, &rectf,CombineModeReplace); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(36, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(36, needed); expect_dword(buf, 28); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_RECT); /* replace with path */ status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipAddPathEllipse(path, 0.0, 0.0, 100.0, 250.0); expect(Ok, status); status = GdipCombineRegionPath(region, path, CombineModeReplace); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(156, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(156, needed); expect_dword(buf, 148); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_PATH); GdipDeletePath(path); /* replace with infinite rect */ status = GdipCreateRegion(&region2); expect(Ok, status); status = GdipCombineRegionRegion(region, region2, CombineModeReplace); expect(Ok, 
status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(20, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(20, needed); expect_dword(buf, 12); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_INFINITE_RECT); GdipDeleteRegion(region2); /* more complex case : replace with a combined region */ status = GdipCreateRegionRect(&rectf, &region2); expect(Ok, status); status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipAddPathEllipse(path, 0.0, 0.0, 100.0, 250.0); expect(Ok, status); status = GdipCombineRegionPath(region2, path, CombineModeUnion); expect(Ok, status); GdipDeletePath(path); status = GdipCombineRegionRegion(region, region2, CombineModeReplace); expect(Ok, status); GdipDeleteRegion(region2); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(180, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); expect(180, needed); expect_dword(buf, 172); trace("buf[1] = %08x\n", buf[1]); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 2); expect_dword(buf + 4, CombineModeUnion); GdipDeleteRegion(region); } static void test_fromhrgn(void) { GpStatus status; GpRegion *region = (GpRegion*)0xabcdef01; HRGN hrgn; UINT needed; DWORD buf[220]; RegionDataPoint *point; GpGraphics *graphics = NULL; HDC hdc; BOOL res; /* NULL */ status = GdipCreateRegionHrgn(NULL, NULL); expect(InvalidParameter, status); status = GdipCreateRegionHrgn(NULL, &region); expect(InvalidParameter, status); status = GdipCreateRegionHrgn((HRGN)0xdeadbeef, &region); expect(InvalidParameter, status); ok(region == (GpRegion*)0xabcdef01, "Expected region not to be created\n"); /* empty rectangle */ hrgn = CreateRectRgn(0, 0, 0, 0); status = GdipCreateRegionHrgn(hrgn, &region); expect(Ok, status); if(status == Ok) { hdc = GetDC(0); status = GdipCreateFromHDC(hdc, &graphics); expect(Ok, status); res = FALSE; status = GdipIsEmptyRegion(region, graphics, &res); expect(Ok, status); expect(TRUE, res); GdipDeleteGraphics(graphics); ReleaseDC(0, hdc); GdipDeleteRegion(region); } DeleteObject(hrgn); /* rectangle */ hrgn = CreateRectRgn(0, 0, 100, 10); status = GdipCreateRegionHrgn(hrgn, &region); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); expect(56, needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); if(status == Ok){ expect(56, needed); expect_dword(buf, 48); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_PATH); expect_dword(buf + 5, 0x00000020); expect_magic((DWORD*)(buf + 6)); expect_dword(buf + 7, 0x00000004); todo_wine expect_dword(buf + 8, 0x00006000); /* ?? 
*/ point = (RegionDataPoint*)buf + 9; expect(0, point[0].X); expect(0, point[0].Y); expect(100,point[1].X); /* buf + 10 */ expect(0, point[1].Y); expect(100,point[2].X); /* buf + 11 */ expect(10, point[2].Y); expect(0, point[3].X); /* buf + 12 */ expect(10, point[3].Y); expect_dword(buf + 13, 0x81010100); /* closed */ } GdipDeleteRegion(region); DeleteObject(hrgn); /* ellipse */ hrgn = CreateEllipticRgn(0, 0, 100, 10); status = GdipCreateRegionHrgn(hrgn, &region); expect(Ok, status); status = GdipGetRegionDataSize(region, &needed); expect(Ok, status); ok(needed == 216 || needed == 196, /* win98 */ "Got %.8x\n", needed); status = GdipGetRegionData(region, (BYTE*)buf, sizeof(buf), &needed); expect(Ok, status); if(status == Ok && needed == 216) /* Don't try to test win98 layout */ { expect(Ok, status); expect(216, needed); expect_dword(buf, 208); expect_magic((DWORD*)(buf + 2)); expect_dword(buf + 3, 0); expect_dword(buf + 4, RGNDATA_PATH); expect_dword(buf + 5, 0x000000C0); expect_magic((DWORD*)(buf + 6)); expect_dword(buf + 7, 0x00000024); todo_wine expect_dword(buf + 8, 0x00006000); /* ?? */ } GdipDeleteRegion(region); DeleteObject(hrgn); } static void test_gethrgn(void) { GpStatus status; GpRegion *region, *region2; GpPath *path; GpGraphics *graphics; HRGN hrgn; HDC hdc=GetDC(0); static const RECT empty_rect = {0,0,0,0}; static const RECT test_rect = {10, 11, 20, 21}; static const GpRectF test_rectF = {10.0, 11.0, 10.0, 10.0}; static const RECT scaled_rect = {20, 22, 40, 42}; static const RECT test_rect2 = {10, 21, 20, 31}; static const GpRectF test_rect2F = {10.0, 21.0, 10.0, 10.0}; static const RECT test_rect3 = {10, 11, 20, 31}; static const GpRectF test_rect3F = {10.0, 11.0, 10.0, 20.0}; status = GdipCreateFromHDC(hdc, &graphics); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegion(&region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(NULL, graphics, &hrgn); ok(status == InvalidParameter, "status %08x\n", status); status = GdipGetRegionHRgn(region, graphics, NULL); ok(status == InvalidParameter, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); ok(hrgn == NULL, "hrgn=%p\n", hrgn); status = GdipGetRegionHRgn(region, graphics, &hrgn); ok(status == Ok, "status %08x\n", status); ok(hrgn == NULL, "hrgn=%p\n", hrgn); status = GdipSetEmpty(region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &empty_rect); DeleteObject(hrgn); status = GdipCreatePath(FillModeAlternate, &path); ok(status == Ok, "status %08x\n", status); status = GdipAddPathRectangle(path, 10.0, 11.0, 10.0, 10.0); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegionPath(path, &region2); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region2, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &test_rect); DeleteObject(hrgn); /* resulting HRGN is in device coordinates */ status = GdipScaleWorldTransform(graphics, 2.0, 2.0, MatrixOrderPrepend); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region2, graphics, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &scaled_rect); DeleteObject(hrgn); status = GdipCombineRegionRect(region2, &test_rectF, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region2, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); 
verify_region(hrgn, &test_rect); DeleteObject(hrgn); status = GdipGetRegionHRgn(region2, graphics, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &scaled_rect); DeleteObject(hrgn); status = GdipSetInfinite(region); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionRect(region, &test_rectF, CombineModeIntersect); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &test_rect); DeleteObject(hrgn); status = GdipCombineRegionRect(region, &test_rectF, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionRect(region, &test_rect2F, CombineModeUnion); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &test_rect3); DeleteObject(hrgn); status = GdipCombineRegionRect(region, &test_rect3F, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionRect(region, &test_rect2F, CombineModeXor); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &test_rect); DeleteObject(hrgn); status = GdipCombineRegionRect(region, &test_rect3F, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionRect(region, &test_rectF, CombineModeExclude); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &test_rect2); DeleteObject(hrgn); status = GdipCombineRegionRect(region, &test_rectF, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionRect(region, &test_rect3F, CombineModeComplement); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionHRgn(region, NULL, &hrgn); ok(status == Ok, "status %08x\n", status); verify_region(hrgn, &test_rect2); DeleteObject(hrgn); status = GdipDeletePath(path); ok(status == Ok, "status %08x\n", status); status = GdipDeleteRegion(region); ok(status == Ok, "status %08x\n", status); status = GdipDeleteRegion(region2); ok(status == Ok, "status %08x\n", status); status = GdipDeleteGraphics(graphics); ok(status == Ok, "status %08x\n", status); ReleaseDC(0, hdc); } static void test_isequal(void) { GpRegion *region1, *region2; GpGraphics *graphics; GpRectF rectf; GpStatus status; HDC hdc = GetDC(0); BOOL res; status = GdipCreateFromHDC(hdc, &graphics); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegion(&region1); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegion(&region2); ok(status == Ok, "status %08x\n", status); /* NULL */ status = GdipIsEqualRegion(NULL, NULL, NULL, NULL); ok(status == InvalidParameter, "status %08x\n", status); status = GdipIsEqualRegion(region1, region2, NULL, NULL); ok(status == InvalidParameter, "status %08x\n", status); status = GdipIsEqualRegion(region1, region2, graphics, NULL); ok(status == InvalidParameter, "status %08x\n", status); status = GdipIsEqualRegion(region1, region2, NULL, &res); ok(status == InvalidParameter, "status %08x\n", status); /* infinite regions */ res = FALSE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(res, "Expected to be equal.\n"); /* empty regions */ status = GdipSetEmpty(region1); ok(status == Ok, "status %08x\n", status); status = 
GdipSetEmpty(region2); ok(status == Ok, "status %08x\n", status); res = FALSE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(res, "Expected to be equal.\n"); /* empty & infinite */ status = GdipSetInfinite(region1); ok(status == Ok, "status %08x\n", status); res = TRUE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(!res, "Expected to be unequal.\n"); /* rect & (inf/empty) */ rectf.X = rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region1, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); res = TRUE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(!res, "Expected to be unequal.\n"); status = GdipSetInfinite(region2); ok(status == Ok, "status %08x\n", status); res = TRUE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(!res, "Expected to be unequal.\n"); /* roughly equal rectangles */ rectf.X = rectf.Y = 0.0; rectf.Width = rectf.Height = 100.001; status = GdipCombineRegionRect(region2, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); res = FALSE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(res, "Expected to be equal.\n"); /* equal rectangles */ rectf.X = rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region2, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); res = FALSE; status = GdipIsEqualRegion(region1, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(res, "Expected to be equal.\n"); /* cleanup */ status = GdipDeleteRegion(region1); ok(status == Ok, "status %08x\n", status); status = GdipDeleteRegion(region2); ok(status == Ok, "status %08x\n", status); status = GdipDeleteGraphics(graphics); ok(status == Ok, "status %08x\n", status); ReleaseDC(0, hdc); } static void test_translate(void) { GpRegion *region, *region2; GpGraphics *graphics; GpPath *path; GpRectF rectf; GpStatus status; HDC hdc = GetDC(0); BOOL res; status = GdipCreateFromHDC(hdc, &graphics); ok(status == Ok, "status %08x\n", status); status = GdipCreatePath(FillModeAlternate, &path); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegion(&region); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegion(&region2); ok(status == Ok, "status %08x\n", status); /* NULL */ status = GdipTranslateRegion(NULL, 0.0, 0.0); ok(status == InvalidParameter, "status %08x\n", status); /* infinite */ status = GdipTranslateRegion(region, 10.0, 10.0); ok(status == Ok, "status %08x\n", status); /* empty */ status = GdipSetEmpty(region); ok(status == Ok, "status %08x\n", status); status = GdipTranslateRegion(region, 10.0, 10.0); ok(status == Ok, "status %08x\n", status); /* rect */ rectf.X = 10.0; rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); rectf.X = 15.0; rectf.Y = -2.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region2, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipTranslateRegion(region, 5.0, -2.0); ok(status == Ok, "status %08x\n", status); res = FALSE; status = GdipIsEqualRegion(region, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(res, "Expected to be 
equal.\n"); /* path */ status = GdipAddPathEllipse(path, 0.0, 10.0, 100.0, 150.0); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionPath(region, path, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipResetPath(path); ok(status == Ok, "status %08x\n", status); status = GdipAddPathEllipse(path, 10.0, 21.0, 100.0, 150.0); ok(status == Ok, "status %08x\n", status); status = GdipCombineRegionPath(region2, path, CombineModeReplace); ok(status == Ok, "status %08x\n", status); status = GdipTranslateRegion(region, 10.0, 11.0); ok(status == Ok, "status %08x\n", status); res = FALSE; status = GdipIsEqualRegion(region, region2, graphics, &res); ok(status == Ok, "status %08x\n", status); ok(res, "Expected to be equal.\n"); status = GdipDeleteRegion(region); ok(status == Ok, "status %08x\n", status); status = GdipDeleteRegion(region2); ok(status == Ok, "status %08x\n", status); status = GdipDeleteGraphics(graphics); ok(status == Ok, "status %08x\n", status); status = GdipDeletePath(path); ok(status == Ok, "status %08x\n", status); ReleaseDC(0, hdc); } static void test_transform(void) { GpRegion *region, *region2; GpMatrix *matrix; GpGraphics *graphics; GpPath *path; GpRectF rectf; GpStatus status; HDC hdc = GetDC(0); BOOL res; status = GdipCreateFromHDC(hdc, &graphics); expect(Ok, status); status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipCreateRegion(&region); expect(Ok, status); status = GdipCreateRegion(&region2); expect(Ok, status); status = GdipCreateMatrix(&matrix); expect(Ok, status); status = GdipScaleMatrix(matrix, 2.0, 3.0, MatrixOrderAppend); expect(Ok, status); /* NULL */ status = GdipTransformRegion(NULL, matrix); expect(InvalidParameter, status); status = GdipTransformRegion(region, NULL); expect(InvalidParameter, status); /* infinite */ status = GdipTransformRegion(region, matrix); expect(Ok, status); res = FALSE; status = GdipIsEqualRegion(region, region2, graphics, &res); expect(Ok, status); ok(res, "Expected to be equal.\n"); /* empty */ status = GdipSetEmpty(region); expect(Ok, status); status = GdipTransformRegion(region, matrix); expect(Ok, status); status = GdipSetEmpty(region2); expect(Ok, status); res = FALSE; status = GdipIsEqualRegion(region, region2, graphics, &res); expect(Ok, status); ok(res, "Expected to be equal.\n"); /* rect */ rectf.X = 10.0; rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region, &rectf, CombineModeReplace); expect(Ok, status); rectf.X = 20.0; rectf.Y = 0.0; rectf.Width = 200.0; rectf.Height = 300.0; status = GdipCombineRegionRect(region2, &rectf, CombineModeReplace); expect(Ok, status); status = GdipTransformRegion(region, matrix); expect(Ok, status); res = FALSE; status = GdipIsEqualRegion(region, region2, graphics, &res); expect(Ok, status); ok(res, "Expected to be equal.\n"); /* path */ status = GdipAddPathEllipse(path, 0.0, 10.0, 100.0, 150.0); expect(Ok, status); status = GdipCombineRegionPath(region, path, CombineModeReplace); expect(Ok, status); status = GdipResetPath(path); expect(Ok, status); status = GdipAddPathEllipse(path, 0.0, 30.0, 200.0, 450.0); expect(Ok, status); status = GdipCombineRegionPath(region2, path, CombineModeReplace); expect(Ok, status); status = GdipTransformRegion(region, matrix); expect(Ok, status); res = FALSE; status = GdipIsEqualRegion(region, region2, graphics, &res); expect(Ok, status); ok(res, "Expected to be equal.\n"); status = GdipDeleteRegion(region); expect(Ok, status); status = 
GdipDeleteRegion(region2); expect(Ok, status); status = GdipDeleteGraphics(graphics); expect(Ok, status); status = GdipDeletePath(path); expect(Ok, status); status = GdipDeleteMatrix(matrix); expect(Ok, status); ReleaseDC(0, hdc); } static void test_scans(void) { GpRegion *region; GpMatrix *matrix; GpRectF rectf; GpStatus status; ULONG count=80085; INT icount; GpRectF scans[2]; GpRect scansi[2]; status = GdipCreateRegion(&region); expect(Ok, status); status = GdipCreateMatrix(&matrix); expect(Ok, status); /* test NULL values */ status = GdipGetRegionScansCount(NULL, &count, matrix); expect(InvalidParameter, status); status = GdipGetRegionScansCount(region, NULL, matrix); expect(InvalidParameter, status); status = GdipGetRegionScansCount(region, &count, NULL); expect(InvalidParameter, status); status = GdipGetRegionScans(NULL, scans, &icount, matrix); expect(InvalidParameter, status); status = GdipGetRegionScans(region, scans, NULL, matrix); expect(InvalidParameter, status); status = GdipGetRegionScans(region, scans, &icount, NULL); expect(InvalidParameter, status); /* infinite */ status = GdipGetRegionScansCount(region, &count, matrix); expect(Ok, status); expect(1, count); status = GdipGetRegionScans(region, NULL, &icount, matrix); expect(Ok, status); expect(1, icount); status = GdipGetRegionScans(region, scans, &icount, matrix); expect(Ok, status); expect(1, icount); status = GdipGetRegionScansI(region, scansi, &icount, matrix); expect(Ok, status); expect(1, icount); expect(-0x400000, scansi[0].X); expect(-0x400000, scansi[0].Y); expect(0x800000, scansi[0].Width); expect(0x800000, scansi[0].Height); status = GdipGetRegionScans(region, scans, &icount, matrix); expect(Ok, status); expect(1, icount); expectf((double)-0x400000, scans[0].X); expectf((double)-0x400000, scans[0].Y); expectf((double)0x800000, scans[0].Width); expectf((double)0x800000, scans[0].Height); /* empty */ status = GdipSetEmpty(region); expect(Ok, status); status = GdipGetRegionScansCount(region, &count, matrix); expect(Ok, status); expect(0, count); status = GdipGetRegionScans(region, scans, &icount, matrix); expect(Ok, status); expect(0, icount); /* single rectangle */ rectf.X = rectf.Y = 0.0; rectf.Width = rectf.Height = 5.0; status = GdipCombineRegionRect(region, &rectf, CombineModeReplace); expect(Ok, status); status = GdipGetRegionScansCount(region, &count, matrix); expect(Ok, status); expect(1, count); status = GdipGetRegionScans(region, scans, &icount, matrix); expect(Ok, status); expect(1, icount); expectf(0.0, scans[0].X); expectf(0.0, scans[0].Y); expectf(5.0, scans[0].Width); expectf(5.0, scans[0].Height); /* two rectangles */ rectf.X = rectf.Y = 5.0; rectf.Width = rectf.Height = 5.0; status = GdipCombineRegionRect(region, &rectf, CombineModeUnion); expect(Ok, status); status = GdipGetRegionScansCount(region, &count, matrix); expect(Ok, status); expect(2, count); /* Native ignores the initial value of count */ scans[1].X = scans[1].Y = scans[1].Width = scans[1].Height = 8.0; icount = 1; status = GdipGetRegionScans(region, scans, &icount, matrix); expect(Ok, status); expect(2, icount); expectf(0.0, scans[0].X); expectf(0.0, scans[0].Y); expectf(5.0, scans[0].Width); expectf(5.0, scans[0].Height); expectf(5.0, scans[1].X); expectf(5.0, scans[1].Y); expectf(5.0, scans[1].Width); expectf(5.0, scans[1].Height); status = GdipGetRegionScansI(region, scansi, &icount, matrix); expect(Ok, status); expect(2, icount); expect(0, scansi[0].X); expect(0, scansi[0].Y); expect(5, scansi[0].Width); expect(5, scansi[0].Height); 
expect(5, scansi[1].X); expect(5, scansi[1].Y); expect(5, scansi[1].Width); expect(5, scansi[1].Height); status = GdipDeleteRegion(region); expect(Ok, status); status = GdipDeleteMatrix(matrix); expect(Ok, status); } static void test_getbounds(void) { GpRegion *region; GpGraphics *graphics; GpStatus status; GpRectF rectf; HDC hdc = GetDC(0); status = GdipCreateFromHDC(hdc, &graphics); ok(status == Ok, "status %08x\n", status); status = GdipCreateRegion(&region); ok(status == Ok, "status %08x\n", status); /* NULL */ status = GdipGetRegionBounds(NULL, NULL, NULL); ok(status == InvalidParameter, "status %08x\n", status); status = GdipGetRegionBounds(region, NULL, NULL); ok(status == InvalidParameter, "status %08x\n", status); status = GdipGetRegionBounds(region, graphics, NULL); ok(status == InvalidParameter, "status %08x\n", status); /* infinite */ rectf.X = rectf.Y = 0.0; rectf.Height = rectf.Width = 100.0; status = GdipGetRegionBounds(region, graphics, &rectf); ok(status == Ok, "status %08x\n", status); ok(rectf.X == -(REAL)(1 << 22), "Expected X = %.2f, got %.2f\n", -(REAL)(1 << 22), rectf.X); ok(rectf.Y == -(REAL)(1 << 22), "Expected Y = %.2f, got %.2f\n", -(REAL)(1 << 22), rectf.Y); ok(rectf.Width == (REAL)(1 << 23), "Expected width = %.2f, got %.2f\n", (REAL)(1 << 23), rectf.Width); ok(rectf.Height == (REAL)(1 << 23), "Expected height = %.2f, got %.2f\n",(REAL)(1 << 23), rectf.Height); /* empty */ rectf.X = rectf.Y = 0.0; rectf.Height = rectf.Width = 100.0; status = GdipSetEmpty(region); ok(status == Ok, "status %08x\n", status); status = GdipGetRegionBounds(region, graphics, &rectf); ok(status == Ok, "status %08x\n", status); ok(rectf.X == 0.0, "Expected X = 0.0, got %.2f\n", rectf.X); ok(rectf.Y == 0.0, "Expected Y = 0.0, got %.2f\n", rectf.Y); ok(rectf.Width == 0.0, "Expected width = 0.0, got %.2f\n", rectf.Width); ok(rectf.Height == 0.0, "Expected height = 0.0, got %.2f\n", rectf.Height); /* rect */ rectf.X = 10.0; rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); rectf.X = rectf.Y = 0.0; rectf.Height = rectf.Width = 0.0; status = GdipGetRegionBounds(region, graphics, &rectf); ok(status == Ok, "status %08x\n", status); ok(rectf.X == 10.0, "Expected X = 0.0, got %.2f\n", rectf.X); ok(rectf.Y == 0.0, "Expected Y = 0.0, got %.2f\n", rectf.Y); ok(rectf.Width == 100.0, "Expected width = 0.0, got %.2f\n", rectf.Width); ok(rectf.Height == 100.0, "Expected height = 0.0, got %.2f\n", rectf.Height); /* the world and page transforms are ignored */ GdipScaleWorldTransform(graphics, 2.0, 2.0, MatrixOrderPrepend); GdipSetPageUnit(graphics, UnitInch); GdipSetPageScale(graphics, 2.0); status = GdipGetRegionBounds(region, graphics, &rectf); ok(status == Ok, "status %08x\n", status); ok(rectf.X == 10.0, "Expected X = 0.0, got %.2f\n", rectf.X); ok(rectf.Y == 0.0, "Expected Y = 0.0, got %.2f\n", rectf.Y); ok(rectf.Width == 100.0, "Expected width = 0.0, got %.2f\n", rectf.Width); rectf.X = 10.0; rectf.Y = 0.0; rectf.Width = rectf.Height = 100.0; status = GdipCombineRegionRect(region, &rectf, CombineModeReplace); ok(status == Ok, "status %08x\n", status); rectf.X = rectf.Y = 0.0; rectf.Height = rectf.Width = 0.0; status = GdipGetRegionBounds(region, graphics, &rectf); ok(status == Ok, "status %08x\n", status); ok(rectf.X == 10.0, "Expected X = 0.0, got %.2f\n", rectf.X); ok(rectf.Y == 0.0, "Expected Y = 0.0, got %.2f\n", rectf.Y); ok(rectf.Width == 100.0, "Expected width = 0.0, got %.2f\n", 
rectf.Width); ok(rectf.Height == 100.0, "Expected height = 0.0, got %.2f\n", rectf.Height); status = GdipDeleteRegion(region); ok(status == Ok, "status %08x\n", status); status = GdipDeleteGraphics(graphics); ok(status == Ok, "status %08x\n", status); ReleaseDC(0, hdc); } static void test_isvisiblepoint(void) { HDC hdc = GetDC(0); GpGraphics* graphics; GpRegion* region; GpPath* path; GpRectF rectf; GpStatus status; BOOL res; REAL x, y; status = GdipCreateFromHDC(hdc, &graphics); expect(Ok, status); status = GdipCreateRegion(&region); expect(Ok, status); /* null parameters */ status = GdipIsVisibleRegionPoint(NULL, 0, 0, graphics, &res); expect(InvalidParameter, status); status = GdipIsVisibleRegionPointI(NULL, 0, 0, graphics, &res); expect(InvalidParameter, status); status = GdipIsVisibleRegionPoint(region, 0, 0, NULL, &res); expect(Ok, status); status = GdipIsVisibleRegionPointI(region, 0, 0, NULL, &res); expect(Ok, status); status = GdipIsVisibleRegionPoint(region, 0, 0, graphics, NULL); expect(InvalidParameter, status); status = GdipIsVisibleRegionPointI(region, 0, 0, graphics, NULL); expect(InvalidParameter, status); /* infinite region */ status = GdipIsInfiniteRegion(region, graphics, &res); expect(Ok, status); ok(res == TRUE, "Region should be infinite\n"); x = 10; y = 10; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d) to be visible\n", (INT)x, (INT)y); x = -10; y = -10; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d) to be visible\n", (INT)x, (INT)y); /* rectangular region */ rectf.X = 10; rectf.Y = 20; rectf.Width = 30; rectf.Height = 40; status = GdipCombineRegionRect(region, &rectf, CombineModeReplace); expect(Ok, status); x = 0; y = 0; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) not to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d) not to be visible\n", (INT)x, (INT)y); x = 9; y = 19; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) to be visible\n", x, y); x = 9.25; y = 19.25; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) to be visible\n", x, y); x = 9.5; y = 19.5; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); x = 9.75; y = 19.75; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); x = 10; y = 20; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); x = 25; y = 40; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); 
expect(Ok, status); ok(res == TRUE, "Expected (%d, %d) to be visible\n", (INT)x, (INT)y); x = 40; y = 60; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) not to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d) not to be visible\n", (INT)x, (INT)y); /* translate into the center of the rectangle */ status = GdipTranslateWorldTransform(graphics, 25, 40, MatrixOrderAppend); expect(Ok, status); /* native ignores the world transform, so treat these as if * no transform exists */ x = -20; y = -30; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) not to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d) not to be visible\n", (INT)x, (INT)y); x = 0; y = 0; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) not to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d) not to be visible\n", (INT)x, (INT)y); x = 25; y = 40; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d) to be visible\n", (INT)x, (INT)y); /* translate back to origin */ status = GdipTranslateWorldTransform(graphics, -25, -40, MatrixOrderAppend); expect(Ok, status); /* region from path */ status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipAddPathEllipse(path, 10, 20, 30, 40); expect(Ok, status); status = GdipCombineRegionPath(region, path, CombineModeReplace); expect(Ok, status); x = 11; y = 21; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) not to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d) not to be visible\n", (INT)x, (INT)y); x = 25; y = 40; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f) to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d) to be visible\n", (INT)x, (INT)y); x = 40; y = 60; status = GdipIsVisibleRegionPoint(region, x, y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f) not to be visible\n", x, y); status = GdipIsVisibleRegionPointI(region, (INT)x, (INT)y, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d) not to be visible\n", (INT)x, (INT)y); GdipDeletePath(path); GdipDeleteRegion(region); GdipDeleteGraphics(graphics); ReleaseDC(0, hdc); } static void test_isvisiblerect(void) { HDC hdc = GetDC(0); GpGraphics* graphics; GpRegion* region; GpPath* path; GpRectF rectf; GpStatus status; BOOL res; REAL x, y, w, h; status = GdipCreateFromHDC(hdc, &graphics); expect(Ok, status); status = GdipCreateRegion(&region); expect(Ok, status); /* null parameters */ status = GdipIsVisibleRegionRect(NULL, 0, 0, 0, 0, graphics, &res); 
expect(InvalidParameter, status); status = GdipIsVisibleRegionRectI(NULL, 0, 0, 0, 0, graphics, &res); expect(InvalidParameter, status); status = GdipIsVisibleRegionRect(region, 0, 0, 0, 0, NULL, &res); expect(Ok, status); status = GdipIsVisibleRegionRectI(region, 0, 0, 0, 0, NULL, &res); expect(Ok, status); status = GdipIsVisibleRegionRect(region, 0, 0, 0, 0, graphics, NULL); expect(InvalidParameter, status); status = GdipIsVisibleRegionRectI(region, 0, 0, 0, 0, graphics, NULL); expect(InvalidParameter, status); /* infinite region */ status = GdipIsInfiniteRegion(region, graphics, &res); expect(Ok, status); ok(res == TRUE, "Region should be infinite\n"); x = 10; w = 10; y = 10; h = 10; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); x = -10; w = 5; y = -10; h = 5; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); /* rectangular region */ rectf.X = 10; rectf.Y = 20; rectf.Width = 30; rectf.Height = 40; status = GdipCombineRegionRect(region, &rectf, CombineModeIntersect); expect(Ok, status); /* entirely within the region */ x = 11; w = 10; y = 12; h = 10; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d, %d, %d) to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); /* entirely outside of the region */ x = 0; w = 5; y = 0; h = 5; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f, %.2f, %.2f) not to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d, %d, %d) not to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); /* corner cases */ x = 0; w = 10; y = 0; h = 20; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f, %.2f, %.2f) not to be visible\n", x, y, w, h); x = 0; w = 10.25; y = 0; h = 20.25; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); x = 39; w = 10; y = 59; h = 10; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); x = 39.25; w = 10; y = 59.25; h = 10; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f, %.2f, %.2f) not to be visible\n", x, y, w, h); /* corners outside, but some intersection */ x = 0; w = 100; y = 0; h = 100; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); x = 0; w = 100; y = 0; h = 40; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); x = 0; w = 25; y = 0; h = 100; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); 
expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); /* translate into the center of the rectangle */ status = GdipTranslateWorldTransform(graphics, 25, 40, MatrixOrderAppend); expect(Ok, status); /* native ignores the world transform, so treat these as if * no transform exists */ x = 0; w = 5; y = 0; h = 5; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f, %.2f, %.2f) not to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d, %d, %d) not to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); x = 11; w = 10; y = 12; h = 10; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d, %d, %d) to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); /* translate back to origin */ status = GdipTranslateWorldTransform(graphics, -25, -40, MatrixOrderAppend); expect(Ok, status); /* region from path */ status = GdipCreatePath(FillModeAlternate, &path); expect(Ok, status); status = GdipAddPathEllipse(path, 10, 20, 30, 40); expect(Ok, status); status = GdipCombineRegionPath(region, path, CombineModeReplace); expect(Ok, status); x = 0; w = 12; y = 0; h = 22; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f, %.2f, %.2f) not to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d, %d, %d) not to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); x = 0; w = 25; y = 0; h = 40; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d, %d, %d) to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); x = 38; w = 10; y = 55; h = 10; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%.2f, %.2f, %.2f, %.2f) not to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == FALSE, "Expected (%d, %d, %d, %d) not to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); x = 0; w = 100; y = 0; h = 100; status = GdipIsVisibleRegionRect(region, x, y, w, h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%.2f, %.2f, %.2f, %.2f) to be visible\n", x, y, w, h); status = GdipIsVisibleRegionRectI(region, (INT)x, (INT)y, (INT)w, (INT)h, graphics, &res); expect(Ok, status); ok(res == TRUE, "Expected (%d, %d, %d, %d) to be visible\n", (INT)x, (INT)y, (INT)w, (INT)h); GdipDeletePath(path); GdipDeleteRegion(region); GdipDeleteGraphics(graphics); ReleaseDC(0, hdc); } START_TEST(region) { struct GdiplusStartupInput gdiplusStartupInput; ULONG_PTR gdiplusToken; gdiplusStartupInput.GdiplusVersion = 1; gdiplusStartupInput.DebugEventCallback = NULL; gdiplusStartupInput.SuppressBackgroundThread = 0; 
gdiplusStartupInput.SuppressExternalCodecs = 0; GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL); test_getregiondata(); test_isinfinite(); test_isempty(); test_combinereplace(); test_fromhrgn(); test_gethrgn(); test_isequal(); test_translate(); test_transform(); test_scans(); test_getbounds(); test_isvisiblepoint(); test_isvisiblerect(); GdiplusShutdown(gdiplusToken); }
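/*
 * Hedged usage sketch (not part of the Wine test suite above): a minimal
 * stand-alone program exercising the flat GDI+ region calls the tests cover.
 * The function names, the field-by-field GdiplusStartupInput setup and the
 * NULL-graphics behaviour of GdipIsVisibleRegionPoint are taken from the
 * tests themselves; the header choice and program structure are assumptions,
 * so the block is fenced out of compilation with #if 0.
 */
#if 0
#include <windows.h>
#include <gdiplus.h>
#include <stdio.h>

int main(void)
{
    struct GdiplusStartupInput input;
    ULONG_PTR token;
    GpRegion *region;
    GpRectF rect;
    BOOL visible = FALSE;

    input.GdiplusVersion = 1;
    input.DebugEventCallback = NULL;
    input.SuppressBackgroundThread = 0;
    input.SuppressExternalCodecs = 0;
    GdiplusStartup(&token, &input, NULL);

    /* a newly created region is infinite; replace it with a 30x40 rect at (10,20) */
    GdipCreateRegion(&region);
    rect.X = 10.0; rect.Y = 20.0; rect.Width = 30.0; rect.Height = 40.0;
    GdipCombineRegionRect(region, &rect, CombineModeReplace);

    /* as in the tests, a NULL graphics object is accepted by this call */
    GdipIsVisibleRegionPoint(region, 25.0, 40.0, NULL, &visible);
    printf("point (25,40) visible: %d\n", visible);

    GdipDeleteRegion(region);
    GdiplusShutdown(token);
    return 0;
}
#endif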
487838.c
#include <u.h> #include <kvm.h> #include <nlist.h> #include <sys/types.h> #include <sys/protosw.h> #include <sys/socket.h> #include <sys/sysctl.h> #include <sys/time.h> #include <sys/dkstat.h> #include <net/if.h> #include <net/if_var.h> #include <net/if_dl.h> #include <net/if_types.h> #if __FreeBSD_version < 600000 #include <machine/apm_bios.h> #endif #include <sys/ioctl.h> #include <limits.h> #include <libc.h> #include <bio.h> #include <ifaddrs.h> #include "dat.h" void xapm(int); void xloadavg(int); void xcpu(int); void xswap(int); void xsysctl(int); void xnet(int); void xkvm(int); void (*statfn[])(int) = { xkvm, xapm, xloadavg, xswap, xcpu, xsysctl, xnet, 0 }; static kvm_t *kvm; static struct nlist nl[] = { { "_cp_time" }, { "" } }; void kvminit(void) { char buf[_POSIX2_LINE_MAX]; if(kvm) return; kvm = kvm_openfiles(nil, nil, nil, OREAD, buf); if(kvm == nil) return; if(kvm_nlist(kvm, nl) < 0 || nl[0].n_type == 0){ kvm = nil; return; } } void xkvm(int first) { if(first) kvminit(); } int kread(ulong addr, char *buf, int size) { if(kvm_read(kvm, addr, buf, size) != size){ memset(buf, 0, size); return -1; } return size; } void xnet(int first) { struct ifaddrs *ifap, *ifa; ulong out, in, outb, inb, err; if(first) return; if (getifaddrs(&ifap) != 0) return; out = in = outb = inb = err = 0; #define IFA_STAT(s) (((struct if_data *)ifa->ifa_data)->ifi_ ## s) for (ifa = ifap; ifa; ifa = ifa->ifa_next) { if (ifa->ifa_addr->sa_family != AF_LINK) continue; out += IFA_STAT(opackets); in += IFA_STAT(ipackets); outb += IFA_STAT(obytes); inb += IFA_STAT(ibytes); err += IFA_STAT(oerrors) + IFA_STAT(ierrors); } freeifaddrs(ifap); Bprint(&bout, "etherin %lud 1000\n", in); Bprint(&bout, "etherout %lud 1000\n", out); Bprint(&bout, "etherinb %lud 1000000\n", inb); Bprint(&bout, "etheroutb %lud 1000000\n", outb); Bprint(&bout, "ethererr %lud 1000\n", err); Bprint(&bout, "ether %lud 1000\n", in+out); Bprint(&bout, "etherb %lud 1000000\n", inb+outb); } #if __FreeBSD_version >= 500000 int xacpi(int first) { int rv; int val; size_t len; len = sizeof(val); rv = sysctlbyname("hw.acpi.battery.life", &val, &len, nil, 0); if(rv != 0) return -1; Bprint(&bout, "battery =%d 100\n", val); return 0; } #else int xacpi(int first) { return -1; } #endif #if __FreeBSD_version < 600000 void xapm(int first) { static int fd; struct apm_info ai; if(first){ xacpi(first); fd = open("/dev/apm", OREAD); return; } if(xacpi(0) >= 0) return; if(ioctl(fd, APMIO_GETINFO, &ai) < 0) return; if(ai.ai_batt_life <= 100) Bprint(&bout, "battery =%d 100\n", ai.ai_batt_life); } #else void xapm(int first) { xacpi(first); } #endif int rsys(char *name, char *buf, int len) { size_t l; l = len; if(sysctlbyname(name, buf, &l, nil, 0) < 0) return -1; buf[l] = 0; return l; } vlong isys(char *name) { ulong u; size_t l; l = sizeof u; if(sysctlbyname(name, &u, &l, nil, 0) < 0) return -1; return u; } void xsysctl(int first) { static int pgsize; if(first){ pgsize = isys("vm.stats.vm.v_page_size"); if(pgsize == 0) pgsize = 4096; } Bprint(&bout, "mem =%lld %lld\n", isys("vm.stats.vm.v_active_count")*pgsize, isys("vm.stats.vm.v_page_count")*pgsize); Bprint(&bout, "context %lld 1000\n", isys("vm.stats.sys.v_swtch")); Bprint(&bout, "syscall %lld 1000\n", isys("vm.stats.sys.v_syscall")); Bprint(&bout, "intr %lld 1000\n", isys("vm.stats.sys.v_intr")+isys("vm.stats.sys.v_trap")); Bprint(&bout, "fault %lld 1000\n", isys("vm.stats.vm.v_vm_faults")); Bprint(&bout, "fork %lld 1000\n", isys("vm.stats.vm.v_forks") +isys("vm.stats.vm.v_rforks") +isys("vm.stats.vm.v_vforks")); } 
void xcpu(int first) { static int stathz; union { ulong x[20]; struct clockinfo ci; } u; int n; if(first){ if(rsys("kern.clockrate", (char*)u.x, sizeof u.x) < sizeof u.ci) stathz = 128; else stathz = u.ci.stathz; return; } if((n=rsys("kern.cp_time", (char*)u.x, sizeof u.x)) < 5*sizeof(ulong)) return; Bprint(&bout, "user %lud %d\n", u.x[CP_USER]+u.x[CP_NICE], stathz); Bprint(&bout, "sys %lud %d\n", u.x[CP_SYS], stathz); Bprint(&bout, "cpu %lud %d\n", u.x[CP_USER]+u.x[CP_NICE]+u.x[CP_SYS], stathz); Bprint(&bout, "idle %lud %d\n", u.x[CP_IDLE], stathz); } void xloadavg(int first) { double l[3]; if(first) return; if(getloadavg(l, 3) < 0) return; Bprint(&bout, "load =%d 1000\n", (int)(l[0]*1000.0)); } void xswap(int first) { static struct kvm_swap s; static ulong pgin, pgout; int i, o; static int pgsize; if(first){ pgsize = getpagesize(); if(pgsize == 0) pgsize = 4096; return; } if(kvm == nil) return; i = isys("vm.stats.vm.v_swappgsin"); o = isys("vm.stats.vm.v_swappgsout"); if(i != pgin || o != pgout){ pgin = i; pgout = o; kvm_getswapinfo(kvm, &s, 1, 0); } Bprint(&bout, "swap =%lld %lld\n", s.ksw_used*(vlong)pgsize, s.ksw_total*(vlong)pgsize); }
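/*
 * Hedged sketch (not part of the file above): isys()/rsys() are thin
 * wrappers around sysctlbyname(3).  This stand-alone fragment shows the
 * same pattern for one MIB the code reads; the MIB name comes from
 * xsysctl() above, the rest is illustrative and excluded from the build.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int main(void)
{
	unsigned int pgsize;	/* vm.stats.vm.v_page_size is a u_int */
	size_t len = sizeof pgsize;

	if (sysctlbyname("vm.stats.vm.v_page_size", &pgsize, &len, NULL, 0) < 0) {
		perror("sysctlbyname");
		return 1;
	}
	printf("page size: %u bytes\n", pgsize);
	return 0;
}
#endif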
71739.c
//------------------------------------------------------------------------------ // GB_AxB: hard-coded functions for semiring: C<M>=A*B or A'*B //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_bracket.h" #include "GB_iterator.h" #include "GB_sort.h" #include "GB_atomics.h" #include "GB_AxB_saxpy3.h" #include "GB_AxB__include.h" // The C=A*B semiring is defined by the following types and operators: // A'*B function (dot2): GB_Adot2B__min_isge_fp32 // A'*B function (dot3): GB_Adot3B__min_isge_fp32 // C+=A'*B function (dot4): GB_Adot4B__min_isge_fp32 // A*B function (saxpy3): GB_Asaxpy3B__min_isge_fp32 // C type: float // A type: float // B type: float // Multiply: z = (aik >= bkj) // Add: cij = fminf (cij, z) // 'any' monoid? 0 // atomic? 1 // OpenMP atomic? 0 // MultAdd: cij = fminf (cij, (aik >= bkj)) // Identity: INFINITY // Terminal: if (cij == (-INFINITY)) break ; #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // aik = Ax [pA] #define GB_GETA(aik,Ax,pA) \ float aik = Ax [pA] // bkj = Bx [pB] #define GB_GETB(bkj,Bx,pB) \ float bkj = Bx [pB] #define GB_CX(p) Cx [p] // multiply operator #define GB_MULT(z, x, y) \ z = (x >= y) // multiply-add #define GB_MULTADD(z, x, y) \ z = fminf (z, (x >= y)) // monoid identity value #define GB_IDENTITY \ INFINITY // break if cij reaches the terminal value (dot product only) #define GB_DOT_TERMINAL(cij) \ if (cij == (-INFINITY)) break ; // simd pragma for dot-product loop vectorization #define GB_PRAGMA_VECTORIZE_DOT \ ; // simd pragma for other loop vectorization #define GB_PRAGMA_VECTORIZE GB_PRAGMA_SIMD // declare the cij scalar #define GB_CIJ_DECLARE(cij) \ float cij // save the value of C(i,j) #define GB_CIJ_SAVE(cij,p) Cx [p] = cij // cij = Cx [pC] #define GB_GETC(cij,pC) \ cij = Cx [pC] // Cx [pC] = cij #define GB_PUTC(cij,pC) \ Cx [pC] = cij // Cx [p] = t #define GB_CIJ_WRITE(p,t) Cx [p] = t // C(i,j) += t #define GB_CIJ_UPDATE(p,t) \ Cx [p] = fminf (Cx [p], t) // x + y #define GB_ADD_FUNCTION(x,y) \ fminf (x, y) // type with size of GB_CTYPE, and can be used in compare-and-swap #define GB_CTYPE_PUN \ uint32_t // bit pattern for bool, 8-bit, 16-bit, and 32-bit integers #define GB_CTYPE_BITS \ 0 // 1 if monoid update can skipped entirely (the ANY monoid) #define GB_IS_ANY_MONOID \ 0 // 1 if monoid update is EQ #define GB_IS_EQ_MONOID \ 0 // 1 if monoid update can be done atomically, 0 otherwise #define GB_HAS_ATOMIC \ 1 // 1 if monoid update can be done with an OpenMP atomic update, 0 otherwise #define GB_HAS_OMP_ATOMIC \ 0 // 1 for the ANY_PAIR semirings #define GB_IS_ANY_PAIR_SEMIRING \ 0 // 1 if PAIR is the multiply operator #define GB_IS_PAIR_MULTIPLIER \ 0 #if GB_IS_ANY_PAIR_SEMIRING // result is purely symbolic; no numeric work to do. Hx is not used. 
#define GB_HX_WRITE(i,t) #define GB_CIJ_GATHER(p,i) #define GB_HX_UPDATE(i,t) #define GB_CIJ_MEMCPY(p,i,len) #else // Hx [i] = t #define GB_HX_WRITE(i,t) Hx [i] = t // Cx [p] = Hx [i] #define GB_CIJ_GATHER(p,i) Cx [p] = Hx [i] // Hx [i] += t #define GB_HX_UPDATE(i,t) \ Hx [i] = fminf (Hx [i], t) // memcpy (&(Cx [p]), &(Hx [i]), len) #define GB_CIJ_MEMCPY(p,i,len) \ memcpy (Cx +(p), Hx +(i), (len) * sizeof(float)) #endif // disable this semiring and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_ISGE || GxB_NO_FP32 || GxB_NO_MIN_FP32 || GxB_NO_ISGE_FP32 || GxB_NO_MIN_ISGE_FP32) //------------------------------------------------------------------------------ // C=A'*B or C<!M>=A'*B: dot product (phase 2) //------------------------------------------------------------------------------ GrB_Info GB_Adot2B__min_isge_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix *Aslice, bool A_is_pattern, const GrB_Matrix B, bool B_is_pattern, int64_t *GB_RESTRICT B_slice, int64_t *GB_RESTRICT *C_counts, int nthreads, int naslice, int nbslice ) { // C<M>=A'*B now uses dot3 #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_AxB_dot2_meta.c" #undef GB_PHASE_2_OF_2 return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C<M>=A'*B: masked dot product method (phase 2) //------------------------------------------------------------------------------ GrB_Info GB_Adot3B__min_isge_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix B, bool B_is_pattern, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot3_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C+=A'*B: dense dot product //------------------------------------------------------------------------------ GrB_Info GB_Adot4B__min_isge_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, int64_t *GB_RESTRICT A_slice, int naslice, const GrB_Matrix B, bool B_is_pattern, int64_t *GB_RESTRICT B_slice, int nbslice, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_dot4_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C=A*B, C<M>=A*B, C<!M>=A*B: saxpy3 method (Gustavson + Hash) //------------------------------------------------------------------------------ #include "GB_AxB_saxpy3_template.h" GrB_Info GB_Asaxpy3B__min_isge_fp32 ( GrB_Matrix C, const GrB_Matrix M, bool Mask_comp, const bool Mask_struct, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix B, bool B_is_pattern, GB_saxpy3task_struct *GB_RESTRICT TaskList, const int ntasks, const int nfine, const int nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_AxB_saxpy3_template.c" return (GrB_SUCCESS) ; #endif } #endif
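//------------------------------------------------------------------------------
// Hedged sketch (not generated code): a scalar reference for the MIN_ISGE_FP32
// semiring defined above, written directly from the GB_IDENTITY, GB_MULTADD and
// GB_DOT_TERMINAL macros.  It computes one dense dot product
// c = min_k (a[k] >= b[k]); the loop structure is illustrative only, not the
// library's actual dot-product kernel, and it is fenced out of compilation.
//------------------------------------------------------------------------------
#if 0
#include <math.h>

static float min_isge_dot_reference (const float *a, const float *b, int n)
{
    float cij = INFINITY ;                      // GB_IDENTITY
    for (int k = 0 ; k < n ; k++)
    {
        // GB_MULTADD: cij = fminf (cij, (aik >= bkj))
        cij = fminf (cij, (float) (a [k] >= b [k])) ;
        if (cij == (-INFINITY)) break ;         // GB_DOT_TERMINAL
    }
    return (cij) ;
}
#endif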
893331.c
/* * Generated by asn1c-0.9.24 (http://lionet.info/asn1c) * From ASN.1 module "S1AP-IEs" * found in "/root/openair-cn/SRC/S1AP/MESSAGES/ASN1/R10.5/S1AP-IEs.asn" * `asn1c -gen-PER` */ #include "S1ap-CellID-Broadcast-Item.h" static asn_TYPE_member_t asn_MBR_S1ap_CellID_Broadcast_Item_1[] = { { ATF_NOFLAGS, 0, offsetof(struct S1ap_CellID_Broadcast_Item, eCGI), (ASN_TAG_CLASS_CONTEXT | (0 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_S1ap_EUTRAN_CGI, 0, /* Defer constraints checking to the member type */ 0, /* No PER visible constraints */ 0, "eCGI" }, { ATF_POINTER, 1, offsetof(struct S1ap_CellID_Broadcast_Item, iE_Extensions), (ASN_TAG_CLASS_CONTEXT | (1 << 2)), -1, /* IMPLICIT tag at current level */ &asn_DEF_S1ap_IE_Extensions, 0, /* Defer constraints checking to the member type */ 0, /* No PER visible constraints */ 0, "iE-Extensions" }, }; static int asn_MAP_S1ap_CellID_Broadcast_Item_oms_1[] = { 1 }; static ber_tlv_tag_t asn_DEF_S1ap_CellID_Broadcast_Item_tags_1[] = { (ASN_TAG_CLASS_UNIVERSAL | (16 << 2)) }; static asn_TYPE_tag2member_t asn_MAP_S1ap_CellID_Broadcast_Item_tag2el_1[] = { { (ASN_TAG_CLASS_CONTEXT | (0 << 2)), 0, 0, 0 }, /* eCGI at 232 */ { (ASN_TAG_CLASS_CONTEXT | (1 << 2)), 1, 0, 0 } /* iE-Extensions at 233 */ }; static asn_SEQUENCE_specifics_t asn_SPC_S1ap_CellID_Broadcast_Item_specs_1 = { sizeof(struct S1ap_CellID_Broadcast_Item), offsetof(struct S1ap_CellID_Broadcast_Item, _asn_ctx), asn_MAP_S1ap_CellID_Broadcast_Item_tag2el_1, 2, /* Count of tags in the map */ asn_MAP_S1ap_CellID_Broadcast_Item_oms_1, /* Optional members */ 1, 0, /* Root/Additions */ 1, /* Start extensions */ 3 /* Stop extensions */ }; asn_TYPE_descriptor_t asn_DEF_S1ap_CellID_Broadcast_Item = { "S1ap-CellID-Broadcast-Item", "S1ap-CellID-Broadcast-Item", SEQUENCE_free, SEQUENCE_print, SEQUENCE_constraint, SEQUENCE_decode_ber, SEQUENCE_encode_der, SEQUENCE_decode_xer, SEQUENCE_encode_xer, SEQUENCE_decode_uper, SEQUENCE_encode_uper, SEQUENCE_decode_aper, SEQUENCE_encode_aper, SEQUENCE_compare, 0, /* Use generic outmost tag fetcher */ asn_DEF_S1ap_CellID_Broadcast_Item_tags_1, sizeof(asn_DEF_S1ap_CellID_Broadcast_Item_tags_1) /sizeof(asn_DEF_S1ap_CellID_Broadcast_Item_tags_1[0]), /* 1 */ asn_DEF_S1ap_CellID_Broadcast_Item_tags_1, /* Same as above */ sizeof(asn_DEF_S1ap_CellID_Broadcast_Item_tags_1) /sizeof(asn_DEF_S1ap_CellID_Broadcast_Item_tags_1[0]), /* 1 */ 0, /* No PER visible constraints */ asn_MBR_S1ap_CellID_Broadcast_Item_1, 2, /* Elements count */ &asn_SPC_S1ap_CellID_Broadcast_Item_specs_1 /* Additional specs */ };
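/*
 * Hedged sketch (not asn1c output): minimal use of the generated type
 * descriptor above with asn1c's generic helpers.  xer_fprint() and
 * ASN_STRUCT_FREE() are standard asn1c APIs; leaving the mandatory eCGI
 * sub-fields unset is acceptable only for this illustration, which is
 * fenced out of compilation.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include "S1ap-CellID-Broadcast-Item.h"

int main(void)
{
	struct S1ap_CellID_Broadcast_Item *item = calloc(1, sizeof(*item));
	if (item == NULL)
		return 1;

	/* real code would fill item->eCGI (and optionally iE_Extensions) here */
	xer_fprint(stdout, &asn_DEF_S1ap_CellID_Broadcast_Item, item);

	ASN_STRUCT_FREE(asn_DEF_S1ap_CellID_Broadcast_Item, item);
	return 0;
}
#endif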
506749.c
/* Copyright (c) 2014, Linaro Limited
 * All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include "config.h"
#include "shmem.h"

int main(int argc, char *argv[])
{
	return shmem_main(argc, argv);
}
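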
209042.c
#include <cpu/cpu.h> #include <debug.h> #include <kernel.h> #include <mm/paging.h> #include <proc/threads.h> extern page_directory_t *g_current_directory; void debug_handler_task() { task_t *curtask = get_current_task(); struct thread *curthrd = get_current_thrd(); if (!curtask) return; printk("Task name: '%s'\n", curtask->name); printk("Task ring: %i, id: %i\n", curtask->ring, curtask->pid); printk("Program start: %08x program break %08x\n", curtask->program_start, curtask->program_break); printk("Stacktop %08x stacksize %08x kernel stack %08x\n", curthrd->stack_top, curthrd->stack_size, curthrd->kernel_stack); } static void frame_print(char *string, unsigned long *location) { page_t *page = get_page((unsigned int) location, 0, get_current_dir()); int frame = 0; unsigned int value = 0; if (page) { frame = page->frame; if (frame) value = *location; } printk(string, page, frame, get_frame((unsigned int) location), value); } void DEBUGGER_ENTRY() { /** * This function will be breakpointed to by GDB so that * whenever the debug_handler is called, we automatically stop * and get a debugger ready. */ } void debug_handler(registers_t *regs) { printk("============ DEBUG HANDLER CALLED ============\n"); if (regs == 0) { printk("No register data was provided\n"); goto taskinfo; } printk("Interrupt number: %i (0x%x) and error number %i (0x%x)\n", regs->int_no, regs->int_no, regs->err_code, regs->err_code); printk("Register dump: \n"); printk("EAX: %08x EBX: %08x ECX: %08x EDX: %08x\n", regs->eax, regs->ebx, regs->ecx, regs->edx); printk("DS: %08x CS: %08x SS: %08x EF: %08x\n", regs->ds, regs->cs, regs->ss, regs->eflags); printk("ESP: %08x EBP: %08x ESI: %08x EDI: %08x\n", regs->esp, regs->ebp, regs->esi, regs->edi); printk("EIP: %08x CR3: %08x\n", regs->eip, g_current_directory->physicalAddress); frame_print( "Frame info of esp: page %x, frame %x is frame set %i, value %x\n", (unsigned long *) regs->esp); frame_print( "Frame info of eip: page %x, frame %x is frame set %i, value %x\n", (unsigned long *) regs->eip); DEBUGGER_ENTRY(); taskinfo:; printk("---------------- TASK INFO -----------------\n"); task_t *curtask = get_current_task(); struct thread *curthrd = get_current_thrd(); if (!curtask) goto end; printk("Task name: '%s'\n", curtask->name); printk("Task ring: %i, id: %i\n", curtask->ring, curtask->pid); printk("Program start: %08x program break %08x\n", curtask->program_start, curtask->program_break); printk("Stacktop %08x stacksize %08x kernel stack %08x\n", curthrd->stack_top, curthrd->stack_size, curthrd->kernel_stack); DEBUGGER_ENTRY(); printk("----------------- STACKTRACE -----------------\n"); dump_stacktrace(); end:; printk("============ DEBUG END ============\n"); for (;;) ; }
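/*
 * Hedged sketch (not part of the file above): debug_handler() accepts a NULL
 * register frame (it prints "No register data was provided" and continues with
 * the task info), so a panic-style helper could hand control to it directly.
 * The kernel_panic() wrapper below is purely illustrative -- it is not an API
 * of this kernel -- and is fenced out of compilation.
 */
#if 0
static void kernel_panic(const char *why)
{
	printk("panic: %s\n", why);
	/* never returns: debug_handler() ends in an infinite loop */
	debug_handler(NULL);
}
#endif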
780493.c
/********************************************************************** * $Id$ * * Project: MapServer * Purpose: OGC SOS implementation * Author: Y. Assefa, DM Solutions Group ([email protected]) * ********************************************************************** * Copyright (c) 2006, Y. Assefa, DM Solutions Group Inc * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies of this Software or works derived from this Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. **********************************************************************/ #define _GNU_SOURCE #include "mapserver.h" #if defined(USE_SOS_SVR) && defined(USE_LIBXML2) #include "maperror.h" #include "mapthread.h" #include "mapows.h" #include "maptime.h" #include "mapgml.h" #include "mapogcfilter.h" #include "mapowscommon.h" #include "maplibxml2.h" #include "libxml/parser.h" #include "libxml/tree.h" #include "libxml/xpath.h" #include "libxml/xpathInternals.h" const char *pszSOSVersion = "1.0.0"; const char *pszSOSNamespaceUri = "http://www.opengis.net/sos/1.0"; const char *pszSOSNamespacePrefix = "sos"; const char *pszOMNamespaceUri = "http://www.opengis.net/om/1.0"; const char *pszOMNamespacePrefix = "om"; const char *pszSOSDescribeSensorMimeType = "text/xml; subtype=\"sensorML/1.0.0\""; const char *pszSOSGetObservationMimeType = "text/xml; subtype=\"om/1.0.0\""; typedef struct { char *pszProcedure; xmlNodePtr psResultNode; } SOSProcedureNode; int msSOSParseRequest(mapObj *map, cgiRequestObj *request, sosParamsObj *sosparams); void msSOSFreeParamsObj(sosParamsObj *sosparams); /* ** msSOSException() ** ** Report current MapServer error in XML exception format. ** Wrapper function around msOWSCommonExceptionReport. Merely ** passes SOS specific info. 
** */ static int msSOSException(mapObj *map, char *locator, char *exceptionCode) { int size = 0; char *errorString = NULL; char *schemasLocation = NULL; xmlDocPtr psDoc = NULL; xmlNodePtr psRootNode = NULL; xmlNsPtr psNsOws = NULL; xmlChar *buffer = NULL; psNsOws = xmlNewNs(NULL, BAD_CAST "http://www.opengis.net/ows/1.1", BAD_CAST "ows"); errorString = msGetErrorString("\n"); schemasLocation = msEncodeHTMLEntities(msOWSGetSchemasLocation(map)); psDoc = xmlNewDoc(BAD_CAST "1.0"); psRootNode = msOWSCommonExceptionReport(psNsOws, OWS_1_1_0, schemasLocation, pszSOSVersion, msOWSGetLanguage(map, "exception"), exceptionCode, locator, errorString); xmlDocSetRootElement(psDoc, psRootNode); xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/ows/1.1", BAD_CAST "ows"); msIO_setHeader("Content-Type","text/xml; charset=UTF-8"); msIO_sendHeaders(); xmlDocDumpFormatMemoryEnc(psDoc, &buffer, &size, ("UTF-8"), 1); msIO_printf("%s", buffer); /*free buffer and the document */ free(errorString); free(schemasLocation); xmlFree(buffer); xmlFreeDoc(psDoc); xmlFreeNs(psNsOws); /* ** The typical pattern is to call msSOSException() right after ** msSetError(). In order to prevent mapserv.c from re-reporting this ** error at a higher level, we mark it as reported here. #3571 */ { errorObj *err = msGetErrorObj(); if( err != NULL && err->code != MS_NOERR ) err->isreported = MS_TRUE; } return MS_FAILURE; } static int _IsInList(char **papsProcedures, int nDistinctProcedures, char *pszProcedure) { int i = 0; if (papsProcedures && nDistinctProcedures > 0 && pszProcedure) { for (i=0; i<nDistinctProcedures; i++) { if (papsProcedures[i] && strcmp(papsProcedures[i], pszProcedure) == 0) return 1; } } return 0; } /************************************************************************/ /* msSOSValidateFilter */ /* */ /* Look if the filter's property names have an equivalent */ /* layre's attribute. */ /************************************************************************/ static int msSOSValidateFilter(FilterEncodingNode *psFilterNode, layerObj *lp) { int i=0, bFound =0; /* assuming here that the layer is opened*/ if (psFilterNode && lp) { if (psFilterNode->eType == FILTER_NODE_TYPE_PROPERTYNAME) { for (i=0; i<lp->numitems; i++) { if (strcasecmp(lp->items[i], psFilterNode->pszValue) == 0) { bFound = 1; break; } } if (!bFound) return MS_FALSE; } if (psFilterNode->psLeftNode && psFilterNode->eType != FILTER_NODE_TYPE_SPATIAL) { if (msSOSValidateFilter(psFilterNode->psLeftNode, lp) == MS_FALSE) return MS_FALSE; } if (psFilterNode->psRightNode && psFilterNode->eType != FILTER_NODE_TYPE_SPATIAL) { if (msSOSValidateFilter(psFilterNode->psRightNode, lp) == MS_FALSE) return MS_FALSE; } } return MS_TRUE; } /************************************************************************/ /* msSOSAddMetadataChildNode */ /* */ /* Utility function to add a metadata node. 
*/ /************************************************************************/ void msSOSAddMetadataChildNode(xmlNodePtr psParent, const char *psNodeName, xmlNsPtr psNs, hashTableObj *metadata, const char *psNamespaces, const char *psMetadataName, const char *psDefaultValue) { xmlNodePtr psNode = NULL; char *psValue = NULL; if (psParent && psNodeName) { psValue = msOWSGetEncodeMetadata(metadata, psNamespaces, psMetadataName, psDefaultValue); if (psValue) { psNode = xmlNewChild(psParent, NULL, BAD_CAST psNodeName, BAD_CAST psValue); if (psNs) xmlSetNs(psNode, psNs); free(psValue); } } } /************************************************************************/ /* msSOSGetFirstLayerForOffering */ /* */ /* return the first layer for the offering. */ /************************************************************************/ layerObj *msSOSGetFirstLayerForOffering(mapObj *map, const char *pszOffering, const char *pszProperty) { layerObj *lp = NULL; const char *pszTmp = NULL; int i = 0; if (pszOffering && map) { for (i=0; i<map->numlayers; i++) { pszTmp = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "offering_id"); if (pszTmp && (strcasecmp(pszTmp, pszOffering) == 0)) { if (pszProperty) { pszTmp = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "observedproperty_id"); if (pszTmp && (strcasecmp(pszTmp, pszProperty) == 0)) { lp = (GET_LAYER(map, i)); break; } } else { lp = (GET_LAYER(map, i)); break; } } } } return lp; } xmlNodePtr msSOSAddTimeNode(xmlNsPtr psNs, xmlNsPtr psNsGml, char *pszStart, char *pszEnd) { xmlNodePtr psNode=NULL; char *timeel= NULL; if (strcmp((char *)psNs->prefix,"sos") == 0) timeel = "time"; if (strcmp((char *)psNs->prefix,"om") == 0) timeel = "samplingTime"; else timeel = "time"; psNode = xmlNewNode(psNs, BAD_CAST timeel); xmlAddChild(psNode, msGML3TimePeriod(psNsGml, pszStart, pszEnd)); return psNode; } void msSOSAddPropertyNode(xmlNsPtr psNsSwe, xmlNsPtr psNsXLink, xmlNodePtr psParent, layerObj *lp, xmlNsPtr psNsGml, char *pszCompositePhenomenonId) { const char *pszValue = NULL; char *pszTmpVal = NULL, *pszFullName = NULL; xmlNodePtr psCompNode, psNode; int i, j=0; char szTmp[256]; const char *pszComponentBase = "urn:ogc:def:property:"; if (psParent && lp) { psNode = xmlNewChild(psParent, NULL, BAD_CAST "observedProperty", NULL); psCompNode = xmlNewChild(psNode, psNsSwe, BAD_CAST "CompositePhenomenon", NULL); pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_id"); pszTmpVal = msStrdup(pszValue); if (pszCompositePhenomenonId != NULL) { /* unique value needs to be constructed */ pszTmpVal = msStringConcatenate(pszTmpVal, "_"); pszTmpVal = msStringConcatenate(pszTmpVal, pszCompositePhenomenonId); } if (pszTmpVal) { /*should always be true */ xmlNewNsProp(psCompNode, psNsGml, BAD_CAST "id", BAD_CAST pszTmpVal); msFree(pszTmpVal); } pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_name"); if (pszValue) psNode = xmlNewChild(psCompNode, psNsGml, BAD_CAST "name", BAD_CAST pszValue); /* add components */ /*assuming that the layer is opened and msLayerGetItems called*/ for(i=0; i<lp->numitems; i++) { pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_authority"); if (pszValue) pszTmpVal = msStrdup(pszValue); else pszTmpVal = msStrdup("OGC-SWE"); pszFullName = msStrdup(pszComponentBase); pszFullName = msStringConcatenate(pszFullName, pszTmpVal); free(pszTmpVal); pszFullName = msStringConcatenate(pszFullName, ":"); pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_version"); if 
(pszValue) pszTmpVal = msStrdup(pszValue); else pszTmpVal = msStrdup("1"); pszFullName = msStringConcatenate(pszFullName, pszTmpVal); free(pszTmpVal); pszFullName = msStringConcatenate(pszFullName, ":"); snprintf(szTmp, sizeof(szTmp), "%s_alias", lp->items[i]); pszValue = msOWSLookupMetadata(&(lp->metadata), "S", szTmp); if (pszValue) pszTmpVal = msStrdup(pszValue); else pszTmpVal = msStrdup(lp->items[i]); pszFullName = msStringConcatenate(pszFullName, pszTmpVal); psNode = xmlNewChild(psCompNode, psNsSwe, BAD_CAST "component", NULL); xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST pszFullName); free(pszFullName); free(pszTmpVal); j++; } pszTmpVal = msIntToString(j); xmlNewNsProp(psCompNode, NULL, BAD_CAST "dimension", BAD_CAST pszTmpVal); free(pszTmpVal); } } /************************************************************************/ /* msSOSAddGeometryNode */ /* */ /* Outout gml 2 gemptry nodes based on a shape. All logic comes */ /* from gmlWriteGeometry_GML2. Should be merged at one point if */ /* possible. */ /************************************************************************/ void msSOSAddGeometryNode(xmlNsPtr psNsGml, xmlNsPtr psNsMs, xmlNodePtr psParent, mapObj *map, layerObj *lp, shapeObj *psShape, const char *pszEpsg) { char *pszTmp = NULL; int i,j = 0; xmlNodePtr psPointNode, psNode, psLineNode, psPolygonNode; int *panOuterList = NULL, *panInnerList = NULL; if (psParent && psShape) { if (msProjectionsDiffer(&map->projection, &lp->projection) == MS_TRUE) { msProjectShape(&lp->projection, &map->projection, psShape); pszEpsg = msOWSGetEPSGProj(&(map->projection), &(lp->metadata), "SO", MS_TRUE); } switch(psShape->type) { case(MS_SHAPE_POINT): psNode = xmlNewChild(psParent, NULL, BAD_CAST "msGeometry", NULL); xmlSetNs(psNode, psNsMs); if (psShape->line[0].numpoints > 1) { psPointNode = xmlNewChild(psNode, NULL, BAD_CAST "MultiPoint", NULL); xmlSetNs(psPointNode, psNsGml); if (pszEpsg) xmlNewProp(psPointNode, BAD_CAST "srsName", BAD_CAST pszEpsg); } else psPointNode= psNode; /*add all points */ for(i=0; i<psShape->line[0].numpoints; i++) { psNode = xmlAddChild(psPointNode, msGML3Point(psNsGml, pszEpsg, NULL, psShape->line[0].point[i].x, psShape->line[0].point[i].y)); } break; case(MS_SHAPE_LINE): psNode = xmlNewChild(psParent, NULL, BAD_CAST "msGeometry", NULL); xmlSetNs(psNode,xmlNewNs(psNode, NULL, NULL)); if (psShape->numlines > 1) { psLineNode = xmlNewChild(psNode, NULL, BAD_CAST "MultiLineString", NULL); xmlSetNs(psLineNode,xmlNewNs(psLineNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); if (pszEpsg) xmlNewProp(psLineNode, BAD_CAST "srsName", BAD_CAST pszEpsg); } else psLineNode= psNode; for(i=0; i<psShape->numlines; i++) { if (psShape->numlines > 1) { psNode = xmlNewChild(psLineNode, NULL, BAD_CAST "lineStringMember", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); psNode = xmlNewChild(psNode, NULL, BAD_CAST "LineString", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); } else { psNode = xmlNewChild(psLineNode, NULL, BAD_CAST "LineString", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); } if (pszEpsg) xmlNewProp(psNode, BAD_CAST "srsName", BAD_CAST pszEpsg); pszTmp = NULL; for(j=0; j<psShape->line[i].numpoints; j++) { char *doubleTmp = msDoubleToString(psShape->line[i].point[j].x, MS_TRUE); pszTmp = msStringConcatenate(pszTmp, doubleTmp); pszTmp = msStringConcatenate(pszTmp, ","); free(doubleTmp); doubleTmp 
= msDoubleToString(psShape->line[i].point[j].y, MS_TRUE); pszTmp = msStringConcatenate(pszTmp, doubleTmp); pszTmp = msStringConcatenate(pszTmp, ","); free(doubleTmp); } psNode = xmlNewChild(psNode, NULL, BAD_CAST "coordinates", BAD_CAST pszTmp); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); free(pszTmp); } break; case(MS_SHAPE_POLYGON): psNode = xmlNewChild(psParent, NULL, BAD_CAST "msGeometry", NULL); xmlSetNs(psNode,xmlNewNs(psNode, NULL, NULL)); if (psShape->numlines > 1) { psPolygonNode = xmlNewChild(psNode, NULL, BAD_CAST "MultiPolygon", NULL); xmlSetNs(psPolygonNode, xmlNewNs(psPolygonNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); if (pszEpsg) xmlNewProp(psPolygonNode, BAD_CAST "srsName", BAD_CAST pszEpsg); } else psPolygonNode= psNode; panOuterList = msGetOuterList(psShape); for(i=0; i<psShape->numlines; i++) { if(panOuterList[i] != MS_TRUE) continue; panInnerList = msGetInnerList(psShape, i, panOuterList); if (psShape->numlines > 1) { psNode = xmlNewChild(psPolygonNode, NULL, BAD_CAST "polygonMember", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); psNode = xmlNewChild(psNode, NULL, BAD_CAST "Polygon", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); } else { psNode = xmlNewChild(psPolygonNode, NULL, BAD_CAST "Polygon", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); } if (pszEpsg) xmlNewProp(psNode, BAD_CAST "srsName", BAD_CAST pszEpsg); psNode = xmlNewChild(psNode, NULL, BAD_CAST "outerBoundaryIs", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); psNode = xmlNewChild(psNode, NULL, BAD_CAST "LinearRing", NULL); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); pszTmp = NULL; for(j=0; j<psShape->line[i].numpoints; j++) { char *doubleTmp; doubleTmp = msDoubleToString(psShape->line[i].point[j].x, MS_TRUE); pszTmp = msStringConcatenate(pszTmp, doubleTmp); pszTmp = msStringConcatenate(pszTmp, ","); free(doubleTmp); doubleTmp = msDoubleToString(psShape->line[i].point[j].y, MS_TRUE); pszTmp = msStringConcatenate(pszTmp, doubleTmp); pszTmp = msStringConcatenate(pszTmp, " "); free(doubleTmp); } psNode = xmlNewChild(psNode, NULL, BAD_CAST "coordinates", BAD_CAST pszTmp); xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); free(pszTmp); if (panInnerList) free(panInnerList); } if (panOuterList) free(panOuterList); break; default: break; } } } /************************************************************************/ /* void msSOSAddDataBlockDefinition(xmlNodePtr psParent, */ /* layerObj *lp) */ /* */ /* Add a databalock used for GetObservation request. */ /************************************************************************/ void msSOSAddDataBlockDefinition(xmlNsPtr psNsSwe, xmlNodePtr psParent, layerObj *lp) { xmlNodePtr psNode, psRecordNode, psCompNode, psSubNode, psEncNode; const char *pszDefinition = NULL, *pszUom=NULL, *pszValue=NULL, *pszName=NULL; char szTmp[100]; int i=0; char *pszTokenValue = NULL; char *pszBlockValue = NULL; const char *pszBlockSep=NULL, *pszTokenSep=NULL; if (psParent) { psNode = xmlNewChild(psParent, NULL, BAD_CAST "DataBlockDefinition", NULL); xmlSetNs(psNode, psNsSwe); /* -------------------------------------------------------------------- */ /* Add components. 
*/ /* -------------------------------------------------------------------- */ psCompNode = xmlNewChild(psNode, NULL, BAD_CAST "components", NULL); psEncNode = xmlNewChild(psNode, NULL, BAD_CAST "encoding", NULL); psRecordNode = xmlNewChild(psCompNode, NULL, BAD_CAST "DataRecord", NULL); /*always add a time field if timeitem is defined*/ if (msOWSLookupMetadata(&(lp->metadata), "SO", "timeitem")) { psNode = xmlNewChild(psRecordNode, NULL, BAD_CAST "field", NULL); xmlNewNsProp(psNode, NULL, BAD_CAST "name", BAD_CAST "time"); psNode = xmlNewChild(psNode, NULL, BAD_CAST "Time", NULL); xmlNewNsProp(psNode, NULL, BAD_CAST "definition", BAD_CAST "urn:ogc:phenomenon:time:iso8601"); } /*add all other fields*/ /*assuming that the layer is open */ for(i=0; i<lp->numitems; i++) { snprintf(szTmp, sizeof(szTmp), "%s_alias", lp->items[i]); pszValue = msOWSLookupMetadata(&(lp->metadata), "S", szTmp); if (pszValue) { psNode = xmlNewChild(psRecordNode, NULL, BAD_CAST "field", NULL); /* check if there is an alias/full name used */ snprintf(szTmp, sizeof(szTmp), "%s_alias", lp->items[i]); pszName = msOWSLookupMetadata(&(lp->metadata), "S", szTmp); if (!pszName) pszName = lp->items[i]; xmlNewNsProp(psNode, NULL, BAD_CAST "name", BAD_CAST pszName); psNode = xmlNewChild(psNode, NULL, BAD_CAST "Quantity", NULL); /* get definition and uom */ snprintf(szTmp, sizeof(szTmp), "%s_definition", lp->items[i]); pszDefinition = msOWSLookupMetadata(&(lp->metadata), "S", szTmp); if (pszDefinition == NULL) pszDefinition = "urn:ogc:object:definition"; xmlNewNsProp(psNode, NULL, BAD_CAST "definition", BAD_CAST pszDefinition); snprintf(szTmp, sizeof(szTmp), "%s_uom", lp->items[i]); pszUom = msOWSLookupMetadata(&(lp->metadata), "S", szTmp); if (pszUom == NULL) pszUom = "urn:ogc:object:uom"; psNode = xmlNewChild(psNode, NULL, BAD_CAST "uom", NULL); xmlNewNsProp(psNode, NULL, BAD_CAST "code", BAD_CAST pszUom); } } /* -------------------------------------------------------------------- */ /* Add encoding block. */ /* -------------------------------------------------------------------- */ pszBlockSep = msOWSLookupMetadata(&(lp->map->web.metadata), "S", "encoding_blockSeparator"); pszTokenSep = msOWSLookupMetadata(&(lp->map->web.metadata), "S", "encoding_tokenSeparator"); psSubNode = xmlNewChild(psEncNode, NULL, BAD_CAST "TextBlock", NULL); if (pszTokenSep) pszTokenValue = msStringConcatenate(pszTokenValue, (char *)pszTokenSep); else pszTokenValue = msStringConcatenate(pszTokenValue, ","); xmlNewNsProp(psSubNode, NULL, BAD_CAST "tokenSeparator", BAD_CAST pszTokenValue); if (pszBlockSep) pszBlockValue = msStringConcatenate(pszBlockValue, (char *)pszBlockSep); else pszBlockValue = msStringConcatenate(pszBlockValue, "\n"); xmlNewNsProp(psSubNode, NULL, BAD_CAST "blockSeparator", BAD_CAST pszBlockValue); xmlNewNsProp(psSubNode, NULL, BAD_CAST "decimalSeparator", BAD_CAST "."); msFree(pszTokenValue); msFree(pszBlockValue); } } /************************************************************************/ /* msSOSAddMemberNode */ /* */ /* Add a memeber node corresponding to a feature. */ /* Assuming that the layer is opened and msLayerGetItems is */ /* called on it. 
*/ /************************************************************************/ void msSOSAddMemberNode(xmlNsPtr psNsGml, xmlNsPtr psNsOm, xmlNsPtr psNsSwe, xmlNsPtr psNsXLink, xmlNsPtr psNsMs, xmlNodePtr psParent, mapObj *map, layerObj *lp, int iFeatureId, const char *script_url, const char *opLayerName) { xmlNodePtr psObsNode, psNode, psLayerNode = NULL; const char *pszEpsg = NULL, *pszValue = NULL; int status,i,j; shapeObj sShape; char szTmp[256]; layerObj *lpfirst = NULL; const char *pszTimeField = NULL; char *pszTmp = NULL; char *pszOid = NULL; char *pszTime = NULL; char *pszValueShape = NULL; const char *pszFeatureId = NULL; if (psParent) { msInitShape(&sShape); status = msLayerGetShape(lp, &sShape, &(lp->resultcache->results[iFeatureId])); if(status != MS_SUCCESS) { xmlFreeNs(psNsOm); return; } psNode = xmlNewChild(psParent, NULL, BAD_CAST "member", NULL); psObsNode = xmlNewChild(psNode, NULL, BAD_CAST "Observation", BAD_CAST pszValue); pszFeatureId = msOWSLookupMetadata(&(lp->metadata), "OSG", "featureid"); if(pszFeatureId && msLayerGetItems(lp) == MS_SUCCESS) { /* find the featureid amongst the items for this layer */ for(j=0; j<lp->numitems; j++) { if(strcasecmp(lp->items[j], pszFeatureId) == 0) { /* found it */ break; } } if (j<lp->numitems) { pszOid = msStringConcatenate(pszOid, "o_"); pszOid = msStringConcatenate(pszOid, sShape.values[j]); xmlNewNsProp(psObsNode, psNsGml, BAD_CAST "id", BAD_CAST pszOid); } } /* order of elements is time, location, procedure, observedproperty featureofinterest, result */ /* time*/ pszTimeField = msOWSLookupMetadata(&(lp->metadata), "SO", "timeitem"); if (pszTimeField && sShape.values) { for(i=0; i<lp->numitems; i++) { if (strcasecmp(lp->items[i], pszTimeField) == 0) { if (sShape.values[i] && strlen(sShape.values[i]) > 0) { pszTime = msStringConcatenate(pszTime, sShape.values[i]); psNode = xmlNewChild(psObsNode, psNsOm, BAD_CAST "samplingTime", NULL); xmlAddChild(psNode, msGML3TimeInstant(psNsGml, pszTime)); msFree(pszTime); } break; } } } /*TODO add location*/ /*procedure*/ /* if a procedure_item is defined, we should extract the value for the attributes. If not dump what is found in the procedure metadata bug 2054*/ if ((pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "procedure_item"))) { lpfirst = msSOSGetFirstLayerForOffering(map, msOWSLookupMetadata(&(lp->metadata), "S", "offering_id"), msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_id")); if (lp != lpfirst) status = msLayerOpen(lpfirst); if (status == MS_SUCCESS && msLayerGetItems(lpfirst) == MS_SUCCESS) { for(i=0; i<lpfirst->numitems; i++) { if (strcasecmp(lpfirst->items[i], pszValue) == 0) { break; } } if (i < lpfirst->numitems) { snprintf(szTmp, sizeof(szTmp), "%s", "urn:ogc:def:procedure:"); pszTmp = msStringConcatenate(pszTmp, szTmp); pszValueShape = msEncodeHTMLEntities(sShape.values[i]); pszTmp = msStringConcatenate(pszTmp, pszValueShape); psNode = xmlNewChild(psObsNode, NULL, BAD_CAST "procedure", NULL); xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST pszTmp); msFree(pszTmp); pszTmp = NULL; msFree(pszValueShape); } /*else should we generate a warning !*/ if (lp != lpfirst) msLayerClose(lpfirst); } } else if ((pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "procedure"))) { if (! msOWSLookupMetadata(&(lp->metadata), "S", "procedure_item")) xmlAddSibling(psNode, xmlNewComment(BAD_CAST "WARNING: Optional metadata \"sos_procedure_item\" missing for sos:procedure. 
If you have more than 1 procedures, sos:procedure will output them incorrectly."));
      snprintf(szTmp, sizeof(szTmp), "%s", "urn:ogc:def:procedure:");
      pszTmp = msStringConcatenate(pszTmp, szTmp);
      pszTmp = msStringConcatenate(pszTmp, (char *)pszValue);
      psNode = xmlNewChild(psObsNode, NULL, BAD_CAST "procedure", NULL);
      xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST pszTmp);
      msFree(pszTmp);
      pszTmp = NULL;
    }

    /*observed property*/
    pszValue = msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_id");
    if (pszValue)
      msSOSAddPropertyNode(psNsSwe, psNsXLink, psObsNode, lp, psNsGml, pszOid);
    msFree(pszOid);
    pszOid = NULL;

    /*TODO add featureofinterest*/
    pszTmp = msStringConcatenate(pszTmp, (char *) script_url);
    pszTmp = msStringConcatenate(pszTmp, "service=WFS&version=1.1.0&request=DescribeFeatureType&typename=");
    pszTmp = msStringConcatenate(pszTmp, (char *) opLayerName);
    psNode = xmlNewChild(psObsNode, psNsOm, BAD_CAST "featureOfInterest", NULL);
    xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST pszTmp);
    msFree(pszTmp);
    pszTmp=NULL;

    /* add result : gml:featureMember of all selected elements */
    psNode = xmlNewChild(psObsNode, NULL, BAD_CAST "result", NULL);

    /*TODO should we add somewhere the units of the value :
      <om:result uom="units.xml#cm">29.00</om:result> */

#ifdef USE_PROJ
    if(msProjectionsDiffer(&(lp->projection), &(map->projection)))
      msProjectShape(&lp->projection, &map->projection, &sShape);
#endif
    psNode = xmlNewChild(psNode, psNsGml, BAD_CAST "featureMember", NULL);
    /* xmlSetNs(psNode,xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); */

    /*TODO : add namespaces like wfs (ms and a url to mapserver)? */
    psLayerNode = xmlNewChild(psNode, psNsMs, BAD_CAST lp->name, NULL);

    /* fetch gml:id */
    pszFeatureId = msOWSLookupMetadata(&(lp->metadata), "OSG", "featureid");
    if(pszFeatureId && msLayerOpen(lp) == MS_SUCCESS && msLayerGetItems(lp) == MS_SUCCESS)
      xmlSetNs(psLayerNode,psNsMs);

    /*bbox*/
#ifdef USE_PROJ
    pszEpsg = msOWSGetEPSGProj(&(map->projection), &(lp->metadata), "SO", MS_TRUE);
    if (!pszEpsg)
      pszEpsg = msOWSGetEPSGProj(&(lp->projection), &(lp->metadata), "SO", MS_TRUE);
    if (msProjectionsDiffer(&map->projection, &lp->projection) == MS_TRUE)
      msProjectRect(&lp->projection, &map->projection, &sShape.bounds);
#endif
    psNode = xmlAddChild(psLayerNode, msGML3BoundedBy(psNsGml, sShape.bounds.minx, sShape.bounds.miny, sShape.bounds.maxx, sShape.bounds.maxy, pszEpsg));

    /*geometry*/
    msSOSAddGeometryNode(psNsGml, psNsMs, psLayerNode, map, lp, &sShape, pszEpsg);

    /*attributes */
    /* TODO only output attributes where there is a sos_%s_alias (to be discussed)*/
    /* the first layer is the one that has to have all the metadata defined */
    lpfirst = msSOSGetFirstLayerForOffering(map, msOWSLookupMetadata(&(lp->metadata), "S", "offering_id"), msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_id"));

    if (lpfirst && msLayerOpen(lpfirst) == MS_SUCCESS && msLayerGetItems(lpfirst) == MS_SUCCESS) {
      for(i=0; i<lpfirst->numitems; i++) {
        snprintf(szTmp, sizeof(szTmp), "%s_alias", lpfirst->items[i]);
        pszValue = msOWSLookupMetadata(&(lpfirst->metadata), "S", szTmp);
        if (pszValue) {
          for (j=0; j<lp->numitems; j++) {
            if (strcasecmp(lpfirst->items[i], lp->items[j]) == 0) {
              /*if there is an alias used, use it to output the parameter name : eg "sos_AMMON_DIS_alias" "Ammonia" */
              snprintf(szTmp, sizeof(szTmp), "%s_alias", lpfirst->items[i]);
              pszValue = msOWSLookupMetadata(&(lpfirst->metadata), "S", szTmp);
              pszValueShape = msEncodeHTMLEntities(sShape.values[j]);
              if (pszValue) {
                pszTmp = msEncodeHTMLEntities(pszValue);
                psNode = xmlNewChild(psLayerNode, psNsMs, BAD_CAST pszValue, BAD_CAST pszValueShape);
                free(pszTmp);
              } else {
                pszTmp = msEncodeHTMLEntities(lpfirst->items[i]);
                psNode = xmlNewChild(psLayerNode, psNsMs, BAD_CAST lpfirst->items[i], BAD_CAST pszValueShape);
                free(pszTmp);
              }
              free(pszValueShape);
              xmlSetNs(psNode,psNsMs);
              break;
            }
          }
        }
      }
      if (lp->index != lpfirst->index)
        msLayerClose(lpfirst);
    }
    msFreeShape(&sShape);
  }
}

/************************************************************************/
/*                      msSOSReturnMemberResult                         */
/*                                                                      */
/*      Add a result string to the result node (used for                */
/*      GetObservation using "observation" as the response output).     */
/*      Assuming here that the layer is already opened.                 */
/************************************************************************/
char* msSOSReturnMemberResult(layerObj *lp, int iFeatureId, char **ppszProcedure)
{
  char *pszFinalValue = NULL;
  shapeObj sShape;
  int i, j, status;
  layerObj *lpfirst;
  const char *pszTimeField = NULL, *pszValue=NULL, *pszProcedureField=NULL;
  char *pszValueShape = NULL;
  char szTmp[100];
  const char *pszSep=NULL;

  msInitShape(&sShape);
  status = msLayerGetShape(lp, &sShape, &(lp->resultcache->results[iFeatureId]));
  if(status != MS_SUCCESS)
    return NULL;

  pszTimeField = msOWSLookupMetadata(&(lp->metadata), "SO", "timeitem");
  if (pszTimeField && sShape.values) {
    for(i=0; i<lp->numitems; i++) {
      if (strcasecmp(lp->items[i], pszTimeField) == 0) {
        pszFinalValue = msStringConcatenate(pszFinalValue, sShape.values[i]);
        break;
      }
    }
  }
  if (ppszProcedure) {
    pszProcedureField = msOWSLookupMetadata(&(lp->metadata), "S", "procedure_item");
    for(i=0; i<lp->numitems; i++) {
      if (strcasecmp(lp->items[i], pszProcedureField) == 0) {
        (*ppszProcedure) = msStrdup( sShape.values[i]);
        break;
      }
    }
  }

  /* the first layer is the one that has to have all the metadata defined */
  lpfirst = msSOSGetFirstLayerForOffering(lp->map, msOWSLookupMetadata(&(lp->metadata), "S", "offering_id"), msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_id"));

  if (lp == lpfirst || (lpfirst && msLayerOpen(lpfirst) == MS_SUCCESS && msLayerGetItems(lpfirst) == MS_SUCCESS)) {
    pszSep = msOWSLookupMetadata(&(lp->map->web.metadata), "S", "encoding_tokenSeparator");
    for(i=0; i<lpfirst->numitems; i++) {
      snprintf(szTmp, sizeof(szTmp), "%s_alias", lpfirst->items[i]);
      pszValue = msOWSLookupMetadata(&(lpfirst->metadata), "S", szTmp);
      if (pszValue) {
        for (j=0; j<lp->numitems; j++) {
          if (strcasecmp(lpfirst->items[i], lp->items[j]) == 0) {
            pszValueShape = msEncodeHTMLEntities(sShape.values[j]);
            if (pszFinalValue) {
              if (pszSep)
                pszFinalValue = msStringConcatenate(pszFinalValue, (char *)pszSep);
              else
                pszFinalValue = msStringConcatenate(pszFinalValue, ",");
            }
            pszFinalValue = msStringConcatenate(pszFinalValue, pszValueShape);
            msFree(pszValueShape);
          }
        }
      }
    }
  }
  msFreeShape(&sShape);

  return pszFinalValue;
}

/************************************************************************/
/*                    msSOSAddMemberNodeObservation                     */
/*                                                                      */
/*      Add a member node used for getObservation request using         */
/*      Observation as the result format.
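      The returned Observation carries samplingTime (from
      sos_offering_timeextent), procedure, the observed property and a
      resultDefinition built by msSOSAddDataBlockDefinition(); the caller
      then appends a result element and fills it with rows produced by
      msSOSReturnMemberResult(), separated by sos_encoding_blockSeparator
      (newline by default).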
*/ /************************************************************************/ xmlNodePtr msSOSAddMemberNodeObservation(xmlNsPtr psNsGml, xmlNsPtr psNsSos, xmlNsPtr psNsOm, xmlNsPtr psNsSwe, xmlNsPtr psNsXLink, xmlNodePtr psParent, mapObj *map, layerObj *lp, const char *pszProcedure) { char *pszTmp = NULL; xmlNodePtr psNode=NULL, psObsNode=NULL, psMemberNode=NULL; layerObj *lpfirst; const char *value = NULL; /*always featch the first layer that has the same offering id and observered propery. This allows to only define all the attributes and components on the first layer if the user wants to present several mapserver layers as the same offering.*/ lpfirst = msSOSGetFirstLayerForOffering(map, msOWSLookupMetadata(&(lp->metadata), "S", "offering_id"), msOWSLookupMetadata(&(lp->metadata), "S", "observedproperty_id")); if (psParent) { psMemberNode = xmlNewChild(psParent, NULL, BAD_CAST "member", NULL); psObsNode = xmlNewChild(psMemberNode, NULL, BAD_CAST "Observation", NULL); /*time*/ /* ??TODO : sampling time is a manadatory element but uses a non mandatory metadata sos_offering_timeextent */ value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_timeextent"); if (value) { char **tokens; int n; char *pszEndTime = NULL; tokens = msStringSplit(value, '/', &n); if (tokens==NULL || (n != 1 && n!=2)) { msSetError(MS_SOSERR, "Wrong number of arguments for sos_offering_timeextent.", "msSOSGetObservation()"); msSOSException(map, "sos_offering_timeextent", "InvalidParameterValue"); return NULL; } if (n == 2) /* end time is empty. It is going to be set as "now*/ pszEndTime = tokens[1]; psNode = xmlAddChild(psObsNode, msSOSAddTimeNode(psNsOm, psNsGml, tokens[0], pszEndTime)); msFreeCharArray(tokens, n); } /* procedure */ if (pszProcedure) { /*this should always be true since procedure is a manadtory element*/ if (! msOWSLookupMetadata(&(lp->metadata), "S", "procedure_item") && msOWSLookupMetadata(&(lp->metadata), "S", "procedure")) xmlAddSibling(psNode, xmlNewComment(BAD_CAST "WARNING: Optional metadata \"sos_procedure_item\" missing for sos:procedure. If you have more than 1 procedures, sos:procedure will output them incorrectly.")); pszTmp = msStringConcatenate(pszTmp, "urn:ogc:def:procedure:"); pszTmp = msStringConcatenate(pszTmp, (char *)pszProcedure); psNode = xmlNewChild(psObsNode, NULL, BAD_CAST "procedure", NULL); /* xmlNewNsProp(psNode, xmlNewNs(NULL, BAD_CAST "http://www.w3.org/1999/xlink", BAD_CAST "xlink"), BAD_CAST "href", BAD_CAST pszTmp); */ xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST pszTmp); msFree(pszTmp); pszTmp = NULL; } /*observed propery and components*/ if (lp != lpfirst && msLayerOpen(lpfirst) == MS_SUCCESS && msLayerGetItems(lpfirst) == MS_SUCCESS) { msSOSAddPropertyNode(psNsSwe, psNsXLink, psObsNode, lpfirst, psNsGml, NULL); msLayerClose(lpfirst); } else msSOSAddPropertyNode(psNsSwe, psNsXLink, psObsNode, lpfirst, psNsGml, NULL); /* result definition*/ psNode = xmlNewChild(psObsNode, NULL, BAD_CAST "resultDefinition", NULL); msSOSAddDataBlockDefinition(psNsSwe, psNode, lpfirst); } return psObsNode; } /************************************************************************/ /* msSOSParseTimeGML */ /* */ /* Utility function to convert a gml time value to a */ /* string. 
Supported gml times are : */ /* */ /* - <gml:TimePeriod> */ /* <gml:beginPosition>2005-09-01T11:54:32</gml:beginPosition> */ /* <gml:endPosition>2005-09-02T14:54:32</gml:endPosition> */ /* </gml:TimePeriod> */ /* This will be converted to startime/endtime */ /* */ /* - <gml:TimeInstant> */ /* <gml:timePosition>2003-02-13T12:28-08:00</gml:timePosition>*/ /* </gml:TimeInstant> */ /* This will retunr the timevalue as a string. */ /* */ /* The caller of the function should free the return value. */ /************************************************************************/ char *msSOSParseTimeGML(char *pszGmlTime) { char *pszReturn = NULL, *pszBegin = NULL, *pszEnd = NULL; CPLXMLNode *psRoot=NULL, *psChild=NULL; CPLXMLNode *psTime=NULL, *psBegin=NULL, *psEnd=NULL; struct tm tm_struct; if (pszGmlTime) { psRoot = CPLParseXMLString(pszGmlTime); if(!psRoot) return NULL; CPLStripXMLNamespace(psRoot, "gml", 1); if (psRoot->eType == CXT_Element && (EQUAL(psRoot->pszValue,"TimePeriod") || EQUAL(psRoot->pszValue,"TimeInstant"))) { if (EQUAL(psRoot->pszValue,"TimeInstant")) { psChild = psRoot->psChild; if (psChild && EQUAL(psChild->pszValue,"timePosition")) { psTime = psChild->psNext; if (psTime && psTime->pszValue && psTime->eType == CXT_Text) { if (msParseTime(psTime->pszValue, &tm_struct) == MS_TRUE) pszReturn = msStrdup(psTime->pszValue); } } } else { psBegin = psRoot->psChild; if (psBegin) psEnd = psBegin->psNext; if (psBegin && EQUAL(psBegin->pszValue, "beginPosition") && psEnd && EQUAL(psEnd->pszValue, "endPosition")) { if (psBegin->psChild && psBegin->psChild->pszValue && psBegin->psChild->eType == CXT_Text) pszBegin = msStrdup( psBegin->psChild->pszValue); if (psEnd->psChild && psEnd->psChild->pszValue && psEnd->psChild->eType == CXT_Text) pszEnd = msStrdup(psEnd->psChild->pszValue); if (pszBegin && pszEnd) { if (msParseTime(pszBegin, &tm_struct) == MS_TRUE && msParseTime(pszEnd, &tm_struct) == MS_TRUE) { pszReturn = msStrdup(pszBegin); pszReturn = msStringConcatenate(pszReturn, "/"); pszReturn = msStringConcatenate(pszReturn, pszEnd); } } msFree(pszBegin); msFree(pszEnd); } } } } CPLDestroyXMLNode(psRoot); return pszReturn; } /************************************************************************/ /* msSOSGetCapabilities */ /* */ /* getCapabilities request handler. 
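      Negotiates ACCEPTVERSIONS and UPDATESEQUENCE, then writes the
      Capabilities document (ServiceIdentification, ServiceProvider,
      OperationsMetadata, Filter_Capabilities and a Contents section with
      one ObservationOffering per distinct sos_offering_id) to stdout.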
*/ /************************************************************************/ int msSOSGetCapabilities(mapObj *map, sosParamsObj *sosparams, cgiRequestObj *req, owsRequestObj *ows_request) { xmlDocPtr psDoc = NULL; /* document pointer */ xmlNodePtr psRootNode, psMainNode, psNode; xmlNodePtr psOfferingNode; char *schemalocation = NULL; char *xsi_schemaLocation = NULL; char *script_url=NULL; const char *updatesequence=NULL; int i,j,k; layerObj *lp = NULL, *lpTmp = NULL; const char *value = NULL; char *pszTmp = NULL; char *pszProcedure = NULL; char szTmp[256]; /* array of offering */ char **papsOfferings = NULL; int nOfferings =0, nCurrentOff = -1; int nProperties = 0; char **papszProperties = NULL; int iItemPosition = -1; shapeObj sShape; int status; /* for each layer it indicates the indice to be used in papsOfferings (to associate it with the offering) */ int *panOfferingLayers = NULL; char **papsProcedures = NULL; int nDistinctProcedures =0; xmlNsPtr psNsGml = NULL; xmlNsPtr psNsSos = NULL; xmlNsPtr psNsOws = NULL; xmlNsPtr psNsOgc = NULL; xmlNsPtr psNsXLink = NULL; xmlNsPtr psNsSwe = NULL; xmlChar *buffer = NULL; int size = 0; msIOContext *context = NULL; int ows_version = OWS_1_1_0; int sosSupportedVersions[] = {OWS_1_0_0}; int sosNumSupportedVersions = 1; /* acceptversions: do OWS Common style of version negotiation */ if (sosparams->pszAcceptVersions) { char **tokens; int i, j, k=-1; tokens = msStringSplit(sosparams->pszAcceptVersions, ',', &j); for (i=0; i<j; i++) { int iVersion = 0; iVersion = msOWSParseVersionString(tokens[i]); if (iVersion == -1) { msSetError(MS_SOSERR, "Invalid version format : %s.", "msSOSGetCapabilities()", tokens[i]); msFreeCharArray(tokens, j); return msSOSException(map, "acceptversions", "VersionNegotiationFailed"); } /* negotiate version */ k = msOWSCommonNegotiateVersion(iVersion, sosSupportedVersions, sosNumSupportedVersions); if (k != -1) break; } msFreeCharArray(tokens, j); if(k == -1) { msSetError(MS_SOSERR, "ACCEPTVERSIONS list (%s) does not match supported versions (%s)", "msSOSGetCapabilities()", sosparams->pszAcceptVersions, pszSOSVersion); return msSOSException(map, "acceptversions", "VersionNegotiationFailed"); } } /* updateSequence */ updatesequence = msOWSLookupMetadata(&(map->web.metadata), "SO", "updatesequence"); if (sosparams->pszUpdateSequence != NULL) { i = msOWSNegotiateUpdateSequence(sosparams->pszUpdateSequence, updatesequence); if (i == 0) { /* current */ msSetError(MS_SOSERR, "UPDATESEQUENCE parameter (%s) is equal to server (%s)", "msSOSGetCapabilities()", sosparams->pszUpdateSequence, updatesequence); return msSOSException(map, "updatesequence", "CurrentUpdateSequence"); } if (i > 0) { /* invalid */ msSetError(MS_SOSERR, "UPDATESEQUENCE parameter (%s) is higher than server (%s)", "msSOSGetCapabilities()", sosparams->pszUpdateSequence, updatesequence); return msSOSException(map, "updatesequence", "InvalidUpdateSequence"); } } psDoc = xmlNewDoc(BAD_CAST "1.0"); psRootNode = xmlNewNode(NULL, BAD_CAST "Capabilities"); xmlDocSetRootElement(psDoc, psRootNode); psNsGml = xmlNewNs(NULL, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml"); psNsSos = xmlNewNs(NULL, BAD_CAST pszSOSNamespaceUri, BAD_CAST pszSOSNamespacePrefix); psNsOgc = xmlNewNs(NULL, BAD_CAST MS_OWSCOMMON_OGC_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_OGC_NAMESPACE_PREFIX); psNsSwe = xmlNewNs(NULL, BAD_CAST "http://www.opengis.net/swe/1.0.1", BAD_CAST "swe"); /* name spaces */ xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); 
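  /* The namespace handles created above with a NULL node (gml, sos, ogc, swe)
     are only used to qualify attributes and children added further down and
     are freed at the end of this function; the xmlNewNs(psRootNode, ...)
     calls below attach the visible xmlns declarations to the root element. */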
xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/om/1.0", BAD_CAST "om")); psNsOws = xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/ows/1.1", BAD_CAST "ows"); xmlSetNs(psRootNode, psNsOws ); xmlSetNs(psRootNode,xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/swe/1.0.1", BAD_CAST "swe")); psNsXLink = xmlNewNs(psRootNode, BAD_CAST MS_OWSCOMMON_W3C_XLINK_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_W3C_XLINK_NAMESPACE_PREFIX); xmlSetNs(psRootNode, psNsXLink ); xmlSetNs(psRootNode,xmlNewNs(psRootNode, BAD_CAST MS_OWSCOMMON_W3C_XSI_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_W3C_XSI_NAMESPACE_PREFIX)); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST MS_OWSCOMMON_OGC_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_OGC_NAMESPACE_PREFIX)); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST pszSOSNamespaceUri, BAD_CAST pszSOSNamespacePrefix)); /*version fixed for now*/ xmlNewProp(psRootNode, BAD_CAST "version", BAD_CAST pszSOSVersion); value = msOWSLookupMetadata(&(map->web.metadata), "SO", "updatesequence"); if (value) xmlNewProp(psRootNode, BAD_CAST "updateSequence", BAD_CAST value); /*schema fixed*/ schemalocation = msEncodeHTMLEntities( msOWSGetSchemasLocation(map) ); xsi_schemaLocation = msStrdup(pszSOSNamespaceUri); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, " "); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, schemalocation); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, "/sos/"); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, (char *)pszSOSVersion); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, "/sosGetCapabilities.xsd"); xmlNewNsProp(psRootNode, NULL, BAD_CAST "xsi:schemaLocation", BAD_CAST xsi_schemaLocation); xmlAddChild(psRootNode, xmlNewComment(BAD_CAST msGetVersion())); /*service identification*/ xmlAddChild(psRootNode, msOWSCommonServiceIdentification(psNsOws, map, "SOS", pszSOSVersion, "SO", NULL)); /*service provider*/ xmlAddChild(psRootNode, msOWSCommonServiceProvider(psNsOws, psNsXLink, map, "SO", NULL)); /*operation metadata */ if ((script_url=msOWSGetOnlineResource(map, "SO", "onlineresource", req)) == NULL) return msSOSException(map, "NoApplicableCode", "NoApplicableCode"); psMainNode = xmlAddChild(psRootNode, msOWSCommonOperationsMetadata(psNsOws)); psNode = xmlAddChild(psMainNode, msOWSCommonOperationsMetadataOperation(psNsOws,psNsXLink,"GetCapabilities", OWS_METHOD_GETPOST, (char *) script_url)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "service", "SOS")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "version", (char *)pszSOSVersion)); if (msOWSRequestIsEnabled(map, NULL, "S", "DescribeSensor", MS_TRUE)) { psNode = xmlAddChild(psMainNode, msOWSCommonOperationsMetadataOperation(psNsOws,psNsXLink,"DescribeSensor", OWS_METHOD_GETPOST, (char *) script_url)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "service", "SOS")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "version", (char *)pszSOSVersion)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "sensorid", "urn:ogc:object:procedure")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "outputFormat", (char *)pszSOSDescribeSensorMimeType)); } if (msOWSRequestIsEnabled(map, NULL, "S", "DescribeObservationType", MS_TRUE)) { psNode = 
xmlAddChild(psMainNode, msOWSCommonOperationsMetadataOperation(psNsOws,psNsXLink,"DescribeObservationType", OWS_METHOD_GETPOST, (char *) script_url)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "service", "SOS")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "version", (char *)pszSOSVersion)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "observedproperty", "urn:ogc:object:observedproperty")); } if (msOWSRequestIsEnabled(map, NULL, "S", "GetObservation", MS_TRUE)) { psNode = xmlAddChild(psMainNode, msOWSCommonOperationsMetadataOperation(psNsOws,psNsXLink,"GetObservation", OWS_METHOD_GETPOST, (char *) script_url)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "service", "SOS")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "version", (char *)pszSOSVersion)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "offering", "urn:ogc:object:offering")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "observedproperty", "urn:ogc:object:observedproperty")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "eventtime", "sos:time")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "procedure", "urn:ogc:object:sensor")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "featureofinterest", "gml:location")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "result", "ogc:Filter")); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "responseFormat", (char *)pszSOSGetObservationMimeType)); xmlAddChild(psNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Parameter", "resultModel", "Observation,Measurement")); } value = msOWSLookupMetadata(&(map->web.metadata), "SO", "maxfeatures"); if (value) { psNode = xmlAddChild(psMainNode, msOWSCommonOperationsMetadataDomainType(ows_version, psNsOws,"Constraint", "DefaultMaxFeatures", (char *)value)); } /*<ogc:Filter_Capabilities> */ xmlAddChild(psRootNode, FLTGetCapabilities(psNsSos, psNsOgc, MS_TRUE)); /*Offerings */ psNode = xmlNewChild(psRootNode, NULL, BAD_CAST "Contents", NULL); psMainNode = xmlNewChild(psNode, NULL, BAD_CAST "ObservationOfferingList", NULL); /*go through the layers and check for metadata sos_offering_id. One or more layers could have the same offering id. In that case they are adverized as the same offering. 
The first layer that has*/ if (map->numlayers) { papsOfferings = (char **)malloc(sizeof(char *)*map->numlayers); panOfferingLayers = (int *)malloc(sizeof(int)*map->numlayers); for (i=0; i<map->numlayers; i++) panOfferingLayers[i] = -1; for (i=0; i<map->numlayers; i++) { lp = (GET_LAYER(map, i)); if (lp->status == MS_DELETE) continue; value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_id"); if (value && (msIntegerInArray(lp->index, ows_request->enabled_layers, ows_request->numlayers))) { nCurrentOff = -1; for (j=0; j<nOfferings; j++) { if (strcasecmp(value, papsOfferings[j]) == 0) { nCurrentOff = j; break; } } if (nCurrentOff >= 0) /* existing offering */ panOfferingLayers[i] = nCurrentOff; else { /*new offering */ papsOfferings[nOfferings] = msStrdup(value); panOfferingLayers[i] = nOfferings; nOfferings++; } } } if (nOfferings > 0) { for (i=0; i<nOfferings; i++) { psOfferingNode = xmlNewChild(psMainNode, NULL,BAD_CAST "ObservationOffering", NULL); xmlNewNsProp(psOfferingNode, psNsGml, BAD_CAST "id", BAD_CAST papsOfferings[i]); for (j=0; j<map->numlayers; j++) { if (panOfferingLayers[j] == i) /*first layer of the offering */ break; } /*description*/ value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_description"); if (value) psNode = xmlNewChild(psOfferingNode, psNsGml, BAD_CAST "description", BAD_CAST value); else xmlAddSibling(psNode, xmlNewComment(BAD_CAST "WARNING: Optional metadata \"sos_offering_description\" missing for gml:description")); /*name*/ lp = (GET_LAYER(map, j)); /*first layer*/ value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_name"); if (value) psNode = xmlNewChild(psOfferingNode, psNsGml, BAD_CAST "name", BAD_CAST value); else xmlAddSibling(psNode, xmlNewComment(BAD_CAST "WARNING: Optional metadata \"sos_offering_name\" missing for gml:name")); /* srsName */ value = msOWSLookupMetadata(&(map->web.metadata), "SO", "srs"); if (value) msLibXml2GenerateList(psOfferingNode, psNsGml, "srsName", value, ' '); else xmlAddSibling(psNode, xmlNewComment(BAD_CAST "WARNING: Required metadata \"sos_srs\" missing for gml:srsName")); /*bounding box */ /*TODO : if sos_offering_extent does not exist compute extents Check also what happen if epsg not present */ value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_extent"); if (value) { char **tokens; int n; tokens = msStringSplit(value, ',', &n); if (tokens==NULL || n != 4) { msSetError(MS_SOSERR, "Wrong number of arguments for sos_offering_extent.", "msSOSGetCapabilities()"); return msSOSException(map, "sos_offering_extent", "InvalidParameterValue"); } value = msOWSGetEPSGProj(&(lp->projection), &(lp->metadata), "SO", MS_TRUE); if (value) psNode = xmlAddChild(psOfferingNode, msGML3BoundedBy(psNsGml, atof(tokens[0]), atof(tokens[1]), atof(tokens[2]), atof(tokens[3]), value)); msFreeCharArray(tokens, n); } /* intended application */ value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_intendedapplication"); if (value) psNode = xmlNewChild(psOfferingNode, psNsSos, BAD_CAST "intendedApplication", BAD_CAST value); else xmlAddSibling(psNode, xmlNewComment(BAD_CAST "WARNING: Optional metadata \"sos_offering_intendedapplication\" missing for sos:intendedApplication")); /*time*/ value = msOWSLookupMetadata(&(lp->metadata), "S", "offering_timeextent"); if (value) { char **tokens; int n; char *pszEndTime = NULL; tokens = msStringSplit(value, '/', &n); if (tokens==NULL || (n != 1 && n!=2)) { msSetError(MS_SOSERR, "Wrong number of arguments for sos_offering_timeextent.", "msSOSGetCapabilities()"); return 
msSOSException(map, "sos_offering_timeextent", "InvalidParameterValue"); } if (n == 2) /* end time is empty. It is going to be set as "now*/ pszEndTime = tokens[1]; psNode = xmlAddChild(psOfferingNode, msSOSAddTimeNode(psNsSos, psNsGml, tokens[0], pszEndTime)); msFreeCharArray(tokens, n); } /*procedure : output all procedure links for the offering */ for (j=0; j<map->numlayers; j++) { if (panOfferingLayers[j] == i) { value = msOWSLookupMetadata(&(GET_LAYER(map, j)->metadata), "S", "procedure"); if (value && strlen(value) > 0) { /*value could be a list of procedure*/ char **tokens; int n = 0; tokens = msStringSplit(value, ' ', &n); for (k=0; k<n; k++) { /*TODO review the urn output */ snprintf(szTmp, sizeof(szTmp), "%s", "urn:ogc:def:procedure:"); pszTmp = msStringConcatenate(pszTmp, szTmp); pszTmp = msStringConcatenate(pszTmp, tokens[k]); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "procedure", NULL); /* xmlNewNsProp(psNode, xmlNewNs(NULL, BAD_CAST "http://www.w3.org/1999/xlink", BAD_CAST "xlink"), BAD_CAST "href", BAD_CAST pszTmp); */ xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST pszTmp); msFree(pszTmp); pszTmp = NULL; } msFreeCharArray(tokens, n); } else if ((value = msOWSLookupMetadata(&(GET_LAYER(map,j)->metadata), "S", "procedure_item"))) { /* if a procedure_item is used, it means that the procedure (or sensor) need to be extracted from the data. Thus we need to query the layer and get the values from each feature */ lpTmp = GET_LAYER(map,j); if (lpTmp->template == NULL) lpTmp->template = msStrdup("ttt"); map->query.type = MS_QUERY_BY_RECT; map->query.mode = MS_QUERY_MULTIPLE; map->query.layer = j; map->query.rect = map->extent; msQueryByRect(map); /*check if the attribute specified in the procedure_item is available on the layer*/ iItemPosition = -1; if (msLayerGetItems(lpTmp) == MS_SUCCESS && lpTmp->resultcache && lpTmp->resultcache->numresults > 0) { for(k=0; k<lpTmp->numitems; k++) { if (strcasecmp(lpTmp->items[k], value) == 0) { iItemPosition = k; break; } } if (iItemPosition == -1) { msSetError(MS_SOSERR, "procedure_item %s could not be found on the layer %s", "msSOSGetCapabilities()", value, lpTmp->name); return msSOSException(map, "mapserv", "NoApplicableCode"); } /*for each selected feature, grab the value of the prodedire_item*/ /* do not duplicate sensor ids if they are the same */ /*keep list of distinct procedures*/ papsProcedures = (char **)malloc(sizeof(char *) * lpTmp->resultcache->numresults); nDistinctProcedures = 0; for(k=0; k<lpTmp->resultcache->numresults; k++) papsProcedures[k] = NULL; for(k=0; k<lpTmp->resultcache->numresults; k++) { msInitShape(&sShape); status = msLayerGetShape(lp, &sShape, &(lpTmp->resultcache->results[k])); if(status != MS_SUCCESS) continue; if (sShape.values[iItemPosition]) { pszProcedure = msStringConcatenate(pszProcedure, sShape.values[iItemPosition]); if (!_IsInList(papsProcedures, nDistinctProcedures, pszProcedure)) { papsProcedures[nDistinctProcedures] = msStrdup(pszProcedure); nDistinctProcedures++; snprintf(szTmp, sizeof(szTmp), "%s", "urn:ogc:def:procedure:"); pszTmp = msStringConcatenate(pszTmp, szTmp); pszTmp = msStringConcatenate(pszTmp, pszProcedure); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "procedure", NULL); xmlNewNsProp(psNode, xmlNewNs(NULL, BAD_CAST "http://www.w3.org/1999/xlink", BAD_CAST "xlink"), BAD_CAST "href", BAD_CAST pszTmp); msFree(pszTmp); pszTmp = NULL; } msFree(pszProcedure); pszProcedure = NULL; } } for(k=0; k<lpTmp->resultcache->numresults; k++) if (papsProcedures[k] != NULL) 
msFree(papsProcedures[k]); msFree(papsProcedures); } else { msSetError(MS_SOSERR, "Invalid procedure %s", "msSOSGetCapabilities()", value); return msSOSException(map, "procedure", "InvalidParameterValue"); } } else { msSetError(MS_SOSERR, "Mandatory metadata procedure_item could not be found on the layer %s", "msSOSGetCapabilities()", GET_LAYER(map,j)->name); return msSOSException(map, "mapserv", "NoApplicableCode"); } } } /*observed property */ /* observed property are equivalent to layers. We can group sevaral layers using the same sos_observedproperty_id. The components are the attributes. Components are exposed using the metadata sos_%s_aliasl */ nProperties = 0; papszProperties = (char **)malloc(sizeof(char *)*map->numlayers); for (j=0; j<map->numlayers; j++) { if (panOfferingLayers[j] == i) { if ((value = msOWSLookupMetadata(&(GET_LAYER(map, j)->metadata), "S", "observedproperty_id"))) { for (k=0; k<nProperties; k++) { if (strcasecmp(value, papszProperties[k]) == 0) break; } if (k == nProperties) { /*not found*/ papszProperties[nProperties] = msStrdup(value); nProperties++; lpTmp = GET_LAYER(map, j); if (msLayerOpen(lpTmp) == MS_SUCCESS && msLayerGetItems(lpTmp) == MS_SUCCESS) { msSOSAddPropertyNode(psNsSwe, psNsXLink, psOfferingNode, lpTmp, psNsGml, NULL); msLayerClose(lpTmp); } } } } } for (j=0; j<nProperties; j++) free(papszProperties[j]); free(papszProperties); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "featureOfInterest", NULL); xmlNewNsProp(psNode, psNsXLink, BAD_CAST "href", BAD_CAST "urn:ogc:def:feature:OGC-SWE:3:transient"); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "responseFormat", BAD_CAST pszSOSGetObservationMimeType); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "resultModel", BAD_CAST "om:Observation"); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "resultModel", BAD_CAST "om:Measurement"); psNode = xmlNewChild(psOfferingNode, NULL, BAD_CAST "responseMode", BAD_CAST "inline"); }/*end of offerings*/ } if (papsOfferings && nOfferings > 0) { for (i=0; i<nOfferings; i++) msFree(papsOfferings[i]); } msFree(papsOfferings); if(panOfferingLayers) msFree(panOfferingLayers); }/* end of offerings */ if ( msIO_needBinaryStdout() == MS_FAILURE ) return MS_FAILURE; msIO_setHeader("Content-Type","text/xml; charset=UTF-8"); msIO_sendHeaders(); /*TODO* : check the encoding validity. Internally libxml2 uses UTF-8 msOWSPrintEncodeMetadata(stdout, &(map->web.metadata), "SO", "encoding", OWS_NOERR, "<?xml version='1.0' encoding=\"%s\" standalone=\"no\" ?>\n", "ISO-8859-1"); */ /*xmlDocDumpFormatMemoryEnc(psDoc, &buffer, &size, (encoding ? encoding : "ISO-8859-1"), 1);*/ /* xmlDocDump crashs withe the prebuild windows binaries distibutes at the libxml site???. It works with locally build binaries*/ context = msIO_getHandler(stdout); xmlDocDumpFormatMemoryEnc(psDoc, &buffer, &size, ("UTF-8"), 1); msIO_contextWrite(context, buffer, size); xmlFree(buffer); /*free buffer and the document */ /*xmlFree(buffer);*/ xmlFreeDoc(psDoc); xmlFreeNs(psNsGml); xmlFreeNs(psNsSos); xmlFreeNs(psNsOgc); xmlFreeNs(psNsSwe); free(xsi_schemaLocation); free(schemalocation); msFree(script_url); /* *Free the global variables that may *have been allocated by the parser. 
*/ xmlCleanupParser(); return(MS_SUCCESS); /* nSize = sizeof(workbuffer); nSize = nSize-1;*/ /* the last character for the '\0' */ /* if (size > nSize) { iIndice = 0; while ((iIndice + nSize) <= size) { snprintf(workbuffer, (sizeof(workbuffer)-1), "%s", buffer+iIndice ); workbuffer[sizeof(workbuffer)-1] = '\0'; msIO_printf("%s", workbuffer); iIndice +=nSize; } if (iIndice < size) { sprintf(workbuffer, "%s", buffer+iIndice ); msIO_printf("%s", workbuffer); } } else { msIO_printf("%s", buffer); } */ } /************************************************************************/ /* msSOSGetObservation */ /* */ /* GetObservation request handler */ /************************************************************************/ int msSOSGetObservation(mapObj *map, sosParamsObj *sosparams, cgiRequestObj *req, owsRequestObj *ows_request) { char *schemalocation = NULL; char *xsi_schemaLocation = NULL; const char *pszTmp = NULL, *pszTmp2 = NULL; const char *user_namespace_uri = "http://mapserver.gis.umn.edu/mapserver"; const char *user_namespace_prefix = "ms"; char *script_url=NULL; int i, j, k, bLayerFound = 0; layerObj *lp = NULL, *lpfirst = NULL; const char *pszTimeExtent=NULL, *pszTimeField=NULL, *pszValue=NULL; FilterEncodingNode *psFilterNode = NULL; rectObj sBbox; xmlDocPtr psDoc = NULL; xmlNodePtr psRootNode, psNode; char **tokens=NULL, **tokens1; int n=0, n1=0; xmlNsPtr psNsGml = NULL; xmlNsPtr psNsOm = NULL; xmlNsPtr psNsSwe = NULL; xmlNsPtr psNsXLink = NULL; xmlNsPtr psNsSos = NULL; xmlNsPtr psNsMs = NULL; const char *opLayerName = NULL; char *pszBuffer = NULL; const char *pszProcedureItem = NULL; int bSpatialDB = 0; xmlChar *buffer = NULL; int size = 0; msIOContext *context = NULL; xmlNodePtr psObservationNode = NULL, psResultNode=NULL; const char *pszProcedure = NULL; const char *pszBlockSep=NULL; char *pszResult=NULL; int nDiffrentProc = 0; SOSProcedureNode *paDiffrentProc = NULL; char *pszProcedureValue = NULL; int iItemPosition, status; shapeObj sShape; char* pszEscapedStr = NULL; sBbox = map->extent; /* establish local namespace */ pszTmp = msOWSLookupMetadata(&(map->web.metadata), "SFO", "namespace_uri"); if(pszTmp) user_namespace_uri = pszTmp; pszTmp = msOWSLookupMetadata(&(map->web.metadata), "SFO", "namespace_prefix"); if(pszTmp) user_namespace_prefix = pszTmp; /* validates mandatory request elements */ if (!sosparams->pszOffering) { msSetError(MS_SOSERR, "Missing OFFERING parameter.", "msSOSGetObservation()"); return msSOSException(map, "offering", "MissingParameterValue"); } if (!sosparams->pszObservedProperty) { msSetError(MS_SOSERR, "Missing OBSERVEDPROPERTY parameter.", "msSOSGetObservation()"); return msSOSException(map, "observedproperty", "MissingParameterValue"); } if (!sosparams->pszResponseFormat) { msSetError(MS_SOSERR, "Missing RESPONSEFORMAT parameter.", "msSOSGetObservation()"); return msSOSException(map, "responseformat", "MissingParameterValue"); } if (strcasecmp(sosparams->pszResponseFormat, pszSOSGetObservationMimeType) != 0) { msSetError(MS_SOSERR, "Invalid RESPONSEFORMAT parameter %s. Allowable values are: %s", "msSOSGetObservation()", sosparams->pszResponseFormat, pszSOSGetObservationMimeType); return msSOSException(map, "responseformat", "InvalidParameterValue"); } if (sosparams->pszResponseMode && strcasecmp(sosparams->pszResponseMode, "inline") != 0) { msSetError(MS_SOSERR, "Invalid RESPONSEMODE parameter %s. 
Allowable values are: \"inline\"", "msSOSGetObservation()", sosparams->pszResponseMode); return msSOSException(map, "responsemode", "InvalidParameterValue"); } /*validate if offering exists*/ for (i=0; i<map->numlayers; i++) { pszTmp = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "offering_id"); if (pszTmp && (strcasecmp(pszTmp, sosparams->pszOffering) == 0) && (msIntegerInArray(GET_LAYER(map, i)->index, ows_request->enabled_layers, ows_request->numlayers))) break; } if (i==map->numlayers) { msSetError(MS_SOSERR, "Offering %s not found. A layer might be disabled for \ this request. Check sos/ows_enable_request settings.", "msSOSGetObservation()", sosparams->pszOffering); return msSOSException(map, "offering", "InvalidParameterValue"); } /*validate if observed property exist*/ /* Allow more the 1 oberved property comma separated (specs is unclear on it). If we do it, we need to see if other parameters like result (filter encoding) should be given for each property too) */ bLayerFound = 0; tokens = msStringSplit(sosparams->pszObservedProperty, ',', &n); for (i=0; i<map->numlayers; i++) { pszTmp = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "offering_id"); pszTmp2 = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "observedproperty_id"); GET_LAYER(map, i)->status = MS_OFF; if (pszTmp && pszTmp2) { if (strcasecmp(pszTmp, sosparams->pszOffering) == 0) { if (tokens && n > 0) { for (j=0; j<n; j++) { if(strcasecmp(pszTmp2, tokens[j]) == 0) { GET_LAYER(map, i)->status = MS_ON; /* opLayerName = msStrdup(GET_LAYER(map, i)->name); */ opLayerName = GET_LAYER(map, i)->name; /* Force setting a template to enable query. */ if (!GET_LAYER(map, i)->template) GET_LAYER(map, i)->template = msStrdup("ttt.html"); bLayerFound = 1; break; } } } } } } if (tokens && n > 0) msFreeCharArray(tokens, n); if (bLayerFound == 0) { msSetError(MS_SOSERR, "ObservedProperty %s not found.", "msSOSGetObservation()", sosparams->pszObservedProperty); return msSOSException(map, "observedproperty", "InvalidParameterValue"); } /* apply procedure : could be a comma separated list. set status to on those layers that have the sos_procedure metadata equals to this parameter. 
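      Layers whose procedure list does not contain any of the requested
      values are switched back to MS_OFF.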
Note that the layer should already have it's status at ON by the offering,observedproperty filter done above */ if (sosparams->pszProcedure) { bLayerFound = 0; tokens = msStringSplit(sosparams->pszProcedure, ',', &n); if (tokens && n > 0) { for (i=0; i<map->numlayers; i++) { if(GET_LAYER(map, i)->status == MS_ON) { pszValue = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "procedure"); if (pszValue) { /* the procedure metadata can be a list "sensor1 sensor2..."*/ tokens1 = msStringSplit(pszValue, ' ', &n1); for (j=0; j<n; j++) { for (k=0; k<n1; k++) { if (strcasecmp(tokens1[k], tokens[j]) == 0) { /* found */ bLayerFound = 1; break; } } if (k<n1) break; } if (j == n) /*not found*/ GET_LAYER(map, i)->status = MS_OFF; if (tokens1) msFreeCharArray(tokens1, n1); if (bLayerFound == 0) { msSetError(MS_SOSERR, "Procedure %s not found.", "msSOSGetObservation()", sosparams->pszProcedure); msFreeCharArray(tokens, n); return msSOSException(map, "procedure", "InvalidParameterValue"); } } /* if there is a procedure_item defined on the layer, we will */ /* use it to set the filter parameter of the layer */ if ((GET_LAYER(map, i)->status == MS_ON) && (pszProcedureItem = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "procedure_item"))) { lp = GET_LAYER(map, i); /* HACK BEGIN */ if (msOWSLookupMetadata(&(GET_LAYER(map,i)->metadata), "S", "procedure") == NULL) { /* if sos_procedure_item is used, and sos_procedure is not, it means that */ /* the procedure or sensor) need to be extracted from the data. Thus we */ /* need to query the layer and get the values from each feature */ if (lp->template == NULL) lp->template = msStrdup("ttt"); map->query.type = MS_QUERY_BY_RECT; map->query.mode = MS_QUERY_MULTIPLE; map->query.layer = i; map->query.rect = map->extent; msQueryByRect(map); /*check if the attribute specified in the procedure_item is available */ /*on the layer*/ iItemPosition = -1; if (msLayerGetItems(lp) == MS_SUCCESS && lp->resultcache && lp->resultcache->numresults > 0) { for(k=0; k<lp->numitems; k++) { if (strcasecmp(lp->items[k], pszProcedureItem) == 0) { iItemPosition = k; break; } } if (iItemPosition == -1) { msSetError(MS_SOSERR, "sos_procedure_item %s could not be found on the layer %s", "msSOSGetCapabilities()", pszProcedureItem, lp->name); return msSOSException(map, "mapserv", "NoApplicableCode"); } /*for each selected feature, grab the value of the procedure_item*/ bLayerFound = 0; for(k=0; k<lp->resultcache->numresults; k++) { msInitShape(&sShape); status = msLayerGetShape(lp, &sShape, &(lp->resultcache->results[k])); if(status != MS_SUCCESS) continue; if (sShape.values[iItemPosition]) { tokens = msStringSplit(sosparams->pszProcedure, ',', &n); for (j=0; j<n; j++) { if (strcasecmp(sShape.values[iItemPosition], tokens[j]) == 0) { /* found */ bLayerFound = 1; break; } } } if (bLayerFound) break; } if (bLayerFound == 0) { msSetError(MS_SOSERR, "Invalid procedure value %s", "msSOSGetCapabilities()", sosparams->pszProcedure); return msSOSException(map, "procedure", "InvalidParameterValue"); } } } /* HACK END */ pszBuffer = NULL; if (&lp->filter) { if (lp->filter.string && strlen(lp->filter.string) > 0) msFreeExpression(&lp->filter); } /*The filter should reflect the underlying db*/ /*for ogr add a where clause */ bSpatialDB = 0; if (lp->connectiontype == MS_POSTGIS || lp->connectiontype == MS_ORACLESPATIAL || lp->connectiontype == MS_OGR) bSpatialDB = 1; if (bSpatialDB) { if (lp->connectiontype != MS_OGR) pszBuffer = msStringConcatenate(pszBuffer, "("); else pszBuffer = 
msStringConcatenate(pszBuffer, "WHERE "); } else pszBuffer = msStringConcatenate(pszBuffer, "("); for (j=0; j<n; j++) { if (j > 0) pszBuffer = msStringConcatenate(pszBuffer, " OR "); pszBuffer = msStringConcatenate(pszBuffer, "("); if (!bSpatialDB) { pszBuffer = msStringConcatenate(pszBuffer, "'["); pszBuffer = msStringConcatenate(pszBuffer, (char *)pszProcedureItem); } else { pszEscapedStr = msLayerEscapePropertyName(lp, (char *)pszProcedureItem); pszBuffer = msStringConcatenate(pszBuffer, pszEscapedStr); msFree(pszEscapedStr); pszEscapedStr = NULL; } if (!bSpatialDB) pszBuffer = msStringConcatenate(pszBuffer, "]'"); pszBuffer = msStringConcatenate(pszBuffer, " = '"); pszEscapedStr = msLayerEscapeSQLParam(lp, tokens[j]); pszBuffer = msStringConcatenate(pszBuffer, pszEscapedStr); msFree(pszEscapedStr); pszBuffer = msStringConcatenate(pszBuffer, "')"); } if (!bSpatialDB || lp->connectiontype != MS_OGR) pszBuffer = msStringConcatenate(pszBuffer, ")"); msLoadExpressionString(&lp->filter, pszBuffer); if (pszBuffer) msFree(pszBuffer); } } } msFreeCharArray(tokens, n); } } /* -------------------------------------------------------------------- */ /* supports 2 types of gml:Time : TimePeriod and TimeInstant : */ /* - <gml:TimePeriod> */ /* <gml:beginPosition>2005-09-01T11:54:32</gml:beginPosition> */ /* <gml:endPosition>2005-09-02T14:54:32</gml:endPosition> */ /* </gml:TimePeriod> */ /* */ /* - <gml:TimeInstant> */ /* <gml:timePosition>2003-02-13T12:28-08:00</gml:timePosition>*/ /* </gml:TimeInstant> */ /* */ /* The user can specify mutilple times separated by commas. */ /* */ /* The gml will be parsed and trasformed into a sting tah */ /* looks like timestart/timeend,... */ /* -------------------------------------------------------------------- */ /*apply time filter if available */ if (sosparams->pszEventTime) { char **apszTimes = NULL; int numtimes = 0; char *pszTimeString = NULL, *pszTmp = NULL; /* Because SOS has specific TemporalOperator which extends FES 1.1, the time filter */ /* passed is different than what mapogcfilter (per 1.0.0) supports. */ /* */ /* Because, in XML POST mode, we traverse directly to gml:TimePeriod|gml:TimeInstant */ /* this is passed directly to mapogcfilter. 
*/ /* for GET requests, we strip the parent element before passing */ pszTmp = msStrdup(sosparams->pszEventTime); pszTmp = msCaseReplaceSubstring(pszTmp, "<ogc:TM_Equals>", ""); pszTmp = msCaseReplaceSubstring(pszTmp, "<TM_Equals>", ""); pszTmp = msCaseReplaceSubstring(pszTmp, "</ogc:TM_Equals>", ""); pszTmp = msCaseReplaceSubstring(pszTmp, "</TM_Equals>", ""); apszTimes = msStringSplit (pszTmp, ',', &numtimes); msFree(pszTmp); if (numtimes >=1) { for (i=0; i<numtimes; i++) { pszTmp = msSOSParseTimeGML(apszTimes[i]); if (pszTmp) { if (pszTimeString) pszTimeString = msStringConcatenate(pszTimeString, ","); pszTimeString = msStringConcatenate(pszTimeString, pszTmp); msFree(pszTmp); } } msFreeCharArray(apszTimes, numtimes); } if (!pszTimeString) { msSetError(MS_SOSERR, "Invalid time value given for the eventTime parameter %s", "msSOSGetObservation()", sosparams->pszEventTime); return msSOSException(map, "eventtime", "InvalidParameterValue"); } for (i=0; i<map->numlayers; i++) { if (GET_LAYER(map, i)->status == MS_ON) { /* the sos_offering_timeextent should be used for time validation*/ /*TODO : too documented ?*/ lpfirst = msSOSGetFirstLayerForOffering(map, msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "offering_id"), NULL); if (lpfirst) pszTimeExtent = msOWSLookupMetadata(&lpfirst->metadata, "S", "offering_timeextent"); pszTimeField = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "SO", "timeitem"); if (pszTimeField) { /*validate only if time extent is set.*/ if (pszTimeExtent) { if (msValidateTimeValue(pszTimeString, pszTimeExtent) == MS_TRUE) msLayerSetTimeFilter((GET_LAYER(map, i)), pszTimeString, pszTimeField); else { /*we should turn the layer off since the eventTime is not in the time extent*/ GET_LAYER(map, i)->status = MS_OFF; } } else msLayerSetTimeFilter((GET_LAYER(map, i)), pszTimeString, pszTimeField); } } } if (pszTimeString) msFree(pszTimeString); } /*bbox*/ /* this is a gml feature <gml:Envelope xmlns:gml="http://www.opengis.net/gml"> <gml:lowerCorner srsName="EPSG:4326">-66 43</gml:lowerCorner> <upperCorner srsName="EPSG:4326">-62 45</upperCorner> </gml:Envelope> */ if (sosparams->pszFeatureOfInterest) { int bValid = 0; CPLXMLNode *psRoot=NULL; char *pszSRS = NULL; psRoot = CPLParseXMLString(sosparams->pszFeatureOfInterest); if(!psRoot) { msSetError(MS_SOSERR, "Invalid gml:Envelope value given for featureOfInterest.", "msSOSGetObservation()"); return msSOSException(map, "featureofinterest", "InvalidParameterValue"); } CPLStripXMLNamespace(psRoot, "gml", 1); bValid = FLTParseGMLEnvelope(psRoot, &sBbox, &pszSRS); /* TODO we should reproject the bbox to the map projection if there is an srs defined */ if (!bValid) { msSetError(MS_SOSERR, "Invalid gml:Envelope value given for featureOfInterest %s.", "msSOSGetObservation()", sosparams->pszEventTime); return msSOSException(map, "featureofinterest", "InvalidParameterValue"); } map->extent.minx = sBbox.minx; map->extent.miny = sBbox.miny; map->extent.maxx = sBbox.maxx; map->extent.maxy = sBbox.maxy; CPLDestroyXMLNode(psRoot); msFree(pszSRS); } if (sosparams->pszSrsName) { /* validate against MAP.WEB.METADATA.sos_srs */ int iUnits = -1; char **tokens = NULL; const char *pszSRSList = NULL; int n = 0; int bFound = 0; int k; char srsbuffer[100]; projectionObj po; pszSRSList = msOWSLookupMetadata(&(map->web.metadata), "SO", "srs"); if (pszSRSList) { tokens = msStringSplit(pszSRSList, ' ', &n); if (tokens && n > 0) { for (k=0; k<n; k++) { if (strncasecmp(tokens[k], "EPSG:", strlen("EPSG:")) == 0 && 
strcasecmp(sosparams->pszSrsName, tokens[k]) == 0) { /* match */ bFound = 1; /* project MAP.EXTENT to this SRS */ msInitProjection(&po); snprintf(srsbuffer, sizeof(srsbuffer), "+init=epsg:%.20s", sosparams->pszSrsName+strlen("EPSG:")); if (msLoadProjectionString(&po, srsbuffer) != 0) { msSetError(MS_SOSERR, "Could not set output projection to \"%s\"", "msSOSGetObservation()", sosparams->pszSrsName); return msSOSException(map, "mapserv", "NoApplicableCode"); } if (msProjectionsDiffer(&map->projection, &po) == MS_TRUE) { msProjectRect(&map->projection, &po, &map->extent); sBbox = map->extent; } /* set map->projection to this SRS */ if (msLoadProjectionString(&(map->projection), srsbuffer) != 0) { msSetError(MS_SOSERR, "Could not set output projection to \"%s\"", "msSOSGetObservation()", sosparams->pszSrsName); return msSOSException(map, "mapserv", "NoApplicableCode"); } iUnits = GetMapserverUnitUsingProj(&(map->projection)); if (iUnits != -1) map->units = iUnits; msFreeProjection(&po); break; } } msFreeCharArray(tokens, n); } if (bFound == 0) { msSetError(MS_SOSERR, "srsName value \"%s\" unsupported / invalid", "msSOSGetObservation()", sosparams->pszSrsName); return msSOSException(map, "srsName", "InvalidParameterValue"); } } else { msSetError(MS_SOSERR, "MAP.WEB.METADATA.sos_srs not set", "msSOSGetObservation()"); return msSOSException(map, "mapserv", "NoApplicableCode"); } } /* apply filter */ if (sosparams->pszResult) { psFilterNode = FLTParseFilterEncoding(sosparams->pszResult); if (!psFilterNode) { msSetError(MS_SOSERR, "Invalid or Unsupported RESULT in GetObservation: %s", "msSOSGetObservation()", sosparams->pszResult); return msSOSException(map, "result", "InvalidParameterValue"); } /* apply the filter to all layers that are on */ for (i=0; i<map->numlayers; i++) { lp = GET_LAYER(map, i); if (lp->status == MS_ON) { /* preparse parser so that alias for fields can be used */ FLTPreParseFilterForAliasAndGroup(psFilterNode, map, i, "S"); /* validate that the property names used are valid (there is a corresponding layer attribute) */ if (msLayerOpen(lp) == MS_SUCCESS && msLayerGetItems(lp) == MS_SUCCESS) { if (msSOSValidateFilter(psFilterNode, lp)== MS_FALSE) { msSetError(MS_SOSERR, "Invalid component name in RESULT statement", "msSOSGetObservation()"); return msSOSException(map, "result", "InvalidParameterValue"); } msLayerClose(lp); } FLTApplyFilterToLayer(psFilterNode, map, i); } } FLTFreeFilterEncodingNode(psFilterNode); } /* this is just a fall back if bbox is enetered. The bbox parameter is not supported by the sos specs */ if (sosparams->pszBBox && !sosparams->pszFeatureOfInterest) { char **tokens; int n; tokens = msStringSplit(sosparams->pszBBox, ',', &n); if (tokens==NULL || n != 4) { msSetError(MS_SOSERR, "Wrong number of arguments for bounding box.", "msSOSGetObservation()"); return msSOSException(map, "bbox", "InvalidParameterValue"); } sBbox.minx = atof(tokens[0]); sBbox.miny = atof(tokens[1]); sBbox.maxx = atof(tokens[2]); sBbox.maxy = atof(tokens[3]); msFreeCharArray(tokens, n); } /* do the query if the filter encoding (pszResult) is not part of the request. 
If pszResult is available, the query on the layers will be done when the filter is parsed*/ if (!sosparams->pszResult) { map->query.type = MS_QUERY_BY_RECT; map->query.mode = MS_QUERY_MULTIPLE; map->query.layer = -1; map->query.rect = sBbox; msQueryByRect(map); } /*get the first layers of the offering*/ for (i=0; i<map->numlayers; i++) { pszTmp = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "offering_id"); if (pszTmp && (strcasecmp(pszTmp, sosparams->pszOffering) == 0)) { lp = (GET_LAYER(map, i)); break; } } /* build xml return tree*/ psNsSos = xmlNewNs(NULL, BAD_CAST "http://www.opengis.net/sos/1.0", BAD_CAST "sos"); psNsGml = xmlNewNs(NULL, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml"); psNsOm = xmlNewNs(NULL, BAD_CAST pszOMNamespaceUri, BAD_CAST pszOMNamespacePrefix); psNsSwe = xmlNewNs(NULL, BAD_CAST "http://www.opengis.net/swe/1.0.1", BAD_CAST "swe"); psNsXLink = xmlNewNs(NULL, BAD_CAST MS_OWSCOMMON_W3C_XLINK_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_W3C_XLINK_NAMESPACE_PREFIX); psNsMs = xmlNewNs(NULL, BAD_CAST user_namespace_uri, BAD_CAST user_namespace_prefix); psDoc = xmlNewDoc(BAD_CAST "1.0"); psRootNode = xmlNewNode(NULL, BAD_CAST "ObservationCollection"); xmlDocSetRootElement(psDoc, psRootNode); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/ows/1.1", BAD_CAST "ows")); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/swe/1.0.1", BAD_CAST "swe")); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST MS_OWSCOMMON_W3C_XLINK_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_W3C_XLINK_NAMESPACE_PREFIX)); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST MS_OWSCOMMON_W3C_XSI_NAMESPACE_URI, BAD_CAST MS_OWSCOMMON_W3C_XSI_NAMESPACE_PREFIX)); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST pszSOSNamespaceUri, BAD_CAST pszSOSNamespacePrefix)); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST user_namespace_uri, BAD_CAST user_namespace_prefix)); xmlSetNs(psRootNode, xmlNewNs(psRootNode, BAD_CAST "http://www.opengis.net/om/1.0", BAD_CAST "om")); xmlNewNsProp(psRootNode, psNsGml, BAD_CAST "id", BAD_CAST sosparams->pszOffering); schemalocation = msEncodeHTMLEntities(msOWSGetSchemasLocation(map)); if ((script_url=msOWSGetOnlineResource(map, "SO", "onlineresource", req)) == NULL) return msSOSException(map, "NoApplicableCode", "NoApplicableCode"); xsi_schemaLocation = msStrdup("http://www.opengis.net/om/1.0 "); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, schemalocation); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, "/om/1.0.0/om.xsd "); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, (char *) user_namespace_uri); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, " "); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, (char *) script_url); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, "service=WFS&version=1.1.0&request=DescribeFeatureType&typename="); xsi_schemaLocation = msStringConcatenate(xsi_schemaLocation, (char *) opLayerName); xmlNewNsProp(psRootNode, NULL, BAD_CAST "xsi:schemaLocation", BAD_CAST xsi_schemaLocation); /* description */ pszTmp = msOWSLookupMetadata(&(lp->metadata), "S", "offering_description"); if (pszTmp) { psNode = xmlNewChild(psRootNode, NULL, BAD_CAST "description", BAD_CAST pszTmp); xmlSetNs(psNode, xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); } /* name */ pszTmp = msOWSLookupMetadata(&(lp->metadata), 
"S", "offering_name"); if (pszTmp) { psNode = xmlNewChild(psRootNode, NULL, BAD_CAST "name", BAD_CAST pszTmp); xmlSetNs(psNode, xmlNewNs(psNode, BAD_CAST "http://www.opengis.net/gml", BAD_CAST "gml")); } /* extent */ pszTmp = msOWSLookupMetadata(&(lp->metadata), "S", "offering_extent"); if (pszTmp) { char **tokens; int n; rectObj envelope; pszTmp2 = msOWSGetEPSGProj(&(map->projection), &(lp->metadata), "SO", MS_TRUE); tokens = msStringSplit(pszTmp, ',', &n); if (tokens==NULL || n != 4) { msSetError(MS_SOSERR, "Wrong number of arguments for sos_offering_extent.", "msSOSGetCapabilities()"); return msSOSException(map, "sos_offering_extent", "InvalidParameterValue"); } envelope.minx = atof(tokens[0]); envelope.miny = atof(tokens[1]); envelope.maxx = atof(tokens[2]); envelope.maxy = atof(tokens[3]); if (map && msProjectionsDiffer(&map->projection, &lp->projection) == MS_TRUE) { if (msProjectRect(&lp->projection, &map->projection, &envelope) == MS_FAILURE) { msSetError(MS_SOSERR, "Coordinates transformation failed. Raised in msProjectRect() of file %s line %d", "msSOSGetCapabilities()", __FILE__, __LINE__); return msSOSException(map, "sos_offering_extent", "InvalidParameterValue"); } } psNode = xmlAddChild(psRootNode, msGML3BoundedBy(psNsGml, envelope.minx, envelope.miny, envelope.maxx, envelope.maxy, pszTmp2)); msFreeCharArray(tokens, n); } /* time pszTmp = msOWSLookupMetadata(&(lp->metadata), "S","offering_timeextent"); if (pszTmp) { char **tokens; int n; char *pszEndTime = NULL; tokens = msStringSplit(pszTmp, '/', &n); if (tokens==NULL || (n != 1 && n!=2)) { msSetError(MS_SOSERR, "Wrong number of arguments for sos_offering_timeextent.", "msSOSGetCapabilities()"); return msSOSException(map, "sos_offering_timeextent", "InvalidParameterValue"); } */ /* if (n == 2) */ /* end time is empty. 
It is going to be set as "now" */ /* pszEndTime = tokens[1]; psNode = xmlAddChild(psRootNode, msSOSAddTimeNode(xmlNewNs(NULL, BAD_CAST pszOMNamespaceUri, BAD_CAST pszOMNamespacePrefix), tokens[0], pszEndTime)); psNode = xmlAddChild(psRootNode, msSOSAddTimeNode(psNsOm, psNsGml, tokens[0], pszEndTime)); msFreeCharArray(tokens, n); } */ if (sosparams->pszResultModel && strcasecmp(sosparams->pszResultModel, "om:Measurement") != 0 && strcasecmp(sosparams->pszResultModel, "om:Observation") != 0) { msSetError(MS_SOSERR, "resultModel should be om:Measurement or om:Observation", "msSOSGetObservation()"); free(xsi_schemaLocation); free(schemalocation); return msSOSException(map, "resultModel", "InvalidParameterValue"); } else { /* output result members */ for (i=0; i<map->numlayers; i++) { if (GET_LAYER(map, i)->resultcache && GET_LAYER(map, i)->resultcache->numresults > 0) { msLayerGetItems((GET_LAYER(map, i))); pszTmp = msOWSLookupMetadata(&(map->web.metadata), "SO", "maxfeatures"); if (pszTmp != NULL) n1 = atoi(pszTmp); else n1 = 0; if (sosparams->pszResultModel == NULL || strcasecmp(sosparams->pszResultModel, "om:Measurement") == 0) { for(j=0; j<GET_LAYER(map, i)->resultcache->numresults; j++) { msSOSAddMemberNode(psNsGml, psNsOm, psNsSwe, psNsXLink, psNsMs, psRootNode, map, (GET_LAYER(map, i)), j, script_url, opLayerName); if (j == n1-1) break; } } else { /*assuming here that pszResultModel = observation */ /*layer does not define a procedure_item: this means one procedure per layer defined using sos_procedure)*/ if (msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S", "procedure_item") == NULL) { pszProcedure = msOWSLookupMetadata(&(lp->metadata), "S", "procedure"); psObservationNode = msSOSAddMemberNodeObservation(psNsGml, psNsSos, psNsOm, psNsSwe, psNsXLink, psRootNode, map, (GET_LAYER(map, i)), pszProcedure); /*add a result node*/ psResultNode = xmlNewChild(psObservationNode, NULL, BAD_CAST "result", NULL); for(j=0; j<GET_LAYER(map, i)->resultcache->numresults; j++) { /*add a block separator*/ if (j > 0) { pszBlockSep = msOWSLookupMetadata(&(map->web.metadata), "S", "encoding_blockSeparator"); if (pszBlockSep) xmlNodeAddContent(psResultNode, BAD_CAST pszBlockSep); else xmlNodeAddContent(psResultNode, BAD_CAST "\n"); } pszResult = msSOSReturnMemberResult((GET_LAYER(map, i)), j, NULL); if (pszResult) { xmlNodeAddContent(psResultNode, BAD_CAST pszResult); msFree(pszResult); } } } /*this is the case where procedure_item is used. 
Needs more management since the same data on a layer contains different procedures (procedures are one of the fields of the record)*/ else { for(j=0; j<GET_LAYER(map, i)->resultcache->numresults; j++) { pszResult = msSOSReturnMemberResult((GET_LAYER(map, i)), j, &pszProcedureValue); if (!pszProcedureValue || !pszResult) continue; for (k=0; k<nDiffrentProc; k++) { if (strcasecmp(paDiffrentProc[k].pszProcedure, pszProcedureValue) == 0) { pszBlockSep = msOWSLookupMetadata(&(map->web.metadata), "S", "encoding_blockSeparator"); if (pszBlockSep) xmlNodeAddContent(paDiffrentProc[k].psResultNode, BAD_CAST pszBlockSep); else xmlNodeAddContent(paDiffrentProc[k].psResultNode, BAD_CAST "\n"); xmlNodeAddContent(paDiffrentProc[k].psResultNode, BAD_CAST pszResult); break; } } if (k == nDiffrentProc) { /*a new procedure*/ nDiffrentProc++; if (paDiffrentProc == NULL) paDiffrentProc = (SOSProcedureNode *)malloc(sizeof(SOSProcedureNode)); else paDiffrentProc = (SOSProcedureNode *)realloc(paDiffrentProc, sizeof(SOSProcedureNode) *nDiffrentProc); paDiffrentProc[nDiffrentProc-1].pszProcedure = msStrdup(pszProcedureValue); psObservationNode = msSOSAddMemberNodeObservation(psNsGml, psNsSos, psNsOm, psNsSwe, psNsXLink, psRootNode, map, (GET_LAYER(map, i)), pszProcedureValue); msFree(pszProcedureValue); paDiffrentProc[nDiffrentProc-1].psResultNode = xmlNewChild(psObservationNode, NULL, BAD_CAST "result", NULL); xmlNodeAddContent(paDiffrentProc[nDiffrentProc-1].psResultNode, BAD_CAST pszResult); msFree(pszResult); } } if (paDiffrentProc) { for (k=0; k<nDiffrentProc; k++) msFree(paDiffrentProc[k].pszProcedure); free(paDiffrentProc); } } } } } } /* output results */ msIO_setHeader("Content-Type","text/xml; charset=UTF-8"); msIO_sendHeaders(); context = msIO_getHandler(stdout); xmlDocDumpFormatMemoryEnc(psDoc, &buffer, &size, ("UTF-8"), 1); msIO_contextWrite(context, buffer, size); free(schemalocation); free(xsi_schemaLocation); xmlFreeNs(psNsSos); xmlFreeNs(psNsGml); xmlFreeNs(psNsOm); xmlFreeNs(psNsSwe); xmlFreeNs(psNsXLink); xmlFreeNs(psNsMs); xmlFree(buffer); msFree(script_url); /*free document */ xmlFreeDoc(psDoc); /* *Free the global variables that may *have been allocated by the parser. */ xmlCleanupParser(); return(MS_SUCCESS); } /************************************************************************/ /* msSOSDescribeSensor */ /* */ /* Describe sensor request handler. */ /************************************************************************/ int msSOSDescribeSensor(mapObj *map, sosParamsObj *sosparams, owsRequestObj *ows_request) { char *pszEncodedUrl = NULL; const char *pszId = NULL, *pszUrl = NULL; int i = 0, j=0, k=0; layerObj *lp = NULL; int iItemPosition = -1; shapeObj sShape; int status; char *tmpstr = NULL, *pszTmp = NULL, *pszProcedureURI = NULL, *pszProcedureId = NULL; if (!sosparams->pszOutputFormat) { msSetError(MS_SOSERR, "Missing mandatory parameter outputFormat.", "msSOSDescribeSensor()"); return msSOSException(map, "outputformat", "MissingParameterValue"); } if (strcasecmp(sosparams->pszOutputFormat, pszSOSDescribeSensorMimeType) != 0) { msSetError(MS_SOSERR, "Invalid outputformat parameter %s. 
Allowable values are: %s", "msSOSDescribeSensor()", sosparams->pszOutputFormat, pszSOSDescribeSensorMimeType); return msSOSException(map, "outputformat", "InvalidParameterValue"); } if (!sosparams->pszProcedure) { msSetError(MS_SOSERR, "Missing mandatory parameter procedure", "msSOSDescribeSensor()"); return msSOSException(map, "procedure", "MissingParameterValue"); } for (i=0; i<map->numlayers; i++) { lp = GET_LAYER(map, i); pszId = msOWSLookupMetadata(&(lp->metadata), "S", "procedure"); if (pszId && strlen(pszId) > 0) { /*procedure could be a list*/ char **tokens = NULL; int n=0; int bFound = 0; tokens = msStringSplit(pszId, ' ', &n); for (k=0; k<n; k++) { if (tokens[k] && strlen(tokens[k]) > 0) { pszProcedureURI = msStrdup("urn:ogc:def:procedure:"); pszProcedureURI = msStringConcatenate(pszProcedureURI, tokens[k]); if ( (pszProcedureURI && strcasecmp(pszProcedureURI, sosparams->pszProcedure) == 0) && (msIntegerInArray(lp->index, ows_request->enabled_layers, ows_request->numlayers)) ) { bFound = 1; pszProcedureId = msStrdup(tokens[k]); msFree(pszProcedureURI); break; } msFree(pszProcedureURI); } } msFreeCharArray(tokens, n); if (bFound) { pszUrl = msOWSLookupMetadata(&(lp->metadata), "S", "describesensor_url"); if (pszUrl) { pszTmp = msStrdup(pszUrl); /* %procedure% is the hardcoded variable name to use within sos_describesensor_url */ tmpstr = (char *)malloc(sizeof(char)*strlen("procedure") + 3); sprintf(tmpstr,"%%%s%%", "procedure"); if (strcasestr(pszUrl, tmpstr) != NULL) pszTmp = msCaseReplaceSubstring(pszTmp, tmpstr, pszProcedureId); msFree(tmpstr); pszEncodedUrl = msEncodeHTMLEntities(pszTmp); msIO_printf("Location: %s\n\n", pszEncodedUrl); msFree(pszTmp); msFree(pszEncodedUrl); msFree(pszProcedureId); return(MS_SUCCESS); } else { msSetError(MS_SOSERR, "Missing mandatory metadata sos_describesensor_url on layer %s", "msSOSDescribeSensor()", lp->name); return msSOSException(map, "sos_describesensor_url", "MissingParameterValue"); } } } else if ((pszId = msOWSLookupMetadata(&(lp->metadata), "S", "procedure_item"))) { iItemPosition = -1; if (msLayerOpen(lp) == MS_SUCCESS && msLayerGetItems(lp) == MS_SUCCESS) { for(j=0; j<lp->numitems; j++) { if (strcasecmp(lp->items[j], pszId) == 0) { iItemPosition = j; break; } } msLayerClose(lp); } if (iItemPosition >=0) { if (lp->template == NULL) lp->template = msStrdup("ttt"); map->query.type = MS_QUERY_BY_RECT; map->query.mode = MS_QUERY_MULTIPLE; map->query.layer = i; map->query.rect = map->extent; msQueryByRect(map); msLayerGetItems(lp); if (lp->resultcache && lp->resultcache->numresults > 0) { for(j=0; j<lp->resultcache->numresults; j++) { msInitShape(&sShape); status = msLayerGetShape(lp, &sShape, &(lp->resultcache->results[j])); if(status != MS_SUCCESS) continue; if (sShape.values[iItemPosition]) { pszProcedureURI = msStrdup("urn:ogc:def:procedure:"); pszProcedureURI = msStringConcatenate(pszProcedureURI, sShape.values[iItemPosition]); if (strcasecmp(pszProcedureURI, sosparams->pszProcedure) == 0) { pszUrl = msOWSLookupMetadata(&(lp->metadata), "S", "describesensor_url"); pszProcedureId = msStrdup(sShape.values[iItemPosition]); if (pszUrl) { pszTmp = msStrdup(pszUrl); /* %procedure% is the hardcoded variable names to use within sos_describesensor_url */ tmpstr = (char *)malloc(sizeof(char)*strlen("procedure") + 3); sprintf(tmpstr,"%%%s%%", "procedure"); if (strcasestr(pszUrl, tmpstr) != NULL) pszTmp = msCaseReplaceSubstring(pszTmp, tmpstr, pszProcedureId); msFree(tmpstr); pszEncodedUrl = msEncodeHTMLEntities(pszTmp); msIO_printf("Location: 
%s\n\n", pszEncodedUrl);
                  msFree(pszTmp);
                  return(MS_SUCCESS);
                } else {
                  msSetError(MS_SOSERR,
                             "Missing mandatory metadata sos_describesensor_url on layer %s",
                             "msSOSDescribeSensor()", lp->name);
                  return msSOSException(map, "mapserv", "NoApplicableCode");
                }
              }
            }
          }
        }
      }
    }
  }

  msSetError(MS_SOSERR, "procedure %s not found.",
             "msSOSDescribeSensor()", sosparams->pszProcedure);
  return msSOSException(map, "procedure", "InvalidParameterValue");
}

/************************************************************************/
/*                    msSOSDescribeObservationType                     */
/*                                                                      */
/*              DescribeObservationType request handler                 */
/************************************************************************/
int msSOSDescribeObservationType(mapObj *map, sosParamsObj *sosparams,
                                 cgiRequestObj *req, owsRequestObj *ows_request)
{
  int i, j, n = 0, bLayerFound = 0;
  char **tokens = NULL;
  char *script_url = NULL;
  const char *pszTmp = NULL;
  char *pszTmp2 = NULL;
  const char *opLayerName = NULL;

  if (!sosparams->pszObservedProperty) {
    msSetError(MS_SOSERR, "Missing mandatory parameter observedproperty",
               "msSOSDescribeObservationType()");
    return msSOSException(map, "observedproperty", "MissingParameterValue");
  }

  tokens = msStringSplit(sosparams->pszObservedProperty, ',', &n);

  for (i=0; i<map->numlayers; i++) {
    if (!msIntegerInArray(GET_LAYER(map, i)->index, ows_request->enabled_layers,
                          ows_request->numlayers))
      continue;

    pszTmp = msOWSLookupMetadata(&(GET_LAYER(map, i)->metadata), "S",
                                 "observedproperty_id");
    if (pszTmp) {
      if (strcasecmp(pszTmp, sosparams->pszObservedProperty) == 0) {
        if (tokens && n > 0) {
          for (j=0; j<n; j++) {
            if (strcasecmp(pszTmp, tokens[j]) == 0) {
              opLayerName = GET_LAYER(map, i)->name;
              bLayerFound = 1;
              break;
            }
          }
        }
      }
    }
  }
  if (tokens && n > 0)
    msFreeCharArray(tokens, n);

  if (bLayerFound == 0) {
    msSetError(MS_SOSERR, "ObservedProperty %s not found.",
               "msSOSDescribeObservationType()", sosparams->pszObservedProperty);
    return msSOSException(map, "observedproperty", "InvalidParameterValue");
  }

  if ((script_url = msOWSGetOnlineResource(map, "SO", "onlineresource", req)) == NULL)
    return msSOSException(map, "NoApplicableCode", "NoApplicableCode");

  pszTmp2 = msStringConcatenate(pszTmp2, (char *) script_url);
  pszTmp2 = msStringConcatenate(pszTmp2, "service=WFS&version=1.1.0&request=DescribeFeatureType&typename=");
  pszTmp2 = msStringConcatenate(pszTmp2, (char *) opLayerName);

  msIO_printf("Location: %s\n\n", pszTmp2);

  msFree(pszTmp2);
  msFree(script_url);
  return(MS_SUCCESS);
}

#endif /* defined(USE_SOS_SVR) && defined(USE_LIBXML2) */

/*
** msSOSDispatch() is the entry point for SOS requests.
** - If this is a valid request then it is processed and MS_SUCCESS is returned
**   on success, or MS_FAILURE on failure.
*/
int msSOSDispatch(mapObj *map, cgiRequestObj *req, owsRequestObj *ows_request)
{
#if defined(USE_SOS_SVR) && defined(USE_LIBXML2)
  int returnvalue = MS_DONE;
  sosParamsObj *paramsObj = (sosParamsObj *)calloc(1, sizeof(sosParamsObj));

  if (msSOSParseRequest(map, req, paramsObj) == MS_FAILURE) {
    msSOSFreeParamsObj(paramsObj);
    free(paramsObj);
    return MS_FAILURE;
  }

  /* SERVICE must be specified and be SOS */
  if (paramsObj->pszService && strcasecmp(paramsObj->pszService, "SOS") == 0) {
    /* this is an SOS request */
    if (!paramsObj->pszRequest) {
      msSetError(MS_SOSERR, "Missing REQUEST Parameter", "msSOSDispatch()");
      msSOSFreeParamsObj(paramsObj);
      free(paramsObj);
      paramsObj = NULL;
      return msSOSException(map, "request", "MissingParameterValue");
    }

    msOWSRequestLayersEnabled(map, "S", paramsObj->pszRequest, ows_request);
    if (ows_request->numlayers == 0) {
      msSetError(MS_SOSERR, "SOS request not enabled. Check sos/ows_enable_request settings.",
                 "msSOSDispatch()");
      msSOSFreeParamsObj(paramsObj);
      free(paramsObj);
      paramsObj = NULL;
      return msSOSException(map, "request", "InvalidParameterValue");
    }

    if (strcasecmp(paramsObj->pszRequest, "GetCapabilities") == 0) {
      returnvalue = msSOSGetCapabilities(map, paramsObj, req, ows_request);
      msSOSFreeParamsObj(paramsObj);
      free(paramsObj);
      paramsObj = NULL;
      return returnvalue;
    } else if (strcasecmp(paramsObj->pszRequest, "DescribeSensor") == 0 ||
               strcasecmp(paramsObj->pszRequest, "GetObservation") == 0 ||
               strcasecmp(paramsObj->pszRequest, "DescribeObservationType") == 0) {
      /* check version */
      if (!paramsObj->pszVersion) {
        msSetError(MS_SOSERR, "Missing VERSION parameter", "msSOSDispatch()");
        msSOSFreeParamsObj(paramsObj);
        free(paramsObj);
        paramsObj = NULL;
        return msSOSException(map, "version", "MissingParameterValue");
      }

      if (msOWSParseVersionString(paramsObj->pszVersion) != OWS_1_0_0) {
        msSetError(MS_SOSERR, "VERSION %s not supported.
Supported versions are: %s.", "msSOSDispatch()", paramsObj->pszVersion, pszSOSVersion); msSOSFreeParamsObj(paramsObj); free(paramsObj); paramsObj = NULL; return msSOSException(map, "version", "InvalidParameterValue"); } if (strcasecmp(paramsObj->pszRequest, "DescribeSensor") == 0) returnvalue = msSOSDescribeSensor(map, paramsObj, ows_request); else if (strcasecmp(paramsObj->pszRequest, "GetObservation") == 0) returnvalue = msSOSGetObservation(map, paramsObj, req, ows_request); else if (strcasecmp(paramsObj->pszRequest, "DescribeObservationType") == 0) returnvalue = msSOSDescribeObservationType(map, paramsObj, req, ows_request); msSOSFreeParamsObj(paramsObj); free(paramsObj); paramsObj = NULL; return returnvalue; } else { msSetError(MS_SOSERR, "Invalid REQUEST parameter: %s", "msSOSDispatch()", paramsObj->pszRequest); msSOSFreeParamsObj(paramsObj); free(paramsObj); paramsObj = NULL; return msSOSException(map, "request", "InvalidParameterValue"); } } else { msSOSFreeParamsObj(paramsObj); free(paramsObj); return MS_DONE; /* Not an SOS request */ } #else msSetError(MS_SOSERR, "SOS support is not available.", "msSOSDispatch()"); return(MS_FAILURE); #endif } #if defined(USE_SOS_SVR) && defined(USE_LIBXML2) int msSOSParseRequest(mapObj *map, cgiRequestObj *request, sosParamsObj *sosparams) { int i; xmlDocPtr doc; xmlXPathContextPtr context; xmlNodeSetPtr nodeset; xmlXPathObjectPtr psXPathTmp; char *pszTmp = NULL; if (request->NumParams) { /* this is a GET request */ for(i=0; i<request->NumParams; i++) { if (strcasecmp(request->ParamNames[i], "SERVICE") == 0) sosparams->pszService = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "VERSION") == 0) sosparams->pszVersion = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "ACCEPTVERSIONS") == 0) sosparams->pszAcceptVersions = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "REQUEST") == 0) sosparams->pszRequest = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "UPDATESEQUENCE") == 0) sosparams->pszUpdateSequence = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "SENSORID") == 0) sosparams->pszSensorId = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "PROCEDURE") == 0) sosparams->pszProcedure = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "OUTPUTFORMAT") == 0) sosparams->pszOutputFormat = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "OFFERING") == 0) sosparams->pszOffering = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "OBSERVEDPROPERTY") == 0) sosparams->pszObservedProperty = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "EVENTTIME") == 0) sosparams->pszEventTime = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "RESULT") == 0) sosparams->pszResult = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "RESULTMODEL") == 0) sosparams->pszResultModel = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "RESPONSEFORMAT") == 0) sosparams->pszResponseFormat = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "RESPONSEMODE") == 0) sosparams->pszResponseMode = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "BBOX") == 0) sosparams->pszBBox = msStrdup(request->ParamValues[i]); else if 
(strcasecmp(request->ParamNames[i], "SRSNAME") == 0) sosparams->pszSrsName = msStrdup(request->ParamValues[i]); else if (strcasecmp(request->ParamNames[i], "FEATUREOFINTEREST") == 0) sosparams->pszFeatureOfInterest = msStrdup(request->ParamValues[i]); } } if (request->postrequest) { /* this a POST request */ /* load document */ doc = xmlParseDoc((xmlChar *)request->postrequest); if (doc == NULL ) { msSetError(MS_SOSERR, "Invalid POST request. XML is not well-formed", "msSOSParseRequest()"); return msSOSException(map, "request", "InvalidRequest"); } /* load context */ context = xmlXPathNewContext(doc); if (context == NULL) { msSetError(MS_SOSERR, "Could not create context (xmlXPathNewContext)", "msSOSParseRequest()"); return msSOSException(map, "request", "NoApplicableCode"); } /* register namespaces */ if(xmlXPathRegisterNs(context, (xmlChar *)"sos", (xmlChar *)"http://www.opengis.net/sos/1.0") != 0 || xmlXPathRegisterNs(context, (xmlChar *)"ows", (xmlChar *)"http://www.opengis.net/ows/1.1") != 0 || xmlXPathRegisterNs(context, (xmlChar *)"ogc", (xmlChar *)"http://www.opengis.net/ogc") != 0 || xmlXPathRegisterNs(context, (xmlChar *)"gml", (xmlChar *)"http://www.opengis.net/gml") != 0) { msSetError(MS_SOSERR, "Could not register namespaces (xmlXPathRegisterNs)", "msSOSParseRequest()"); return msSOSException(map, "request", "NoApplicableCode"); } /* check for service */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/*/@service"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszService = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for updateSequence*/ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/*/@updateSequence"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszUpdateSequence = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for version */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/*/ows:AcceptVersions/ows:Version|/*/@version"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszVersion = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for request */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetCapabilities"); if (psXPathTmp) sosparams->pszRequest = msStrdup("GetCapabilities"); psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:DescribeSensor"); if (psXPathTmp) sosparams->pszRequest = msStrdup("DescribeSensor"); psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation"); if (psXPathTmp) sosparams->pszRequest = msStrdup("GetObservation"); psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:DescribeObservationType"); if (psXPathTmp) sosparams->pszRequest = msStrdup("DescribeObservationType"); xmlXPathFreeObject(psXPathTmp); /* check for outputformat */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:DescribeSensor/@outputFormat"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszOutputFormat = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for Procedure */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:DescribeSensor/sos:procedure"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszProcedure = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } 
xmlXPathFreeObject(psXPathTmp); /* check for offering */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:offering"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszOffering = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for observedproperty */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:observedProperty"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszObservedProperty = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for procedure */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:procedure"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszProcedure = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for responseFormat */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:responseFormat"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszResponseFormat = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for resultModel */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:resultModel"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszResultModel = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for responseMode */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:responseMode"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszResponseMode = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for srsName */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/@srsName"); if (psXPathTmp) { nodeset = psXPathTmp->nodesetval; sosparams->pszSrsName = (char *)xmlNodeListGetString(doc, nodeset->nodeTab[0]->xmlChildrenNode, 1); } xmlXPathFreeObject(psXPathTmp); /* check for result (chunk of XML) */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:result/child::*"); if (psXPathTmp) { sosparams->pszResult = msLibXml2GetXPathTree(doc, psXPathTmp); pszTmp = msStringConcatenate(pszTmp, "<ogc:Filter>"); pszTmp = msStringConcatenate(pszTmp, sosparams->pszResult); pszTmp = msStringConcatenate(pszTmp, "</ogc:Filter>"); msFree(sosparams->pszResult); sosparams->pszResult = msStrdup(pszTmp); msFree(pszTmp); } xmlXPathFreeObject(psXPathTmp); /* check for featureOfInterest (chunk of XML) */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:featureOfInterest/ogc:BBOX/gml:Envelope"); if (psXPathTmp) { sosparams->pszFeatureOfInterest = (char *)msLibXml2GetXPathTree(doc, psXPathTmp); } xmlXPathFreeObject(psXPathTmp); /* check for eventTime (chunk of XML) */ psXPathTmp = msLibXml2GetXPath(doc, context, (xmlChar *)"/sos:GetObservation/sos:eventTime/*/gml:TimeInstant|/sos:GetObservation/sos:eventTime/*/gml:TimePeriod"); if (psXPathTmp) { sosparams->pszEventTime = (char *)msLibXml2GetXPathTree(doc, psXPathTmp); } xmlXPathFreeObject(psXPathTmp); xmlXPathFreeContext(context); xmlFreeDoc(doc); xmlCleanupParser(); } return MS_SUCCESS; } void msSOSFreeParamsObj(sosParamsObj *sosparams) { if (sosparams) { if 
(sosparams->pszService) free(sosparams->pszService); if (sosparams->pszVersion) free(sosparams->pszVersion); if (sosparams->pszAcceptVersions) free(sosparams->pszAcceptVersions); if (sosparams->pszUpdateSequence) free(sosparams->pszUpdateSequence); if (sosparams->pszRequest) free(sosparams->pszRequest); if (sosparams->pszOutputFormat) free(sosparams->pszOutputFormat); if (sosparams->pszSensorId) free(sosparams->pszSensorId); if (sosparams->pszProcedure) free(sosparams->pszProcedure); if (sosparams->pszOffering) free(sosparams->pszOffering); if (sosparams->pszObservedProperty) free(sosparams->pszObservedProperty); if (sosparams->pszEventTime) free(sosparams->pszEventTime); if (sosparams->pszResult) free(sosparams->pszResult); if (sosparams->pszResponseFormat) free(sosparams->pszResponseFormat); if (sosparams->pszResultModel) free(sosparams->pszResultModel); if (sosparams->pszResponseMode) free(sosparams->pszResponseMode); if (sosparams->pszSrsName) free(sosparams->pszSrsName); if (sosparams->pszFeatureOfInterest) free(sosparams->pszFeatureOfInterest); } } #endif /* defined(USE_SOS_SVR) && defined(USE_LIBXML2) */
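
/*
 * A minimal caller sketch, not MapServer's own mapserv code: it shows how a
 * host CGI program might hand a parsed request to msSOSDispatch() and act on
 * its return values (MS_DONE when SERVICE is not SOS, MS_SUCCESS/MS_FAILURE
 * otherwise), which is the dispatcher contract documented above. The helper
 * name and the zero-initialised owsRequestObj are assumptions for the sketch;
 * a real front end builds mapObj, cgiRequestObj and owsRequestObj itself.
 * It is guarded out so the file compiles unchanged; it relies on the usual
 * mapserver.h/string.h includes at the top of this file.
 */
#ifdef SOS_DISPATCH_EXAMPLE
static int handle_possible_sos_request(mapObj *map, cgiRequestObj *req)
{
  owsRequestObj ows_request;
  int status;

  /* assumed sufficient for a sketch; real callers may have their own init */
  memset(&ows_request, 0, sizeof(ows_request));

  status = msSOSDispatch(map, req, &ows_request);
  if (status == MS_DONE) {
    /* SERVICE was not SOS; let another OGC dispatcher look at the request */
  }
  return status; /* MS_SUCCESS or MS_FAILURE for handled SOS requests */
}
#endif /* SOS_DISPATCH_EXAMPLE */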
260988.c
// SPDX-License-Identifier: GPL-2.0-only /* * Omnivision OV2659 CMOS Image Sensor driver * * Copyright (C) 2015 Texas Instruments, Inc. * * Benoit Parrot <[email protected]> * Lad, Prabhakar <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/pm_runtime.h> #include <media/i2c/ov2659.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-image-sizes.h> #include <media/v4l2-subdev.h> #define DRIVER_NAME "ov2659" /* * OV2659 register definitions */ #define REG_SOFTWARE_STANDBY 0x0100 #define REG_SOFTWARE_RESET 0x0103 #define REG_IO_CTRL00 0x3000 #define REG_IO_CTRL01 0x3001 #define REG_IO_CTRL02 0x3002 #define REG_OUTPUT_VALUE00 0x3008 #define REG_OUTPUT_VALUE01 0x3009 #define REG_OUTPUT_VALUE02 0x300d #define REG_OUTPUT_SELECT00 0x300e #define REG_OUTPUT_SELECT01 0x300f #define REG_OUTPUT_SELECT02 0x3010 #define REG_OUTPUT_DRIVE 0x3011 #define REG_INPUT_READOUT00 0x302d #define REG_INPUT_READOUT01 0x302e #define REG_INPUT_READOUT02 0x302f #define REG_SC_PLL_CTRL0 0x3003 #define REG_SC_PLL_CTRL1 0x3004 #define REG_SC_PLL_CTRL2 0x3005 #define REG_SC_PLL_CTRL3 0x3006 #define REG_SC_CHIP_ID_H 0x300a #define REG_SC_CHIP_ID_L 0x300b #define REG_SC_PWC 0x3014 #define REG_SC_CLKRST0 0x301a #define REG_SC_CLKRST1 0x301b #define REG_SC_CLKRST2 0x301c #define REG_SC_CLKRST3 0x301d #define REG_SC_SUB_ID 0x302a #define REG_SC_SCCB_ID 0x302b #define REG_GROUP_ADDRESS_00 0x3200 #define REG_GROUP_ADDRESS_01 0x3201 #define REG_GROUP_ADDRESS_02 0x3202 #define REG_GROUP_ADDRESS_03 0x3203 #define REG_GROUP_ACCESS 0x3208 #define REG_AWB_R_GAIN_H 0x3400 #define REG_AWB_R_GAIN_L 0x3401 #define REG_AWB_G_GAIN_H 0x3402 #define REG_AWB_G_GAIN_L 0x3403 #define REG_AWB_B_GAIN_H 0x3404 #define REG_AWB_B_GAIN_L 0x3405 #define REG_AWB_MANUAL_CONTROL 0x3406 #define REG_TIMING_HS_H 0x3800 #define REG_TIMING_HS_L 0x3801 #define REG_TIMING_VS_H 0x3802 #define REG_TIMING_VS_L 0x3803 #define REG_TIMING_HW_H 0x3804 #define REG_TIMING_HW_L 0x3805 #define REG_TIMING_VH_H 0x3806 #define REG_TIMING_VH_L 0x3807 #define REG_TIMING_DVPHO_H 0x3808 #define REG_TIMING_DVPHO_L 0x3809 #define REG_TIMING_DVPVO_H 0x380a #define REG_TIMING_DVPVO_L 0x380b #define REG_TIMING_HTS_H 0x380c #define REG_TIMING_HTS_L 0x380d #define REG_TIMING_VTS_H 0x380e #define REG_TIMING_VTS_L 0x380f #define REG_TIMING_HOFFS_H 0x3810 #define REG_TIMING_HOFFS_L 0x3811 #define REG_TIMING_VOFFS_H 0x3812 #define REG_TIMING_VOFFS_L 0x3813 #define REG_TIMING_XINC 0x3814 #define REG_TIMING_YINC 0x3815 #define REG_TIMING_VERT_FORMAT 0x3820 #define REG_TIMING_HORIZ_FORMAT 0x3821 #define REG_FORMAT_CTRL00 0x4300 #define REG_VFIFO_READ_START_H 0x4608 #define REG_VFIFO_READ_START_L 0x4609 #define REG_DVP_CTRL02 0x4708 #define REG_ISP_CTRL00 0x5000 #define REG_ISP_CTRL01 0x5001 #define REG_ISP_CTRL02 0x5002 #define REG_LENC_RED_X0_H 0x500c #define REG_LENC_RED_X0_L 0x500d #define REG_LENC_RED_Y0_H 0x500e #define REG_LENC_RED_Y0_L 0x500f #define REG_LENC_RED_A1 0x5010 #define REG_LENC_RED_B1 0x5011 #define REG_LENC_RED_A2_B2 0x5012 #define REG_LENC_GREEN_X0_H 0x5013 #define REG_LENC_GREEN_X0_L 0x5014 #define REG_LENC_GREEN_Y0_H 0x5015 #define REG_LENC_GREEN_Y0_L 0x5016 #define REG_LENC_GREEN_A1 0x5017 #define REG_LENC_GREEN_B1 0x5018 #define REG_LENC_GREEN_A2_B2 0x5019 #define REG_LENC_BLUE_X0_H 0x501a #define REG_LENC_BLUE_X0_L 0x501b #define REG_LENC_BLUE_Y0_H 0x501c #define 
REG_LENC_BLUE_Y0_L 0x501d #define REG_LENC_BLUE_A1 0x501e #define REG_LENC_BLUE_B1 0x501f #define REG_LENC_BLUE_A2_B2 0x5020 #define REG_AWB_CTRL00 0x5035 #define REG_AWB_CTRL01 0x5036 #define REG_AWB_CTRL02 0x5037 #define REG_AWB_CTRL03 0x5038 #define REG_AWB_CTRL04 0x5039 #define REG_AWB_LOCAL_LIMIT 0x503a #define REG_AWB_CTRL12 0x5049 #define REG_AWB_CTRL13 0x504a #define REG_AWB_CTRL14 0x504b #define REG_SHARPENMT_THRESH1 0x5064 #define REG_SHARPENMT_THRESH2 0x5065 #define REG_SHARPENMT_OFFSET1 0x5066 #define REG_SHARPENMT_OFFSET2 0x5067 #define REG_DENOISE_THRESH1 0x5068 #define REG_DENOISE_THRESH2 0x5069 #define REG_DENOISE_OFFSET1 0x506a #define REG_DENOISE_OFFSET2 0x506b #define REG_SHARPEN_THRESH1 0x506c #define REG_SHARPEN_THRESH2 0x506d #define REG_CIP_CTRL00 0x506e #define REG_CIP_CTRL01 0x506f #define REG_CMX_SIGN 0x5079 #define REG_CMX_MISC_CTRL 0x507a #define REG_PRE_ISP_CTRL00 0x50a0 #define TEST_PATTERN_ENABLE BIT(7) #define VERTICAL_COLOR_BAR_MASK 0x53 #define REG_NULL 0x0000 /* Array end token */ #define OV265X_ID(_msb, _lsb) ((_msb) << 8 | (_lsb)) #define OV2659_ID 0x2656 struct sensor_register { u16 addr; u8 value; }; struct ov2659_framesize { u16 width; u16 height; u16 max_exp_lines; const struct sensor_register *regs; }; struct ov2659_pll_ctrl { u8 ctrl1; u8 ctrl2; u8 ctrl3; }; struct ov2659_pixfmt { u32 code; /* Output format Register Value (REG_FORMAT_CTRL00) */ struct sensor_register *format_ctrl_regs; }; struct pll_ctrl_reg { unsigned int div; unsigned char reg; }; struct ov2659 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_mbus_framefmt format; unsigned int xvclk_frequency; const struct ov2659_platform_data *pdata; struct mutex lock; struct i2c_client *client; struct v4l2_ctrl_handler ctrls; struct v4l2_ctrl *link_frequency; const struct ov2659_framesize *frame_size; struct sensor_register *format_ctrl_regs; struct ov2659_pll_ctrl pll; int streaming; /* used to control the sensor PWDN pin */ struct gpio_desc *pwdn_gpio; /* used to control the sensor RESETB pin */ struct gpio_desc *resetb_gpio; }; static const struct sensor_register ov2659_init_regs[] = { { REG_IO_CTRL00, 0x03 }, { REG_IO_CTRL01, 0xff }, { REG_IO_CTRL02, 0xe0 }, { 0x3633, 0x3d }, { 0x3620, 0x02 }, { 0x3631, 0x11 }, { 0x3612, 0x04 }, { 0x3630, 0x20 }, { 0x4702, 0x02 }, { 0x370c, 0x34 }, { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xb7 }, { REG_TIMING_DVPHO_H, 0x03 }, { REG_TIMING_DVPHO_L, 0x20 }, { REG_TIMING_DVPVO_H, 0x02 }, { REG_TIMING_DVPVO_L, 0x58 }, { REG_TIMING_HTS_H, 0x05 }, { REG_TIMING_HTS_L, 0x14 }, { REG_TIMING_VTS_H, 0x02 }, { REG_TIMING_VTS_L, 0x68 }, { REG_TIMING_HOFFS_L, 0x08 }, { REG_TIMING_VOFFS_L, 0x02 }, { REG_TIMING_XINC, 0x31 }, { REG_TIMING_YINC, 0x31 }, { 0x3a02, 0x02 }, { 0x3a03, 0x68 }, { 0x3a08, 0x00 }, { 0x3a09, 0x5c }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x4d }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x02 }, { 0x3a15, 0x28 }, { REG_DVP_CTRL02, 0x01 }, { 0x3623, 0x00 }, { 0x3634, 0x76 }, { 0x3701, 0x44 }, { 0x3702, 0x18 }, { 0x3703, 0x24 }, { 0x3704, 0x24 }, { 0x3705, 0x0c }, { REG_TIMING_VERT_FORMAT, 0x81 }, { REG_TIMING_HORIZ_FORMAT, 0x01 }, { 0x370a, 0x52 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0x80 }, { REG_FORMAT_CTRL00, 0x30 }, { 0x5086, 0x02 }, { REG_ISP_CTRL00, 0xfb }, { REG_ISP_CTRL01, 0x1f }, { REG_ISP_CTRL02, 0x00 }, { 0x5025, 0x0e }, { 0x5026, 0x18 }, { 0x5027, 
0x34 }, { 0x5028, 0x4c }, { 0x5029, 0x62 }, { 0x502a, 0x74 }, { 0x502b, 0x85 }, { 0x502c, 0x92 }, { 0x502d, 0x9e }, { 0x502e, 0xb2 }, { 0x502f, 0xc0 }, { 0x5030, 0xcc }, { 0x5031, 0xe0 }, { 0x5032, 0xee }, { 0x5033, 0xf6 }, { 0x5034, 0x11 }, { 0x5070, 0x1c }, { 0x5071, 0x5b }, { 0x5072, 0x05 }, { 0x5073, 0x20 }, { 0x5074, 0x94 }, { 0x5075, 0xb4 }, { 0x5076, 0xb4 }, { 0x5077, 0xaf }, { 0x5078, 0x05 }, { REG_CMX_SIGN, 0x98 }, { REG_CMX_MISC_CTRL, 0x21 }, { REG_AWB_CTRL00, 0x6a }, { REG_AWB_CTRL01, 0x11 }, { REG_AWB_CTRL02, 0x92 }, { REG_AWB_CTRL03, 0x21 }, { REG_AWB_CTRL04, 0xe1 }, { REG_AWB_LOCAL_LIMIT, 0x01 }, { 0x503c, 0x05 }, { 0x503d, 0x08 }, { 0x503e, 0x08 }, { 0x503f, 0x64 }, { 0x5040, 0x58 }, { 0x5041, 0x2a }, { 0x5042, 0xc5 }, { 0x5043, 0x2e }, { 0x5044, 0x3a }, { 0x5045, 0x3c }, { 0x5046, 0x44 }, { 0x5047, 0xf8 }, { 0x5048, 0x08 }, { REG_AWB_CTRL12, 0x70 }, { REG_AWB_CTRL13, 0xf0 }, { REG_AWB_CTRL14, 0xf0 }, { REG_LENC_RED_X0_H, 0x03 }, { REG_LENC_RED_X0_L, 0x20 }, { REG_LENC_RED_Y0_H, 0x02 }, { REG_LENC_RED_Y0_L, 0x5c }, { REG_LENC_RED_A1, 0x48 }, { REG_LENC_RED_B1, 0x00 }, { REG_LENC_RED_A2_B2, 0x66 }, { REG_LENC_GREEN_X0_H, 0x03 }, { REG_LENC_GREEN_X0_L, 0x30 }, { REG_LENC_GREEN_Y0_H, 0x02 }, { REG_LENC_GREEN_Y0_L, 0x7c }, { REG_LENC_GREEN_A1, 0x40 }, { REG_LENC_GREEN_B1, 0x00 }, { REG_LENC_GREEN_A2_B2, 0x66 }, { REG_LENC_BLUE_X0_H, 0x03 }, { REG_LENC_BLUE_X0_L, 0x10 }, { REG_LENC_BLUE_Y0_H, 0x02 }, { REG_LENC_BLUE_Y0_L, 0x7c }, { REG_LENC_BLUE_A1, 0x3a }, { REG_LENC_BLUE_B1, 0x00 }, { REG_LENC_BLUE_A2_B2, 0x66 }, { REG_CIP_CTRL00, 0x44 }, { REG_SHARPENMT_THRESH1, 0x08 }, { REG_SHARPENMT_THRESH2, 0x10 }, { REG_SHARPENMT_OFFSET1, 0x12 }, { REG_SHARPENMT_OFFSET2, 0x02 }, { REG_SHARPEN_THRESH1, 0x08 }, { REG_SHARPEN_THRESH2, 0x10 }, { REG_CIP_CTRL01, 0xa6 }, { REG_DENOISE_THRESH1, 0x08 }, { REG_DENOISE_THRESH2, 0x10 }, { REG_DENOISE_OFFSET1, 0x04 }, { REG_DENOISE_OFFSET2, 0x12 }, { 0x507e, 0x40 }, { 0x507f, 0x20 }, { 0x507b, 0x02 }, { REG_CMX_MISC_CTRL, 0x01 }, { 0x5084, 0x0c }, { 0x5085, 0x3e }, { 0x5005, 0x80 }, { 0x3a0f, 0x30 }, { 0x3a10, 0x28 }, { 0x3a1b, 0x32 }, { 0x3a1e, 0x26 }, { 0x3a11, 0x60 }, { 0x3a1f, 0x14 }, { 0x5060, 0x69 }, { 0x5061, 0x7d }, { 0x5062, 0x7d }, { 0x5063, 0x69 }, { REG_NULL, 0x00 }, }; /* 1280X720 720p */ static struct sensor_register ov2659_720p[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0xa0 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0xf0 }, { REG_TIMING_HW_H, 0x05 }, { REG_TIMING_HW_L, 0xbf }, { REG_TIMING_VH_H, 0x03 }, { REG_TIMING_VH_L, 0xcb }, { REG_TIMING_DVPHO_H, 0x05 }, { REG_TIMING_DVPHO_L, 0x00 }, { REG_TIMING_DVPVO_H, 0x02 }, { REG_TIMING_DVPVO_L, 0xd0 }, { REG_TIMING_HTS_H, 0x06 }, { REG_TIMING_HTS_L, 0x4c }, { REG_TIMING_VTS_H, 0x02 }, { REG_TIMING_VTS_L, 0xe8 }, { REG_TIMING_HOFFS_L, 0x10 }, { REG_TIMING_VOFFS_L, 0x06 }, { REG_TIMING_XINC, 0x11 }, { REG_TIMING_YINC, 0x11 }, { REG_TIMING_VERT_FORMAT, 0x80 }, { REG_TIMING_HORIZ_FORMAT, 0x00 }, { 0x370a, 0x12 }, { 0x3a03, 0xe8 }, { 0x3a09, 0x6f }, { 0x3a0b, 0x5d }, { 0x3a15, 0x9a }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0x80 }, { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; /* 1600X1200 UXGA */ static struct sensor_register ov2659_uxga[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xbb }, { REG_TIMING_DVPHO_H, 0x06 }, { REG_TIMING_DVPHO_L, 0x40 }, { REG_TIMING_DVPVO_H, 0x04 }, { 
REG_TIMING_DVPVO_L, 0xb0 }, { REG_TIMING_HTS_H, 0x07 }, { REG_TIMING_HTS_L, 0x9f }, { REG_TIMING_VTS_H, 0x04 }, { REG_TIMING_VTS_L, 0xd0 }, { REG_TIMING_HOFFS_L, 0x10 }, { REG_TIMING_VOFFS_L, 0x06 }, { REG_TIMING_XINC, 0x11 }, { REG_TIMING_YINC, 0x11 }, { 0x3a02, 0x04 }, { 0x3a03, 0xd0 }, { 0x3a08, 0x00 }, { 0x3a09, 0xb8 }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x9a }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x04 }, { 0x3a15, 0x50 }, { 0x3623, 0x00 }, { 0x3634, 0x44 }, { 0x3701, 0x44 }, { 0x3702, 0x30 }, { 0x3703, 0x48 }, { 0x3704, 0x48 }, { 0x3705, 0x18 }, { REG_TIMING_VERT_FORMAT, 0x80 }, { REG_TIMING_HORIZ_FORMAT, 0x00 }, { 0x370a, 0x12 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0x80 }, { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; /* 1280X1024 SXGA */ static struct sensor_register ov2659_sxga[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xb7 }, { REG_TIMING_DVPHO_H, 0x05 }, { REG_TIMING_DVPHO_L, 0x00 }, { REG_TIMING_DVPVO_H, 0x04 }, { REG_TIMING_DVPVO_L, 0x00 }, { REG_TIMING_HTS_H, 0x07 }, { REG_TIMING_HTS_L, 0x9c }, { REG_TIMING_VTS_H, 0x04 }, { REG_TIMING_VTS_L, 0xd0 }, { REG_TIMING_HOFFS_L, 0x10 }, { REG_TIMING_VOFFS_L, 0x06 }, { REG_TIMING_XINC, 0x11 }, { REG_TIMING_YINC, 0x11 }, { 0x3a02, 0x02 }, { 0x3a03, 0x68 }, { 0x3a08, 0x00 }, { 0x3a09, 0x5c }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x4d }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x02 }, { 0x3a15, 0x28 }, { 0x3623, 0x00 }, { 0x3634, 0x76 }, { 0x3701, 0x44 }, { 0x3702, 0x18 }, { 0x3703, 0x24 }, { 0x3704, 0x24 }, { 0x3705, 0x0c }, { REG_TIMING_VERT_FORMAT, 0x80 }, { REG_TIMING_HORIZ_FORMAT, 0x00 }, { 0x370a, 0x52 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0x80 }, { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; /* 1024X768 SXGA */ static struct sensor_register ov2659_xga[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xb7 }, { REG_TIMING_DVPHO_H, 0x04 }, { REG_TIMING_DVPHO_L, 0x00 }, { REG_TIMING_DVPVO_H, 0x03 }, { REG_TIMING_DVPVO_L, 0x00 }, { REG_TIMING_HTS_H, 0x07 }, { REG_TIMING_HTS_L, 0x9c }, { REG_TIMING_VTS_H, 0x04 }, { REG_TIMING_VTS_L, 0xd0 }, { REG_TIMING_HOFFS_L, 0x10 }, { REG_TIMING_VOFFS_L, 0x06 }, { REG_TIMING_XINC, 0x11 }, { REG_TIMING_YINC, 0x11 }, { 0x3a02, 0x02 }, { 0x3a03, 0x68 }, { 0x3a08, 0x00 }, { 0x3a09, 0x5c }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x4d }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x02 }, { 0x3a15, 0x28 }, { 0x3623, 0x00 }, { 0x3634, 0x76 }, { 0x3701, 0x44 }, { 0x3702, 0x18 }, { 0x3703, 0x24 }, { 0x3704, 0x24 }, { 0x3705, 0x0c }, { REG_TIMING_VERT_FORMAT, 0x80 }, { REG_TIMING_HORIZ_FORMAT, 0x00 }, { 0x370a, 0x52 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0x80 }, { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; /* 800X600 SVGA */ static struct sensor_register ov2659_svga[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xb7 }, { REG_TIMING_DVPHO_H, 0x03 }, { REG_TIMING_DVPHO_L, 0x20 }, { REG_TIMING_DVPVO_H, 0x02 }, { REG_TIMING_DVPVO_L, 0x58 }, { REG_TIMING_HTS_H, 0x05 }, { REG_TIMING_HTS_L, 0x14 }, { REG_TIMING_VTS_H, 0x02 }, { REG_TIMING_VTS_L, 
0x68 }, { REG_TIMING_HOFFS_L, 0x08 }, { REG_TIMING_VOFFS_L, 0x02 }, { REG_TIMING_XINC, 0x31 }, { REG_TIMING_YINC, 0x31 }, { 0x3a02, 0x02 }, { 0x3a03, 0x68 }, { 0x3a08, 0x00 }, { 0x3a09, 0x5c }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x4d }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x02 }, { 0x3a15, 0x28 }, { 0x3623, 0x00 }, { 0x3634, 0x76 }, { 0x3701, 0x44 }, { 0x3702, 0x18 }, { 0x3703, 0x24 }, { 0x3704, 0x24 }, { 0x3705, 0x0c }, { REG_TIMING_VERT_FORMAT, 0x81 }, { REG_TIMING_HORIZ_FORMAT, 0x01 }, { 0x370a, 0x52 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0x80 }, { REG_ISP_CTRL02, 0x00 }, { REG_NULL, 0x00 }, }; /* 640X480 VGA */ static struct sensor_register ov2659_vga[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xb7 }, { REG_TIMING_DVPHO_H, 0x02 }, { REG_TIMING_DVPHO_L, 0x80 }, { REG_TIMING_DVPVO_H, 0x01 }, { REG_TIMING_DVPVO_L, 0xe0 }, { REG_TIMING_HTS_H, 0x05 }, { REG_TIMING_HTS_L, 0x14 }, { REG_TIMING_VTS_H, 0x02 }, { REG_TIMING_VTS_L, 0x68 }, { REG_TIMING_HOFFS_L, 0x08 }, { REG_TIMING_VOFFS_L, 0x02 }, { REG_TIMING_XINC, 0x31 }, { REG_TIMING_YINC, 0x31 }, { 0x3a02, 0x02 }, { 0x3a03, 0x68 }, { 0x3a08, 0x00 }, { 0x3a09, 0x5c }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x4d }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x02 }, { 0x3a15, 0x28 }, { 0x3623, 0x00 }, { 0x3634, 0x76 }, { 0x3701, 0x44 }, { 0x3702, 0x18 }, { 0x3703, 0x24 }, { 0x3704, 0x24 }, { 0x3705, 0x0c }, { REG_TIMING_VERT_FORMAT, 0x81 }, { REG_TIMING_HORIZ_FORMAT, 0x01 }, { 0x370a, 0x52 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0xa0 }, { REG_ISP_CTRL02, 0x10 }, { REG_NULL, 0x00 }, }; /* 320X240 QVGA */ static struct sensor_register ov2659_qvga[] = { { REG_TIMING_HS_H, 0x00 }, { REG_TIMING_HS_L, 0x00 }, { REG_TIMING_VS_H, 0x00 }, { REG_TIMING_VS_L, 0x00 }, { REG_TIMING_HW_H, 0x06 }, { REG_TIMING_HW_L, 0x5f }, { REG_TIMING_VH_H, 0x04 }, { REG_TIMING_VH_L, 0xb7 }, { REG_TIMING_DVPHO_H, 0x01 }, { REG_TIMING_DVPHO_L, 0x40 }, { REG_TIMING_DVPVO_H, 0x00 }, { REG_TIMING_DVPVO_L, 0xf0 }, { REG_TIMING_HTS_H, 0x05 }, { REG_TIMING_HTS_L, 0x14 }, { REG_TIMING_VTS_H, 0x02 }, { REG_TIMING_VTS_L, 0x68 }, { REG_TIMING_HOFFS_L, 0x08 }, { REG_TIMING_VOFFS_L, 0x02 }, { REG_TIMING_XINC, 0x31 }, { REG_TIMING_YINC, 0x31 }, { 0x3a02, 0x02 }, { 0x3a03, 0x68 }, { 0x3a08, 0x00 }, { 0x3a09, 0x5c }, { 0x3a0a, 0x00 }, { 0x3a0b, 0x4d }, { 0x3a0d, 0x08 }, { 0x3a0e, 0x06 }, { 0x3a14, 0x02 }, { 0x3a15, 0x28 }, { 0x3623, 0x00 }, { 0x3634, 0x76 }, { 0x3701, 0x44 }, { 0x3702, 0x18 }, { 0x3703, 0x24 }, { 0x3704, 0x24 }, { 0x3705, 0x0c }, { REG_TIMING_VERT_FORMAT, 0x81 }, { REG_TIMING_HORIZ_FORMAT, 0x01 }, { 0x370a, 0x52 }, { REG_VFIFO_READ_START_H, 0x00 }, { REG_VFIFO_READ_START_L, 0xa0 }, { REG_ISP_CTRL02, 0x10 }, { REG_NULL, 0x00 }, }; static const struct pll_ctrl_reg ctrl3[] = { { 1, 0x00 }, { 2, 0x02 }, { 3, 0x03 }, { 4, 0x06 }, { 6, 0x0d }, { 8, 0x0e }, { 12, 0x0f }, { 16, 0x12 }, { 24, 0x13 }, { 32, 0x16 }, { 48, 0x1b }, { 64, 0x1e }, { 96, 0x1f }, { 0, 0x00 }, }; static const struct pll_ctrl_reg ctrl1[] = { { 2, 0x10 }, { 4, 0x20 }, { 6, 0x30 }, { 8, 0x40 }, { 10, 0x50 }, { 12, 0x60 }, { 14, 0x70 }, { 16, 0x80 }, { 18, 0x90 }, { 20, 0xa0 }, { 22, 0xb0 }, { 24, 0xc0 }, { 26, 0xd0 }, { 28, 0xe0 }, { 30, 0xf0 }, { 0, 0x00 }, }; static const struct ov2659_framesize ov2659_framesizes[] = { { /* QVGA */ .width = 320, .height = 240, .regs = ov2659_qvga, .max_exp_lines = 
248, }, { /* VGA */ .width = 640, .height = 480, .regs = ov2659_vga, .max_exp_lines = 498, }, { /* SVGA */ .width = 800, .height = 600, .regs = ov2659_svga, .max_exp_lines = 498, }, { /* XGA */ .width = 1024, .height = 768, .regs = ov2659_xga, .max_exp_lines = 498, }, { /* 720P */ .width = 1280, .height = 720, .regs = ov2659_720p, .max_exp_lines = 498, }, { /* SXGA */ .width = 1280, .height = 1024, .regs = ov2659_sxga, .max_exp_lines = 1048, }, { /* UXGA */ .width = 1600, .height = 1200, .regs = ov2659_uxga, .max_exp_lines = 498, }, }; /* YUV422 YUYV*/ static struct sensor_register ov2659_format_yuyv[] = { { REG_FORMAT_CTRL00, 0x30 }, { REG_NULL, 0x0 }, }; /* YUV422 UYVY */ static struct sensor_register ov2659_format_uyvy[] = { { REG_FORMAT_CTRL00, 0x32 }, { REG_NULL, 0x0 }, }; /* Raw Bayer BGGR */ static struct sensor_register ov2659_format_bggr[] = { { REG_FORMAT_CTRL00, 0x00 }, { REG_NULL, 0x0 }, }; /* RGB565 */ static struct sensor_register ov2659_format_rgb565[] = { { REG_FORMAT_CTRL00, 0x60 }, { REG_NULL, 0x0 }, }; static const struct ov2659_pixfmt ov2659_formats[] = { { .code = MEDIA_BUS_FMT_YUYV8_2X8, .format_ctrl_regs = ov2659_format_yuyv, }, { .code = MEDIA_BUS_FMT_UYVY8_2X8, .format_ctrl_regs = ov2659_format_uyvy, }, { .code = MEDIA_BUS_FMT_RGB565_2X8_BE, .format_ctrl_regs = ov2659_format_rgb565, }, { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .format_ctrl_regs = ov2659_format_bggr, }, }; static inline struct ov2659 *to_ov2659(struct v4l2_subdev *sd) { return container_of(sd, struct ov2659, sd); } /* sensor register write */ static int ov2659_write(struct i2c_client *client, u16 reg, u8 val) { struct i2c_msg msg; u8 buf[3]; int ret; buf[0] = reg >> 8; buf[1] = reg & 0xFF; buf[2] = val; msg.addr = client->addr; msg.flags = client->flags; msg.buf = buf; msg.len = sizeof(buf); ret = i2c_transfer(client->adapter, &msg, 1); if (ret >= 0) return 0; dev_dbg(&client->dev, "ov2659 write reg(0x%x val:0x%x) failed !\n", reg, val); return ret; } /* sensor register read */ static int ov2659_read(struct i2c_client *client, u16 reg, u8 *val) { struct i2c_msg msg[2]; u8 buf[2]; int ret; buf[0] = reg >> 8; buf[1] = reg & 0xFF; msg[0].addr = client->addr; msg[0].flags = client->flags; msg[0].buf = buf; msg[0].len = sizeof(buf); msg[1].addr = client->addr; msg[1].flags = client->flags | I2C_M_RD; msg[1].buf = buf; msg[1].len = 1; ret = i2c_transfer(client->adapter, msg, 2); if (ret >= 0) { *val = buf[0]; return 0; } dev_dbg(&client->dev, "ov2659 read reg(0x%x val:0x%x) failed !\n", reg, *val); return ret; } static int ov2659_write_array(struct i2c_client *client, const struct sensor_register *regs) { int i, ret = 0; for (i = 0; ret == 0 && regs[i].addr; i++) ret = ov2659_write(client, regs[i].addr, regs[i].value); return ret; } static void ov2659_pll_calc_params(struct ov2659 *ov2659) { const struct ov2659_platform_data *pdata = ov2659->pdata; u8 ctrl1_reg = 0, ctrl2_reg = 0, ctrl3_reg = 0; struct i2c_client *client = ov2659->client; unsigned int desired = pdata->link_frequency; u32 prediv, postdiv, mult; u32 bestdelta = -1; u32 delta, actual; int i, j; for (i = 0; ctrl1[i].div != 0; i++) { postdiv = ctrl1[i].div; for (j = 0; ctrl3[j].div != 0; j++) { prediv = ctrl3[j].div; for (mult = 1; mult <= 63; mult++) { actual = ov2659->xvclk_frequency; actual *= mult; actual /= prediv; actual /= postdiv; delta = actual - desired; delta = abs(delta); if ((delta < bestdelta) || (bestdelta == -1)) { bestdelta = delta; ctrl1_reg = ctrl1[i].reg; ctrl2_reg = mult; ctrl3_reg = ctrl3[j].reg; } } } } ov2659->pll.ctrl1 = 
ctrl1_reg; ov2659->pll.ctrl2 = ctrl2_reg; ov2659->pll.ctrl3 = ctrl3_reg; dev_dbg(&client->dev, "Actual reg config: ctrl1_reg: %02x ctrl2_reg: %02x ctrl3_reg: %02x\n", ctrl1_reg, ctrl2_reg, ctrl3_reg); } static int ov2659_set_pixel_clock(struct ov2659 *ov2659) { struct i2c_client *client = ov2659->client; struct sensor_register pll_regs[] = { {REG_SC_PLL_CTRL1, ov2659->pll.ctrl1}, {REG_SC_PLL_CTRL2, ov2659->pll.ctrl2}, {REG_SC_PLL_CTRL3, ov2659->pll.ctrl3}, {REG_NULL, 0x00}, }; dev_dbg(&client->dev, "%s\n", __func__); return ov2659_write_array(client, pll_regs); }; static void ov2659_get_default_format(struct v4l2_mbus_framefmt *format) { format->width = ov2659_framesizes[2].width; format->height = ov2659_framesizes[2].height; format->colorspace = V4L2_COLORSPACE_SRGB; format->code = ov2659_formats[0].code; format->field = V4L2_FIELD_NONE; } static void ov2659_set_streaming(struct ov2659 *ov2659, int on) { struct i2c_client *client = ov2659->client; int ret; on = !!on; dev_dbg(&client->dev, "%s: on: %d\n", __func__, on); ret = ov2659_write(client, REG_SOFTWARE_STANDBY, on); if (ret) dev_err(&client->dev, "ov2659 soft standby failed\n"); } static int ov2659_init(struct v4l2_subdev *sd, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); return ov2659_write_array(client, ov2659_init_regs); } /* * V4L2 subdev video and pad level operations */ static int ov2659_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_mbus_code_enum *code) { struct i2c_client *client = v4l2_get_subdevdata(sd); dev_dbg(&client->dev, "%s:\n", __func__); if (code->index >= ARRAY_SIZE(ov2659_formats)) return -EINVAL; code->code = ov2659_formats[code->index].code; return 0; } static int ov2659_enum_frame_sizes(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_frame_size_enum *fse) { struct i2c_client *client = v4l2_get_subdevdata(sd); int i = ARRAY_SIZE(ov2659_formats); dev_dbg(&client->dev, "%s:\n", __func__); if (fse->index >= ARRAY_SIZE(ov2659_framesizes)) return -EINVAL; while (--i) if (fse->code == ov2659_formats[i].code) break; fse->code = ov2659_formats[i].code; fse->min_width = ov2659_framesizes[fse->index].width; fse->max_width = fse->min_width; fse->max_height = ov2659_framesizes[fse->index].height; fse->min_height = fse->max_height; return 0; } static int ov2659_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov2659 *ov2659 = to_ov2659(sd); dev_dbg(&client->dev, "ov2659_get_fmt\n"); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API struct v4l2_mbus_framefmt *mf; mf = v4l2_subdev_get_try_format(sd, cfg, 0); mutex_lock(&ov2659->lock); fmt->format = *mf; mutex_unlock(&ov2659->lock); return 0; #else return -EINVAL; #endif } mutex_lock(&ov2659->lock); fmt->format = ov2659->format; mutex_unlock(&ov2659->lock); dev_dbg(&client->dev, "ov2659_get_fmt: %x %dx%d\n", ov2659->format.code, ov2659->format.width, ov2659->format.height); return 0; } static void __ov2659_try_frame_size(struct v4l2_mbus_framefmt *mf, const struct ov2659_framesize **size) { const struct ov2659_framesize *fsize = &ov2659_framesizes[0]; const struct ov2659_framesize *match = NULL; int i = ARRAY_SIZE(ov2659_framesizes); unsigned int min_err = UINT_MAX; while (i--) { int err = abs(fsize->width - mf->width) + abs(fsize->height - mf->height); if ((err < min_err) && (fsize->regs[0].addr)) { min_err = err; match = fsize; } fsize++; 
} if (!match) match = &ov2659_framesizes[2]; mf->width = match->width; mf->height = match->height; if (size) *size = match; } static int ov2659_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *fmt) { struct i2c_client *client = v4l2_get_subdevdata(sd); int index = ARRAY_SIZE(ov2659_formats); struct v4l2_mbus_framefmt *mf = &fmt->format; const struct ov2659_framesize *size = NULL; struct ov2659 *ov2659 = to_ov2659(sd); int ret = 0; dev_dbg(&client->dev, "ov2659_set_fmt\n"); __ov2659_try_frame_size(mf, &size); while (--index >= 0) if (ov2659_formats[index].code == mf->code) break; if (index < 0) { index = 0; mf->code = ov2659_formats[index].code; } mf->colorspace = V4L2_COLORSPACE_SRGB; mf->field = V4L2_FIELD_NONE; mutex_lock(&ov2659->lock); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad); *mf = fmt->format; #endif } else { s64 val; if (ov2659->streaming) { mutex_unlock(&ov2659->lock); return -EBUSY; } ov2659->frame_size = size; ov2659->format = fmt->format; ov2659->format_ctrl_regs = ov2659_formats[index].format_ctrl_regs; if (ov2659->format.code != MEDIA_BUS_FMT_SBGGR8_1X8) val = ov2659->pdata->link_frequency / 2; else val = ov2659->pdata->link_frequency; ret = v4l2_ctrl_s_ctrl_int64(ov2659->link_frequency, val); if (ret < 0) dev_warn(&client->dev, "failed to set link_frequency rate (%d)\n", ret); } mutex_unlock(&ov2659->lock); return ret; } static int ov2659_set_frame_size(struct ov2659 *ov2659) { struct i2c_client *client = ov2659->client; dev_dbg(&client->dev, "%s\n", __func__); return ov2659_write_array(ov2659->client, ov2659->frame_size->regs); } static int ov2659_set_format(struct ov2659 *ov2659) { struct i2c_client *client = ov2659->client; dev_dbg(&client->dev, "%s\n", __func__); return ov2659_write_array(ov2659->client, ov2659->format_ctrl_regs); } static int ov2659_s_stream(struct v4l2_subdev *sd, int on) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov2659 *ov2659 = to_ov2659(sd); int ret = 0; dev_dbg(&client->dev, "%s: on: %d\n", __func__, on); mutex_lock(&ov2659->lock); on = !!on; if (ov2659->streaming == on) goto unlock; if (!on) { /* Stop Streaming Sequence */ ov2659_set_streaming(ov2659, 0); ov2659->streaming = on; pm_runtime_put(&client->dev); goto unlock; } ret = pm_runtime_get_sync(&client->dev); if (ret < 0) { pm_runtime_put_noidle(&client->dev); goto unlock; } ret = ov2659_init(sd, 0); if (!ret) ret = ov2659_set_pixel_clock(ov2659); if (!ret) ret = ov2659_set_frame_size(ov2659); if (!ret) ret = ov2659_set_format(ov2659); if (!ret) { ov2659_set_streaming(ov2659, 1); ov2659->streaming = on; } unlock: mutex_unlock(&ov2659->lock); return ret; } static int ov2659_set_test_pattern(struct ov2659 *ov2659, int value) { struct i2c_client *client = v4l2_get_subdevdata(&ov2659->sd); int ret; u8 val; ret = ov2659_read(client, REG_PRE_ISP_CTRL00, &val); if (ret < 0) return ret; switch (value) { case 0: val &= ~TEST_PATTERN_ENABLE; break; case 1: val &= VERTICAL_COLOR_BAR_MASK; val |= TEST_PATTERN_ENABLE; break; } return ov2659_write(client, REG_PRE_ISP_CTRL00, val); } static int ov2659_s_ctrl(struct v4l2_ctrl *ctrl) { struct ov2659 *ov2659 = container_of(ctrl->handler, struct ov2659, ctrls); struct i2c_client *client = ov2659->client; /* V4L2 controls values will be applied only when power is already up */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_TEST_PATTERN: return 
ov2659_set_test_pattern(ov2659, ctrl->val); } pm_runtime_put(&client->dev); return 0; } static const struct v4l2_ctrl_ops ov2659_ctrl_ops = { .s_ctrl = ov2659_s_ctrl, }; static const char * const ov2659_test_pattern_menu[] = { "Disabled", "Vertical Color Bars", }; static int ov2659_power_off(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov2659 *ov2659 = to_ov2659(sd); dev_dbg(&client->dev, "%s:\n", __func__); gpiod_set_value(ov2659->pwdn_gpio, 1); return 0; } static int ov2659_power_on(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov2659 *ov2659 = to_ov2659(sd); dev_dbg(&client->dev, "%s:\n", __func__); gpiod_set_value(ov2659->pwdn_gpio, 0); if (ov2659->resetb_gpio) { gpiod_set_value(ov2659->resetb_gpio, 1); usleep_range(500, 1000); gpiod_set_value(ov2659->resetb_gpio, 0); usleep_range(3000, 5000); } return 0; } /* ----------------------------------------------------------------------------- * V4L2 subdev internal operations */ #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(sd, fh->pad, 0); dev_dbg(&client->dev, "%s:\n", __func__); ov2659_get_default_format(format); return 0; } #endif static const struct v4l2_subdev_core_ops ov2659_subdev_core_ops = { .log_status = v4l2_ctrl_subdev_log_status, .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const struct v4l2_subdev_video_ops ov2659_subdev_video_ops = { .s_stream = ov2659_s_stream, }; static const struct v4l2_subdev_pad_ops ov2659_subdev_pad_ops = { .enum_mbus_code = ov2659_enum_mbus_code, .enum_frame_size = ov2659_enum_frame_sizes, .get_fmt = ov2659_get_fmt, .set_fmt = ov2659_set_fmt, }; #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API static const struct v4l2_subdev_ops ov2659_subdev_ops = { .core = &ov2659_subdev_core_ops, .video = &ov2659_subdev_video_ops, .pad = &ov2659_subdev_pad_ops, }; static const struct v4l2_subdev_internal_ops ov2659_subdev_internal_ops = { .open = ov2659_open, }; #endif static int ov2659_detect(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 pid = 0; u8 ver = 0; int ret; dev_dbg(&client->dev, "%s:\n", __func__); ret = ov2659_write(client, REG_SOFTWARE_RESET, 0x01); if (ret != 0) { dev_err(&client->dev, "Sensor soft reset failed\n"); return -ENODEV; } usleep_range(1000, 2000); /* Check sensor revision */ ret = ov2659_read(client, REG_SC_CHIP_ID_H, &pid); if (!ret) ret = ov2659_read(client, REG_SC_CHIP_ID_L, &ver); if (!ret) { unsigned short id; id = OV265X_ID(pid, ver); if (id != OV2659_ID) { dev_err(&client->dev, "Sensor detection failed (%04X, %d)\n", id, ret); ret = -ENODEV; } else { dev_info(&client->dev, "Found OV%04X sensor\n", id); } } return ret; } static struct ov2659_platform_data * ov2659_get_pdata(struct i2c_client *client) { struct ov2659_platform_data *pdata; struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; struct device_node *endpoint; int ret; if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node) return client->dev.platform_data; endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL); if (!endpoint) return NULL; ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(endpoint), &bus_cfg); if (ret) { pdata = NULL; goto done; } pdata = 
devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) goto done; if (!bus_cfg.nr_of_link_frequencies) { dev_err(&client->dev, "link-frequencies property not found or too many\n"); pdata = NULL; goto done; } pdata->link_frequency = bus_cfg.link_frequencies[0]; done: v4l2_fwnode_endpoint_free(&bus_cfg); of_node_put(endpoint); return pdata; } static int ov2659_probe(struct i2c_client *client) { const struct ov2659_platform_data *pdata = ov2659_get_pdata(client); struct v4l2_subdev *sd; struct ov2659 *ov2659; struct clk *clk; int ret; if (!pdata) { dev_err(&client->dev, "platform data not specified\n"); return -EINVAL; } ov2659 = devm_kzalloc(&client->dev, sizeof(*ov2659), GFP_KERNEL); if (!ov2659) return -ENOMEM; ov2659->pdata = pdata; ov2659->client = client; clk = devm_clk_get(&client->dev, "xvclk"); if (IS_ERR(clk)) return PTR_ERR(clk); ov2659->xvclk_frequency = clk_get_rate(clk); if (ov2659->xvclk_frequency < 6000000 || ov2659->xvclk_frequency > 27000000) return -EINVAL; /* Optional gpio don't fail if not present */ ov2659->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown", GPIOD_OUT_LOW); if (IS_ERR(ov2659->pwdn_gpio)) return PTR_ERR(ov2659->pwdn_gpio); /* Optional gpio don't fail if not present */ ov2659->resetb_gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ov2659->resetb_gpio)) return PTR_ERR(ov2659->resetb_gpio); v4l2_ctrl_handler_init(&ov2659->ctrls, 2); ov2659->link_frequency = v4l2_ctrl_new_std(&ov2659->ctrls, &ov2659_ctrl_ops, V4L2_CID_PIXEL_RATE, pdata->link_frequency / 2, pdata->link_frequency, 1, pdata->link_frequency); v4l2_ctrl_new_std_menu_items(&ov2659->ctrls, &ov2659_ctrl_ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(ov2659_test_pattern_menu) - 1, 0, 0, ov2659_test_pattern_menu); ov2659->sd.ctrl_handler = &ov2659->ctrls; if (ov2659->ctrls.error) { dev_err(&client->dev, "%s: control initialization error %d\n", __func__, ov2659->ctrls.error); return ov2659->ctrls.error; } sd = &ov2659->sd; client->flags |= I2C_CLIENT_SCCB; #ifdef CONFIG_VIDEO_V4L2_SUBDEV_API v4l2_i2c_subdev_init(sd, client, &ov2659_subdev_ops); sd->internal_ops = &ov2659_subdev_internal_ops; sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS; #endif #if defined(CONFIG_MEDIA_CONTROLLER) ov2659->pad.flags = MEDIA_PAD_FL_SOURCE; sd->entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&sd->entity, 1, &ov2659->pad); if (ret < 0) { v4l2_ctrl_handler_free(&ov2659->ctrls); return ret; } #endif mutex_init(&ov2659->lock); ov2659_get_default_format(&ov2659->format); ov2659->frame_size = &ov2659_framesizes[2]; ov2659->format_ctrl_regs = ov2659_formats[0].format_ctrl_regs; ov2659_power_on(&client->dev); ret = ov2659_detect(sd); if (ret < 0) goto error; /* Calculate the PLL register value needed */ ov2659_pll_calc_params(ov2659); ret = v4l2_async_register_subdev(&ov2659->sd); if (ret) goto error; dev_info(&client->dev, "%s sensor driver registered !!\n", sd->name); pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; error: v4l2_ctrl_handler_free(&ov2659->ctrls); ov2659_power_off(&client->dev); media_entity_cleanup(&sd->entity); mutex_destroy(&ov2659->lock); return ret; } static int ov2659_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov2659 *ov2659 = to_ov2659(sd); v4l2_ctrl_handler_free(&ov2659->ctrls); v4l2_async_unregister_subdev(sd); media_entity_cleanup(&sd->entity); mutex_destroy(&ov2659->lock); 
pm_runtime_disable(&client->dev); if (!pm_runtime_status_suspended(&client->dev)) ov2659_power_off(&client->dev); pm_runtime_set_suspended(&client->dev); return 0; } static const struct dev_pm_ops ov2659_pm_ops = { SET_RUNTIME_PM_OPS(ov2659_power_off, ov2659_power_on, NULL) }; static const struct i2c_device_id ov2659_id[] = { { "ov2659", 0 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(i2c, ov2659_id); #if IS_ENABLED(CONFIG_OF) static const struct of_device_id ov2659_of_match[] = { { .compatible = "ovti,ov2659", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, ov2659_of_match); #endif static struct i2c_driver ov2659_i2c_driver = { .driver = { .name = DRIVER_NAME, .pm = &ov2659_pm_ops, .of_match_table = of_match_ptr(ov2659_of_match), }, .probe_new = ov2659_probe, .remove = ov2659_remove, .id_table = ov2659_id, }; module_i2c_driver(ov2659_i2c_driver); MODULE_AUTHOR("Benoit Parrot <[email protected]>"); MODULE_DESCRIPTION("OV2659 CMOS Image Sensor driver"); MODULE_LICENSE("GPL v2");
697622.c
/**************************************************************************/ /* */ /* Copyright (c) Microsoft Corporation. All rights reserved. */ /* */ /* This software is licensed under the Microsoft Software License */ /* Terms for Microsoft Azure RTOS. Full text of the license can be */ /* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */ /* and in the root directory of this software. */ /* */ /**************************************************************************/ /**************************************************************************/ /**************************************************************************/ /** */ /** GUIX Component */ /** */ /** Widget Management (Widget) */ /** */ /**************************************************************************/ #define GX_SOURCE_CODE /* Include necessary system files. */ #include "gx_api.h" #include "gx_canvas.h" #include "gx_system.h" /* Bring in externs for caller checking code. */ GX_CALLER_CHECKING_EXTERNS /**************************************************************************/ /* */ /* FUNCTION RELEASE */ /* */ /* _gxe_canvas_circle_draw PORTABLE C */ /* 6.0 */ /* AUTHOR */ /* */ /* Kenneth Maxwell, Microsoft Corporation */ /* */ /* DESCRIPTION */ /* */ /* This function checks for errors in canvas circle draw function call.*/ /* */ /* INPUT */ /* */ /* xcenter x-coord of center of circle */ /* ycenter y-coord of center of circle */ /* r Radius of circle */ /* */ /* OUTPUT */ /* */ /* status Completion status */ /* */ /* CALLS */ /* */ /* _gxe_canvas_circle_draw Actual canvas circle draw */ /* function */ /* */ /* CALLED BY */ /* */ /* Application Code */ /* */ /* RELEASE HISTORY */ /* */ /* DATE NAME DESCRIPTION */ /* */ /* 05-19-2020 Kenneth Maxwell Initial Version 6.0 */ /* */ /**************************************************************************/ UINT _gxe_canvas_circle_draw(INT xcenter, INT ycenter, UINT r) { UINT status; /* Check for appropriate caller. */ GX_INIT_AND_THREADS_CALLER_CHECKING /* Check for invalid value. */ if (r == 0) { return GX_INVALID_VALUE; } /* Check for invalid context. */ if (_gx_system_current_draw_context == GX_NULL) { return GX_INVALID_CONTEXT; } /* Call actual widget draw function. */ status = _gx_canvas_circle_draw(xcenter, ycenter, r); /* Return completion status. */ return(status); }
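A minimal, hypothetical usage sketch (not part of the original file): a custom window draw callback that goes through the normal GUIX drawing path and then calls the checked circle-draw entry point. The draw-function name, coordinates, brush style, and color resource IDs are illustrative placeholders taken from a typical GUIX application's resource file.

VOID my_window_draw(GX_WINDOW *window)
{
    /* GUIX establishes the draw context before invoking a widget draw
       callback; draw the default window background and children first. */
    gx_window_draw(window);

    /* Pick brush colors, style and width for the circle. */
    gx_context_brush_define(GX_COLOR_ID_CANVAS, GX_COLOR_ID_CANVAS,
                            GX_BRUSH_SOLID_FILL);
    gx_context_brush_width_set(2);

    /* Checked entry point: rejects r == 0 (GX_INVALID_VALUE) and a
       missing draw context (GX_INVALID_CONTEXT) before drawing. */
    gx_canvas_circle_draw(160, 120, 40);
}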
198129.c
/* boolean.c **/ /* * Copyrights: * * Copyright (c) 1996 Smithsonian Astrophysical Observatory * * Permission to use, copy, modify, distribute, and sell this * software and its documentation for any purpose is hereby * granted without fee, provided that the above copyright * notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting docu- * mentation, and that the name of the Smithsonian Astro- * physical Observatory not be used in advertising or publicity * pertaining to distribution of the software without specific, * written prior permission. The Smithsonian Astrophysical * Observatory makes no representations about the suitability * of this software for any purpose. It is provided "as is" * without express or implied warranty. * THE SMITHSONIAN ASTROPHYSICAL OBSERVATORY DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE SMITHSONIAN ASTROPHYSICAL OBSERVATORY BE * LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA * OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH * THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ #include <string.h> #include "pfile.h" static void *String2Bool(), *Real2Bool(), *Int2Bool(); static Converter Converters[] = { {StringType, BooleanType, String2Bool}, {RealType, BooleanType, Real2Bool}, {IntegerType, BooleanType, Int2Bool}, {0, 0, NULL} }; vtype booleanRec = { "b", Converters, VXSize, VXAcce, NULL }; static void *String2Bool(value, copy) char *value; bool *copy; { if ( value == NULL ) return NULL; if ( copy == NULL ) return NULL; if ( !strcmp(value, "n") ) *copy = 0; else if ( !strcmp(value, "no") ) *copy = 0; else if ( !strcmp(value, "y") ) *copy = 1; else if ( !strcmp(value, "ye") ) *copy = 1; else if ( !strcmp(value, "yes") ) *copy = 1; else { parerr = PARCANTCONVERT; return NULL; } return copy; } static void *Int2Bool(value, copy) int *value; bool *copy; { if ( value == NULL ) return NULL; if ( copy == NULL ) return NULL; *copy = *value != 0; return copy; } static void *Real2Bool(value, copy) real *value; bool *copy; { if ( value == NULL ) return NULL; if ( copy == NULL ) return NULL; *copy = *value != 0; return copy; }
984180.c
#define MASK ((1 << __CHAR_BIT__) - 1)

void foo(void)
{
	_Static_assert((char) -1 == (-1 & MASK), "plain char is not unsigned");
}

/*
 * check-name: funsigned-char
 * check-command: sparse -funsigned-char -Wno-decl $file
 */
942008.c
/* Busy-loop a few million iterations, then exit; written out with an
   explicit return type and return value so it is valid under C99 and later. */
int main(void)
{
	int i;

	for (i = 3000000; --i;) {
	}

	return 0;
}
217286.c
/*
 * @file
 * @brief Timer related functions
 */

/*
 * Copyright (c) 2018, Unisoc Communications Inc
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "os_adapter.h"
#include "sm.h"

void wifimgr_timeout(void *sival_ptr)
{
	struct wifimgr_delayed_work *dwork =
	    (struct wifimgr_delayed_work *)sival_ptr;

	wifimgr_queue_work(&dwork->wq, &dwork->work);
}

int wifimgr_timer_start(timer_t timerid, unsigned int sec)
{
	struct itimerspec todelay;
	int ret;

	/* Start, restart, or stop the timer */
	todelay.it_value.tv_sec = sec;
	todelay.it_value.tv_nsec = 0;
	todelay.it_interval.tv_sec = 0;
	todelay.it_interval.tv_nsec = 0;

	ret = timer_settime(timerid, 0, &todelay, NULL);
	if (ret == -1)
		ret = -errno;

	return ret;
}

int wifimgr_interval_timer_start(timer_t timerid, unsigned int sec,
				 unsigned int interval_sec)
{
	struct itimerspec todelay;
	int ret;

	/* Start, restart, or stop the timer */
	todelay.it_value.tv_sec = sec;
	todelay.it_value.tv_nsec = 0;
	todelay.it_interval.tv_sec = interval_sec;
	todelay.it_interval.tv_nsec = 0;

	ret = timer_settime(timerid, 0, &todelay, NULL);
	if (ret == -1)
		ret = -errno;

	return ret;
}

int wifimgr_timer_init(struct wifimgr_delayed_work *dwork, void *sighand,
		       timer_t *timerid)
{
	struct sigevent toevent;
	int ret;

	/* Create a POSIX timer to handle timeouts */
	toevent.sigev_value.sival_ptr = dwork;
	toevent.sigev_notify = SIGEV_SIGNAL;
	toevent.sigev_notify_function = sighand;
	toevent.sigev_notify_attributes = NULL;

	ret = timer_create(CLOCK_MONOTONIC, &toevent, timerid);
	if (ret == -1)
		ret = -errno;

	return ret;
}

int wifimgr_timer_release(timer_t timerid)
{
	int ret;

	/* Delete the POSIX timer */
	ret = timer_delete(timerid);
	if (ret == -1)
		ret = -errno;

	return ret;
}
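A minimal usage sketch (not part of the original file), assuming the wifimgr state machine has already wired the work-queue fields of the delayed-work item elsewhere; the work-item name, the 5 second one-shot timeout, and the example_ function name are placeholders.

static struct wifimgr_delayed_work scan_timeout_work;

int example_arm_scan_timeout(void)
{
	timer_t timerid;
	int ret;

	/* Bind the delayed work to a POSIX timer using the module's own
	 * expiry handler defined above. */
	ret = wifimgr_timer_init(&scan_timeout_work, (void *)wifimgr_timeout,
				 &timerid);
	if (ret < 0)
		return ret;

	/* One-shot timeout after 5 seconds (no repeat interval). */
	ret = wifimgr_timer_start(timerid, 5);
	if (ret < 0)
		wifimgr_timer_release(timerid);

	return ret;
}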
716953.c
/* _PDCLIB_bigint_add( _PDCLIB_bigint_t *, _PDCLIB_bigint_t const * ) This file is part of the Public Domain C Library (PDCLib). Permission is granted to use, modify, and / or redistribute at will. */ #ifndef REGTEST #include "pdclib/_PDCLIB_internal.h" #include <stdint.h> _PDCLIB_bigint_t * _PDCLIB_bigint_add( _PDCLIB_bigint_t * _PDCLIB_restrict lhs, _PDCLIB_bigint_t const * _PDCLIB_restrict rhs ) { _PDCLIB_bigint_t const * smaller; _PDCLIB_bigint_t const * wider; unsigned carry = 0; unsigned newcarry; int i; if ( lhs->size < rhs->size ) { smaller = lhs; wider = rhs; } else { smaller = rhs; wider = lhs; } /* Add up the bigints digit by digit, ensuring no overflow of 32-bit range */ for ( i = 0; i < smaller->size; ++i ) { uint_least32_t l = ( UINT32_C( 0xFFFFFFFF ) - lhs->data[i] ); uint_least32_t r = ( UINT32_C( 0xFFFFFFFF ) - rhs->data[i] ); if ( ( newcarry = ( l < rhs->data[i] ) || ( carry && ( l == rhs->data[i] ) ) ) ) { lhs->data[i] = UINT32_C( 0xFFFFFFFF ) - ( l + r ) - 1 + carry; } else { lhs->data[i] = lhs->data[i] + rhs->data[i] + carry; } carry = newcarry; } for ( ; i < wider->size; ++i ) { if ( ( newcarry = ( ( UINT32_C( 0xFFFFFFFF ) - wider->data[i] ) < carry ) ) ) { lhs->data[i] = 0; } else { lhs->data[i] = wider->data[i] + carry; } carry = newcarry; } /* Possible new digit */ if ( carry ) { lhs->data[i++] = carry; } lhs->size = i; return lhs; } #endif #ifdef TEST #include "_PDCLIB_test.h" int main( void ) { #ifndef REGTEST _PDCLIB_bigint_t lhs, rhs; _PDCLIB_bigint32( &lhs, 0 ); _PDCLIB_bigint64( &rhs, 0 ); _PDCLIB_bigint_add( &lhs, &rhs ); TESTCASE( lhs.size == 0 ); _PDCLIB_bigint32( &lhs, UINT32_C( 0x12345678 ) ); _PDCLIB_bigint_add( &lhs, &rhs ); TESTCASE( lhs.size == 1 ); TESTCASE( lhs.data[0] == UINT32_C( 0x12345678 ) ); _PDCLIB_bigint32( &rhs, UINT32_C( 0x11111111 ) ); _PDCLIB_bigint_add( &lhs, &rhs ); TESTCASE( lhs.size == 1 ); TESTCASE( lhs.data[0] == UINT32_C( 0x23456789 ) ); _PDCLIB_bigint64( &rhs, UINT64_C( 0x00000001DCBA9877 ) ); _PDCLIB_bigint_add( &lhs, &rhs ); TESTCASE( lhs.size == 2 ); TESTCASE( lhs.data[0] == 0 ); TESTCASE( lhs.data[1] == 2 ); _PDCLIB_bigint32( &lhs, UINT32_C( 0xFFFFFFFF ) ); _PDCLIB_bigint32( &rhs, UINT32_C( 0xFFFFFFFF ) ); _PDCLIB_bigint_add( &lhs, &rhs ); TESTCASE( lhs.size == 2 ); TESTCASE( lhs.data[0] == UINT32_C( 0xFFFFFFFE ) ); TESTCASE( lhs.data[1] == 1 ); #endif return TEST_RESULTS; } #endif
943380.c
/* SDL_mixer: An audio mixer library based on the SDL library Copyright (C) 1997-2022 Sam Lantinga <[email protected]> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ /* Functions to discard MP3 tags - * written by O.Sezer <[email protected]>, put into public domain. */ #include "SDL_stdinc.h" #include "SDL_error.h" #include "SDL_rwops.h" #include "mp3utils.h" #if defined(MUSIC_MP3_MAD) || defined(MUSIC_MP3_MPG123) /*********************** SDL_RW WITH BOOKKEEPING ************************/ int MP3_RWinit(struct mp3file_t *fil, SDL_RWops *src) { /* Don't use SDL_RWsize() here -- see SDL bug #5509 */ fil->src = src; fil->start = SDL_RWtell(src); fil->length = SDL_RWseek(src, 0, RW_SEEK_END) - fil->start; fil->pos = 0; if (fil->start < 0 || fil->length < 0) { return SDL_Error(SDL_EFSEEK); } SDL_RWseek(src, fil->start, RW_SEEK_SET); return 0; } size_t MP3_RWread(struct mp3file_t *fil, void *ptr, size_t size, size_t maxnum) { size_t remaining = (size_t)(fil->length - fil->pos); size_t ret; maxnum *= size; if (maxnum > remaining) maxnum = remaining; ret = SDL_RWread(fil->src, ptr, 1, maxnum); fil->pos += (Sint64)ret; return ret; } Sint64 MP3_RWseek(struct mp3file_t *fil, Sint64 offset, int whence) { Sint64 ret; switch (whence) { case RW_SEEK_CUR: offset += fil->pos; break; case RW_SEEK_END: offset += fil->length; break; } if (offset < 0) return -1; if (offset > fil->length) offset = fil->length; ret = SDL_RWseek(fil->src, fil->start + offset, RW_SEEK_SET); if (ret < 0) return ret; fil->pos = offset; return offset; } /*************************** TAG HANDLING: ******************************/ static SDL_INLINE SDL_bool is_id3v1(const unsigned char *data, long length) { /* http://id3.org/ID3v1 : 3 bytes "TAG" identifier and 125 bytes tag data */ if (length < 128 || SDL_memcmp(data,"TAG",3) != 0) { return SDL_FALSE; } return SDL_TRUE; } static SDL_bool is_id3v2(const unsigned char *data, size_t length) { /* ID3v2 header is 10 bytes: http://id3.org/id3v2.4.0-structure */ /* bytes 0-2: "ID3" identifier */ if (length < 10 || SDL_memcmp(data,"ID3",3) != 0) { return SDL_FALSE; } /* bytes 3-4: version num (major,revision), each byte always less than 0xff. */ if (data[3] == 0xff || data[4] == 0xff) { return SDL_FALSE; } /* bytes 6-9 are the ID3v2 tag size: a 32 bit 'synchsafe' integer, i.e. the * highest bit 7 in each byte zeroed. i.e.: 7 bit information in each byte -> * effectively a 28 bit value. 
*/ if (data[6] >= 0x80 || data[7] >= 0x80 || data[8] >= 0x80 || data[9] >= 0x80) { return SDL_FALSE; } return SDL_TRUE; } static long get_id3v2_len(const unsigned char *data, long length) { /* size is a 'synchsafe' integer (see above) */ long size = (long)((data[6]<<21) + (data[7]<<14) + (data[8]<<7) + data[9]); size += 10; /* header size */ /* ID3v2 header[5] is flags (bits 4-7 only, 0-3 are zero). * bit 4 set: footer is present (a copy of the header but * with "3DI" as ident.) */ if (data[5] & 0x10) { size += 10; /* footer size */ } /* optional padding (always zeroes) */ while (size < length && data[size] == 0) { ++size; } return size; } static SDL_bool is_apetag(const unsigned char *data, size_t length) { /* http://wiki.hydrogenaud.io/index.php?title=APEv2_specification * Header/footer is 32 bytes: bytes 0-7 ident, bytes 8-11 version, * bytes 12-17 size. bytes 24-31 are reserved: must be all zeroes. */ Uint32 v; if (length < 32 || SDL_memcmp(data,"APETAGEX",8) != 0) { return SDL_FALSE; } v = (Uint32)((data[11]<<24) | (data[10]<<16) | (data[9]<<8) | data[8]); /* version */ if (v != 2000U && v != 1000U) { return SDL_FALSE; } v = 0; /* reserved bits : */ if (SDL_memcmp(&data[24],&v,4) != 0 || SDL_memcmp(&data[28],&v,4) != 0) { return SDL_FALSE; } return SDL_TRUE; } static long get_ape_len(const unsigned char *data) { Uint32 flags, version; long size = (long)((data[15]<<24) | (data[14]<<16) | (data[13]<<8) | data[12]); version = (Uint32)((data[11]<<24) | (data[10]<<16) | (data[9]<<8) | data[8]); flags = (Uint32)((data[23]<<24) | (data[22]<<16) | (data[21]<<8) | data[20]); if (version == 2000U && (flags & (1U<<31))) size += 32; /* header present. */ return size; } static SDL_INLINE int is_lyrics3tag(const unsigned char *data, long length) { /* http://id3.org/Lyrics3 * http://id3.org/Lyrics3v2 */ if (length < 15) return 0; if (SDL_memcmp(data+6,"LYRICS200",9) == 0) return 2; /* v2 */ if (SDL_memcmp(data+6,"LYRICSEND",9) == 0) return 1; /* v1 */ return 0; } static long get_lyrics3v1_len(struct mp3file_t *m) { const char *p; long i, len; char buf[5104]; /* needs manual search: http://id3.org/Lyrics3 */ if (m->length < 20) return -1; len = (m->length > 5109)? 5109 : (long)m->length; MP3_RWseek(m, -len, RW_SEEK_END); MP3_RWread(m, buf, 1, (len -= 9)); /* exclude footer */ /* strstr() won't work here. */ for (i = len - 11, p = buf; i >= 0; --i, ++p) { if (SDL_memcmp(p, "LYRICSBEGIN", 11) == 0) break; } if (i < 0) return -1; return len - (long)(p - buf) + 9 /* footer */; } static SDL_INLINE long get_lyrics3v2_len(const unsigned char *data, long length) { /* 6 bytes before the end marker is size in decimal format - * does not include the 9 bytes end marker and size field. */ if (length != 6) return 0; return SDL_strtol((const char *)data, NULL, 10) + 15; } static SDL_INLINE SDL_bool verify_lyrics3v2(const unsigned char *data, long length) { if (length < 11) return SDL_FALSE; if (SDL_memcmp(data,"LYRICSBEGIN",11) == 0) return SDL_TRUE; return SDL_FALSE; } #define MMTAG_PARANOID static SDL_bool is_musicmatch(const unsigned char *data, long length) { /* From docs/musicmatch.txt in id3lib: https://sourceforge.net/projects/id3lib/ Overall tag structure: +-----------------------------+ | Header | | (256 bytes, OPTIONAL) | +-----------------------------+ | Image extension (4 bytes) | +-----------------------------+ | Image binary | | (var. 
length >= 4 bytes) | +-----------------------------+ | Unused (4 bytes) | +-----------------------------+ | Version info (256 bytes) | +-----------------------------+ | Audio meta-data | | (var. length >= 7868 bytes) | +-----------------------------+ | Data offsets (20 bytes) | +-----------------------------+ | Footer (48 bytes) | +-----------------------------+ */ if (length < 48) return SDL_FALSE; /* sig: 19 bytes company name + 13 bytes space */ if (SDL_memcmp(data,"Brava Software Inc. ",32) != 0) { return SDL_FALSE; } /* 4 bytes version: x.xx */ if (!SDL_isdigit(data[32]) || data[33] != '.' || !SDL_isdigit(data[34]) ||!SDL_isdigit(data[35])) { return SDL_FALSE; } #ifdef MMTAG_PARANOID /* [36..47]: 12 bytes trailing space */ for (length = 36; length < 48; ++length) { if (data[length] != ' ') return SDL_FALSE; } #endif return SDL_TRUE; } static long get_musicmatch_len(struct mp3file_t *m) { const Sint32 metasizes[4] = { 7868, 7936, 8004, 8132 }; const unsigned char syncstr[10] = {'1','8','2','7','3','6','4','5',0,0}; unsigned char buf[256]; Sint32 i, j, imgext_ofs, version_ofs; long len; MP3_RWseek(m, -68, RW_SEEK_END); MP3_RWread(m, buf, 1, 20); imgext_ofs = (Sint32)((buf[3] <<24) | (buf[2] <<16) | (buf[1] <<8) | buf[0] ); version_ofs = (Sint32)((buf[15]<<24) | (buf[14]<<16) | (buf[13]<<8) | buf[12]); if (version_ofs <= imgext_ofs) return -1; if (version_ofs <= 0 || imgext_ofs <= 0) return -1; /* Try finding the version info section: * Because metadata section comes after it, and because metadata section * has different sizes across versions (format ver. <= 3.00: always 7868 * bytes), we can _not_ directly calculate using deltas from the offsets * section. */ for (i = 0; i < 4; ++i) { /* 48: footer, 20: offsets, 256: version info */ len = metasizes[i] + 48 + 20 + 256; if (m->length < len) return -1; MP3_RWseek(m, -len, RW_SEEK_END); MP3_RWread(m, buf, 1, 256); /* [0..9]: sync string, [30..255]: 0x20 */ #ifdef MMTAG_PARANOID for (j = 30; j < 256; ++j) { if (buf[j] != ' ') break; } if (j < 256) continue; #endif if (SDL_memcmp(buf, syncstr, 10) == 0) { break; } } if (i == 4) return -1; /* no luck. */ #ifdef MMTAG_PARANOID /* unused section: (4 bytes of 0x00) */ MP3_RWseek(m, -(len + 4), RW_SEEK_END); MP3_RWread(m, buf, 1, 4); j = 0; if (SDL_memcmp(buf, &j, 4) != 0) return -1; #endif len += (version_ofs - imgext_ofs); if (m->length < len) return -1; MP3_RWseek(m, -len, RW_SEEK_END); MP3_RWread(m, buf, 1, 8); j = (Sint32)((buf[7] <<24) | (buf[6] <<16) | (buf[5] <<8) | buf[4]); if (j < 0) return -1; /* verify image size: */ /* without this, we may land at a wrong place. */ if (j + 12 != version_ofs - imgext_ofs) return -1; /* try finding the optional header */ if (m->length < len + 256) return len; MP3_RWseek(m, -(len + 256), RW_SEEK_END); MP3_RWread(m, buf, 1, 256); /* [0..9]: sync string, [30..255]: 0x20 */ if (SDL_memcmp(buf, syncstr, 10) != 0) { return len; } #ifdef MMTAG_PARANOID for (j = 30; j < 256; ++j) { if (buf[j] != ' ') return len; } #endif return len + 256; /* header is present. */ } static int probe_id3v1(struct mp3file_t *fil, unsigned char *buf, int atend) { if (fil->length >= 128) { MP3_RWseek(fil, -128, RW_SEEK_END); if (MP3_RWread(fil, buf, 1, 128) != 128) return -1; if (is_id3v1(buf, 128)) { if (!atend) { /* possible false positive? */ if (is_musicmatch(buf + 128 - 48, 48) || is_apetag (buf + 128 - 32, 32) || is_lyrics3tag(buf + 128 - 15, 15)) { return 0; } } fil->length -= 128; return 1; /* FIXME: handle possible double-ID3v1 tags?? 
*/ } } return 0; } static int probe_mmtag(struct mp3file_t *fil, unsigned char *buf) { long len; if (fil->length >= 68) { MP3_RWseek(fil, -48, RW_SEEK_END); if (MP3_RWread(fil, buf, 1, 48) != 48) return -1; if (is_musicmatch(buf, 48)) { len = get_musicmatch_len(fil); if (len < 0) return -1; if (len >= fil->length) return -1; fil->length -= len; return 1; } } return 0; } static int probe_apetag(struct mp3file_t *fil, unsigned char *buf) { long len; if (fil->length >= 32) { MP3_RWseek(fil, -32, RW_SEEK_END); if (MP3_RWread(fil, buf, 1, 32) != 32) return -1; if (is_apetag(buf, 32)) { len = get_ape_len(buf); if (len >= fil->length) return -1; fil->length -= len; return 1; } } return 0; } static int probe_lyrics3(struct mp3file_t *fil, unsigned char *buf) { long len; if (fil->length >= 15) { MP3_RWseek(fil, -15, RW_SEEK_END); if (MP3_RWread(fil, buf, 1, 15) != 15) return -1; len = is_lyrics3tag(buf, 15); if (len == 2) { len = get_lyrics3v2_len(buf, 6); if (len >= fil->length) return -1; if (len < 15) return -1; MP3_RWseek(fil, -len, RW_SEEK_END); if (MP3_RWread(fil, buf, 1, 11) != 11) return -1; if (!verify_lyrics3v2(buf, 11)) return -1; fil->length -= len; return 1; } else if (len == 1) { len = get_lyrics3v1_len(fil); if (len < 0) return -1; fil->length -= len; return 1; } } return 0; } int mp3_skiptags(struct mp3file_t *fil, SDL_bool keep_id3v2) { unsigned char buf[128]; long len; size_t readsize; int c_id3, c_ape, c_lyr, c_mm; int rc = -1; /* MP3 standard has no metadata format, so everyone invented * their own thing, even with extensions, until ID3v2 became * dominant: Hence the impossible mess here. * * Note: I don't yet care about freaky broken mp3 files with * double tags. -- O.S. */ readsize = MP3_RWread(fil, buf, 1, 128); if (!readsize) goto fail; /* ID3v2 tag is at the start */ if (is_id3v2(buf, readsize)) { len = get_id3v2_len(buf, (long)readsize); if (len >= fil->length) goto fail; if (!keep_id3v2) { fil->start += len; fil->length -= len; } } /* APE tag _might_ be at the start (discouraged * but not forbidden, either.) read the header. */ else if (is_apetag(buf, readsize)) { len = get_ape_len(buf); if (len >= fil->length) goto fail; fil->start += len; fil->length -= len; } /* it's not impossible that _old_ MusicMatch tag * placing itself after ID3v1. */ if ((c_mm = probe_mmtag(fil, buf)) < 0) { goto fail; } /* ID3v1 tag is at the end */ if ((c_id3 = probe_id3v1(fil, buf, !c_mm)) < 0) { goto fail; } /* we do not know the order of ape or lyrics3 * or musicmatch tags, hence the loop here.. */ c_ape = 0; c_lyr = 0; for (;;) { if (!c_lyr) { /* care about mp3s with double Lyrics3 tags? */ if ((c_lyr = probe_lyrics3(fil, buf)) < 0) goto fail; if (c_lyr) continue; } if (!c_mm) { if ((c_mm = probe_mmtag(fil, buf)) < 0) goto fail; if (c_mm) continue; } if (!c_ape) { if ((c_ape = probe_apetag(fil, buf)) < 0) goto fail; if (c_ape) continue; } break; } /* for (;;) */ rc = (fil->length > 0)? 0 : -1; fail: MP3_RWseek(fil, 0, RW_SEEK_SET); return rc; } #endif /* MUSIC_MP3_??? */ /* vi: set ts=4 sw=4 expandtab: */
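A minimal sketch (not part of the original file) of how a decoder backend might use these helpers to clamp an SDL_RWops to the bare audio frames before handing it to the MP3 library proper. The function name is a placeholder, and mp3utils.h is assumed to declare struct mp3file_t and the MP3_RW* helpers, as in this source file.

#include "SDL_rwops.h"
#include "mp3utils.h"

static int open_clamped_mp3(const char *path, struct mp3file_t *fil)
{
    SDL_RWops *src = SDL_RWFromFile(path, "rb");

    if (!src) {
        return -1;
    }
    if (MP3_RWinit(fil, src) < 0) {
        SDL_RWclose(src);
        return -1;
    }
    /* Strip trailing ID3v1/APE/Lyrics3/MusicMatch tags; keep a leading
       ID3v2 tag so the caller can still read the metadata if it wants to. */
    if (mp3_skiptags(fil, SDL_TRUE) < 0) {
        SDL_RWclose(src);
        return -1;
    }
    /* fil->start and fil->length now describe the audio region only. */
    return 0;
}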
677364.c
/* * Copyright (c) 2003, 2007-14 Matteo Frigo * Copyright (c) 2003, 2007-14 Massachusetts Institute of Technology * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ /* This file was automatically generated --- DO NOT EDIT */ /* Generated on Tue Sep 14 10:45:49 EDT 2021 */ #include "dft/codelet-dft.h" #if defined(ARCH_PREFERS_FMA) || defined(ISA_EXTENSION_PREFERS_FMA) /* Generated by: ../../../genfft/gen_twiddle_c.native -fma -simd -compact -variables 4 -pipeline-latency 8 -n 6 -name t1bv_6 -include dft/simd/t1b.h -sign 1 */ /* * This function contains 23 FP additions, 18 FP multiplications, * (or, 17 additions, 12 multiplications, 6 fused multiply/add), * 19 stack variables, 2 constants, and 12 memory accesses */ #include "dft/simd/t1b.h" static void t1bv_6(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms) { DVK(KP500000000, +0.500000000000000000000000000000000000000000000); DVK(KP866025403, +0.866025403784438646763723170752936183471402627); { INT m; R *x; x = ii; for (m = mb, W = W + (mb * ((TWVL / VL) * 10)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 10), MAKE_VOLATILE_STRIDE(6, rs)) { V T4, Ti, Te, Tk, T9, Tj, T1, T3, T2; T1 = LD(&(x[0]), ms, &(x[0])); T2 = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)])); T3 = BYTW(&(W[TWVL * 4]), T2); T4 = VSUB(T1, T3); Ti = VADD(T1, T3); { V Tb, Td, Ta, Tc; Ta = LD(&(x[WS(rs, 4)]), ms, &(x[0])); Tb = BYTW(&(W[TWVL * 6]), Ta); Tc = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)])); Td = BYTW(&(W[0]), Tc); Te = VSUB(Tb, Td); Tk = VADD(Tb, Td); } { V T6, T8, T5, T7; T5 = LD(&(x[WS(rs, 2)]), ms, &(x[0])); T6 = BYTW(&(W[TWVL * 2]), T5); T7 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)])); T8 = BYTW(&(W[TWVL * 8]), T7); T9 = VSUB(T6, T8); Tj = VADD(T6, T8); } { V Th, Tf, Tg, Tn, Tl, Tm; Th = VMUL(LDK(KP866025403), VSUB(T9, Te)); Tf = VADD(T9, Te); Tg = VFNMS(LDK(KP500000000), Tf, T4); ST(&(x[WS(rs, 1)]), VFMAI(Th, Tg), ms, &(x[WS(rs, 1)])); ST(&(x[WS(rs, 3)]), VADD(T4, Tf), ms, &(x[WS(rs, 1)])); ST(&(x[WS(rs, 5)]), VFNMSI(Th, Tg), ms, &(x[WS(rs, 1)])); Tn = VMUL(LDK(KP866025403), VSUB(Tj, Tk)); Tl = VADD(Tj, Tk); Tm = VFNMS(LDK(KP500000000), Tl, Ti); ST(&(x[WS(rs, 2)]), VFNMSI(Tn, Tm), ms, &(x[0])); ST(&(x[0]), VADD(Ti, Tl), ms, &(x[0])); ST(&(x[WS(rs, 4)]), VFMAI(Tn, Tm), ms, &(x[0])); } } } VLEAVE(); } static const tw_instr twinstr[] = { VTW(0, 1), VTW(0, 2), VTW(0, 3), VTW(0, 4), VTW(0, 5), { TW_NEXT, VL, 0 } }; static const ct_desc desc = { 6, XSIMD_STRING("t1bv_6"), twinstr, &GENUS, { 17, 12, 6, 0 }, 0, 0, 0 }; void XSIMD(codelet_t1bv_6) (planner *p) { X(kdft_dit_register) (p, t1bv_6, &desc); } #else /* Generated by: ../../../genfft/gen_twiddle_c.native -simd -compact -variables 4 -pipeline-latency 8 -n 6 -name t1bv_6 -include dft/simd/t1b.h -sign 1 */ /* * This function contains 23 FP additions, 14 FP multiplications, * (or, 21 additions, 12 multiplications, 2 fused multiply/add), * 19 stack variables, 2 
constants, and 12 memory accesses */ #include "dft/simd/t1b.h" static void t1bv_6(R *ri, R *ii, const R *W, stride rs, INT mb, INT me, INT ms) { DVK(KP500000000, +0.500000000000000000000000000000000000000000000); DVK(KP866025403, +0.866025403784438646763723170752936183471402627); { INT m; R *x; x = ii; for (m = mb, W = W + (mb * ((TWVL / VL) * 10)); m < me; m = m + VL, x = x + (VL * ms), W = W + (TWVL * 10), MAKE_VOLATILE_STRIDE(6, rs)) { V Tf, Ti, Ta, Tk, T5, Tj, Tc, Te, Td; Tc = LD(&(x[0]), ms, &(x[0])); Td = LD(&(x[WS(rs, 3)]), ms, &(x[WS(rs, 1)])); Te = BYTW(&(W[TWVL * 4]), Td); Tf = VSUB(Tc, Te); Ti = VADD(Tc, Te); { V T7, T9, T6, T8; T6 = LD(&(x[WS(rs, 4)]), ms, &(x[0])); T7 = BYTW(&(W[TWVL * 6]), T6); T8 = LD(&(x[WS(rs, 1)]), ms, &(x[WS(rs, 1)])); T9 = BYTW(&(W[0]), T8); Ta = VSUB(T7, T9); Tk = VADD(T7, T9); } { V T2, T4, T1, T3; T1 = LD(&(x[WS(rs, 2)]), ms, &(x[0])); T2 = BYTW(&(W[TWVL * 2]), T1); T3 = LD(&(x[WS(rs, 5)]), ms, &(x[WS(rs, 1)])); T4 = BYTW(&(W[TWVL * 8]), T3); T5 = VSUB(T2, T4); Tj = VADD(T2, T4); } { V Tb, Tg, Th, Tn, Tl, Tm; Tb = VBYI(VMUL(LDK(KP866025403), VSUB(T5, Ta))); Tg = VADD(T5, Ta); Th = VFNMS(LDK(KP500000000), Tg, Tf); ST(&(x[WS(rs, 1)]), VADD(Tb, Th), ms, &(x[WS(rs, 1)])); ST(&(x[WS(rs, 3)]), VADD(Tf, Tg), ms, &(x[WS(rs, 1)])); ST(&(x[WS(rs, 5)]), VSUB(Th, Tb), ms, &(x[WS(rs, 1)])); Tn = VBYI(VMUL(LDK(KP866025403), VSUB(Tj, Tk))); Tl = VADD(Tj, Tk); Tm = VFNMS(LDK(KP500000000), Tl, Ti); ST(&(x[WS(rs, 2)]), VSUB(Tm, Tn), ms, &(x[0])); ST(&(x[0]), VADD(Ti, Tl), ms, &(x[0])); ST(&(x[WS(rs, 4)]), VADD(Tn, Tm), ms, &(x[0])); } } } VLEAVE(); } static const tw_instr twinstr[] = { VTW(0, 1), VTW(0, 2), VTW(0, 3), VTW(0, 4), VTW(0, 5), { TW_NEXT, VL, 0 } }; static const ct_desc desc = { 6, XSIMD_STRING("t1bv_6"), twinstr, &GENUS, { 21, 12, 2, 0 }, 0, 0, 0 }; void XSIMD(codelet_t1bv_6) (planner *p) { X(kdft_dit_register) (p, t1bv_6, &desc); } #endif
650422.c
/* * Copyright 1999-2006 University of Chicago * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * partial get test. * * makes sure that the ftp client and control libraries will handle the * partial transfer of a file using our FTP extensions. */ #include "globus_ftp_client.h" #include "globus_ftp_client_test_common.h" #include <stdlib.h> static globus_mutex_t lock; static globus_cond_t cond; static globus_bool_t done; static globus_bool_t error = GLOBUS_FALSE; #define SIZE 42 static void done_cb( void * user_arg, globus_ftp_client_handle_t * handle, globus_object_t * err) { char * tmpstr; if(err) tmpstr = " an"; else tmpstr = "out"; if(err) { printf("done with%s error\n", tmpstr); error = GLOBUS_TRUE; } globus_mutex_lock(&lock); done = GLOBUS_TRUE; globus_cond_signal(&cond); globus_mutex_unlock(&lock); } static void data_cb( void * user_arg, globus_ftp_client_handle_t * handle, globus_object_t * err, globus_byte_t * buffer, globus_size_t length, globus_off_t offset, globus_bool_t eof) { static int first = 1; fprintf(stdout, "%s[%"GLOBUS_OFF_T_FORMAT",%"GLOBUS_OFF_T_FORMAT"]\n", first?"":"\n", offset, offset+length); first = 0; fwrite(buffer, 1, length, stdout); if(!eof) { globus_ftp_client_register_read(handle, buffer, SIZE, data_cb, 0); } } int main(int argc, char *argv[]) { globus_ftp_client_handle_t handle; globus_ftp_client_operationattr_t attr; globus_byte_t buffer[SIZE]; globus_size_t buffer_length = sizeof(buffer); globus_result_t result; char * src; char * dst; globus_ftp_client_handleattr_t handle_attr; globus_off_t start_offset=5; globus_off_t end_offset=15; int i; globus_ftp_control_mode_t mode; LTDL_SET_PRELOADED_SYMBOLS(); globus_module_activate(GLOBUS_FTP_CLIENT_MODULE); globus_ftp_client_handleattr_init(&handle_attr); globus_ftp_client_operationattr_init(&attr); mode = GLOBUS_FTP_CONTROL_MODE_STREAM; /* Parse local arguments */ for(i = 1; i < argc; i++) { if(strcmp(argv[i], "-R") == 0 && i + 2 < argc) { globus_libc_scan_off_t(argv[i+1], &start_offset, GLOBUS_NULL); globus_libc_scan_off_t(argv[i+2], &end_offset, GLOBUS_NULL); test_remove_arg(&argc, argv, &i, 2); } else if(strcmp(argv[i], "-E") == 0 && i < argc) { mode = GLOBUS_FTP_CONTROL_MODE_EXTENDED_BLOCK; test_remove_arg(&argc, argv, &i, 0); } } test_parse_args(argc, argv, &handle_attr, &attr, &src, &dst); if(start_offset < 0) start_offset = 0; if(end_offset < 0) end_offset = 0; globus_mutex_init(&lock, GLOBUS_NULL); globus_cond_init(&cond, GLOBUS_NULL); globus_ftp_client_handle_init(&handle, &handle_attr); globus_ftp_client_operationattr_set_mode(&attr, mode); done = GLOBUS_FALSE; result = globus_ftp_client_partial_get(&handle, src, &attr, GLOBUS_NULL, start_offset, end_offset, done_cb, 0); if(result != GLOBUS_SUCCESS) { error = GLOBUS_TRUE; done = GLOBUS_TRUE; } else { globus_ftp_client_register_read( &handle, buffer, buffer_length, data_cb, 0); } globus_mutex_lock(&lock); while(!done) { globus_cond_wait(&cond, &lock); } globus_mutex_unlock(&lock); globus_ftp_client_handle_destroy(&handle); globus_module_deactivate_all(); 
if(test_abort_count && error) { return 0; } return error; }
985208.c
// Particle.c: implementation of the Particle class. // ////////////////////////////////////////////////////////////////////// #include "Std.h" #include "Particle.h" #define MAXANGLES 16384 extern int theTexture; // used to compute the min and max of two expresions #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) __private_extern__ void DrawParticle(Particle *p) // the math was easier in 2D - so 2D it is { float screenx = (p->x * info->sys_glWidth / p->z) + info->sys_glWidth * 0.5f; float screeny = (p->y * info->sys_glWidth / p->z) + info->sys_glHeight * 0.5f; float oldscreenx = (p->oldx * info->sys_glWidth / p->oldz) + info->sys_glWidth * 0.5f; float oldscreeny = (p->oldy * info->sys_glWidth / p->oldz) + info->sys_glHeight * 0.5f; // near clip if(p->z < 100.0f) { InitParticle(p); return; } // side clip if(screenx > info->sys_glWidth + 100.0f || screenx < -100.0f) { InitParticle(p); return; } // vertical clip if(screeny > info->sys_glHeight + 100.0f || screeny < -100.0f) { InitParticle(p); return; } info->starfieldColor[info->starfieldColorIndex++] = p->r; info->starfieldColor[info->starfieldColorIndex++] = p->g; info->starfieldColor[info->starfieldColorIndex++] = p->b; info->starfieldColor[info->starfieldColorIndex++] = 1.0f; info->starfieldColor[info->starfieldColorIndex++] = p->r; info->starfieldColor[info->starfieldColorIndex++] = p->g; info->starfieldColor[info->starfieldColorIndex++] = p->b; info->starfieldColor[info->starfieldColorIndex++] = 1.0f; info->starfieldColor[info->starfieldColorIndex++] = p->r; info->starfieldColor[info->starfieldColorIndex++] = p->g; info->starfieldColor[info->starfieldColorIndex++] = p->b; info->starfieldColor[info->starfieldColorIndex++] = 1.0f; info->starfieldColor[info->starfieldColorIndex++] = p->r; info->starfieldColor[info->starfieldColorIndex++] = p->g; info->starfieldColor[info->starfieldColorIndex++] = p->b; info->starfieldColor[info->starfieldColorIndex++] = 1.0f; p->animFrame++; if (p->animFrame == 64) { p->animFrame = 0; } { float dx = (screenx-oldscreenx); float dy = (screeny-oldscreeny); float m = FastDistance2D(dx, dy); float u0 = (p->animFrame&&7) * 0.125f; float v0 = (p->animFrame>>3) * 0.125f; float u1 = u0 + 0.125f; float v1 = v0 + 0.125f; float size = (3500.0f*(info->sys_glWidth/1024.0f)); float w = max(1.5f,size/p->z); float ow = max(1.5f,size/p->oldz); float d = FastDistance2D(dx, dy); float s, os, dxs, dys, dxos, dyos, dxm, dym; if(d) { s = w/d; } else { s = 0.0f; } if(d) { os = ow/d; } else { os = 0.0f; } m = 2.0f + s; dxs = dx*s; dys = dy*s; dxos = dx*os; dyos = dy*os; dxm = dx*m; dym = dy*m; info->starfieldTextures[info->starfieldTexturesIndex++] = u0; info->starfieldTextures[info->starfieldTexturesIndex++] = v0; info->starfieldVertices[info->starfieldVerticesIndex++] = screenx+dxm-dys; info->starfieldVertices[info->starfieldVerticesIndex++] = screeny+dym+dxs; info->starfieldTextures[info->starfieldTexturesIndex++] = u0; info->starfieldTextures[info->starfieldTexturesIndex++] = v1; info->starfieldVertices[info->starfieldVerticesIndex++] = screenx+dxm+dys; info->starfieldVertices[info->starfieldVerticesIndex++] = screeny+dym-dxs; info->starfieldTextures[info->starfieldTexturesIndex++] = u1; info->starfieldTextures[info->starfieldTexturesIndex++] = v1; info->starfieldVertices[info->starfieldVerticesIndex++] = oldscreenx-dxm+dyos; info->starfieldVertices[info->starfieldVerticesIndex++] = oldscreeny-dym-dxos; info->starfieldTextures[info->starfieldTexturesIndex++] = u1; 
info->starfieldTextures[info->starfieldTexturesIndex++] = v0; info->starfieldVertices[info->starfieldVerticesIndex++] = oldscreenx-dxm-dyos; info->starfieldVertices[info->starfieldVerticesIndex++] = oldscreeny-dym+dxos; } } __private_extern__ void UpdateParticle(Particle *p) { p->oldx = p->x; p->oldy = p->y; p->oldz = p->z; p->x += p->deltax*info->fDeltaTime; p->y += p->deltay*info->fDeltaTime; p->z += p->deltaz*info->fDeltaTime; } __private_extern__ void InitParticle(Particle *p) { // float tempx,tempy; int r1,r2; p->oldz = RandFlt(2500.0f,22500.0f); // do // { r1 = rand(); r2 = rand(); p->oldx = ((float) (r1 % (int) info->sys_glWidth) - info->sys_glWidth * 0.5f) / (info->sys_glWidth / p->oldz); p->oldy = (info->sys_glHeight * 0.5f - (float) (r2 % (int) info->sys_glHeight)) / (info->sys_glWidth / p->oldz); // tempx = (oldx * info->sys_glWidth / 75.0f) + info->sys_glWidth/2.0f; // tempy = (oldy * info->sys_glWidth / 75.0f) + info->sys_glHeight/2.0f; // } while (fabs(tempx) < info->sys_glWidth + 100.0 && fabs(tempy) < info->sys_glHeight + 100.0); p->deltax = 0.0f; p->deltay = 0.0f; p->deltaz = (float) -starSpeed; p->x = p->oldx + p->deltax; p->y = p->oldy + p->deltay; p->z = p->oldz + p->deltaz; p->r = RandFlt(0.125f,1.0f); p->g = RandFlt(0.125f,1.0f); p->b = RandFlt(0.125f,1.0f); p->animFrame = 0; }
410670.c
/* * mcp23s08.c: * Extend wiringPi with the MCP 23s08 SPI GPIO expander chip * Copyright (c) 2013 Gordon Henderson *********************************************************************** * This file is part of wiringPi: * https://projects.drogon.net/raspberry-pi/wiringpi/ * * wiringPi is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * wiringPi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with wiringPi. * If not, see <http://www.gnu.org/licenses/>. *********************************************************************** */ #include <stdio.h> #include <stdint.h> #include "wiringPi.h" #include "wiringPiSPI.h" #include "mcp23x0817.h" #include "mcp23s08.h" #define MCP_SPEED 4000000 /* * writeByte: * Write a byte to a register on the MCP23s08 on the SPI bus. ********************************************************************************* */ static void writeByte (uint8_t spiPort, uint8_t devId, uint8_t reg, uint8_t data) { uint8_t spiData [4] ; spiData [0] = CMD_WRITE | ((devId & 7) << 1) ; spiData [1] = reg ; spiData [2] = data ; wiringPiSPIDataRW (spiPort, spiData, 3) ; } /* * readByte: * Read a byte from a register on the MCP23s08 on the SPI bus. ********************************************************************************* */ static uint8_t readByte (uint8_t spiPort, uint8_t devId, uint8_t reg) { uint8_t spiData [4] ; spiData [0] = CMD_READ | ((devId & 7) << 1) ; spiData [1] = reg ; wiringPiSPIDataRW (spiPort, spiData, 3) ; return spiData [2] ; } /* * myPinMode: ********************************************************************************* */ static void myPinMode (struct wiringPiNodeStruct *node, int pin, int mode) { int mask, old, reg ; reg = MCP23x08_IODIR ; mask = 1 << (pin - node->pinBase) ; old = readByte (node->data0, node->data1, reg) ; if (mode == OUTPUT) old &= (~mask) ; else old |= mask ; writeByte (node->data0, node->data1, reg, old) ; } /* * myPullUpDnControl: ********************************************************************************* */ static void myPullUpDnControl (struct wiringPiNodeStruct *node, int pin, int mode) { int mask, old, reg ; reg = MCP23x08_GPPU ; mask = 1 << (pin - node->pinBase) ; old = readByte (node->data0, node->data1, reg) ; if (mode == PUD_UP) old |= mask ; else old &= (~mask) ; writeByte (node->data0, node->data1, reg, old) ; } /* * myDigitalWrite: ********************************************************************************* */ static void myDigitalWrite (struct wiringPiNodeStruct *node, int pin, int value) { int bit, old ; bit = 1 << ((pin - node->pinBase) & 7) ; old = node->data2 ; if (value == LOW) old &= (~bit) ; else old |= bit ; writeByte (node->data0, node->data1, MCP23x08_GPIO, old) ; node->data2 = old ; } /* * myDigitalRead: ********************************************************************************* */ static int myDigitalRead (struct wiringPiNodeStruct *node, int pin) { int mask, value ; mask = 1 << ((pin - node->pinBase) & 7) ; value = readByte (node->data0, node->data1, MCP23x08_GPIO) ; if ((value & mask) == 0) return LOW ; else return HIGH ; } /* * mcp23s08Setup: 
* Create a new instance of an MCP23s08 SPI GPIO interface. We know it * has 8 pins, so all we need to know here is the SPI address and the * user-defined pin base. ********************************************************************************* */ int mcp23s08Setup (const int pinBase, const int spiPort, const int devId) { struct wiringPiNodeStruct *node ; if (wiringPiSPISetup (spiPort, MCP_SPEED) < 0) return FALSE ; writeByte (spiPort, devId, MCP23x08_IOCON, IOCON_INIT) ; node = wiringPiNewNode (pinBase, 8) ; node->data0 = spiPort ; node->data1 = devId ; node->pinMode = myPinMode ; node->pullUpDnControl = myPullUpDnControl ; node->digitalRead = myDigitalRead ; node->digitalWrite = myDigitalWrite ; node->data2 = readByte (spiPort, devId, MCP23x08_OLAT) ; return TRUE ; }
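A short, hypothetical usage sketch (not part of the original file): register the expander at an arbitrary pin base and drive its pins through the ordinary wiringPi calls. Pin base 100, SPI channel 0, device address 0 and the example_ function name are made-up values for illustration.

#define MY_EXPANDER_BASE 100

int example_blink_expander_pin(void)
{
	if (wiringPiSetup() < 0)
		return -1;

	/* Map the MCP23S08's 8 pins to wiringPi pins 100..107. */
	if (!mcp23s08Setup(MY_EXPANDER_BASE, 0, 0))
		return -1;

	pinMode(MY_EXPANDER_BASE + 0, OUTPUT);         /* GP0 as output */
	pullUpDnControl(MY_EXPANDER_BASE + 1, PUD_UP); /* GP1 pulled up */

	digitalWrite(MY_EXPANDER_BASE + 0, HIGH);
	delay(500);
	digitalWrite(MY_EXPANDER_BASE + 0, LOW);

	return digitalRead(MY_EXPANDER_BASE + 1);
}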
437136.c
/* KallistiOS ##version## inet_pton.c Copyright (C) 2007, 2010 Lawrence Sebald */ #include <arpa/inet.h> #include <errno.h> static int inet_pton4(const char *src, void *dst) { int parts[4] = { 0 }; int count = 0; struct in_addr *addr = (struct in_addr *)dst; for(; *src && count < 4; ++src) { if(*src == '.') { ++count; } /* Unlike inet_aton(), inet_pton() only supports decimal parts */ else if(*src >= '0' && *src <= '9') { parts[count] *= 10; parts[count] += *src - '0'; } else { /* Invalid digit, and not a dot... bail */ return 0; } } if(count != 3) { /* Not the right number of parts, bail */ return 0; } /* Validate each part, note that unlike inet_aton(), inet_pton() only supports the standard xxx.xxx.xxx.xxx addresses. */ if(parts[0] > 0xFF || parts[1] > 0xFF || parts[2] > 0xFF || parts[3] > 0xFF) return 0; addr->s_addr = htonl(parts[0] << 24 | parts[1] << 16 | parts[2] << 8 | parts[3]); return 1; } static int inet_pton6(const char *src, void *dst) { uint32_t parts[8] = { 0 }; struct in6_addr *addr = (struct in6_addr *)dst; int wc = 0, dc = 0, afterdc = 0; int pos = 0, i; const char *tmp = src, *ip4start = NULL; struct in_addr ip4addr; /* This loop simply checks the address for validity. Its split up in two parts (check and parse) like this for simplicity and clarity. */ for(; *tmp; ++tmp) { if(*tmp == ':') { if(wc && dc) { /* If we have a double colon again after a double colon (or we have 3 colons in a row), its an error. Bail out. */ return 0; } else if(ip4start) { /* If we have any dots, we can't have any colons after! */ return 0; } else if(wc) { dc = 1; } else if(dc) { ++afterdc; } wc = 1; } else if(*tmp == '.') { /* If this is the first part of the IPv4, figure out where it starts in the string */ if(!ip4start) { for(ip4start = tmp; ip4start > src; --ip4start) { if(*ip4start == ':') { ++ip4start; break; } } } } else if((*tmp >= '0' && *tmp <= '9') || (*tmp >= 'A' && *tmp <= 'F') || (*tmp >= 'a' && *tmp <= 'f')) { wc = 0; } else { /* Invalid character encountered, bail out */ return 0; } } /* Make sure if we have a colon at the end, its a double colon, not a single colon. Double colon is fine, single colon is invalid. */ if(*(tmp - 1) == ':' && *(tmp - 2) != ':') { return 0; } /* Same deal at the beginning of the string. */ if(*src == ':' && *(src + 1) != ':') { return 0; } /* If we have any dots, attempt to parse out the IPv4 address. */ if(ip4start && inet_pton4(ip4start, &ip4addr) != 1) { return 0; } /* Adjust the after double colon count for embedded IPv4 addresses. */ if(ip4start && dc) { afterdc += 2; } ++afterdc; /* Reset these, since we need them reset below to start the parsing. */ wc = dc = 0; for(; *src && (!ip4start || src < ip4start); ++src) { if(*src == ':') { if(wc) { /* We have a double colon, advance as far as we need to. */ if(pos + afterdc >= 8) { /* The double colon is invalid wherever it is. */ return 0; } dc = 1; pos = 8 - afterdc; } else { /* Advance to the next 16-bit set */ wc = 1; ++pos; } } else if(*src >= '0' && *src <= '9') { parts[pos] <<= 4; parts[pos] |= *src - '0'; wc = 0; } else if(*src >= 'a' && *src <= 'f') { parts[pos] <<= 4; parts[pos] |= *src - 'a' + 0x0A; wc = 0; } else if(*src >= 'A' && *src <= 'F') { parts[pos] <<= 4; parts[pos] |= *src - 'A' + 0x0A; wc = 0; } if(parts[pos] > 0xFFFF) { /* We've overflowed, bail */ return 0; } } if((!ip4start && pos != 7) || (ip4start && pos != 5)) { /* We didn't fill in the whole address... */ return 0; } /* If we've gotten here, everything's good, so fill in the real address. 
*/ for(i = 0; i < 8; ++i) { addr->__s6_addr.__s6_addr16[i] = htons((uint16_t)parts[i]); } /* If we have an IPv4 address embedded, put it in too. */ if(ip4start) { addr->__s6_addr.__s6_addr32[3] = ip4addr.s_addr; } /* And, we're done. */ return 1; } int inet_pton(int af, const char *src, void *dst) { switch(af) { case AF_INET: return inet_pton4(src, dst); case AF_INET6: return inet_pton6(src, dst); default: errno = EAFNOSUPPORT; return -1; } }
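A small usage sketch (not part of the original file) exercising both address families through the functions above; the addresses and the example_ function name are arbitrary.

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>

int example_parse_addresses(void)
{
    struct in_addr v4;
    struct in6_addr v6;

    if (inet_pton(AF_INET, "192.168.1.10", &v4) != 1)
        return -1;

    /* s_addr is stored in network byte order. */
    printf("IPv4: 0x%08lx\n", (unsigned long)ntohl(v4.s_addr));

    /* Compressed "::" notation is handled by inet_pton6() above. */
    if (inet_pton(AF_INET6, "2001:db8::1", &v6) != 1)
        return -1;

    return 0;
}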
498974.c
/* * Copyright (c) 2020 Huawei Device Co., Ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "securec.h" #include "coap_adapter.h" #define COAP_MAX_ENDPOINTS_NUM 64 #define COAP_LOW_DELTA_NUM 13 #define COAP_MID_DELTA_NUM 256 #define COAP_EXTEND_DELTA_VALUE_UINT8 13 #define COAP_EXTEND_DELTA_VALUE_UINT16 14 #define COAP_EXTEND_DELTA_VALUE_INVALID 15 #define COAP_OPTION_MAX_LEN 64 #define COAP_OPTION_EXTENSION_LEN 2 #define COAP_SHIFT_BIT8 8 #define COAP_SHIFT_BIT6 6 #define COAP_SHIFT_BIT4 4 #define BUF_OFFSET_BYTE2 2 #define BUF_OFFSET_BYTE3 3 #define BUF_OFFSET_BYTE4 4 #define MSGID_HIGHT_BYTE 2 static int COAP_ParseOptionExtension(unsigned short *value, const unsigned char **dataPos, unsigned char *headerLen, unsigned int bufLen) { if (*value == COAP_EXTEND_DELTA_VALUE_UINT8) { (*headerLen)++; if (bufLen < *headerLen) { return DISCOVERY_ERR_OPT_INVALID_SHORT_FOR_HEADER; } *value = (unsigned short)((*dataPos)[1] + COAP_LOW_DELTA_NUM); (*dataPos)++; return DISCOVERY_ERR_SUCCESS; } if (*value == COAP_EXTEND_DELTA_VALUE_UINT16) { *headerLen = (unsigned char)(*headerLen + COAP_OPTION_EXTENSION_LEN); if (bufLen < *headerLen) { return DISCOVERY_ERR_OPT_INVALID_SHORT_FOR_HEADER; } unsigned short optionDeltaValue = (unsigned short)((*dataPos)[1] << COAP_SHIFT_BIT8) | (*dataPos)[COAP_OPTION_EXTENSION_LEN]; if (optionDeltaValue > (0xFFFF - COAP_LOW_DELTA_NUM - COAP_MID_DELTA_NUM)) { return DISCOVERY_ERR_BAD_REQ; } *value = (unsigned short)(optionDeltaValue + COAP_LOW_DELTA_NUM + COAP_MID_DELTA_NUM); (*dataPos) += COAP_OPTION_EXTENSION_LEN; return DISCOVERY_ERR_SUCCESS; } if (*value == COAP_EXTEND_DELTA_VALUE_INVALID) { return DISCOVERY_ERR_OPT_INVALID_DELTA; } return DISCOVERY_ERR_SUCCESS; } static int COAP_ParseOption(COAP_Option *option, unsigned short *runningDelta, const unsigned char **buf, unsigned int bufLen) { const unsigned char *dataPos = NULL; unsigned char headLen; unsigned short len; unsigned short delta; int ret; if (bufLen < 1) { return DISCOVERY_ERR_OPT_INVALID_SHORT_FOR_HEADER; } dataPos = *buf; delta = (dataPos[0] & 0xF0) >> COAP_SHIFT_BIT4; len = dataPos[0] & 0x0F; headLen = 1; ret = COAP_ParseOptionExtension(&delta, &dataPos, &headLen, bufLen); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } ret = COAP_ParseOptionExtension(&len, &dataPos, &headLen, bufLen); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } if ((dataPos + 1 + len) > (*buf + bufLen)) { return DISCOVERY_ERR_OPT_INVALID_BIG; } option->num = (unsigned short)(delta + *runningDelta); option->optionBuf = dataPos + 1; option->len = len; *buf = dataPos + 1 + len; *runningDelta = (unsigned short)(*runningDelta + delta); return DISCOVERY_ERR_SUCCESS; } static int COAP_ParseOptionsAndPayload(COAP_Packet *pkt, const unsigned char *buf, unsigned int buflen) { unsigned char optionIndex = 0; unsigned short delta = 0; const unsigned char *dataPos = buf + HEADER_LEN + pkt->header.tokenLen; const unsigned char *end = buf + buflen; if (dataPos > end) { return DISCOVERY_ERR_OPT_OVERRUNS_PKT; } while ((dataPos < end) && (*dataPos != 
0xFF) && (optionIndex < COAP_MAX_OPTION)) { int ret = COAP_ParseOption(&((pkt->options)[optionIndex]), &delta, &dataPos, end - dataPos); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } optionIndex++; } if ((dataPos < end) && (*dataPos != 0xFF) && (optionIndex >= COAP_MAX_OPTION)) { return DISCOVERY_ERR_SERVER_ERR; } pkt->optionsNum = optionIndex; if ((dataPos < end) && (*dataPos != 0xFF)) { pkt->payload.buffer = NULL; pkt->payload.len = 0; return DISCOVERY_ERR_SUCCESS; } if (dataPos + 1 >= end) { return DISCOVERY_ERR_INVALID_PKT; } pkt->payload.buffer = dataPos + 1; pkt->payload.len = (unsigned int)(long)(end - (dataPos + 1)); return DISCOVERY_ERR_SUCCESS; } static int COAP_ParseHeader(COAP_Packet *pkt, const unsigned char *buf, unsigned int bufLen) { if (bufLen < HEADER_LEN) { return DISCOVERY_ERR_HEADER_INVALID_SHORT; } pkt->header.ver = (((unsigned int)buf[0] >> COAP_SHIFT_BIT6) & 0x03); pkt->header.type = ((((unsigned int)buf[0] & 0x30) >> COAP_SHIFT_BIT4) & 0x03); pkt->header.tokenLen = (((unsigned int)buf[0] & 0x0F)); pkt->header.code = buf[1]; pkt->header.varSection.msgId = (unsigned short)((unsigned short)(buf[MSGID_HIGHT_BYTE] << COAP_SHIFT_BIT8) | buf[BUF_OFFSET_BYTE3]); return DISCOVERY_ERR_SUCCESS; } int COAP_SoftBusDecode(COAP_Packet *pkt, const unsigned char *buf, unsigned int bufLen) { int ret; if (pkt == NULL || buf == NULL) { return -1; } if (bufLen == 0) { return -1; } if (pkt->protocol != COAP_UDP) { return -1; } ret = COAP_ParseHeader(pkt, buf, bufLen); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } if (pkt->header.ver != COAP_VERSION) { return DISCOVERY_ERR_VER_INVALID; } if (pkt->header.tokenLen > MAX_TOK_LEN) { return DISCOVERY_ERR_INVALID_TOKEN_LEN; } if ((bufLen > HEADER_LEN) && (pkt->header.code == 0)) { return DISCOVERY_ERR_INVALID_EMPTY_MSG; } if (pkt->header.tokenLen == 0) { pkt->token.buffer = NULL; pkt->token.len = 0; } else if ((unsigned int)(pkt->header.tokenLen + HEADER_LEN) > bufLen) { return DISCOVERY_ERR_TOKEN_INVALID_SHORT; } else { pkt->token.buffer = &buf[BUF_OFFSET_BYTE4]; pkt->token.len = pkt->header.tokenLen; } ret = COAP_ParseOptionsAndPayload(pkt, buf, bufLen); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } pkt->len = bufLen; return DISCOVERY_ERR_SUCCESS; } static int COAP_CreateHeader(COAP_Packet *pkt, const COAP_PacketParam *pktParam, COAP_ReadWriteBuffer *buf) { if (buf->len != 0) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if ((pktParam->protocol != COAP_UDP) && (pktParam->protocol != COAP_TCP)) { return DISCOVERY_ERR_TRANSPORT_NOT_UDP_OR_TCP; } pkt->protocol = pktParam->protocol; if (pktParam->type > COAP_TYPE_RESET) { return DISCOVERY_ERR_UNKNOWN_MSG_TYPE; } if (buf->size < HEADER_LEN) { return DISCOVERY_ERR_PKT_EXCEED_MAX_PDU; } pkt->header.type = (unsigned int)pktParam->type & 0x03; pkt->header.ver = COAP_VERSION;//1 pkt->header.code = pktParam->code; if (pkt->protocol == COAP_UDP) { pkt->header.varSection.msgId = pktParam->msgId; buf->readWriteBuf[0] = (char)(pkt->header.ver << COAP_SHIFT_BIT6); buf->readWriteBuf[0] = (char)((unsigned char)buf->readWriteBuf[0] | (unsigned char)(pkt->header.type << COAP_SHIFT_BIT4)); buf->readWriteBuf[1] = (char)pkt->header.code; buf->readWriteBuf[BUF_OFFSET_BYTE2] = (char)((pkt->header.varSection.msgId & 0xFF00) >> COAP_SHIFT_BIT8); buf->readWriteBuf[BUF_OFFSET_BYTE3] = (char)(pkt->header.varSection.msgId & 0x00FF); } else { return DISCOVERY_ERR_NOT_SUPPORTED; } pkt->len = buf->len = HEADER_LEN;//4 return DISCOVERY_ERR_SUCCESS; } static int COAP_AddData(COAP_Packet *pkt, const COAP_Buffer 
*payload, COAP_ReadWriteBuffer *buf) { if ((payload->len == 0) && (payload->buffer == NULL)) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if (buf->len < HEADER_LEN) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if ((payload->len > 0xFFFF) || (buf->len + payload->len + 1) > buf->size) { return DISCOVERY_ERR_PKT_EXCEED_MAX_PDU; } pkt->payload.len = payload->len; if (payload->len != 0) { pkt->payload.len = payload->len; buf->readWriteBuf[buf->len] = 0xFF; (buf->len)++; pkt->payload.buffer = (const unsigned char *)&buf->readWriteBuf[buf->len]; if (memcpy_s(&buf->readWriteBuf[buf->len], buf->size - buf->len, payload->buffer, payload->len) != EOK) { return DISCOVERY_ERR_INVALID_ARGUMENT; } } buf->len += payload->len; pkt->len = buf->len; return DISCOVERY_ERR_SUCCESS; } static void COAP_GetOptionParam(unsigned short value, unsigned char *param) { if (value < COAP_LOW_DELTA_NUM) { *param = (unsigned char)(value & 0xFF); return; } if (value < (COAP_LOW_DELTA_NUM + COAP_MID_DELTA_NUM)) { *param = COAP_EXTEND_DELTA_VALUE_UINT8; return; } *param = COAP_EXTEND_DELTA_VALUE_UINT16; return; } static unsigned short COAP_GetOptionLength(const COAP_Option *opt, unsigned short runningDelta) { unsigned short optionLen = 1; unsigned char delta = 0; unsigned char len = 0; COAP_GetOptionParam((unsigned short)(opt->num - runningDelta), &delta); if (delta == COAP_EXTEND_DELTA_VALUE_UINT8) { optionLen += 1; } else if (delta == COAP_EXTEND_DELTA_VALUE_UINT16) { optionLen += BUF_OFFSET_BYTE2; } COAP_GetOptionParam((unsigned short)opt->len, &len); if (len == COAP_EXTEND_DELTA_VALUE_UINT8) { optionLen += 1; } else if (len == COAP_EXTEND_DELTA_VALUE_UINT16) { optionLen += BUF_OFFSET_BYTE2; } return optionLen + opt->len; } static int COAP_CheckOption(const COAP_Packet *pkt, const COAP_Option *option, const COAP_ReadWriteBuffer *buf) { unsigned short optionLen; unsigned short runningDelta = 0; if (buf->len < HEADER_LEN) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if ((option->optionBuf == NULL) && (option->len != 0)) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if ((option->len > 0xFFFF) || (pkt->optionsNum >= COAP_MAX_OPTION)) { return DISCOVERY_ERR_BAD_REQ; } if (pkt->optionsNum != 0) { runningDelta = pkt->options[pkt->optionsNum - 1].num; } optionLen = COAP_GetOptionLength(option, runningDelta); if ((buf->len + optionLen) > buf->size) { return DISCOVERY_ERR_PKT_EXCEED_MAX_PDU; } return DISCOVERY_ERR_SUCCESS; } static int COAP_AddOption(COAP_Packet *pkt, const COAP_Option *option, COAP_ReadWriteBuffer *buf) { unsigned char delta; unsigned char len; unsigned short optionDelta; unsigned short prevOptionNum; if (COAP_CheckOption(pkt, option, buf) != DISCOVERY_ERR_SUCCESS) { return DISCOVERY_ERR_INVALID_ARGUMENT; } prevOptionNum = 0; if (pkt->optionsNum != 0) { prevOptionNum = pkt->options[pkt->optionsNum - 1].num; } optionDelta = option->num - prevOptionNum; COAP_GetOptionParam(optionDelta, &delta); COAP_GetOptionParam(option->len, &len); buf->readWriteBuf[buf->len++] = (char)(((delta << COAP_SHIFT_BIT4) | len) & 0xFF); if (delta == COAP_EXTEND_DELTA_VALUE_UINT8) { buf->readWriteBuf[buf->len++] = (char)(optionDelta - COAP_LOW_DELTA_NUM); } else if (delta == COAP_EXTEND_DELTA_VALUE_UINT16) { buf->readWriteBuf[buf->len++] = (char)((optionDelta - (COAP_LOW_DELTA_NUM + COAP_MID_DELTA_NUM)) >> COAP_SHIFT_BIT8); buf->readWriteBuf[buf->len++] = (char)((optionDelta - (COAP_LOW_DELTA_NUM + COAP_MID_DELTA_NUM)) & 0xFF); } if (len == COAP_EXTEND_DELTA_VALUE_UINT8) { buf->readWriteBuf[buf->len++] = (char)(option->len - COAP_LOW_DELTA_NUM); } 
else if (len == COAP_EXTEND_DELTA_VALUE_UINT16) { buf->readWriteBuf[buf->len++] = (char)((option->len - (COAP_LOW_DELTA_NUM + COAP_MID_DELTA_NUM)) >> COAP_SHIFT_BIT8); buf->readWriteBuf[buf->len++] = (char)((option->len - (COAP_LOW_DELTA_NUM + COAP_MID_DELTA_NUM)) & 0xFF); } if (option->len != 0) { if (memcpy_s(&buf->readWriteBuf[buf->len], buf->size - buf->len, option->optionBuf, option->len) != EOK) { return DISCOVERY_ERR_OPT_INVALID_BIG; } } pkt->options[pkt->optionsNum].optionBuf = (const unsigned char *)&buf->readWriteBuf[buf->len]; pkt->options[pkt->optionsNum].num = option->num; pkt->options[pkt->optionsNum].len = option->len; buf->len += option->len; pkt->len = buf->len; pkt->optionsNum++; return DISCOVERY_ERR_SUCCESS; } static int COAP_AddToken(COAP_Packet *pkt, const COAP_Buffer *token, COAP_ReadWriteBuffer *buf) { if ((token->len != 0) && (token->buffer == NULL)) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if (buf->len != HEADER_LEN) { return DISCOVERY_ERR_INVALID_ARGUMENT; } if (token->len > MAX_TOK_LEN) { return DISCOVERY_ERR_INVALID_TOKEN_LEN; } if ((buf->len + token->len) > buf->size) { return DISCOVERY_ERR_PKT_EXCEED_MAX_PDU; } pkt->token.len = token->len; pkt->header.tokenLen = pkt->token.len & 0x0F; pkt->token.buffer = (const unsigned char *)&buf->readWriteBuf[buf->len]; if (token->len != 0) { if (pkt->protocol == COAP_UDP) { buf->readWriteBuf[0] = (char)((unsigned char)buf->readWriteBuf[0] | token->len); } else { buf->readWriteBuf[BUF_OFFSET_BYTE2] = (char)((unsigned char)buf->readWriteBuf[BUF_OFFSET_BYTE2] | token->len); } if (memcpy_s(&buf->readWriteBuf[buf->len], pkt->header.tokenLen, token->buffer, token->len) != EOK) { return DISCOVERY_ERR_INVALID_ARGUMENT; } } buf->len += token->len; pkt->len = buf->len; return DISCOVERY_ERR_SUCCESS; } static int COAP_CreateBody(COAP_Packet *pkt, const COAP_PacketParam *param, const COAP_Buffer *token, const COAP_Buffer *payload, COAP_ReadWriteBuffer *buf) { int i; int ret; if (token != NULL) { ret = COAP_AddToken(pkt, token, buf); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } } if (param->options != 0) { if (param->optionsNum > COAP_MAX_OPTION) { return DISCOVERY_ERR_SERVER_ERR; } for (i = 0; i < param->optionsNum; i++) { ret = COAP_AddOption(pkt, &param->options[i], buf); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } } } if (payload != NULL) { ret = COAP_AddData(pkt, payload, buf); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } } return DISCOVERY_ERR_SUCCESS; } int COAP_SoftBusEncode(COAP_Packet *pkt, const COAP_PacketParam *param, const COAP_Buffer *token, const COAP_Buffer *payload, COAP_ReadWriteBuffer *buf) { int ret; if (pkt == NULL || param == NULL || buf == NULL || buf->readWriteBuf == NULL) { return DISCOVERY_ERR_INVALID_EMPTY_MSG; } ret = COAP_CreateHeader(pkt, param, buf); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } if ((param->code == 0) && ((token != NULL) || (param->options != NULL) || (payload != NULL))) { return DISCOVERY_ERR_INVALID_EMPTY_MSG; } ret = COAP_CreateBody(pkt, param, NULL, payload, buf); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } return DISCOVERY_ERR_SUCCESS; } #define COAP_MAX_TOKEN_LEN 8 #define COAP_MAX_TOKEN_ELEMENT_VALUE 256 static unsigned short g_msgId = 0; #define RAND_DIVISOR 0 void COAP_SoftBusInitMsgId(void) { g_msgId = (unsigned short)(RAND_DIVISOR); } unsigned short COAP_SoftBusMsgId(void) { if (++g_msgId == 0) { g_msgId++; } return g_msgId; } typedef struct { COAP_Packet *pkt; COAP_PacketParam *param; const unsigned char *payload; unsigned long payloadLen; } 
COAP_ResponseInfo; static int COAP_BuildResponseParam(const COAP_Packet *req, const COAP_ResponseInfo *resqInfo, COAP_Buffer *inToken) { if (req == NULL || resqInfo == NULL) { return DISCOVERY_ERR_NOT_SUPPORTED; } inToken->buffer = req->token.buffer; inToken->len = req->token.len; resqInfo->param->protocol = req->protocol; resqInfo->param->type = COAP_TYPE_NONCON; resqInfo->param->code = (unsigned char)COAP_METHOD_POST; resqInfo->param->msgId = COAP_SoftBusMsgId(); return DISCOVERY_ERR_SUCCESS; } #define PKT_TOKEN_LEN 2 int COAP_SoftBusBuildMessage(const COAP_Packet *req, const COAP_ResponseInfo *resqInfo, char *buf, unsigned int *len) { int ret; COAP_ReadWriteBuffer outBuf; COAP_Buffer inPayload; COAP_Buffer inToken; if (resqInfo == NULL || resqInfo->pkt == NULL || resqInfo->param == NULL || buf == NULL || len == NULL) { return DISCOVERY_ERR_BAD_REQ; } if (*len == 0) { return DISCOVERY_ERR_BAD_REQ; } (void)memset_s(&outBuf, sizeof(COAP_ReadWriteBuffer), 0, sizeof(COAP_ReadWriteBuffer)); (void)memset_s(&inPayload, sizeof(COAP_Buffer), 0, sizeof(COAP_Buffer)); (void)memset_s(&inToken, sizeof(COAP_Buffer), 0, sizeof(COAP_Buffer)); outBuf.readWriteBuf = buf; outBuf.size = *len; inPayload.buffer = resqInfo->payload; inPayload.len = resqInfo->payloadLen; if (resqInfo->payloadLen >= *len) { return DISCOVERY_ERR_BAD_REQ; } ret = COAP_BuildResponseParam(req, resqInfo, &inToken); if (ret != DISCOVERY_ERR_SUCCESS) { return ret; } if ((resqInfo->payload == NULL) || (resqInfo->payloadLen == 0)) { ret = COAP_SoftBusEncode(resqInfo->pkt, resqInfo->param, &inToken, NULL, &outBuf); } else { ret = COAP_SoftBusEncode(resqInfo->pkt, resqInfo->param, &inToken, &inPayload, &outBuf); } if (ret != DISCOVERY_ERR_SUCCESS) { return DISCOVERY_ERR_BAD_REQ; } *len = outBuf.len; return ret; } #define COAP_MAX_PDU_SIZE 1024 int BuildSendPkt(const COAP_Packet *pkt, const char* remoteIp, const char *pktPayload, COAP_ReadWriteBuffer *sndPktBuff) { COAP_Packet respPkt; COAP_PacketParam respPktPara; COAP_Option options[COAP_MAX_OPTION] = {0}; if (pkt == NULL || remoteIp == NULL || pktPayload == NULL || sndPktBuff == NULL) { return DISCOVERY_ERR_BAD_REQ; } char *buf = sndPktBuff->readWriteBuf; unsigned int len = sndPktBuff->size; if (buf == NULL) { return DISCOVERY_ERR_BAD_REQ; } (void)memset_s(&respPkt, sizeof(COAP_Packet), 0, sizeof(COAP_Packet)); (void)memset_s(&respPktPara, sizeof(COAP_PacketParam), 0, sizeof(COAP_PacketParam)); respPktPara.options = options; respPktPara.options[respPktPara.optionsNum].num = DISCOVERY_MSG_URI_HOST; respPktPara.options[respPktPara.optionsNum].optionBuf = (unsigned char *)remoteIp; respPktPara.options[respPktPara.optionsNum].len = strlen(remoteIp); respPktPara.optionsNum++; respPktPara.options[respPktPara.optionsNum].num = DISCOVERY_MSG_URI_PATH; respPktPara.options[respPktPara.optionsNum].optionBuf = (unsigned char *)"device_discover"; respPktPara.options[respPktPara.optionsNum].len = strlen("device_discover"); respPktPara.optionsNum++; (void)memset_s(buf, COAP_MAX_PDU_SIZE, 0, COAP_MAX_PDU_SIZE); COAP_ResponseInfo respInfo = {&respPkt, &respPktPara, NULL, 0}; respInfo.payload = (unsigned char *)pktPayload; respInfo.payloadLen = strlen(pktPayload); int ret = COAP_SoftBusBuildMessage(pkt, &respInfo, buf, &len); if (ret != DISCOVERY_ERR_SUCCESS) { return DISCOVERY_ERR_BAD_REQ; } if (len >= sndPktBuff->size) { return DISCOVERY_ERR_BAD_REQ; } sndPktBuff->len = len; return DISCOVERY_ERR_SUCCESS; }
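/*
 * Minimal usage sketch appended for illustration only (not part of the original
 * module).  It shows the typical decode -> build-response flow using just the
 * functions defined above.  The function name demo_handle_request and the local
 * buffer are illustrative assumptions; error codes, types and constants
 * (COAP_Packet, COAP_ReadWriteBuffer, COAP_UDP, COAP_MAX_PDU_SIZE) come from
 * this file and its header.
 */
static int demo_handle_request(const unsigned char *rxBuf, unsigned int rxLen,
                               const char *remoteIp, const char *payload)
{
    COAP_Packet req;
    COAP_ReadWriteBuffer sndBuf;
    char out[COAP_MAX_PDU_SIZE]; /* BuildSendPkt clears COAP_MAX_PDU_SIZE bytes */

    (void)memset_s(&req, sizeof(COAP_Packet), 0, sizeof(COAP_Packet));
    req.protocol = COAP_UDP; /* COAP_SoftBusDecode only accepts UDP framing */
    if (COAP_SoftBusDecode(&req, rxBuf, rxLen) != DISCOVERY_ERR_SUCCESS) {
        return -1;
    }

    (void)memset_s(&sndBuf, sizeof(COAP_ReadWriteBuffer), 0, sizeof(COAP_ReadWriteBuffer));
    sndBuf.readWriteBuf = out;
    sndBuf.size = sizeof(out);
    /* adds the URI-Host/URI-Path options and appends the payload */
    return BuildSendPkt(&req, remoteIp, payload, &sndBuf);
}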
519605.c
#include <stdio.h>
#include <stdlib.h>

// Without a header node, the initial node itself is the head.
// This is the form usually used in C-language textbook descriptions.
struct Node {
    int data;
    struct Node *next;
};
typedef struct Node node;
typedef struct Node *PointNode;

// Head node: a node attached in front of the first node of a singly linked list.
// Its data field may hold nothing, or bookkeeping such as the list length.
// If the list has a head node, the head pointer points to the head node's storage location.
// Head pointer: a list is usually identified by its head pointer, e.g. singly linked list L.
// A NULL head pointer means an empty list; for a non-empty list the head pointer
// points to the storage location of the first node.
// How to represent a head pointer:
// struct Node *PointNode

// ====== Initialize an empty list: head pointer == NULL
// Version without a head node
// Initialize an empty list
// 1. Pass in the head pointer; it holds the storage location of the next node
// 2. A NULL head pointer means the list is empty
PointNode InitList(PointNode headPoint) {
    headPoint = NULL;
    return headPoint;
}

// ====== Initialize an empty list: head pointer --> head node, head node's next == NULL
// Version with a head node
// Initialize an empty list
// 1. Pass in the head pointer; it holds the storage location of the next node
// 2. The head pointer refers to the head node, whose next field is NULL
PointNode InitList2(PointNode headPoint) {
    headPoint = (PointNode)malloc(sizeof(struct Node));
    headPoint->next = NULL;
    return headPoint;
}

// Version without a head node
// Check whether the list is empty: check whether the first ordinary node exists
// 1. Pass in the head pointer
// 2. If the head pointer is NULL, the list is empty
// Returns 0 if empty, 1 if not empty
int IsEmpty(PointNode headPoint) {
    if (headPoint == NULL) {
        return 0;
    } else {
        return 1;
    }
}

// Version with a head node
// Check whether the list is empty: check whether the head node has a successor
// 1. Pass in the head pointer, which identifies the list
// 2. If the node after the head node is NULL, the list is empty
// Returns 0 if empty, 1 if not empty
int IsEmpty2(PointNode headPoint) {
    PointNode head = headPoint->next;
    if (head == NULL) {
        return 0;
    } else {
        return 1;
    }
}

// Version without a head node
// Create a single-node list
// 1. Pass in an empty list (identified by its head pointer) and a value
// 2. Create a new node: allocate memory and assign the value
// 3. Make the head pointer point to the new node, and the new node point to NULL
// 4. Return the head pointer
PointNode CreateNodeList(PointNode headPoint, int val) {
    PointNode newNode = (PointNode)malloc(sizeof(struct Node));
    newNode->next = NULL;
    newNode->data = val;
    headPoint = newNode;  // without a head node, the head pointer points at the new node itself
    return headPoint;
}

// Version with a head node
// Create a single-node list
// 1. Pass in an empty list (identified by its head pointer) and a value
// 2. Create a new node: allocate memory and assign the value
// 3. Make the head node point to the new node, and the new node point to NULL
// 4. Return the head pointer
PointNode CreateNodeList2(PointNode headPoint, int val) {
    // the head pointer returned by InitList2 refers to the head node itself
    PointNode newnode = (PointNode)malloc(sizeof(struct Node));
    newnode->next = NULL;
    newnode->data = val;
    headPoint->next = newnode;  // attach the new node after the head node
    return headPoint;
}

int main() {
    // list without a head node
    PointNode headPoint = NULL;
    PointNode hp = InitList(headPoint);
    int res = IsEmpty(hp);
    printf("%d", res);
    // list with a head node
    PointNode headPoint2 = NULL;
    PointNode hp2 = InitList2(headPoint2);
    int res2 = IsEmpty2(hp2);
    printf("%d", res2);
    return 0;
}
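// Traversal sketch, added for illustration only (not part of the original file).
// It assumes the head-node style list built by InitList2/CreateNodeList2 above:
// the pointer passed in refers to the head node, and data nodes start at
// headPoint->next.  The name PrintList2 is illustrative.
void PrintList2(PointNode headPoint) {
    PointNode cur = headPoint->next;  // skip the head node, which carries no data
    while (cur != NULL) {
        printf("%d ", cur->data);
        cur = cur->next;
    }
    printf("\n");
}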
231648.c
/* * Sorted array routines for CUPS. * * Copyright © 2021-2022 by OpenPrinting. * Copyright © 2007-2014 by Apple Inc. * Copyright © 1997-2007 by Easy Software Products. * * Licensed under Apache License v2.0. See the file "LICENSE" for more * information. */ /* * Include necessary headers... */ #include <cups/cups.h> #include "string-private.h" #include "debug-internal.h" /* * Limits... */ #define _CUPS_MAXSAVE 32 /**** Maximum number of saves ****/ /* * Types and structures... */ struct _cups_array_s /**** CUPS array structure ****/ { /* * The current implementation uses an insertion sort into an array of * sorted pointers. We leave the array type private/opaque so that we * can change the underlying implementation without affecting the users * of this API. */ size_t num_elements, /* Number of array elements */ alloc_elements, /* Allocated array elements */ current, /* Current element */ insert, /* Last inserted element */ num_saved, /* Number of saved elements */ saved[_CUPS_MAXSAVE]; /* Saved elements */ void **elements; /* Array elements */ cups_array_cb_t compare; /* Element comparison function */ bool unique; /* Are all elements unique? */ void *data; /* User data passed to compare */ cups_ahash_cb_t hashfunc; /* Hash function */ size_t hashsize, /* Size of hash */ *hash; /* Hash array */ cups_acopy_cb_t copyfunc; /* Copy function */ cups_afree_cb_t freefunc; /* Free function */ }; /* * Local functions... */ static bool cups_array_add(cups_array_t *a, void *e, bool insert); static size_t cups_array_find(cups_array_t *a, void *e, size_t prev, int *rdiff); /* * 'cupsArrayAdd()' - Add an element to an array. * * This function adds an element to an array. When adding an element to a * sorted array, non-unique elements are appended at the end of the run of * identical elements. For unsorted arrays, the element is appended to the end * of the array. */ bool /* O - `true` on success, `false` on failure */ cupsArrayAdd(cups_array_t *a, /* I - Array */ void *e) /* I - Element */ { DEBUG_printf(("2cupsArrayAdd(a=%p, e=%p)", (void *)a, e)); /* * Range check input... */ if (!a || !e) { DEBUG_puts("3cupsArrayAdd: returning false"); return (false); } /* * Append the element... */ return (cups_array_add(a, e, false)); } /* * 'cupsArrayAddStrings()' - Add zero or more delimited strings to an array. * * This function adds zero or more delimited strings to an array created using * the @link cupsArrayNewStrings@ function. Duplicate strings are *not* added. * If the string pointer "s" is `NULL` or the empty string, no strings are * added to the array. If "delim" is the space character, then all whitespace * is recognized as a delimiter. */ bool /* O - `true` on success, `false` on failure */ cupsArrayAddStrings(cups_array_t *a, /* I - Array */ const char *s, /* I - Delimited strings or `NULL` */ char delim) /* I - Delimiter character */ { char *buffer, /* Copy of string */ *start, /* Start of string */ *end; /* End of string */ bool status = true; /* Status of add */ DEBUG_printf(("cupsArrayAddStrings(a=%p, s=\"%s\", delim='%c')", (void *)a, s, delim)); if (!a || !s || !*s) { DEBUG_puts("1cupsArrayAddStrings: Returning 0"); return (false); } if (delim == ' ') { /* * Skip leading whitespace... 
*/ DEBUG_puts("1cupsArrayAddStrings: Skipping leading whitespace."); while (*s && isspace(*s & 255)) s ++; DEBUG_printf(("1cupsArrayAddStrings: Remaining string \"%s\".", s)); } if (!strchr(s, delim) && (delim != ' ' || (!strchr(s, '\t') && !strchr(s, '\n')))) { /* * String doesn't contain a delimiter, so add it as a single value... */ DEBUG_puts("1cupsArrayAddStrings: No delimiter seen, adding a single " "value."); if (!cupsArrayFind(a, (void *)s)) status = cupsArrayAdd(a, (void *)s); } else if ((buffer = strdup(s)) == NULL) { DEBUG_puts("1cupsArrayAddStrings: Unable to duplicate string."); status = false; } else { for (start = end = buffer; *end; start = end) { /* * Find the end of the current delimited string and see if we need to add * it... */ if (delim == ' ') { while (*end && !isspace(*end & 255)) end ++; while (*end && isspace(*end & 255)) *end++ = '\0'; } else if ((end = strchr(start, delim)) != NULL) *end++ = '\0'; else end = start + strlen(start); DEBUG_printf(("1cupsArrayAddStrings: Adding \"%s\", end=\"%s\"", start, end)); if (!cupsArrayFind(a, start)) status &= cupsArrayAdd(a, start); } free(buffer); } DEBUG_printf(("1cupsArrayAddStrings: Returning %s.", status ? "true" : "false")); return (status); } /* * 'cupsArrayClear()' - Clear an array. * * This function is equivalent to removing all elements in the array, so the * free callback (if any) is called for each element that is removed. */ void cupsArrayClear(cups_array_t *a) /* I - Array */ { /* * Range check input... */ if (!a) return; /* * Free the existing elements as needed.. */ if (a->freefunc) { size_t i; /* Looping var */ void **e; /* Current element */ for (i = a->num_elements, e = a->elements; i > 0; i --, e ++) (a->freefunc)(*e, a->data); } /* * Set the number of elements to 0; we don't actually free the memory * here - that is done in cupsArrayDelete()... */ a->num_elements = 0; a->current = SIZE_MAX; a->insert = SIZE_MAX; a->unique = true; a->num_saved = 0; } /* * 'cupsArrayDelete()' - Free all memory used by an array. * * This function frees all memory used by an array. The free callback (if any) * is called for each element in the array. */ void cupsArrayDelete(cups_array_t *a) /* I - Array */ { /* * Range check input... */ if (!a) return; /* * Clear the array... */ cupsArrayClear(a); /* * Free the other buffers... */ free(a->elements); free(a->hash); free(a); } /* * 'cupsArrayDup()' - Duplicate an array. */ cups_array_t * /* O - Duplicate array */ cupsArrayDup(cups_array_t *a) /* I - Array */ { cups_array_t *da; /* Duplicate array */ /* * Range check input... */ if (!a) return (NULL); /* * Allocate memory for the array... */ da = calloc(1, sizeof(cups_array_t)); if (!da) return (NULL); da->compare = a->compare; da->copyfunc = a->copyfunc; da->freefunc = a->freefunc; da->data = a->data; da->current = a->current; da->insert = a->insert; da->unique = a->unique; da->num_saved = a->num_saved; memcpy(da->saved, a->saved, sizeof(a->saved)); if (a->num_elements) { /* * Allocate memory for the elements... */ da->elements = malloc((size_t)a->num_elements * sizeof(void *)); if (!da->elements) { free(da); return (NULL); } /* * Copy the element pointers... */ if (a->copyfunc) { /* * Use the copy function to make a copy of each element... */ size_t i; /* Looping var */ for (i = 0; i < a->num_elements; i ++) da->elements[i] = (a->copyfunc)(a->elements[i], a->data); } else { /* * Just copy raw pointers... 
*/ memcpy(da->elements, a->elements, (size_t)a->num_elements * sizeof(void *)); } da->num_elements = a->num_elements; da->alloc_elements = a->num_elements; } /* * Return the new array... */ return (da); } /* * 'cupsArrayFind()' - Find an element in an array. */ void * /* O - Element found or @code NULL@ */ cupsArrayFind(cups_array_t *a, /* I - Array */ void *e) /* I - Element */ { size_t current, /* Current element */ hash; /* Hash index */ int diff; /* Difference */ /* * Range check input... */ if (!a || !a->num_elements || !e) return (NULL); /* * Look for a match... */ if (a->hash) { if ((hash = (*(a->hashfunc))(e, a->data)) >= a->hashsize) { current = a->current; hash = SIZE_MAX; } else if ((current = a->hash[hash]) >= a->num_elements) current = a->current; } else { current = a->current; hash = SIZE_MAX; } current = cups_array_find(a, e, current, &diff); if (!diff) { /* * Found a match! If the array does not contain unique values, find * the first element that is the same... */ if (!a->unique && a->compare) { /* * The array is not unique, find the first match... */ while (current > 0 && !(*(a->compare))(e, a->elements[current - 1], a->data)) current --; } a->current = current; if (hash < a->hashsize) a->hash[hash] = current; return (a->elements[current]); } else { /* * No match... */ a->current = SIZE_MAX; return (NULL); } } /* * 'cupsArrayGetCount()' - Get the number of elements in an array. */ size_t /* O - Number of elements */ cupsArrayGetCount(cups_array_t *a) /* I - Array */ { return (a ? a->num_elements : 0); } /* * 'cupsArrayGetCurrent()' - Return the current element in an array. * * This function returns the current element in an array. The current element * is undefined until you call @link cupsArrayFind@, @link cupsArrayGetElement@, * @link cupsArrayGetFirst@, or @link cupsArrayGetLast@. */ void * /* O - Element */ cupsArrayGetCurrent(cups_array_t *a) /* I - Array */ { /* * Range check input... */ if (!a) return (NULL); /* * Return the current element... */ if (a->current < a->num_elements) return (a->elements[a->current]); else return (NULL); } /* * 'cupsArrayGetFirst()' - Get the first element in an array. */ void * /* O - First element or `NULL` if the array is empty */ cupsArrayGetFirst(cups_array_t *a) /* I - Array */ { return (cupsArrayGetElement(a, 0)); } /* * 'cupsArrayGetIndex()' - Get the index of the current element. * * This function returns the index of the current element or `SIZE_MAX` if * there is no current element. The current element is undefined until you call * @link cupsArrayFind@, @link cupsArrayGetElement@, @link cupsArrayGetFirst@, * or @link cupsArrayGetLast@. */ size_t /* O - Index of the current element, starting at 0 */ cupsArrayGetIndex(cups_array_t *a) /* I - Array */ { return (a ? a->current : SIZE_MAX); } /* * 'cupsArrayGetInsert()' - Get the index of the last added or inserted element. * * This function returns the index of the last added or inserted element or * `SIZE_MAX` if no elements have been added or inserted. */ size_t /* O - Index of the last added or inserted element, starting at 0 */ cupsArrayGetInsert(cups_array_t *a) /* I - Array */ { return (a ? a->insert : SIZE_MAX); } /* * 'cupsArrayGetElement()' - Get the N-th element in the array. 
*/ void * /* O - N-th element or `NULL` */ cupsArrayGetElement(cups_array_t *a, /* I - Array */ size_t n) /* I - Index into array, starting at 0 */ { if (!a || n >= a->num_elements) return (NULL); a->current = n; return (a->elements[n]); } /* * 'cupsArrayGetLast()' - Get the last element in the array. */ void * /* O - Last element or`NULL` if the array is empty */ cupsArrayGetLast(cups_array_t *a) /* I - Array */ { /* * Range check input... */ if (!a || a->num_elements == 0) return (NULL); /* * Return the last element... */ return (cupsArrayGetElement(a, a->num_elements - 1)); } /* * 'cupsArrayGetNext()' - Get the next element in an array. * * This function returns the next element in an array. The next element is * undefined until you call @link cupsArrayFind@, @link cupsArrayGetElement@, * @link cupsArrayGetFirst@, or @link cupsArrayGetLast@ to set the current * element. */ void * /* O - Next element or @code NULL@ */ cupsArrayGetNext(cups_array_t *a) /* I - Array */ { if (!a || a->num_elements == 0) return (NULL); else if (a->current == SIZE_MAX) return (cupsArrayGetElement(a, 0)); else return (cupsArrayGetElement(a, a->current + 1)); } /* * 'cupsArrayGetPrev()' - Get the previous element in an array. * * This function returns the previous element in an array. The previous element * is undefined until you call @link cupsArrayFind@, @link cupsArrayGetElement@, * @link cupsArrayGetFirst@, or @link cupsArrayGetLast@ to set the current * element. */ void * /* O - Previous element or @code NULL@ */ cupsArrayGetPrev(cups_array_t *a) /* I - Array */ { if (!a || a->num_elements == 0 || a->current == 0 || a->current == SIZE_MAX) return (NULL); else return (cupsArrayGetElement(a, a->current - 1)); } /* * 'cupsArrayGetUserData()' - Return the user data for an array. */ void * /* O - User data */ cupsArrayGetUserData(cups_array_t *a) /* I - Array */ { return (a ? a->data : NULL); } /* * 'cupsArrayInsert()' - Insert an element in an array. * * This function inserts an element in an array. When inserting an element * in a sorted array, non-unique elements are inserted at the beginning of the * run of identical elements. For unsorted arrays, the element is inserted at * the beginning of the array. */ bool /* O - `true` on success, `false` on failure */ cupsArrayInsert(cups_array_t *a, /* I - Array */ void *e) /* I - Element */ { DEBUG_printf(("2cupsArrayInsert(a=%p, e=%p)", (void *)a, e)); /* * Range check input... */ if (!a || !e) { DEBUG_puts("3cupsArrayInsert: returning false"); return (false); } /* * Insert the element... */ return (cups_array_add(a, e, true)); } /* * 'cupsArrayNew()' - Create a new array with callback functions. * * This function creates a new array with optional callback functions. The * comparison callback function ("f") is used to create a sorted array. The * function receives pointers to two elements and the user data pointer ("d"). * The user data pointer argument can safely be omitted when not required so * functions like `strcmp` can be used for sorted string arrays. * * ``` * int // -1 if a < b, 0 if a == b, and 1 if a > b * compare_cb(void *a, // First element * void *b, // Second element * void *d) // User data pointer * { * ... * } * ``` * * The hash callback function ("hf") is used to implement cached lookups with * the specified hash size ("hsize"). The function receives a pointer to an * element and the user data pointer ("d") and returns an unsigned integer * representing a hash into the array. 
The hash value is of type `size_t` which * provides at least 32-bits of resolution. * * ``` * size_t // Hash value from 0 to (hashsize - 1) * hash_cb(void *e, // Element * void *d) // User data pointer * { * ... * } * ``` * * The copy callback function ("cf") is used to automatically copy/retain * elements when added to the array or the array is copied with * @link cupsArrayDup@. The function receives a pointer to the element and the * user data pointer ("d") and returns a new pointer that is stored in the array. * * ``` * void * // Pointer to copied/retained element or NULL * copy_cb(void *e, // Element to copy/retain * void *d) // User data pointer * { * ... * } * ``` * * Finally, the free callback function ("cf") is used to automatically * free/release elements when removed or the array is deleted. The function * receives a pointer to the element and the user data pointer ("d"). * * ``` * void * free_cb(void *e, // Element to free/release * void *d) // User data pointer * { * ... * } * ``` */ cups_array_t * /* O - Array */ cupsArrayNew(cups_array_cb_t f, /* I - Comparison callback function or `NULL` for an unsorted array */ void *d, /* I - User data or `NULL` */ cups_ahash_cb_t hf, /* I - Hash callback function or `NULL` for unhashed lookups */ size_t hsize, /* I - Hash size (>= `0`) */ cups_acopy_cb_t cf, /* I - Copy callback function or `NULL` for none */ cups_afree_cb_t ff) /* I - Free callback function or `NULL` for none */ { cups_array_t *a; /* Array */ /* * Allocate memory for the array... */ if ((a = calloc(1, sizeof(cups_array_t))) == NULL) return (NULL); a->compare = f; a->data = d; a->current = SIZE_MAX; a->insert = SIZE_MAX; a->num_saved = 0; a->unique = true; if (hsize > 0 && hf) { a->hashfunc = hf; a->hashsize = hsize; a->hash = malloc((size_t)hsize * sizeof(size_t)); if (!a->hash) { free(a); return (NULL); } memset(a->hash, -1, (size_t)hsize * sizeof(size_t)); } a->copyfunc = cf; a->freefunc = ff; return (a); } /* * 'cupsArrayNewStrings()' - Create a new array of delimited strings. * * This function creates an array that holds zero or more strings. The created * array automatically manages copies of the strings passed and sorts them in * ascending order using a case-sensitive comparison. If the string pointer "s" * is `NULL` or the empty string, no strings are added to the newly created * array. * * Additional strings can be added using the @link cupsArrayAdd@ or * @link cupsArrayAddStrings@ functions. */ cups_array_t * /* O - Array */ cupsArrayNewStrings(const char *s, /* I - Delimited strings or `NULL` to create an empty array */ char delim) /* I - Delimiter character */ { cups_array_t *a; /* Array */ if ((a = cupsArrayNew((cups_array_cb_t)strcmp, NULL, NULL, 0, (cups_acopy_cb_t)_cupsStrAlloc, (cups_afree_cb_t)_cupsStrFree)) != NULL) cupsArrayAddStrings(a, s, delim); return (a); } /* * 'cupsArrayRemove()' - Remove an element from an array. * * This function removes an element from an array. If more than one element * matches "e", only the first matching element is removed. */ bool /* O - `true` on success, `false` on failure */ cupsArrayRemove(cups_array_t *a, /* I - Array */ void *e) /* I - Element */ { size_t i, /* Looping var */ current; /* Current element */ int diff; /* Difference */ /* * Range check input... */ if (!a || a->num_elements == 0 || !e) return (false); /* * See if the element is in the array... */ current = cups_array_find(a, e, a->current, &diff); if (diff) return (false); /* * Yes, now remove it... 
*/ a->num_elements --; if (a->freefunc) (a->freefunc)(a->elements[current], a->data); if (current < a->num_elements) memmove(a->elements + current, a->elements + current + 1, (a->num_elements - current) * sizeof(void *)); if (current <= a->current) { if (a->current) a->current --; else a->current = SIZE_MAX; } if (current < a->insert) a->insert --; else if (current == a->insert) a->insert = SIZE_MAX; for (i = 0; i < a->num_saved; i ++) { if (current <= a->saved[i]) a->saved[i] --; } if (a->num_elements <= 1) a->unique = true; return (true); } /* * 'cupsArrayRestore()' - Reset the current element to the last @link cupsArraySave@. */ void * /* O - New current element */ cupsArrayRestore(cups_array_t *a) /* I - Array */ { if (!a || a->num_saved == 0) return (NULL); a->num_saved --; a->current = a->saved[a->num_saved]; if (a->current < a->num_elements) return (a->elements[a->current]); else return (NULL); } /* * 'cupsArraySave()' - Mark the current element for a later @link cupsArrayRestore@. * * The current element is undefined until you call @link cupsArrayFind@, * @link cupsArrayGetElement@, @link cupsArrayGetFirst@, or * @link cupsArrayGetLast@ to set the current element. * * The save/restore stack is guaranteed to be at least 32 elements deep. */ bool /* O - `true` on success, `false` on failure */ cupsArraySave(cups_array_t *a) /* I - Array */ { if (!a || a->num_saved >= _CUPS_MAXSAVE) return (false); a->saved[a->num_saved] = a->current; a->num_saved ++; return (true); } /* * 'cups_array_add()' - Insert or append an element to the array. */ static bool /* O - `true` on success, `false` on failure */ cups_array_add(cups_array_t *a, /* I - Array */ void *e, /* I - Element to add */ bool insert) /* I - `true` = insert, `false` = append */ { size_t i, /* Looping var */ current; /* Current element */ int diff; /* Comparison with current element */ DEBUG_printf(("7cups_array_add(a=%p, e=%p, insert=%d)", (void *)a, e, insert)); /* * Verify we have room for the new element... */ if (a->num_elements >= a->alloc_elements) { /* * Allocate additional elements; start with 16 elements, then double the * size until 1024 elements, then add 1024 elements thereafter... */ void **temp; /* New array elements */ size_t count; /* New allocation count */ if (a->alloc_elements == 0) count = 16; else if (a->alloc_elements < 1024) count = a->alloc_elements * 2; else count = a->alloc_elements + 1024; DEBUG_printf(("9cups_array_add: count=" CUPS_LLFMT, CUPS_LLCAST count)); if ((temp = realloc(a->elements, count * sizeof(void *))) == NULL) { DEBUG_puts("9cups_array_add: allocation failed, returning false"); return (false); } a->alloc_elements = count; a->elements = temp; } /* * Find the insertion point for the new element; if there is no * compare function or elements, just add it to the beginning or end... */ if (!a->num_elements || !a->compare) { /* * No elements or comparison function, insert/append as needed... */ if (insert) current = 0; /* Insert at beginning */ else current = a->num_elements; /* Append to the end */ } else { /* * Do a binary search for the insertion point... */ current = cups_array_find(a, e, a->insert, &diff); if (diff > 0) { /* * Insert after the current element... */ current ++; } else if (!diff) { /* * Compared equal, make sure we add to the begining or end of * the current run of equal elements... */ a->unique = false; if (insert) { /* * Insert at beginning of run... 
*/ while (current > 0 && !(*(a->compare))(e, a->elements[current - 1], a->data)) current --; } else { /* * Append at end of run... */ do { current ++; } while (current < a->num_elements && !(*(a->compare))(e, a->elements[current], a->data)); } } } /* * Insert or append the element... */ if (current < a->num_elements) { /* * Shift other elements to the right... */ memmove(a->elements + current + 1, a->elements + current, (a->num_elements - current) * sizeof(void *)); if (a->current >= current) a->current ++; for (i = 0; i < a->num_saved; i ++) { if (a->saved[i] >= current) a->saved[i] ++; } DEBUG_printf(("9cups_array_add: insert element at index " CUPS_LLFMT, CUPS_LLCAST current)); } #ifdef DEBUG else DEBUG_printf(("9cups_array_add: append element at " CUPS_LLFMT, CUPS_LLCAST current)); #endif /* DEBUG */ if (a->copyfunc) { if ((a->elements[current] = (a->copyfunc)(e, a->data)) == NULL) { DEBUG_puts("8cups_array_add: Copy function returned NULL, returning false"); return (false); } } else a->elements[current] = e; a->num_elements ++; a->insert = current; #ifdef DEBUG for (current = 0; current < a->num_elements; current ++) DEBUG_printf(("9cups_array_add: a->elements[" CUPS_LLFMT "]=%p", CUPS_LLCAST current, a->elements[current])); #endif /* DEBUG */ DEBUG_puts("9cups_array_add: returning true"); return (true); } /* * 'cups_array_find()' - Find an element in the array. */ static size_t /* O - Index of match */ cups_array_find(cups_array_t *a, /* I - Array */ void *e, /* I - Element */ size_t prev, /* I - Previous index */ int *rdiff) /* O - Difference of match */ { size_t left, /* Left side of search */ right, /* Right side of search */ current; /* Current element */ int diff; /* Comparison with current element */ DEBUG_printf(("7cups_array_find(a=%p, e=%p, prev=%u, rdiff=%p)", (void *)a, e, (unsigned)prev, (void *)rdiff)); if (a->compare) { /* * Do a binary search for the element... */ DEBUG_puts("9cups_array_find: binary search"); if (prev < a->num_elements) { /* * Start search on either side of previous... */ if ((diff = (*(a->compare))(e, a->elements[prev], a->data)) == 0 || (diff < 0 && prev == 0) || (diff > 0 && prev == (a->num_elements - 1))) { /* * Exact or edge match, return it! */ DEBUG_printf(("9cups_array_find: Returning %u, diff=%d", (unsigned)prev, diff)); *rdiff = diff; return (prev); } else if (diff < 0) { /* * Start with previous on right side... */ left = 0; right = prev; } else { /* * Start wih previous on left side... */ left = prev; right = a->num_elements - 1; } } else { /* * Start search in the middle... */ left = 0; right = a->num_elements - 1; } do { current = (left + right) / 2; diff = (*(a->compare))(e, a->elements[current], a->data); DEBUG_printf(("9cups_array_find: left=%u, right=%u, current=%u, diff=%d", (unsigned)left, (unsigned)right, (unsigned)current, diff)); if (diff == 0) break; else if (diff < 0) right = current; else left = current; } while ((right - left) > 1); if (diff != 0) { /* * Check the last 1 or 2 elements... */ if ((diff = (*(a->compare))(e, a->elements[left], a->data)) <= 0) { current = left; } else { diff = (*(a->compare))(e, a->elements[right], a->data); current = right; } } } else { /* * Do a linear pointer search... */ DEBUG_puts("9cups_array_find: linear search"); diff = 1; for (current = 0; current < a->num_elements; current ++) { if (a->elements[current] == e) { diff = 0; break; } } } /* * Return the closest element and the difference... 
*/ DEBUG_printf(("8cups_array_find: Returning %u, diff=%d", (unsigned)current, diff)); *rdiff = diff; return (current); }
49133.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include "generator_common.h" #include "generator_x86_instructions.h" #include "generator_gemm_common.h" #include "generator_gemm_sse3_avx_avx2_avx512.h" #include "generator_gemm_sse3_microkernel.h" #include "generator_gemm_avx_microkernel.h" #include "generator_gemm_avx2_microkernel.h" #include "generator_gemm_avx512_microkernel_nofsdbcst.h" #include "libxsmm_main.h" #if defined(LIBXSMM_OFFLOAD_TARGET) # pragma offload_attribute(push,target(LIBXSMM_OFFLOAD_TARGET)) #endif #include <stdlib.h> #include <string.h> #include <assert.h> #include <stdio.h> #if defined(LIBXSMM_OFFLOAD_TARGET) # pragma offload_attribute(pop) #endif LIBXSMM_API_INTERN void libxsmm_generator_gemm_sse3_avx_avx2_avx512_kernel( libxsmm_generated_code* io_generated_code, const libxsmm_gemm_descriptor* i_xgemm_desc ) { void (*l_generator_microkernel)(libxsmm_generated_code*, const libxsmm_gp_reg_mapping*, const libxsmm_micro_kernel_config*, const libxsmm_gemm_descriptor*, const unsigned int, const unsigned int, const int); libxsmm_micro_kernel_config l_micro_kernel_config; libxsmm_loop_label_tracker l_loop_label_tracker; libxsmm_gp_reg_mapping l_gp_reg_mapping; /* some hard coded parameters for k-blocking */ unsigned int l_k_blocking = 4; unsigned int l_k_threshold = 23; /* initialize n-blocking */ unsigned int l_n_count = 0; /* array counter for blocking arrays */ unsigned int l_n_done = 0; /* progress tracker */ unsigned int l_n_n[2] = {0,0}; /* blocking sizes for blocks */ unsigned int l_n_N[2] = {0,0}; /* size of blocks */ unsigned int adjust_A_pf_ptrs = 0; unsigned int adjust_B_pf_ptrs = 0; /* as we have 32 registers, we can block more aggressively */ /* @TODO: take M blocking into account */ if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512_CORE ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) { if (i_xgemm_desc->n == 7) { libxsmm_compute_equalized_blocking( i_xgemm_desc->n, 7, &(l_n_N[0]), &(l_n_n[0]), &(l_n_N[1]), &(l_n_n[1]) ); } else { libxsmm_compute_equalized_blocking( i_xgemm_desc->n, 6, &(l_n_N[0]), &(l_n_n[0]), &(l_n_N[1]), &(l_n_n[1]) ); } } else { libxsmm_compute_equalized_blocking( i_xgemm_desc->n, 3, &(l_n_N[0]), &(l_n_n[0]), &(l_n_N[1]), &(l_n_n[1]) ); } /* check that l_n_N1 is non-zero */ if ( l_n_N[0] == 0 ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_N_BLOCK ); return; } /* Make sure we properly adjust A,B prefetch pointers in case of batch-reduce gemm kernel */ if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) { if (i_xgemm_desc->prefetch & LIBXSMM_GEMM_PREFETCH_AL1 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2_JPST || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_JPST || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) { adjust_A_pf_ptrs = 1; } } /* define gp register mapping */ libxsmm_reset_x86_gp_reg_mapping( &l_gp_reg_mapping ); #if defined(_WIN32) || defined(__CYGWIN__) l_gp_reg_mapping.gp_reg_a = LIBXSMM_X86_GP_REG_RCX; 
l_gp_reg_mapping.gp_reg_b = LIBXSMM_X86_GP_REG_RDX; l_gp_reg_mapping.gp_reg_c = LIBXSMM_X86_GP_REG_R8; /* TODO: full support for Windows calling convention */ l_gp_reg_mapping.gp_reg_a_prefetch = LIBXSMM_X86_GP_REG_RDI; l_gp_reg_mapping.gp_reg_b_prefetch = LIBXSMM_X86_GP_REG_RSI; #else /* match calling convention on Linux */ l_gp_reg_mapping.gp_reg_a = LIBXSMM_X86_GP_REG_RDI; l_gp_reg_mapping.gp_reg_b = LIBXSMM_X86_GP_REG_RSI; l_gp_reg_mapping.gp_reg_c = LIBXSMM_X86_GP_REG_RDX; l_gp_reg_mapping.gp_reg_a_prefetch = LIBXSMM_X86_GP_REG_RCX; l_gp_reg_mapping.gp_reg_b_prefetch = LIBXSMM_X86_GP_REG_R8; /* If we are generating the batchreduce kernel, then we rename the registers */ if ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE)) { l_gp_reg_mapping.gp_reg_a = LIBXSMM_X86_GP_REG_RDI; l_gp_reg_mapping.gp_reg_b = LIBXSMM_X86_GP_REG_RSI; l_gp_reg_mapping.gp_reg_c = LIBXSMM_X86_GP_REG_RDX; l_gp_reg_mapping.gp_reg_reduce_count = LIBXSMM_X86_GP_REG_RCX; l_gp_reg_mapping.gp_reg_a_prefetch = LIBXSMM_X86_GP_REG_R8; l_gp_reg_mapping.gp_reg_b_prefetch = LIBXSMM_X86_GP_REG_R9; l_gp_reg_mapping.gp_reg_reduce_loop = LIBXSMM_X86_GP_REG_R10; l_gp_reg_mapping.gp_reg_help_0 = LIBXSMM_X86_GP_REG_R12; } else if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) { l_gp_reg_mapping.gp_reg_a = LIBXSMM_X86_GP_REG_RDI; l_gp_reg_mapping.gp_reg_b = LIBXSMM_X86_GP_REG_RSI; l_gp_reg_mapping.gp_reg_c = LIBXSMM_X86_GP_REG_RDX; l_gp_reg_mapping.gp_reg_reduce_count = LIBXSMM_X86_GP_REG_RCX; l_gp_reg_mapping.gp_reg_a_offset = LIBXSMM_X86_GP_REG_R8; l_gp_reg_mapping.gp_reg_b_offset = LIBXSMM_X86_GP_REG_R9; l_gp_reg_mapping.gp_reg_reduce_loop = LIBXSMM_X86_GP_REG_R10; l_gp_reg_mapping.gp_reg_help_0 = LIBXSMM_X86_GP_REG_R12; } #endif l_gp_reg_mapping.gp_reg_help_5 = LIBXSMM_X86_GP_REG_R11; l_gp_reg_mapping.gp_reg_mloop = LIBXSMM_X86_GP_REG_R12; l_gp_reg_mapping.gp_reg_nloop = LIBXSMM_X86_GP_REG_R13; l_gp_reg_mapping.gp_reg_kloop = LIBXSMM_X86_GP_REG_R14; /* define loop_label_tracker */ libxsmm_reset_loop_label_tracker( &l_loop_label_tracker ); /* set up architecture dependent compute micro kernel generator */ if ( io_generated_code->arch < LIBXSMM_X86_SSE3 ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH ); return; } else if ( io_generated_code->arch <= LIBXSMM_X86_SSE4 ) { l_generator_microkernel = libxsmm_generator_gemm_sse3_microkernel; } else if ( io_generated_code->arch == LIBXSMM_X86_AVX ) { l_generator_microkernel = libxsmm_generator_gemm_avx_microkernel; } else if ( io_generated_code->arch == LIBXSMM_X86_AVX2 ) { l_generator_microkernel = libxsmm_generator_gemm_avx2_microkernel; } else if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512_CORE ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) { l_generator_microkernel = libxsmm_generator_gemm_avx512_microkernel_nofsdbcst; } else { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH ); return; } /* define the micro kernel code gen properties */ libxsmm_generator_gemm_init_micro_kernel_config_fullvector( &l_micro_kernel_config, io_generated_code->arch, i_xgemm_desc, 0 ); /* open asm */ libxsmm_x86_instruction_open_stream( io_generated_code, &l_gp_reg_mapping, i_xgemm_desc->prefetch ); /* Load the actual batch-reduce trip count */ if ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE)) { libxsmm_x86_instruction_alu_mem( 
io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_reduce_count, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, l_gp_reg_mapping.gp_reg_reduce_count, 0 ); } /* apply n_blocking */ while (l_n_done != (unsigned int)i_xgemm_desc->n) { unsigned int l_n_blocking = l_n_n[l_n_count]; unsigned int l_m_done = 0; unsigned int l_m_done_old = 0; unsigned int l_m_blocking = 0; /* advance N */ l_n_done += l_n_N[l_n_count]; l_n_count++; /* open N loop */ libxsmm_generator_gemm_header_nloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, l_n_blocking ); /* define the micro kernel code gen properties, especially m-blocking affects the vector instruction length */ l_m_blocking = libxsmm_generator_gemm_sse3_avx_avx2_avx512_get_initial_m_blocking( &l_micro_kernel_config, io_generated_code->arch, i_xgemm_desc ); /* apply m_blocking */ while (l_m_done != (unsigned int)i_xgemm_desc->m) { if (l_m_done == 0) { /* This is a SeisSol Order 6, HSW, DP performance fix */ if ( ( io_generated_code->arch == LIBXSMM_X86_AVX2 ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_done_old = l_m_done; if (i_xgemm_desc->m == 56) { l_m_done = 32; } else { assert(0 != l_m_blocking); l_m_done = l_m_done + (((i_xgemm_desc->m - l_m_done_old) / l_m_blocking) * l_m_blocking); } } else { l_m_done_old = l_m_done; assert(0 != l_m_blocking); l_m_done = l_m_done + (((i_xgemm_desc->m - l_m_done_old) / l_m_blocking) * l_m_blocking); } } else { l_m_done_old = l_m_done; assert(0 != l_m_blocking); l_m_done = l_m_done + (((i_xgemm_desc->m - l_m_done_old) / l_m_blocking) * l_m_blocking); } if ( (l_m_done != l_m_done_old) && (l_m_done > 0) ) { /* when on AVX512, load mask, if needed */ if ( ( l_micro_kernel_config.use_masking_a_c != 0 ) && ( io_generated_code->arch >= LIBXSMM_X86_AVX512 ) && ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) { unsigned int l_mask_count; unsigned int l_corrected_vlen; /* compute the mask count, depends on vlen as block in M */ if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { l_corrected_vlen = l_micro_kernel_config.vector_length; } else { l_corrected_vlen = l_micro_kernel_config.vector_length; } l_mask_count = l_corrected_vlen - ( l_m_blocking % l_corrected_vlen ); libxsmm_generator_gemm_initialize_avx512_mask( io_generated_code, l_gp_reg_mapping.gp_reg_help_5, i_xgemm_desc, l_mask_count ); } libxsmm_generator_gemm_header_mloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, l_m_blocking ); libxsmm_generator_gemm_load_C( io_generated_code, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_n_blocking ); if ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE)) { libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_mloop); if ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE)) { libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b); libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a); } /* This is the reduce loop */ libxsmm_generator_gemm_header_reduceloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config ); if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) { 
libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a); libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b); if (adjust_A_pf_ptrs) { libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a_prefetch ); } if (adjust_B_pf_ptrs) { libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b_prefetch ); } /* load to reg_a the proper array based on the reduce loop index */ libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_a, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_a, 0 ); /* load to reg_b the proper array based on the reduce loop index */ libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_b, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_b, 0 ); if (adjust_A_pf_ptrs) { libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_a_prefetch, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_a_prefetch, 0 ); } if (adjust_B_pf_ptrs) { libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_b_prefetch, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_b_prefetch, 0 ); } } else if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) { libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a); libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b); libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b); libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a); /* Calculate to reg_a the proper address based on the reduce loop index */ libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_a_offset, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_add_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_a); /* Calculate to reg_b the proper address based on the reduce loop index */ libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_b_offset, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_add_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_b); } else if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE) { libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a); libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b); libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_b); libxsmm_x86_instruction_push_reg( io_generated_code, l_gp_reg_mapping.gp_reg_a); /* Calculate to reg_a the proper address based on the reduce loop index */ libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_reduce_loop, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_IMUL, l_gp_reg_mapping.gp_reg_help_0, i_xgemm_desc->c1); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_add_instruction, 
l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_a); /* Calculate to reg_b the proper address based on the reduce loop index */ libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_reduce_loop, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_IMUL, l_gp_reg_mapping.gp_reg_help_0, i_xgemm_desc->c2); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_add_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_b); } } /* apply multiple k_blocking strategies */ /* 1. we are larger the k_threshold and a multiple of a predefined blocking parameter */ if ((i_xgemm_desc->k % l_k_blocking) == 0 && (l_k_threshold < (unsigned int)i_xgemm_desc->k)) { unsigned int l_k; libxsmm_generator_gemm_header_kloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, l_m_blocking, l_k_blocking); for ( l_k = 0; l_k < l_k_blocking; l_k++) { l_generator_microkernel(io_generated_code, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_n_blocking, -1); } libxsmm_generator_gemm_footer_kloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, i_xgemm_desc->k, 1 ); } else { /* 2. we want to fully unroll below the threshold */ if ((unsigned int)i_xgemm_desc->k <= l_k_threshold) { unsigned int l_k; for ( l_k = 0; l_k < (unsigned int)i_xgemm_desc->k; l_k++) { l_generator_microkernel(io_generated_code, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_n_blocking, l_k); } /* 3. we are large than the threshold but not a multiple of the blocking factor -> largest possible blocking + remainder handling */ } else { unsigned int l_max_blocked_k = ((i_xgemm_desc->k)/l_k_blocking)*l_k_blocking; unsigned int l_k; if ( l_max_blocked_k > 0 ) { libxsmm_generator_gemm_header_kloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, l_m_blocking, l_k_blocking); for ( l_k = 0; l_k < l_k_blocking; l_k++) { l_generator_microkernel(io_generated_code, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_n_blocking, -1); } libxsmm_generator_gemm_footer_kloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_max_blocked_k, 0 ); } if (l_max_blocked_k > 0 ) { /* handle trans B */ int l_b_offset = 0; if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) { l_b_offset = i_xgemm_desc->ldb * l_max_blocked_k * l_micro_kernel_config.datatype_size; } else { l_b_offset = l_max_blocked_k * l_micro_kernel_config.datatype_size; } libxsmm_x86_instruction_alu_imm( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_b, l_b_offset ); } for ( l_k = l_max_blocked_k; l_k < (unsigned int)i_xgemm_desc->k; l_k++) { l_generator_microkernel(io_generated_code, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_n_blocking, l_k); } } } if ((i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) || (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE)) { if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) { if (adjust_B_pf_ptrs) { libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_mem( io_generated_code, 
l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_b_prefetch, 1 ); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_b_prefetch); } if (adjust_A_pf_ptrs) { libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_a_prefetch, 1 ); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_a_prefetch); } /* Pop address of B_array to help_0 and store proper address of B */ libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_b, 1 ); /* Move to reg_b the address of B_array */ libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_b); /* Pop address of A_array to help_0 and store proper address of A */ libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_reduce_loop, 8, 0, l_gp_reg_mapping.gp_reg_a, 1 ); /* Move to reg_a the address of A_array */ libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_a); } libxsmm_generator_gemm_footer_reduceloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc); if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) { /* Calculate to reg_a the proper A advance form the microkernel */ libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_a_offset, l_gp_reg_mapping.gp_reg_reduce_loop, 8, -8, l_gp_reg_mapping.gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_a); /* Calculate to reg_b the proper B advance form the microkernel */ libxsmm_x86_instruction_alu_mem( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_b_offset, l_gp_reg_mapping.gp_reg_reduce_loop, 8, -8, l_gp_reg_mapping.gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_b); /* Consume the last two pushes form the stack */ libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); } if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_STRIDE) { /* Calculate to reg_a the proper A advance form the microkernel */ libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_reduce_count, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_imm( 
io_generated_code, LIBXSMM_X86_INSTR_IMUL, l_gp_reg_mapping.gp_reg_help_0, i_xgemm_desc->c1); libxsmm_x86_instruction_alu_imm( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_help_0, i_xgemm_desc->c1); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_a); /* Calculate to reg_b the proper B advance form the microkernel */ libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_mov_instruction, l_gp_reg_mapping.gp_reg_reduce_count, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_IMUL, l_gp_reg_mapping.gp_reg_help_0, i_xgemm_desc->c2); libxsmm_x86_instruction_alu_imm( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_help_0, i_xgemm_desc->c2); libxsmm_x86_instruction_alu_reg( io_generated_code, l_micro_kernel_config.alu_sub_instruction, l_gp_reg_mapping.gp_reg_help_0, l_gp_reg_mapping.gp_reg_b); /* Consume the last two pushes form the stack */ libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_help_0); } libxsmm_x86_instruction_pop_reg( io_generated_code, l_gp_reg_mapping.gp_reg_mloop ); } libxsmm_generator_gemm_store_C( io_generated_code, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_n_blocking ); libxsmm_generator_gemm_footer_mloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_m_blocking, l_m_done, 0 ); } /* switch to next smaller m_blocking */ l_m_blocking = libxsmm_generator_gemm_sse3_avx_avx2_avx512_update_m_blocking( &l_micro_kernel_config, io_generated_code->arch, i_xgemm_desc, l_m_blocking ); } libxsmm_generator_gemm_footer_nloop( io_generated_code, &l_loop_label_tracker, &l_gp_reg_mapping, &l_micro_kernel_config, i_xgemm_desc, l_n_blocking, l_n_done ); } /* close asm */ libxsmm_x86_instruction_close_stream( io_generated_code, &l_gp_reg_mapping, i_xgemm_desc->prefetch ); } LIBXSMM_API_INTERN unsigned int libxsmm_generator_gemm_sse3_avx_avx2_avx512_get_initial_m_blocking( libxsmm_micro_kernel_config* io_micro_kernel_config, const unsigned int i_arch, const libxsmm_gemm_descriptor* i_xgemm_desc ) { unsigned int l_m_blocking = 0; unsigned int l_use_masking_a_c = 0; if ( ( i_arch <= LIBXSMM_X86_SSE4 ) && ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_blocking = 12; } else if ( ( i_arch <= LIBXSMM_X86_SSE4 ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_blocking = 6; } else if ( ( i_arch == LIBXSMM_X86_AVX ) && ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_blocking = 24; } else if ( ( i_arch == LIBXSMM_X86_AVX ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_blocking = 12; } else if ( ( i_arch == LIBXSMM_X86_AVX2 ) && ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_blocking = 32; } else if ( ( i_arch == LIBXSMM_X86_AVX2 ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { l_m_blocking = 16; } else if ( ( i_arch <= LIBXSMM_X86_ALLFEAT ) && ( ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) || ( LIBXSMM_GEMM_PRECISION_I32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* 
Remark switching ti OUT datatype check here to cover BF16 in, Fp32/Int32 out kernel with the same logic */ /* @TODO check if there is a better blocking strategy */ if ( i_xgemm_desc->m >= 64 ) { l_m_blocking = 64; } else { l_m_blocking = i_xgemm_desc->m; /* in case we don't have a full vector length, we use masking */ if ( l_m_blocking % 16 != 0 ) { l_use_masking_a_c = 1; } } } else if ( ( i_arch <= LIBXSMM_X86_ALLFEAT ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { /* @TODO check if there is a better blocking strategy */ if ( i_xgemm_desc->m >= 32 ) { l_m_blocking = 32; } else { l_m_blocking = i_xgemm_desc->m; /* in case we don't have a full vector length, we use masking */ if ( l_m_blocking % 8 != 0 ) { l_use_masking_a_c = 1; } } } else if ( ( i_arch <= LIBXSMM_X86_ALLFEAT ) && ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) { /* Remark switching ti OUT datatype check here to cover BF16 in, Fp32 out kernel with the same logic */ /* @TODO check if there is a better blocking strategy */ if ( i_xgemm_desc->m >= 64 ) { l_m_blocking = 64; } else { l_m_blocking = i_xgemm_desc->m; /* in case we don't have a full vector length, we use masking */ if ( l_m_blocking % 16 != 0 ) { l_use_masking_a_c = 1; } } } else { } libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, l_use_masking_a_c ); return l_m_blocking; } LIBXSMM_API_INTERN unsigned int libxsmm_generator_gemm_sse3_avx_avx2_avx512_update_m_blocking( libxsmm_micro_kernel_config* io_micro_kernel_config, const unsigned int i_arch, const libxsmm_gemm_descriptor* i_xgemm_desc, const unsigned int i_current_m_blocking ) { unsigned int l_m_blocking = 0; unsigned int l_use_masking_a_c = 0; if ( ( i_arch <= LIBXSMM_X86_SSE4 ) && ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 4) { l_m_blocking = 1; libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 8) { l_m_blocking = 4; } else if (i_current_m_blocking == 12) { l_m_blocking = 8; } else { /* we are done with m_blocking */ } } else if ( ( i_arch <= LIBXSMM_X86_SSE4 ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 2) { l_m_blocking = 1; libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 4) { l_m_blocking = 2; } else if (i_current_m_blocking == 6) { l_m_blocking = 4; } else { /* we are done with m_blocking */ } } else if ( ( i_arch == LIBXSMM_X86_AVX ) && ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 4) { l_m_blocking = 1; libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 8) { l_m_blocking = 4; libxsmm_generator_gemm_init_micro_kernel_config_halfvector( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 16) { l_m_blocking = 8; } else if (i_current_m_blocking == 24) { l_m_blocking = 16; } else { /* we are done with m_blocking */ } } else if ( ( i_arch == LIBXSMM_X86_AVX ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 2) { l_m_blocking = 1; libxsmm_generator_gemm_init_micro_kernel_config_scalar( 
io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 4) { l_m_blocking = 2; libxsmm_generator_gemm_init_micro_kernel_config_halfvector( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 8) { l_m_blocking = 4; } else if (i_current_m_blocking == 12) { l_m_blocking = 8; } else { /* we are done with m_blocking */ } } else if ( ( i_arch == LIBXSMM_X86_AVX2 ) && ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 4) { l_m_blocking = 1; libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 8) { l_m_blocking = 4; libxsmm_generator_gemm_init_micro_kernel_config_halfvector( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 16) { l_m_blocking = 8; } else if (i_current_m_blocking == 24) { l_m_blocking = 16; } else if (i_current_m_blocking == 32) { l_m_blocking = 24; } else { /* we are done with m_blocking */ } } else if ( ( i_arch == LIBXSMM_X86_AVX2 ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 2) { l_m_blocking = 1; libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 4) { l_m_blocking = 2; libxsmm_generator_gemm_init_micro_kernel_config_halfvector( io_micro_kernel_config, i_arch, i_xgemm_desc, 0 ); } else if (i_current_m_blocking == 8) { l_m_blocking = 4; } else if (i_current_m_blocking == 12) { l_m_blocking = 8; } else if (i_current_m_blocking == 16) { l_m_blocking = 12; } else { /* we are done with m_blocking */ } } else if ( ( i_arch <= LIBXSMM_X86_ALLFEAT ) && ( ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) || ( LIBXSMM_GEMM_PRECISION_I32 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* Remark switching ti OUT datatype check here to cover BF16 in, Fp32 out kernel with the same logic */ if (i_current_m_blocking == 64) { l_m_blocking = i_xgemm_desc->m % 64; if ( l_m_blocking % 16 != 0 ) { l_use_masking_a_c = 1; } libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, l_use_masking_a_c ); } else { /* we are done with m_blocking */ } } else if ( ( i_arch <= LIBXSMM_X86_ALLFEAT ) && ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) ) { if (i_current_m_blocking == 32) { l_m_blocking = i_xgemm_desc->m % 32; if ( l_m_blocking % 8 != 0 ) { l_use_masking_a_c = 1; } libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, l_use_masking_a_c ); } else { /* we are done with m_blocking */ } } else if ( ( i_arch <= LIBXSMM_X86_ALLFEAT ) && ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) { /* Remark switching ti OUT datatype check here to cover BF16 in, Fp32 out kernel with the same logic */ if (i_current_m_blocking == 64) { l_m_blocking = i_xgemm_desc->m % 64; if ( l_m_blocking % 16 != 0 ) { l_use_masking_a_c = 1; } libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, l_use_masking_a_c ); } else { /* we are done with m_blocking */ } } else { } return l_m_blocking; }
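/*
 * Illustrative aside (not part of LIBXSMM): a minimal standalone sketch of the
 * remainder m-blocking rule used by the AVX-512 branches above, assuming the
 * same constants they use (a full block of 64 FP32/I32 rows or 32 FP64 rows,
 * a 512-bit vector holding 16 FP32 or 8 FP64 lanes, and masking whenever the
 * leftover rows do not fill whole vectors). All names below are made up for
 * the example; they are not LIBXSMM API.
 */
#include <stdio.h>

typedef struct {
  unsigned int m_blocking;   /* rows handled by the next micro-kernel pass */
  int          use_masking;  /* 1 if a partial-vector mask is required     */
} example_m_blocking_t;

/* Mirror of the "i_current_m_blocking == full block" fallback above: after
 * the full-width passes, the leftover rows (m % full_block) are handled in a
 * single pass, masked when the remainder is not a multiple of the vector
 * length. */
static example_m_blocking_t
example_update_m_blocking(unsigned int m, unsigned int full_block,
                          unsigned int vector_length)
{
  example_m_blocking_t r;
  r.m_blocking  = m % full_block;
  r.use_masking = (r.m_blocking % vector_length != 0) ? 1 : 0;
  return r;
}

int main(void)
{
  /* e.g. FP32 on AVX-512: full_block = 64, vector_length = 16 */
  example_m_blocking_t r = example_update_m_blocking(77u, 64u, 16u);
  printf("remainder m_blocking = %u, masking = %d\n", r.m_blocking, r.use_masking);
  return 0;
}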
591901.c
/*------------------------------------------------------------------------- * * nodeModifyTable.c * routines to handle ModifyTable nodes. * * Portions Copyright (c) 2012-2014, TransLattice, Inc. * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * src/backend/executor/nodeModifyTable.c * *------------------------------------------------------------------------- */ /* INTERFACE ROUTINES * ExecInitModifyTable - initialize the ModifyTable node * ExecModifyTable - retrieve the next tuple from the node * ExecEndModifyTable - shut down the ModifyTable node * ExecReScanModifyTable - rescan the ModifyTable node * * NOTES * Each ModifyTable node contains a list of one or more subplans, * much like an Append node. There is one subplan per result relation. * The key reason for this is that in an inherited UPDATE command, each * result relation could have a different schema (more or different * columns) requiring a different plan tree to produce it. In an * inherited DELETE, all the subplans should produce the same output * rowtype, but we might still find that different plans are appropriate * for different child relations. * * If the query specifies RETURNING, then the ModifyTable returns a * RETURNING tuple after completing each row insert, update, or delete. * It must be called again to continue the operation. Without RETURNING, * we just loop within the node until all the work is done, then * return NULL. This avoids useless call/return overhead. */ #include "postgres.h" #include "access/htup_details.h" #include "access/xact.h" #include "commands/trigger.h" #include "executor/executor.h" #include "executor/nodeModifyTable.h" #include "foreign/fdwapi.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parsetree.h" #include "storage/bufmgr.h" #include "storage/lmgr.h" #include "utils/builtins.h" #ifdef _MLS_ #include "utils/mls.h" #endif #include "utils/memutils.h" #include "utils/rel.h" #include "utils/tqual.h" #include "utils/lsyscache.h" #ifdef __TBASE__ #include "optimizer/pgxcship.h" #include "pgxc/execRemote.h" #include "pgxc/planner.h" #include "utils/ruleutils.h" #include "access/gtm.h" #include "access/relscan.h" #include "commands/prepare.h" #endif #ifdef __AUDIT_FGA__ #include "audit/audit_fga.h" #endif static bool ExecOnConflictUpdate(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, ItemPointer conflictTid, TupleTableSlot *planSlot, TupleTableSlot *excludedSlot, EState *estate, bool canSetTag, TupleTableSlot **returning); /* * Verify that the tuples to be produced by INSERT or UPDATE match the * target relation's rowtype * * We do this to guard against stale plans. If plan invalidation is * functioning properly then we should never get a failure here, but better * safe than sorry. Note that this is called after we have obtained lock * on the target rel, so the rowtype can't change underneath us. * * The plan output is represented by its targetlist, because that makes * handling the dropped-column case easier. 
*/ static void ExecCheckPlanOutput(Relation resultRel, List *targetList) {// #lizard forgives TupleDesc resultDesc = RelationGetDescr(resultRel); int attno = 0; ListCell *lc; foreach(lc, targetList) { TargetEntry *tle = (TargetEntry *) lfirst(lc); Form_pg_attribute attr; if (tle->resjunk) continue; /* ignore junk tlist items */ if (attno >= resultDesc->natts) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table row type and query-specified row type do not match"), errdetail("Query has too many columns."))); attr = resultDesc->attrs[attno++]; if (!attr->attisdropped) { /* Normal case: demand type match */ if (exprType((Node *) tle->expr) != attr->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table row type and query-specified row type do not match"), errdetail("Table has type %s at ordinal position %d, but query expects %s.", format_type_be(attr->atttypid), attno, format_type_be(exprType((Node *) tle->expr))))); } else { /* * For a dropped column, we can't check atttypid (it's likely 0). * In any case the planner has most likely inserted an INT4 null. * What we insist on is just *some* NULL constant. */ if (!IsA(tle->expr, Const) || !((Const *) tle->expr)->constisnull) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table row type and query-specified row type do not match"), errdetail("Query provides a value for a dropped column at ordinal position %d.", attno))); } } if (attno != resultDesc->natts) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("table row type and query-specified row type do not match"), errdetail("Query has too few columns."))); } /* * ExecProcessReturning --- evaluate a RETURNING list * * projectReturning: RETURNING projection info for current result rel * tupleSlot: slot holding tuple actually inserted/updated/deleted * planSlot: slot holding tuple returned by top subplan node * * Note: If tupleSlot is NULL, the FDW should have already provided econtext's * scan tuple. * * Returns a slot holding the result tuple */ static TupleTableSlot * ExecProcessReturning(ResultRelInfo *resultRelInfo, TupleTableSlot *tupleSlot, TupleTableSlot *planSlot) { ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning; ExprContext *econtext = projectReturning->pi_exprContext; /* * Reset per-tuple memory context to free any expression evaluation * storage allocated in the previous cycle. */ ResetExprContext(econtext); /* Make tuple and any needed join variables available to ExecProject */ if (tupleSlot) econtext->ecxt_scantuple = tupleSlot; else { HeapTuple tuple; /* * RETURNING expressions might reference the tableoid column, so * initialize t_tableOid before evaluating them. */ Assert(!TupIsNull(econtext->ecxt_scantuple)); tuple = ExecMaterializeSlot(econtext->ecxt_scantuple); tuple->t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); } econtext->ecxt_outertuple = planSlot; /* Compute the RETURNING expressions */ return ExecProject(projectReturning); } /* * ExecCheckHeapTupleVisible -- verify heap tuple is visible * * It would not be consistent with guarantees of the higher isolation levels to * proceed with avoiding insertion (taking speculative insertion's alternative * path) on the basis of another tuple that is not visible to MVCC snapshot. * Check for the need to raise a serialization failure, and do so as necessary. 
*/ static void ExecCheckHeapTupleVisible(EState *estate, HeapTuple tuple, Buffer buffer) { if (!IsolationUsesXactSnapshot()) return; /* * We need buffer pin and lock to call HeapTupleSatisfiesVisibility. * Caller should be holding pin, but not lock. */ LockBuffer(buffer, BUFFER_LOCK_SHARE); if (!HeapTupleSatisfiesVisibility(tuple, estate->es_snapshot, buffer)) { /* * We should not raise a serialization failure if the conflict is * against a tuple inserted by our own transaction, even if it's not * visible to our snapshot. (This would happen, for example, if * conflicting keys are proposed for insertion in a single command.) */ if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple->t_data))) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); } LockBuffer(buffer, BUFFER_LOCK_UNLOCK); } /* * ExecCheckTIDVisible -- convenience variant of ExecCheckHeapTupleVisible() */ static void ExecCheckTIDVisible(EState *estate, ResultRelInfo *relinfo, ItemPointer tid) { Relation rel = relinfo->ri_RelationDesc; Buffer buffer; HeapTupleData tuple; /* Redundantly check isolation level */ if (!IsolationUsesXactSnapshot()) return; tuple.t_self = *tid; if (!heap_fetch(rel, SnapshotAny, &tuple, &buffer, false, NULL)) elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT"); ExecCheckHeapTupleVisible(estate, &tuple, buffer); ReleaseBuffer(buffer); } /* ---------------------------------------------------------------- * ExecInsert * * For INSERT, we have to insert the tuple into the target relation * and insert appropriate tuples into the index relations. * * Returns RETURNING result if any, otherwise NULL. * ---------------------------------------------------------------- */ static TupleTableSlot * ExecInsert(ModifyTableState *mtstate, TupleTableSlot *slot, TupleTableSlot *planSlot, List *arbiterIndexes, OnConflictAction onconflict, EState *estate, bool canSetTag) {// #lizard forgives HeapTuple tuple; ResultRelInfo *resultRelInfo; ResultRelInfo *saved_resultRelInfo = NULL; Relation resultRelationDesc; Oid newId; List *recheckIndexes = NIL; TupleTableSlot *result = NULL; #ifdef __TBASE__ bool has_unshippable_trigger = false; int remoterel_index = 0; ModifyTable *mt = (ModifyTable *)mtstate->ps.plan; #endif #ifdef _MLS_ mls_update_cls_with_current_user(slot); #endif #ifdef _SHARDING_ /* * get information on the (current) result relation */ resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; /* * get the heap tuple out of the tuple table slot, making sure we have a * writable copy */ { bool hasshard = false; AttrNumber diskey = InvalidAttrNumber; AttrNumber secdiskey = InvalidAttrNumber; hasshard = RelationIsSharded(resultRelationDesc); if(hasshard) { diskey = RelationGetDisKey(resultRelationDesc); secdiskey = RelationGetSecDisKey(resultRelationDesc); } tuple = ExecMaterializeSlot_shard(slot, hasshard, diskey, secdiskey, RelationGetRelid(resultRelationDesc)); } #endif /* Determine the partition to heap_insert the tuple into */ if (mtstate->mt_partition_dispatch_info) { int leaf_part_index; TupleConversionMap *map; /* * Away we go ... If we end up not finding a partition after all, * ExecFindPartition() does not return and errors out instead. * Otherwise, the returned value is to be used as an index into arrays * mt_partitions[] and mt_partition_tupconv_maps[] that will get us * the ResultRelInfo and TupleConversionMap for the partition, * respectively. 
*/ leaf_part_index = ExecFindPartition(resultRelInfo, mtstate->mt_partition_dispatch_info, slot, estate); Assert(leaf_part_index >= 0 && leaf_part_index < mtstate->mt_num_partitions); /* * Save the old ResultRelInfo and switch to the one corresponding to * the selected partition. */ saved_resultRelInfo = resultRelInfo; resultRelInfo = mtstate->mt_partitions + leaf_part_index; /* We do not yet have a way to insert into a foreign partition */ if (resultRelInfo->ri_FdwRoutine) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot route inserted tuples to a foreign table"))); /* For ExecInsertIndexTuples() to work on the partition's indexes */ estate->es_result_relation_info = resultRelInfo; /* * If we're capturing transition tuples, we might need to convert from * the partition rowtype to parent rowtype. */ if (mtstate->mt_transition_capture != NULL) { if (resultRelInfo->ri_TrigDesc && (resultRelInfo->ri_TrigDesc->trig_insert_before_row || resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) { /* * If there are any BEFORE or INSTEAD triggers on the * partition, we'll have to be ready to convert their result * back to tuplestore format. */ mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL; mtstate->mt_transition_capture->tcs_map = mtstate->mt_transition_tupconv_maps[leaf_part_index]; } else { /* * Otherwise, just remember the original unconverted tuple, to * avoid a needless round trip conversion. */ mtstate->mt_transition_capture->tcs_original_insert_tuple = tuple; mtstate->mt_transition_capture->tcs_map = NULL; } } /* * We might need to convert from the parent rowtype to the partition * rowtype. */ map = mtstate->mt_partition_tupconv_maps[leaf_part_index]; if (map) { Relation partrel = resultRelInfo->ri_RelationDesc; tuple = do_convert_tuple(tuple, map, partrel); /* * We must use the partition's tuple descriptor from this point * on, until we're finished dealing with the partition. Use the * dedicated slot for that. 
*/ slot = mtstate->mt_partition_tuple_slot; Assert(slot != NULL); ExecSetSlotDescriptor(slot, RelationGetDescr(partrel)); ExecStoreTuple(tuple, slot, InvalidBuffer, true); } } #ifdef __TBASE__ /* Determine the interval partition to heap_insert the tuple into */ else if (resultRelInfo->ispartparent) { AttrNumber partkey; Datum partvalue; bool isnull; int partidx; ResultRelInfo *partRel; char *partname = NULL; Oid partoid = InvalidOid; /* router for tuple */ partkey = RelationGetPartitionColumnIndex(resultRelationDesc); partvalue = slot_getattr(slot, partkey, &isnull); if(isnull) { elog(ERROR,"partition key cannot be null"); } partidx = RelationGetPartitionIdxByValue(resultRelationDesc,partvalue); if(partidx < 0) { elog(ERROR, "inserted value is not in range of partitioned table, please check the value of paritition key"); } partname = GetPartitionName(RelationGetRelid(resultRelInfo->ri_RelationDesc), partidx, false); partoid = get_relname_relid(partname, RelationGetNamespace(resultRelInfo->ri_RelationDesc)); if(InvalidOid == partoid) { /* the partition have dropped */ elog(ERROR, "inserted value is not in range of partitioned table, please check the value of paritition key"); } switch(resultRelInfo->arraymode) { case RESULT_RELINFO_MODE_EXPAND: { partRel = resultRelInfo->part_relinfo[partidx]; remoterel_index = partidx; } break; case RESULT_RELINFO_MODE_COMPACT: { partRel = resultRelInfo->part_relinfo[0]; remoterel_index = 0; } break; default: elog(ERROR,"internal error: arraymode must is RESULT_RELINFO_MODE_EXPAND in INSERT statement"); break; } if (arbiterIndexes) { int partidx = partRel->part_index; if (!mtstate->part_arbiterindexes[remoterel_index]) { ListCell *lc; List *oids = NULL; foreach(lc, arbiterIndexes) { Oid parent_index = lfirst_oid(lc); Oid child_index = RelationGetPartitionIndex(resultRelationDesc, parent_index, partidx); oids = lappend_oid(oids, child_index); } mtstate->part_arbiterindexes[remoterel_index] = oids; } arbiterIndexes = mtstate->part_arbiterindexes[remoterel_index]; partRel->ri_onConflictSetProj = resultRelInfo->ri_onConflictSetProj; partRel->ri_onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere; } saved_resultRelInfo = resultRelInfo; resultRelInfo = partRel; /* For ExecInsertIndexTuples() to work on the partition's indexes */ estate->es_result_relation_info = resultRelInfo; } #endif resultRelationDesc = resultRelInfo->ri_RelationDesc; /* * If the result relation has OIDs, force the tuple's OID to zero so that * heap_insert will assign a fresh OID. Usually the OID already will be * zero at this point, but there are corner cases where the plan tree can * return a tuple extracted literally from some table with the same * rowtype. * * XXX if we ever wanted to allow users to assign their own OIDs to new * rows, this'd be the place to do it. For the moment, we make a point of * doing this before calling triggers, so that a user-supplied trigger * could hack the OID if desired. */ if (resultRelationDesc->rd_rel->relhasoids) HeapTupleSetOid(tuple, InvalidOid); #ifdef __TBASE__ if (IS_PGXC_DATANODE && onconflict == ONCONFLICT_UPDATE && resultRelInfo->ri_TrigDesc) { int16 trigevent = pgxc_get_trigevent(mtstate->operation); has_unshippable_trigger = pgxc_find_unshippable_triggers(resultRelInfo->ri_TrigDesc, trigevent, 0, true); } #endif /* * BEFORE ROW INSERT Triggers. * * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an * INSERT ... ON CONFLICT statement. 
We cannot check for constraint * violations before firing these triggers, because they can change the * values to insert. Also, they can run arbitrary user-defined code with * side-effects that we can't cancel by just not inserting the tuple. */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_insert_before_row) { slot = ExecBRInsertTriggers(estate, resultRelInfo, slot); if (slot == NULL) /* "do nothing" */ { if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return NULL; } /* trigger might have changed tuple */ tuple = ExecMaterializeSlot(slot); } /* INSTEAD OF ROW INSERT Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_insert_instead_row) { slot = ExecIRInsertTriggers(estate, resultRelInfo, slot); if (slot == NULL) /* "do nothing" */ { if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return NULL; } /* trigger might have changed tuple */ tuple = ExecMaterializeSlot(slot); newId = InvalidOid; } else if (resultRelInfo->ri_FdwRoutine) { /* * insert into foreign table: let the FDW do it */ slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate, resultRelInfo, slot, planSlot); if (slot == NULL) /* "do nothing" */ return NULL; /* FDW might have changed tuple */ tuple = ExecMaterializeSlot(slot); /* * AFTER ROW Triggers or RETURNING expressions might reference the * tableoid column, so initialize t_tableOid before evaluating them. */ tuple->t_tableOid = RelationGetRelid(resultRelationDesc); newId = InvalidOid; } else { /* * We always check the partition constraint, including when the tuple * got here via tuple-routing. However we don't need to in the latter * case if no BR trigger is defined on the partition. Note that a BR * trigger might modify the tuple such that the partition constraint * is no longer satisfied, so we need to check in that case. */ bool check_partition_constr = (resultRelInfo->ri_PartitionCheck != NIL); /* * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. */ tuple->t_tableOid = RelationGetRelid(resultRelationDesc); /* * Check any RLS INSERT WITH CHECK policies * * ExecWithCheckOptions() will skip any WCOs which are not of the kind * we are looking for at this point. */ if (resultRelInfo->ri_WithCheckOptions != NIL) ExecWithCheckOptions(WCO_RLS_INSERT_CHECK, resultRelInfo, slot, estate); /* * No need though if the tuple has been routed, and a BR trigger * doesn't exist. */ if (saved_resultRelInfo != NULL && !(resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_insert_before_row)) check_partition_constr = false; /* Check the constraints of the tuple */ if (resultRelationDesc->rd_att->constr || check_partition_constr) ExecConstraints(resultRelInfo, slot, estate); #ifdef _MLS_ if (is_mls_user()) CheckMlsTableUserAcl(resultRelInfo,slot->tts_tuple); #endif #ifdef __TBASE__ /* * DML with unshippable triggers on resultrelation, we execute DML * on coordiantor. 
*/ if (IS_PGXC_COORDINATOR && mt->remote_plans) { bool succeed = false; UPSERT_ACTION result = UPSERT_NONE; TupleTableSlot *returning = NULL; succeed = ExecRemoteDML(mtstate, NULL, NULL, slot, planSlot, estate, NULL, canSetTag, &returning, &result, resultRelInfo, remoterel_index); if (succeed) { if (result == UPSERT_UPDATE) { if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return returning; } } else { if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return NULL; } newId = InvalidOid; } else { #endif if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0) { /* Perform a speculative insertion. */ uint32 specToken; ItemPointerData conflictTid; bool specConflict; /* * Do a non-conclusive check for conflicts first. * * We're not holding any locks yet, so this doesn't guarantee that * the later insert won't conflict. But it avoids leaving behind * a lot of canceled speculative insertions, if you run a lot of * INSERT ON CONFLICT statements that do conflict. * * We loop back here if we find a conflict below, either during * the pre-check, or when we re-check after inserting the tuple * speculatively. */ vlock: specConflict = false; if (!ExecCheckIndexConstraints(slot, estate, &conflictTid, arbiterIndexes)) { /* committed conflict tuple found */ if (onconflict == ONCONFLICT_UPDATE) { /* * In case of ON CONFLICT DO UPDATE, execute the UPDATE * part. Be prepared to retry if the UPDATE fails because * of another concurrent UPDATE/DELETE to the conflict * tuple. */ TupleTableSlot *returning = NULL; #ifdef _MLS_ bool ret; int oldtag; #endif #ifdef __TBASE__ if (has_unshippable_trigger) return NULL; #endif #ifdef _MLS_ oldtag = mls_command_tag_switch_to(CLS_CMD_WRITE); ret = ExecOnConflictUpdate(mtstate, resultRelInfo, &conflictTid, planSlot, slot, estate, canSetTag, &returning); mls_command_tag_switch_to(oldtag); #endif if (ret) { InstrCountFiltered2(&mtstate->ps, 1); if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return returning; } else goto vlock; } else { /* * In case of ON CONFLICT DO NOTHING, do nothing. However, * verify that the tuple is visible to the executor's MVCC * snapshot at higher isolation levels. */ Assert(onconflict == ONCONFLICT_NOTHING); ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid); InstrCountFiltered2(&mtstate->ps, 1); if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return NULL; } } /* * Before we start insertion proper, acquire our "speculative * insertion lock". Others can use that to wait for us to decide * if we're going to go ahead with the insertion, instead of * waiting for the whole transaction to complete. */ specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId()); HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken); /* insert the tuple, with the speculative token */ newId = heap_insert(resultRelationDesc, tuple, estate->es_output_cid, HEAP_INSERT_SPECULATIVE, NULL); /* insert index entries for tuple */ recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, true, &specConflict, arbiterIndexes); /* adjust the tuple's state accordingly */ if (!specConflict) heap_finish_speculative(resultRelationDesc, tuple); else heap_abort_speculative(resultRelationDesc, tuple); /* * Wake up anyone waiting for our decision. They will re-check * the tuple, see that it's no longer speculative, and wait on our * XID as if this was a regularly inserted tuple all along. 
Or if * we killed the tuple, they will see it's dead, and proceed as if * the tuple never existed. */ SpeculativeInsertionLockRelease(GetCurrentTransactionId()); /* * If there was a conflict, start from the beginning. We'll do * the pre-check again, which will now find the conflicting tuple * (unless it aborts before we get there). */ if (specConflict) { list_free(recheckIndexes); #ifdef __TBASE__ if (has_unshippable_trigger) return NULL; #endif goto vlock; } /* Since there was no insertion conflict, we're done */ } else { /* * insert the tuple normally. * * Note: heap_insert returns the tid (location) of the new tuple * in the t_self field. */ newId = heap_insert(resultRelationDesc, tuple, estate->es_output_cid, 0, NULL); /* insert index entries for tuple */ if (resultRelInfo->ri_NumIndices > 0) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, arbiterIndexes); } #ifdef __TBASE__ } #endif } if (canSetTag) { (estate->es_processed)++; estate->es_lastoid = newId; setLastTid(&(tuple->t_self)); } /* AFTER ROW INSERT Triggers */ ExecARInsertTriggers(estate, resultRelInfo, tuple, recheckIndexes, mtstate->mt_transition_capture); list_free(recheckIndexes); /* * Check any WITH CHECK OPTION constraints from parent views. We are * required to do this after testing all constraints and uniqueness * violations per the SQL spec, so we do it after actually inserting the * record into the heap and all indexes. * * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the * tuple will never be seen, if it violates the WITH CHECK OPTION. * * ExecWithCheckOptions() will skip any WCOs which are not of the kind we * are looking for at this point. */ if (resultRelInfo->ri_WithCheckOptions != NIL) ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate); /* Process RETURNING if present */ if (resultRelInfo->ri_projectReturning) result = ExecProcessReturning(resultRelInfo, slot, planSlot); if (saved_resultRelInfo) estate->es_result_relation_info = saved_resultRelInfo; return result; } /* ---------------------------------------------------------------- * ExecDelete * * DELETE is like UPDATE, except that we delete the tuple and no * index modifications are needed. * * When deleting from a table, tupleid identifies the tuple to * delete and oldtuple is NULL. When deleting from a view, * oldtuple is passed to the INSTEAD OF triggers and identifies * what to delete, and tupleid is invalid. When deleting from a * foreign table, tupleid is invalid; the FDW has to figure out * which row to delete using data from the planSlot. oldtuple is * passed to foreign table triggers; it is NULL when the foreign * table has no relevant triggers. * * Returns RETURNING result if any, otherwise NULL. 
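 * (In the __TBASE__ build that follows, ExecDelete also receives a sourceslot
 * argument: the subplan's original tuple, which is passed to ExecRemoteDML()
 * when the coordinator ships the delete to remote nodes.)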
* ---------------------------------------------------------------- */ #ifdef __TBASE__ static TupleTableSlot * ExecDelete(ModifyTableState *mtstate, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *sourceslot, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate, bool canSetTag) #else static TupleTableSlot * ExecDelete(ModifyTableState *mtstate, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate, bool canSetTag) #endif {// #lizard forgives ResultRelInfo *resultRelInfo; Relation resultRelationDesc; HTSU_Result result; HeapUpdateFailureData hufd; TupleTableSlot *slot = NULL; #ifdef __TBASE__ int remoterel_index = 0; ModifyTable *mt = (ModifyTable *)mtstate->ps.plan; #endif /* * get information on the (current) result relation */ resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; /* BEFORE ROW DELETE Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_delete_before_row) { bool dodelete; dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, tupleid, oldtuple); if (!dodelete) /* "do nothing" */ return NULL; } #ifdef _MLS_ if (is_mls_user()) { HeapTupleData tp; Page page; BlockNumber block; Buffer buffer; ItemId lp; block = ItemPointerGetBlockNumber(tupleid); buffer = ReadBuffer(resultRelInfo->ri_RelationDesc, block); page = BufferGetPage(buffer); lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tupleid)); Assert(ItemIdIsNormal(lp)); tp.t_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc); tp.t_data = (HeapTupleHeader) PageGetItem(page, lp); tp.t_len = ItemIdGetLength(lp); tp.t_self = *tupleid; CheckMlsTableUserAcl(resultRelInfo,&tp); } #endif /* INSTEAD OF ROW DELETE Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_delete_instead_row) { bool dodelete; Assert(oldtuple != NULL); dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple); if (!dodelete) /* "do nothing" */ return NULL; } else if (resultRelInfo->ri_FdwRoutine) { HeapTuple tuple; /* * delete from foreign table: let the FDW do it * * We offer the trigger tuple slot as a place to store RETURNING data, * although the FDW can return some other slot if it wants. Set up * the slot's tupdesc so the FDW doesn't need to do that for itself. */ slot = estate->es_trig_tuple_slot; if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate, resultRelInfo, slot, planSlot); if (slot == NULL) /* "do nothing" */ return NULL; /* * RETURNING expressions might reference the tableoid column, so * initialize t_tableOid before evaluating them. */ if (slot->tts_isempty) ExecStoreAllNullTuple(slot); tuple = ExecMaterializeSlot(slot); tuple->t_tableOid = RelationGetRelid(resultRelationDesc); } else { #ifdef __TBASE__ if (IS_PGXC_COORDINATOR && mt->remote_plans) { bool succeed = false; if (mtstate->part_whichplan >= 0) { remoterel_index = mtstate->part_whichplan; } succeed = ExecRemoteDML(mtstate, tupleid, oldtuple, sourceslot, planSlot, estate, epqstate, canSetTag, NULL, NULL, resultRelInfo, remoterel_index); if (!succeed) return NULL; } else { #endif /* * delete the tuple * * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check * that the row to be deleted is visible to that snapshot, and throw a * can't-serialize error if not. 
This is a special-case behavior * needed for referential integrity updates in transaction-snapshot * mode transactions. */ ldelete:; result = heap_delete(resultRelationDesc, tupleid, estate->es_output_cid, estate->es_crosscheck_snapshot, true /* wait for commit */ , &hufd); switch (result) { case HeapTupleSelfUpdated: /* * The target tuple was already updated or deleted by the * current command, or by a later command in the current * transaction. The former case is possible in a join DELETE * where multiple tuples join to the same target tuple. This * is somewhat questionable, but Postgres has always allowed * it: we just ignore additional deletion attempts. * * The latter case arises if the tuple is modified by a * command in a BEFORE trigger, or perhaps by a command in a * volatile function used in the query. In such situations we * should not ignore the deletion, but it is equally unsafe to * proceed. We don't want to discard the original DELETE * while keeping the triggered actions based on its deletion; * and it would be no better to allow the original DELETE * while discarding updates that it triggered. The row update * carries some information that might be important according * to business rules; so throwing an error is the only safe * course. * * If a trigger actually intends this type of interaction, it * can re-execute the DELETE and then return NULL to cancel * the outer delete. */ if (hufd.cmax != estate->es_output_cid) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), errmsg("tuple to be updated was already modified by an operation triggered by the current command"), errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); /* Else, already deleted by self; nothing to do */ return NULL; case HeapTupleMayBeUpdated: break; case HeapTupleUpdated: if (IsolationUsesXactSnapshot()) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); if (!ItemPointerEquals(tupleid, &hufd.ctid)) { TupleTableSlot *epqslot; epqslot = EvalPlanQual(estate, epqstate, resultRelationDesc, resultRelInfo->ri_RangeTableIndex, LockTupleExclusive, &hufd.ctid, hufd.xmax); if (!TupIsNull(epqslot)) { *tupleid = hufd.ctid; goto ldelete; } } /* tuple already deleted; nothing to do */ return NULL; default: elog(ERROR, "unrecognized heap_delete status: %u", result); return NULL; } /* * Note: Normally one would think that we have to delete index tuples * associated with the heap tuple now... * * ... but in POSTGRES, we have no need to do this because VACUUM will * take care of it later. We can't delete index tuples immediately * anyway, since the tuple is still visible to other transactions. */ #ifdef __TBASE__ } #endif } if (canSetTag) (estate->es_processed)++; /* AFTER ROW DELETE Triggers */ ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple, mtstate->mt_transition_capture); /* Process RETURNING if present */ if (resultRelInfo->ri_projectReturning) { /* * We have to put the target tuple into a slot, which means first we * gotta fetch it. We can use the trigger tuple slot. 
*/ TupleTableSlot *rslot; HeapTupleData deltuple; Buffer delbuffer; if (resultRelInfo->ri_FdwRoutine) { /* FDW must have provided a slot containing the deleted row */ Assert(!TupIsNull(slot)); delbuffer = InvalidBuffer; } else { slot = estate->es_trig_tuple_slot; if (oldtuple != NULL) { deltuple = *oldtuple; delbuffer = InvalidBuffer; } else { deltuple.t_self = *tupleid; if (!heap_fetch(resultRelationDesc, SnapshotAny, &deltuple, &delbuffer, false, NULL)) elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING"); } if (slot->tts_tupleDescriptor != RelationGetDescr(resultRelationDesc)) ExecSetSlotDescriptor(slot, RelationGetDescr(resultRelationDesc)); ExecStoreTuple(&deltuple, slot, InvalidBuffer, false); } rslot = ExecProcessReturning(resultRelInfo, slot, planSlot); /* * Before releasing the target tuple again, make sure rslot has a * local copy of any pass-by-reference values. */ ExecMaterializeSlot(rslot); ExecClearTuple(slot); if (BufferIsValid(delbuffer)) ReleaseBuffer(delbuffer); return rslot; } return NULL; } /* ---------------------------------------------------------------- * ExecUpdate * * note: we can't run UPDATE queries with transactions * off because UPDATEs are actually INSERTs and our * scan will mistakenly loop forever, updating the tuple * it just inserted.. This should be fixed but until it * is, we don't want to get stuck in an infinite loop * which corrupts your database.. * * When updating a table, tupleid identifies the tuple to * update and oldtuple is NULL. When updating a view, oldtuple * is passed to the INSTEAD OF triggers and identifies what to * update, and tupleid is invalid. When updating a foreign table, * tupleid is invalid; the FDW has to figure out which row to * update using data from the planSlot. oldtuple is passed to * foreign table triggers; it is NULL when the foreign table has * no relevant triggers. * * Returns RETURNING result if any, otherwise NULL. 
* ---------------------------------------------------------------- */ static TupleTableSlot * ExecUpdate(ModifyTableState *mtstate, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate, bool canSetTag) {// #lizard forgives HeapTuple tuple; ResultRelInfo *resultRelInfo; Relation resultRelationDesc; HTSU_Result result; HeapUpdateFailureData hufd; List *recheckIndexes = NIL; #ifdef __TBASE__ int remoterel_index = 0; ModifyTable *mt = (ModifyTable *)mtstate->ps.plan; #endif #ifdef _SHARDING_ bool hasshard = false; AttrNumber diskey = InvalidAttrNumber; AttrNumber secdiskey = InvalidAttrNumber; #endif /* * abort the operation if not running transactions */ if (IsBootstrapProcessingMode()) elog(ERROR, "cannot UPDATE during bootstrap"); #ifdef _SHARDING_ /* * get information on the (current) result relation */ resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; /* * get the heap tuple out of the tuple table slot, making sure we have a * writable copy */ hasshard = RelationIsSharded(resultRelationDesc); if(hasshard) { diskey = RelationGetDisKey(resultRelationDesc); secdiskey = RelationGetSecDisKey(resultRelationDesc); } tuple = ExecMaterializeSlot_shard(slot, hasshard, diskey, secdiskey, RelationGetRelid(resultRelationDesc)); #endif /* * get information on the (current) result relation */ resultRelInfo = estate->es_result_relation_info; resultRelationDesc = resultRelInfo->ri_RelationDesc; /* BEFORE ROW UPDATE Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_update_before_row) { slot = ExecBRUpdateTriggers(estate, epqstate, resultRelInfo, tupleid, oldtuple, slot); if (slot == NULL) /* "do nothing" */ return NULL; /* trigger might have changed tuple */ tuple = ExecMaterializeSlot(slot); #ifdef _SHARDING_ if(RelationHasExtent(resultRelationDesc) && !ShardIDIsValid(HeapTupleGetShardId(tuple))) { elog(PANIC, "relation is extent, but shardid of tuple is invalid."); } #endif } /* INSTEAD OF ROW UPDATE Triggers */ if (resultRelInfo->ri_TrigDesc && resultRelInfo->ri_TrigDesc->trig_update_instead_row) { slot = ExecIRUpdateTriggers(estate, resultRelInfo, oldtuple, slot); if (slot == NULL) /* "do nothing" */ return NULL; /* trigger might have changed tuple */ tuple = ExecMaterializeSlot(slot); #ifdef _SHARDING_ if(RelationHasExtent(resultRelationDesc) && !ShardIDIsValid(HeapTupleGetShardId(tuple))) { elog(PANIC, "relation is extent, but shardid of tuple is invalid."); } #endif } else if (resultRelInfo->ri_FdwRoutine) { /* * update in foreign table: let the FDW do it */ slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate, resultRelInfo, slot, planSlot); if (slot == NULL) /* "do nothing" */ return NULL; /* FDW might have changed tuple */ tuple = ExecMaterializeSlot(slot); #ifdef _SHARDING_ if(RelationHasExtent(resultRelationDesc) && !ShardIDIsValid(HeapTupleGetShardId(tuple))) { elog(PANIC, "relation is extent, but shardid of tuple is invalid."); } #endif /* * AFTER ROW Triggers or RETURNING expressions might reference the * tableoid column, so initialize t_tableOid before evaluating them. */ tuple->t_tableOid = RelationGetRelid(resultRelationDesc); } else { LockTupleMode lockmode; /* * Constraints might reference the tableoid column, so initialize * t_tableOid before evaluating them. 
*/ tuple->t_tableOid = RelationGetRelid(resultRelationDesc); /* * Check any RLS UPDATE WITH CHECK policies * * If we generate a new candidate tuple after EvalPlanQual testing, we * must loop back here and recheck any RLS policies and constraints. * (We don't need to redo triggers, however. If there are any BEFORE * triggers then trigger.c will have done heap_lock_tuple to lock the * correct tuple, so there's no need to do them again.) * * ExecWithCheckOptions() will skip any WCOs which are not of the kind * we are looking for at this point. */ lreplace:; if (resultRelInfo->ri_WithCheckOptions != NIL) ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK, resultRelInfo, slot, estate); /* * Check the constraints of the tuple. Note that we pass the same * slot for the orig_slot argument, because unlike ExecInsert(), no * tuple-routing is performed here, hence the slot remains unchanged. */ if (resultRelationDesc->rd_att->constr || resultRelInfo->ri_PartitionCheck) ExecConstraints(resultRelInfo, slot, estate); #ifdef _MLS_ if (is_mls_user()) CheckMlsTableUserAcl(resultRelInfo,slot->tts_tuple); #endif #ifdef __TBASE__ if (IS_PGXC_COORDINATOR && mt->remote_plans) { bool succeed = false; if (mtstate->part_whichplan >= 0) { remoterel_index = mtstate->part_whichplan; } succeed = ExecRemoteDML(mtstate, tupleid, oldtuple, slot, planSlot, estate, epqstate, canSetTag, NULL, NULL, resultRelInfo, remoterel_index); if (!succeed) return NULL; } else { #endif /* * replace the heap tuple * * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check * that the row to be updated is visible to that snapshot, and throw a * can't-serialize error if not. This is a special-case behavior * needed for referential integrity updates in transaction-snapshot * mode transactions. */ result = heap_update(resultRelationDesc, tupleid, tuple, estate->es_output_cid, estate->es_crosscheck_snapshot, true /* wait for commit */ , &hufd, &lockmode); switch (result) { case HeapTupleSelfUpdated: /* * The target tuple was already updated or deleted by the * current command, or by a later command in the current * transaction. The former case is possible in a join UPDATE * where multiple tuples join to the same target tuple. This * is pretty questionable, but Postgres has always allowed it: * we just execute the first update action and ignore * additional update attempts. * * The latter case arises if the tuple is modified by a * command in a BEFORE trigger, or perhaps by a command in a * volatile function used in the query. In such situations we * should not ignore the update, but it is equally unsafe to * proceed. We don't want to discard the original UPDATE * while keeping the triggered actions based on it; and we * have no principled way to merge this update with the * previous ones. So throwing an error is the only safe * course. * * If a trigger actually intends this type of interaction, it * can re-execute the UPDATE (assuming it can figure out how) * and then return NULL to cancel the outer update. 
*/ if (hufd.cmax != estate->es_output_cid) ereport(ERROR, (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION), errmsg("tuple to be updated was already modified by an operation triggered by the current command"), errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows."))); /* Else, already updated by self; nothing to do */ return NULL; case HeapTupleMayBeUpdated: break; case HeapTupleUpdated: if (IsolationUsesXactSnapshot()) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); if (!ItemPointerEquals(tupleid, &hufd.ctid)) { TupleTableSlot *epqslot; epqslot = EvalPlanQual(estate, epqstate, resultRelationDesc, resultRelInfo->ri_RangeTableIndex, lockmode, &hufd.ctid, hufd.xmax); if (!TupIsNull(epqslot)) { *tupleid = hufd.ctid; slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot); tuple = ExecMaterializeSlot_shard(slot, hasshard, diskey, secdiskey, RelationGetRelid(resultRelationDesc)); goto lreplace; } } /* tuple already deleted; nothing to do */ return NULL; default: elog(ERROR, "unrecognized heap_update status: %u", result); return NULL; } /* * Note: instead of having to update the old index tuples associated * with the heap tuple, all we do is form and insert new index tuples. * This is because UPDATEs are actually DELETEs and INSERTs, and index * tuple deletion is done later by VACUUM (see notes in ExecDelete). * All we do here is insert new index tuples. -cim 9/27/89 */ /* * insert index entries for tuple * * Note: heap_update returns the tid (location) of the new tuple in * the t_self field. * * If it's a HOT update, we mustn't insert new index entries. */ if (resultRelInfo->ri_NumIndices > 0 && !HeapTupleIsHeapOnly(tuple)) recheckIndexes = ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false, NULL, NIL); #ifdef __TBASE__ } #endif } if (canSetTag) (estate->es_processed)++; /* AFTER ROW UPDATE Triggers */ ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple, recheckIndexes, mtstate->mt_transition_capture); list_free(recheckIndexes); /* * Check any WITH CHECK OPTION constraints from parent views. We are * required to do this after testing all constraints and uniqueness * violations per the SQL spec, so we do it after actually updating the * record in the heap and all indexes. * * ExecWithCheckOptions() will skip any WCOs which are not of the kind we * are looking for at this point. */ if (resultRelInfo->ri_WithCheckOptions != NIL) ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate); /* Process RETURNING if present */ if (resultRelInfo->ri_projectReturning) return ExecProcessReturning(resultRelInfo, slot, planSlot); return NULL; } /* * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE * * Try to lock tuple for update as part of speculative insertion. If * a qual originating from ON CONFLICT DO UPDATE is satisfied, update * (but still lock row, even though it may not satisfy estate's * snapshot). * * Returns true if if we're done (with or without an update), or false if * the caller must retry the INSERT from scratch. 
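 * (ExecInsert() above handles the "false" return by jumping back to its vlock
 * label and re-running the speculative-insertion pre-check.)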
*/ static bool ExecOnConflictUpdate(ModifyTableState *mtstate, ResultRelInfo *resultRelInfo, ItemPointer conflictTid, TupleTableSlot *planSlot, TupleTableSlot *excludedSlot, EState *estate, bool canSetTag, TupleTableSlot **returning) {// #lizard forgives ExprContext *econtext = mtstate->ps.ps_ExprContext; Relation relation = resultRelInfo->ri_RelationDesc; ExprState *onConflictSetWhere = resultRelInfo->ri_onConflictSetWhere; HeapTupleData tuple; HeapUpdateFailureData hufd; LockTupleMode lockmode; HTSU_Result test; Buffer buffer; /* Determine lock mode to use */ lockmode = ExecUpdateLockMode(estate, resultRelInfo); /* * Lock tuple for update. Don't follow updates when tuple cannot be * locked without doing so. A row locking conflict here means our * previous conclusion that the tuple is conclusively committed is not * true anymore. */ tuple.t_self = *conflictTid; test = heap_lock_tuple(relation, &tuple, estate->es_output_cid, lockmode, LockWaitBlock, false, &buffer, &hufd); switch (test) { case HeapTupleMayBeUpdated: /* success! */ break; case HeapTupleInvisible: /* * This can occur when a just inserted tuple is updated again in * the same command. E.g. because multiple rows with the same * conflicting key values are inserted. * * This is somewhat similar to the ExecUpdate() * HeapTupleSelfUpdated case. We do not want to proceed because * it would lead to the same row being updated a second time in * some unspecified order, and in contrast to plain UPDATEs * there's no historical behavior to break. * * It is the user's responsibility to prevent this situation from * occurring. These problems are why SQL-2003 similarly specifies * that for SQL MERGE, an exception must be raised in the event of * an attempt to update the same row twice. */ if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple.t_data))) ereport(ERROR, (errcode(ERRCODE_CARDINALITY_VIOLATION), errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"), errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values."))); /* This shouldn't happen */ elog(ERROR, "attempted to lock invisible tuple"); case HeapTupleSelfUpdated: /* * This state should never be reached. As a dirty snapshot is used * to find conflicting tuples, speculative insertion wouldn't have * seen this row to conflict with. */ elog(ERROR, "unexpected self-updated tuple"); case HeapTupleUpdated: if (IsolationUsesXactSnapshot()) ereport(ERROR, (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE), errmsg("could not serialize access due to concurrent update"))); /* * Tell caller to try again from the very start. * * It does not make sense to use the usual EvalPlanQual() style * loop here, as the new version of the row might not conflict * anymore, or the conflicting tuple has actually been deleted. */ ReleaseBuffer(buffer); return false; default: elog(ERROR, "unrecognized heap_lock_tuple status: %u", test); } /* * Success, the tuple is locked. * * Reset per-tuple memory context to free any expression evaluation * storage allocated in the previous cycle. */ ResetExprContext(econtext); /* * Verify that the tuple is visible to our MVCC snapshot if the current * isolation level mandates that. * * It's not sufficient to rely on the check within ExecUpdate() as e.g. * CONFLICT ... WHERE clause may prevent us from reaching that. 
* * This means we only ever continue when a new command in the current * transaction could see the row, even though in READ COMMITTED mode the * tuple will not be visible according to the current statement's * snapshot. This is in line with the way UPDATE deals with newer tuple * versions. */ ExecCheckHeapTupleVisible(estate, &tuple, buffer); /* Store target's existing tuple in the state's dedicated slot */ ExecStoreTuple(&tuple, mtstate->mt_existing, buffer, false); /* * Make tuple and any needed join variables available to ExecQual and * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while * the target's existing tuple is installed in the scantuple. EXCLUDED * has been made to reference INNER_VAR in setrefs.c, but there is no * other redirection. */ econtext->ecxt_scantuple = mtstate->mt_existing; econtext->ecxt_innertuple = excludedSlot; econtext->ecxt_outertuple = NULL; if (!ExecQual(onConflictSetWhere, econtext)) { ReleaseBuffer(buffer); InstrCountFiltered1(&mtstate->ps, 1); return true; /* done with the tuple */ } if (resultRelInfo->ri_WithCheckOptions != NIL) { /* * Check target's existing tuple against UPDATE-applicable USING * security barrier quals (if any), enforced here as RLS checks/WCOs. * * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK, * but that's almost the extent of its special handling for ON * CONFLICT DO UPDATE. * * The rewriter will also have associated UPDATE applicable straight * RLS checks/WCOs for the benefit of the ExecUpdate() call that * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO * kinds, so there is no danger of spurious over-enforcement in the * INSERT or UPDATE path. */ ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo, mtstate->mt_existing, mtstate->ps.state); } /* Project the new tuple version */ ExecProject(resultRelInfo->ri_onConflictSetProj); /* * Note that it is possible that the target tuple has been modified in * this session, after the above heap_lock_tuple. We choose to not error * out in that case, in line with ExecUpdate's treatment of similar cases. * This can happen if an UPDATE is triggered from within ExecQual(), * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a * wCTE in the ON CONFLICT's SET. */ /* Execute UPDATE with projection */ *returning = ExecUpdate(mtstate, &tuple.t_self, NULL, mtstate->mt_conflproj, planSlot, &mtstate->mt_epqstate, mtstate->ps.state, canSetTag); ReleaseBuffer(buffer); return true; } /* * Process BEFORE EACH STATEMENT triggers */ static void fireBSTriggers(ModifyTableState *node) { ResultRelInfo *resultRelInfo = node->resultRelInfo; /* * If the node modifies a partitioned table, we must fire its triggers. * Note that in that case, node->resultRelInfo points to the first leaf * partition, not the root table. */ if (node->rootResultRelInfo != NULL) resultRelInfo = node->rootResultRelInfo; switch (node->operation) { case CMD_INSERT: ExecBSInsertTriggers(node->ps.state, resultRelInfo); if (node->mt_onconflict == ONCONFLICT_UPDATE) ExecBSUpdateTriggers(node->ps.state, resultRelInfo); break; case CMD_UPDATE: ExecBSUpdateTriggers(node->ps.state, resultRelInfo); break; case CMD_DELETE: ExecBSDeleteTriggers(node->ps.state, resultRelInfo); break; default: elog(ERROR, "unknown operation"); break; } } /* * Return the ResultRelInfo for which we will fire AFTER STATEMENT triggers. 
* This is also the relation into whose tuple format all captured transition * tuples must be converted. */ static ResultRelInfo * getASTriggerResultRelInfo(ModifyTableState *node) { /* * If the node modifies a partitioned table, we must fire its triggers. * Note that in that case, node->resultRelInfo points to the first leaf * partition, not the root table. */ if (node->rootResultRelInfo != NULL) return node->rootResultRelInfo; else return node->resultRelInfo; } /* * Process AFTER EACH STATEMENT triggers */ static void fireASTriggers(ModifyTableState *node) { ResultRelInfo *resultRelInfo = getASTriggerResultRelInfo(node); switch (node->operation) { case CMD_INSERT: if (node->mt_onconflict == ONCONFLICT_UPDATE) ExecASUpdateTriggers(node->ps.state, resultRelInfo, node->mt_transition_capture); ExecASInsertTriggers(node->ps.state, resultRelInfo, node->mt_transition_capture); break; case CMD_UPDATE: ExecASUpdateTriggers(node->ps.state, resultRelInfo, node->mt_transition_capture); break; case CMD_DELETE: ExecASDeleteTriggers(node->ps.state, resultRelInfo, node->mt_transition_capture); break; default: elog(ERROR, "unknown operation"); break; } } /* * Set up the state needed for collecting transition tuples for AFTER * triggers. */ static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate) { ResultRelInfo *targetRelInfo = getASTriggerResultRelInfo(mtstate); int i; /* Check for transition tables on the directly targeted relation. */ mtstate->mt_transition_capture = MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc); /* * If we found that we need to collect transition tuples then we may also * need tuple conversion maps for any children that have TupleDescs that * aren't compatible with the tuplestores. */ if (mtstate->mt_transition_capture != NULL) { ResultRelInfo *resultRelInfos; int numResultRelInfos; /* Find the set of partitions so that we can find their TupleDescs. */ if (mtstate->mt_partition_dispatch_info != NULL) { /* * For INSERT via partitioned table, so we need TupleDescs based * on the partition routing table. */ resultRelInfos = mtstate->mt_partitions; numResultRelInfos = mtstate->mt_num_partitions; } else { /* Otherwise we need the ResultRelInfo for each subplan. */ resultRelInfos = mtstate->resultRelInfo; numResultRelInfos = mtstate->mt_nplans; } /* * Build array of conversion maps from each child's TupleDesc to the * one used in the tuplestore. The map pointers may be NULL when no * conversion is necessary, which is hopefully a common case for * partitions. */ mtstate->mt_transition_tupconv_maps = (TupleConversionMap **) palloc0(sizeof(TupleConversionMap *) * numResultRelInfos); for (i = 0; i < numResultRelInfos; ++i) { mtstate->mt_transition_tupconv_maps[i] = convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc), RelationGetDescr(targetRelInfo->ri_RelationDesc), gettext_noop("could not convert row type")); } /* * Install the conversion map for the first plan for UPDATE and DELETE * operations. It will be advanced each time we switch to the next * plan. (INSERT operations set it every time.) */ mtstate->mt_transition_capture->tcs_map = mtstate->mt_transition_tupconv_maps[0]; } } /* ---------------------------------------------------------------- * ExecModifyTable * * Perform table modifications as required, and return RETURNING results * if needed. 
* ---------------------------------------------------------------- */ static TupleTableSlot * ExecModifyTable(PlanState *pstate) {// #lizard forgives ModifyTableState *node = castNode(ModifyTableState, pstate); EState *estate = node->ps.state; CmdType operation = node->operation; ResultRelInfo *saved_resultRelInfo; ResultRelInfo *resultRelInfo; PlanState *subplanstate; JunkFilter *junkfilter; TupleTableSlot *slot; TupleTableSlot *planSlot; ItemPointer tupleid = NULL; ItemPointerData tuple_ctid; HeapTupleData oldtupdata; HeapTuple oldtuple; #ifdef __TBASE__ ModifyTable *mt = (ModifyTable *)node->ps.plan; ResultRelInfo *part_resultRelInfo; int64 insert_tuple_count = 0; #endif #ifdef __AUDIT_FGA__ ListCell *item = NULL; ExprContext *econtext = NULL; //EState *audit_fga_estate; TupleTableSlot *audit_fga_slot = NULL; char *cmd_type = NULL; TupleDesc audit_fga_slot_tupdesc; TupleTableSlot *old_ecxt_scantuple = NULL; #endif #ifdef _MLS_ int oldtag; #endif CHECK_FOR_INTERRUPTS(); /* * This should NOT get called during EvalPlanQual; we should have passed a * subplan tree to EvalPlanQual, instead. Use a runtime test not just * Assert because this condition is easy to miss in testing. (Note: * although ModifyTable should not get executed within an EvalPlanQual * operation, we do have to allow it to be initialized and shut down in * case it is within a CTE subplan. Hence this test must be here, not in * ExecInitModifyTable.) */ if (estate->es_epqTuple != NULL) elog(ERROR, "ModifyTable should not be called during EvalPlanQual"); /* * If we've already completed processing, don't try to do more. We need * this test because ExecPostprocessPlan might call us an extra time, and * our subplan's nodes aren't necessarily robust against being called * extra times. */ if (node->mt_done) return NULL; /* * On first call, fire BEFORE STATEMENT triggers before proceeding. */ if (node->fireBSTriggers) { fireBSTriggers(node); node->fireBSTriggers = false; } /* Preload local variables */ resultRelInfo = node->resultRelInfo + node->mt_whichplan; subplanstate = node->mt_plans[node->mt_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; /* * es_result_relation_info must point to the currently active result * relation while we are within this ModifyTable node. Even though * ModifyTable nodes can't be nested statically, they can be nested * dynamically (since our subplan could include a reference to a modifying * CTE). So we have to save and restore the caller's value. */ saved_resultRelInfo = estate->es_result_relation_info; estate->es_result_relation_info = resultRelInfo; #ifdef __TBASE__ /* * Update/delete on interval partition table, get child resultRelation * and plan. * Insert is a little different, do prune before heap_insert, not here. 
*/ if(node->is_exec_partition) { subplanstate = node->partplans[node->part_whichplan]; part_resultRelInfo = resultRelInfo->part_relinfo[node->part_whichplan]; } else { if(!subplanstate) { /* first result rel info is partition rel */ if(node->part_len == 0) { node->is_exec_partition = false; node->part_whichplan = -1; node->mt_whichplan++; if(node->mt_whichplan >= node->mt_nplans) { goto END; } subplanstate = node->mt_plans[node->mt_whichplan]; resultRelInfo++; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = resultRelInfo; } else { node->is_exec_partition = true; node->part_whichplan = 0; part_resultRelInfo = resultRelInfo->part_relinfo[node->part_whichplan]; subplanstate = node->partplans[node->part_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = part_resultRelInfo; } EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan, node->mt_arowmarks[node->mt_whichplan]); } } #endif #ifdef _MLS_ oldtag = mls_command_tag_switch_to(CLS_CMD_WRITE); #endif /* * Fetch rows from subplan(s), and execute the required table modification * for each row. */ for (;;) { /* * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly * to do this below the top level of the plan, however. We might need * to rethink this later. */ ResetPerTupleExprContext(estate); planSlot = ExecProcNode(subplanstate); if (TupIsNull(planSlot)) { #ifdef __TBASE__ if(node->is_exec_partition) /*in inner loop */ { node->part_whichplan++; if(node->part_whichplan >= node->part_len) /* end partition loop*/ { node->is_exec_partition = false; node->part_whichplan = -1; node->mt_whichplan++; if(node->mt_whichplan >= node->mt_nplans) { break; } subplanstate = node->mt_plans[node->mt_whichplan]; resultRelInfo++; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = resultRelInfo; } else /* continue partition loop */ { part_resultRelInfo = resultRelInfo->part_relinfo[node->part_whichplan]; subplanstate = node->partplans[node->part_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = part_resultRelInfo; } EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan, node->mt_arowmarks[node->mt_whichplan]); continue; } else /* in outer loop */ { #endif /* advance to next subplan if any */ node->mt_whichplan++; if (node->mt_whichplan < node->mt_nplans) { resultRelInfo++; #ifdef __TBASE__ if(resultRelInfo->ispartparent && node->operation != CMD_INSERT) { /* if loop enter into partitioned ResultRelInfo */ if(node->part_len == 0) { node->is_exec_partition = false; node->part_whichplan = -1; node->mt_whichplan++; if(node->mt_whichplan >= node->mt_nplans) { break; } subplanstate = node->mt_plans[node->mt_whichplan]; resultRelInfo++; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = resultRelInfo; } else { node->is_exec_partition = true; node->part_whichplan = 0; part_resultRelInfo = resultRelInfo->part_relinfo[node->part_whichplan]; subplanstate = node->partplans[node->part_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = part_resultRelInfo; } } else { #endif subplanstate = node->mt_plans[node->mt_whichplan]; junkfilter = resultRelInfo->ri_junkFilter; estate->es_result_relation_info = resultRelInfo; #ifdef __TBASE__ } #endif EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan, node->mt_arowmarks[node->mt_whichplan]); if (node->mt_transition_capture != NULL) { /* Prepare to 
convert transition tuples from this child. */ Assert(node->mt_transition_tupconv_maps != NULL); node->mt_transition_capture->tcs_map = node->mt_transition_tupconv_maps[node->mt_whichplan]; } continue; } else break; #ifdef __TBASE__ } #endif } /* * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do * here is compute the RETURNING expressions. */ if (resultRelInfo->ri_usesFdwDirectModify) { Assert(resultRelInfo->ri_projectReturning); /* * A scan slot containing the data that was actually inserted, * updated or deleted has already been made available to * ExecProcessReturning by IterateDirectModify, so no need to * provide it here. */ slot = ExecProcessReturning(resultRelInfo, NULL, planSlot); estate->es_result_relation_info = saved_resultRelInfo; #ifdef _MLS_ mls_command_tag_switch_to(oldtag); #endif return slot; } EvalPlanQualSetSlot(&node->mt_epqstate, planSlot); slot = planSlot; oldtuple = NULL; if (junkfilter != NULL) { /* * extract the 'ctid' or 'wholerow' junk attribute. */ if (operation == CMD_UPDATE || operation == CMD_DELETE) { char relkind; Datum datum; bool isNull; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW) { datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... */ if (isNull) elog(ERROR, "ctid is NULL"); tupleid = (ItemPointer) DatumGetPointer(datum); tuple_ctid = *tupleid; /* be sure we don't free ctid!! */ tupleid = &tuple_ctid; #ifdef __TBASE__ /* * for update/delete with unshippable triggers, we need to get * the oldtuple for triggers. */ if (IS_PGXC_COORDINATOR && (operation == CMD_UPDATE || operation == CMD_DELETE) && mt->remote_plans) { datum = ExecGetJunkAttribute(slot, junkfilter->jf_xc_wholerow, &isNull); /* shouldn't ever get a null result... */ if (isNull) elog(ERROR, "wholerow is NULL"); oldtupdata.t_data = DatumGetHeapTupleHeader(datum); oldtupdata.t_len = HeapTupleHeaderGetDatumLength(oldtupdata.t_data); ItemPointerSetInvalid(&(oldtupdata.t_self)); /* Historically, view triggers see invalid t_tableOid. */ oldtupdata.t_tableOid = (relkind == RELKIND_MATVIEW) ? InvalidOid : RelationGetRelid(resultRelInfo->ri_RelationDesc); oldtuple = &oldtupdata; } #endif } /* * Use the wholerow attribute, when available, to reconstruct * the old relation tuple. * * Foreign table updates have a wholerow attribute when the * relation has a row-level trigger. Note that the wholerow * attribute does not carry system columns. Foreign table * triggers miss seeing those, except that we know enough here * to set t_tableOid. Quite separately from this, the FDW may * fetch its own junk attrs to identify the row. * * Other relevant relkinds, currently limited to views, always * have a wholerow attribute. */ else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo)) { datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo, &isNull); /* shouldn't ever get a null result... */ if (isNull) elog(ERROR, "wholerow is NULL"); oldtupdata.t_data = DatumGetHeapTupleHeader(datum); oldtupdata.t_len = HeapTupleHeaderGetDatumLength(oldtupdata.t_data); ItemPointerSetInvalid(&(oldtupdata.t_self)); /* Historically, view triggers see invalid t_tableOid. */ oldtupdata.t_tableOid = (relkind == RELKIND_VIEW) ? InvalidOid : RelationGetRelid(resultRelInfo->ri_RelationDesc); oldtuple = &oldtupdata; } else Assert(relkind == RELKIND_FOREIGN_TABLE); } /* * apply the junkfilter if needed. 
*/ if (operation != CMD_DELETE) slot = ExecFilterJunk(junkfilter, slot); } #ifdef __AUDIT_FGA__ if (IsNormalProcessingMode() && IsUnderPostmaster && enable_fga) { foreach (item, node->ps.audit_fga_qual) { HeapTuple result = NULL; audit_fga_policy_state *audit_fga_qual = (audit_fga_policy_state *) lfirst(item); if (operation == CMD_UPDATE || operation == CMD_DELETE) { Page page; ItemId lp; Buffer buffer; HeapTupleData tuple; Relation relation = estate->es_result_relation_info->ri_RelationDesc; buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tupleid)); /* * Although we already know this tuple is valid, we must lock the * buffer to ensure that no one has a buffer cleanup lock; otherwise * they might move the tuple while we try to copy it. But we can * release the lock before actually doing the heap_copytuple call, * since holding pin is sufficient to prevent anyone from getting a * cleanup lock they don't already hold. */ LockBuffer(buffer, BUFFER_LOCK_SHARE); page = BufferGetPage(buffer); lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tupleid)); Assert(ItemIdIsNormal(lp)); tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp); tuple.t_len = ItemIdGetLength(lp); tuple.t_self = *tupleid; tuple.t_tableOid = RelationGetRelid(relation); LockBuffer(buffer, BUFFER_LOCK_UNLOCK); result = heap_copytuple(&tuple); ReleaseBuffer(buffer); } audit_fga_slot_tupdesc = CreateTupleDescCopy(RelationGetDescr(estate->es_result_relation_info->ri_RelationDesc)); audit_fga_slot = MakeSingleTupleTableSlot(audit_fga_slot_tupdesc); switch (operation) { case CMD_INSERT: cmd_type = "INSERT"; ExecCopySlot(audit_fga_slot, slot); break; case CMD_UPDATE: cmd_type = "UPDATE"; ExecStoreTuple(result, audit_fga_slot, InvalidBuffer, false); break; case CMD_DELETE: cmd_type = "DELETE"; ExecStoreTuple(result, audit_fga_slot, InvalidBuffer, false); break; default: cmd_type = "???"; ExecCopySlot(audit_fga_slot, slot); break; } //ExecCopySlot(audit_fga_slot, slot); econtext = GetPerTupleExprContext(estate); old_ecxt_scantuple = econtext->ecxt_scantuple; econtext->ecxt_scantuple = audit_fga_slot; if (audit_fga_qual != NULL) { if(ExecQual(audit_fga_qual->qual, econtext)) { audit_fga_log_policy_info_2(audit_fga_qual, cmd_type); node->ps.audit_fga_qual = list_delete(node->ps.audit_fga_qual, audit_fga_qual); } else { elog(DEBUG1, "AUDIT_FGA: NOT EQAL"); } } econtext->ecxt_scantuple = old_ecxt_scantuple; ExecDropSingleTupleTableSlot(audit_fga_slot); if (audit_fga_slot_tupdesc) { FreeTupleDesc(audit_fga_slot_tupdesc); } } } #endif switch (operation) { case CMD_INSERT: #ifdef _MLS_ /* resultstate is 'insert values/insert select /copy from' action, need to change _cls values embedded. 
*/ if (IsA(subplanstate, ResultState) || IsA(subplanstate, RemoteSubplanState) ) { oldtag = mls_command_tag_switch_to(CLS_CMD_ROW); } #endif slot = ExecInsert(node, slot, planSlot, node->mt_arbiterindexes, node->mt_onconflict, estate, node->canSetTag); #ifdef _MLS_ if (IsA(subplanstate, ResultState) || IsA(subplanstate, RemoteSubplanState) ) { mls_command_tag_switch_to(oldtag); } #endif if(enable_distri_debug) { insert_tuple_count++; } break; case CMD_UPDATE: slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot, &node->mt_epqstate, estate, node->canSetTag); break; case CMD_DELETE: #ifdef __TBASE__ slot = ExecDelete(node, tupleid, oldtuple, slot, planSlot, &node->mt_epqstate, estate, node->canSetTag); #else slot = ExecDelete(node, tupleid, oldtuple, planSlot, &node->mt_epqstate, estate, node->canSetTag); #endif break; default: elog(ERROR, "unknown operation"); break; } /* * If we got a RETURNING result, return it to caller. We'll continue * the work on next call. */ if (slot) { estate->es_result_relation_info = saved_resultRelInfo; #ifdef _MLS_ mls_command_tag_switch_to(oldtag); #endif return slot; } } END: if(enable_distri_debug) { GlobalTimestamp start_ts; if(estate->es_snapshot) { start_ts = estate->es_snapshot->start_ts; } else { start_ts = InvalidGlobalTimestamp; } LogScanGTM(GetTopTransactionIdIfAny(), PGXCNodeName, start_ts, GetCurrentTimestamp(), GetCurrentTimestamp(), INSERT_TUPLES, RelationGetRelationName(estate->es_result_relation_info->ri_RelationDesc), insert_tuple_count); } /* Restore es_result_relation_info before exiting */ estate->es_result_relation_info = saved_resultRelInfo; /* * We're done, but fire AFTER STATEMENT triggers before exiting. */ fireASTriggers(node); node->mt_done = true; #ifdef _MLS_ mls_command_tag_switch_to(oldtag); #endif return NULL; } /* ---------------------------------------------------------------- * ExecInitModifyTable * ---------------------------------------------------------------- */ ModifyTableState * ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) {// #lizard forgives ModifyTableState *mtstate; CmdType operation = node->operation; int nplans = list_length(node->plans); ResultRelInfo *saved_resultRelInfo; ResultRelInfo *resultRelInfo; TupleDesc tupDesc; Plan *subplan; ListCell *l; int i; Relation rel; #ifdef __TBASE__ bool remote_dml = false; #endif #ifdef __AUDIT_FGA__ ListCell *item; #endif /* check for unsupported flags */ Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK))); /* * create state structure */ mtstate = makeNode(ModifyTableState); mtstate->ps.plan = (Plan *) node; mtstate->ps.state = estate; mtstate->ps.ExecProcNode = ExecModifyTable; mtstate->operation = operation; mtstate->canSetTag = node->canSetTag; mtstate->mt_done = false; mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans); mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex; /* If modifying a partitioned table, initialize the root table info */ if (node->rootResultRelIndex >= 0) mtstate->rootResultRelInfo = estate->es_root_result_relations + node->rootResultRelIndex; mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans); mtstate->mt_nplans = nplans; mtstate->mt_onconflict = node->onConflictAction; mtstate->mt_arbiterindexes = node->arbiterIndexes; /* set up epqstate with dummy subplan data for the moment */ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam); mtstate->fireBSTriggers = true; #ifdef __TBASE__ /* interval partition used only */ 
mtstate->haspartparent = node->haspartparent; mtstate->partplans = NULL; mtstate->part_len = 0; mtstate->is_exec_partition = false; mtstate->part_whichplan = -1; mtstate->part_arbiterindexes = NULL; #endif /* * call ExecInitNode on each of the plans to be executed and save the * results into the array "mt_plans". This is also a convenient place to * verify that the proposed target relations are valid and open their * indexes for insertion of new index entries. Note we *must* set * estate->es_result_relation_info correctly while we initialize each * sub-plan; ExecContextForcesOids depends on that! */ saved_resultRelInfo = estate->es_result_relation_info; resultRelInfo = mtstate->resultRelInfo; i = 0; foreach(l, node->plans) { subplan = (Plan *) lfirst(l); /* Initialize the usesFdwDirectModify flag */ resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i, node->fdwDirectModifyPlans); /* * Verify result relation is a valid target for the current operation */ CheckValidResultRel(resultRelInfo->ri_RelationDesc, operation); /* * If there are indices on the result relation, open them and save * descriptors in the result relation info, so that we can add new * index entries for the tuples we add/update. We need not do this * for a DELETE, however, since deletion doesn't affect indexes. Also, * inside an EvalPlanQual operation, the indexes might be open * already, since we share the resultrel state with the original * query. */ if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex && operation != CMD_DELETE && resultRelInfo->ri_IndexRelationDescs == NULL) ExecOpenIndices(resultRelInfo, mtstate->mt_onconflict != ONCONFLICT_NONE); /* Now init the plan for this result rel */ #ifdef __TBASE__ if (resultRelInfo->ispartparent && node->arbiterIndexes) { mtstate->part_arbiterindexes = (List **)palloc0(resultRelInfo->partarraysize * sizeof(List *)); } if(!resultRelInfo->ispartparent || operation == CMD_INSERT) { #endif estate->es_result_relation_info = resultRelInfo; mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags); #ifdef __TBASE__ } else { int arrayidx; int partidx; Plan * partplan; Bitmapset * temp_bms; ListCell *cell; mtstate->haspartparent = true; mtstate->part_len = resultRelInfo->partarraysize; mtstate->partplans = (PlanState**)palloc0(resultRelInfo->partarraysize * sizeof(void*)); temp_bms = bms_copy(resultRelInfo->partpruning); if(operation != CMD_INSERT && list_length(node->partplans) != bms_num_members(temp_bms)) { elog(ERROR,"internal error: Modify Table must have same number of bitmap as length of plan list"); } cell = list_head(node->partplans); arrayidx = 0; while(cell && (partidx = bms_first_member(temp_bms))>=0) { partplan = (Plan *)lfirst(cell); switch (resultRelInfo->arraymode) { case RESULT_RELINFO_MODE_COMPACT: mtstate->partplans[arrayidx++] = ExecInitNode(partplan, estate, eflags); break; case RESULT_RELINFO_MODE_EXPAND: mtstate->partplans[partidx] = ExecInitNode(partplan, estate, eflags); break; default: break; } cell = lnext(cell); } /* check */ if(resultRelInfo->arraymode == RESULT_RELINFO_MODE_COMPACT && arrayidx != resultRelInfo->partarraysize) { elog(ERROR,"init partition plan for parent table failed"); } bms_free(temp_bms); } #endif /* Also let FDWs init themselves for foreign-table result rels */ if (!resultRelInfo->ri_usesFdwDirectModify && resultRelInfo->ri_FdwRoutine != NULL && resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL) { List *fdw_private = (List *) list_nth(node->fdwPrivLists, i); resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate, 
resultRelInfo, fdw_private, i, eflags); } resultRelInfo++; i++; } #ifdef __TBASE__ /* * We have to execDML on coordinator, init remoteDML planstate. */ if (node->remote_plans) { Plan *remoteplan = NULL; int nremote_plans = list_length(node->remote_plans); EState *estate_dml = CreateExecutorState(); remote_dml = true; mtstate->mt_remoterels = (PlanState **) palloc0(sizeof(PlanState *) * nremote_plans); for (i = 0; i < nremote_plans; i++) { remoteplan = list_nth(node->remote_plans, i); mtstate->mt_remoterels[i] = ExecInitNode(remoteplan, estate_dml, eflags); /* set params' number and type */ { RemoteQuery *rq = (RemoteQuery *)remoteplan; RemoteQueryState *rqs = (RemoteQueryState *)mtstate->mt_remoterels[i]; rqs->rqs_num_params = rq->rq_num_params; rqs->rqs_param_types = rq->rq_param_types; rqs->ss_num_params = rq->ss_num_params; rqs->ss_param_types = rq->ss_param_types; rqs->su_num_params = rq->su_num_params; rqs->su_param_types = rq->su_param_types; } } } #endif estate->es_result_relation_info = saved_resultRelInfo; /* The root table RT index is at the head of the partitioned_rels list */ if (node->partitioned_rels) { Index root_rti; Oid root_oid; root_rti = linitial_int(node->partitioned_rels); root_oid = getrelid(root_rti, estate->es_range_table); rel = heap_open(root_oid, NoLock); /* locked by InitPlan */ } else rel = mtstate->resultRelInfo->ri_RelationDesc; /* Build state for INSERT tuple routing */ if (operation == CMD_INSERT && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { PartitionDispatch *partition_dispatch_info; ResultRelInfo *partitions; TupleConversionMap **partition_tupconv_maps; TupleTableSlot *partition_tuple_slot; int num_parted, num_partitions; ExecSetupPartitionTupleRouting(rel, node->nominalRelation, &partition_dispatch_info, &partitions, &partition_tupconv_maps, &partition_tuple_slot, &num_parted, &num_partitions); mtstate->mt_partition_dispatch_info = partition_dispatch_info; mtstate->mt_num_dispatch = num_parted; mtstate->mt_partitions = partitions; mtstate->mt_num_partitions = num_partitions; mtstate->mt_partition_tupconv_maps = partition_tupconv_maps; mtstate->mt_partition_tuple_slot = partition_tuple_slot; } /* Build state for collecting transition tuples */ ExecSetupTransitionCaptureState(mtstate, estate); /* * Initialize any WITH CHECK OPTION constraints if needed. */ resultRelInfo = mtstate->resultRelInfo; i = 0; foreach(l, node->withCheckOptionLists) { List *wcoList = (List *) lfirst(l); List *wcoExprs = NIL; ListCell *ll; foreach(ll, wcoList) { WithCheckOption *wco = (WithCheckOption *) lfirst(ll); ExprState *wcoExpr = ExecInitQual((List *) wco->qual, mtstate->mt_plans[i]); wcoExprs = lappend(wcoExprs, wcoExpr); } resultRelInfo->ri_WithCheckOptions = wcoList; resultRelInfo->ri_WithCheckOptionExprs = wcoExprs; resultRelInfo++; i++; } #ifdef __AUDIT_FGA__ if (enable_fga) { foreach (item, node->plan.audit_fga_quals) { AuditFgaPolicy *audit_fga_qual = (AuditFgaPolicy *) lfirst(item); audit_fga_policy_state * audit_fga_policy_state_item = palloc0(sizeof(audit_fga_policy_state)); audit_fga_policy_state_item->policy_name = audit_fga_qual->policy_name; audit_fga_policy_state_item->query_string = audit_fga_qual->query_string; audit_fga_policy_state_item->qual = ExecInitQual(audit_fga_qual->qual, &mtstate->ps); mtstate->ps.audit_fga_qual = lappend(mtstate->ps.audit_fga_qual, audit_fga_policy_state_item); } } #endif /* * Build WITH CHECK OPTION constraints for each leaf partition rel. 
Note * that we didn't build the withCheckOptionList for each partition within * the planner, but simple translation of the varattnos for each partition * will suffice. This only occurs for the INSERT case; UPDATE/DELETE * cases are handled above. */ if (node->withCheckOptionLists != NIL && mtstate->mt_num_partitions > 0) { List *wcoList; PlanState *plan; /* * In case of INSERT on partitioned tables, there is only one plan. * Likewise, there is only one WITH CHECK OPTIONS list, not one per * partition. We make a copy of the WCO qual for each partition; note * that, if there are SubPlans in there, they all end up attached to * the one parent Plan node. */ Assert(operation == CMD_INSERT && list_length(node->withCheckOptionLists) == 1 && mtstate->mt_nplans == 1); wcoList = linitial(node->withCheckOptionLists); plan = mtstate->mt_plans[0]; resultRelInfo = mtstate->mt_partitions; for (i = 0; i < mtstate->mt_num_partitions; i++) { Relation partrel = resultRelInfo->ri_RelationDesc; List *mapped_wcoList; List *wcoExprs = NIL; ListCell *ll; /* varno = node->nominalRelation */ mapped_wcoList = map_partition_varattnos(wcoList, node->nominalRelation, partrel, rel, NULL); foreach(ll, mapped_wcoList) { WithCheckOption *wco = castNode(WithCheckOption, lfirst(ll)); ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual), plan); wcoExprs = lappend(wcoExprs, wcoExpr); } resultRelInfo->ri_WithCheckOptions = mapped_wcoList; resultRelInfo->ri_WithCheckOptionExprs = wcoExprs; resultRelInfo++; } } /* * Initialize RETURNING projections if needed. */ if (node->returningLists) { TupleTableSlot *slot; ExprContext *econtext; List *returningList; /* * Initialize result tuple slot and assign its rowtype using the first * RETURNING list. We assume the rest will look the same. */ tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists), false); /* Set up a slot for the output of the RETURNING projection(s) */ ExecInitResultTupleSlot(estate, &mtstate->ps); ExecAssignResultType(&mtstate->ps, tupDesc); slot = mtstate->ps.ps_ResultTupleSlot; /* Need an econtext too */ if (mtstate->ps.ps_ExprContext == NULL) ExecAssignExprContext(estate, &mtstate->ps); econtext = mtstate->ps.ps_ExprContext; /* * Build a projection for each result rel. */ resultRelInfo = mtstate->resultRelInfo; foreach(l, node->returningLists) { List *rlist = (List *) lfirst(l); resultRelInfo->ri_projectReturning = ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, resultRelInfo->ri_RelationDesc->rd_att); #ifdef __TBASE__ /* prepare returninglist for each child partition */ if(resultRelInfo->ispartparent) { int i; for(i = 0; i < resultRelInfo->partarraysize; i++) resultRelInfo->part_relinfo[i]->ri_projectReturning = resultRelInfo->ri_projectReturning; } #endif resultRelInfo++; } /* * Build a projection for each leaf partition rel. Note that we * didn't build the returningList for each partition within the * planner, but simple translation of the varattnos for each partition * will suffice. This only occurs for the INSERT case; UPDATE/DELETE * are handled above. 
*/ resultRelInfo = mtstate->mt_partitions; returningList = linitial(node->returningLists); for (i = 0; i < mtstate->mt_num_partitions; i++) { Relation partrel = resultRelInfo->ri_RelationDesc; List *rlist; /* varno = node->nominalRelation */ rlist = map_partition_varattnos(returningList, node->nominalRelation, partrel, rel, NULL); resultRelInfo->ri_projectReturning = ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps, resultRelInfo->ri_RelationDesc->rd_att); resultRelInfo++; } } else { /* * We still must construct a dummy result tuple type, because InitPlan * expects one (maybe should change that?). */ tupDesc = ExecTypeFromTL(NIL, false); ExecInitResultTupleSlot(estate, &mtstate->ps); ExecAssignResultType(&mtstate->ps, tupDesc); mtstate->ps.ps_ExprContext = NULL; } /* Close the root partitioned rel if we opened it above. */ if (rel != mtstate->resultRelInfo->ri_RelationDesc) heap_close(rel, NoLock); /* * If needed, Initialize target list, projection and qual for ON CONFLICT * DO UPDATE. */ resultRelInfo = mtstate->resultRelInfo; if (node->onConflictAction == ONCONFLICT_UPDATE) { ExprContext *econtext; TupleDesc tupDesc; /* insert may only have one plan, inheritance is not expanded */ Assert(nplans == 1); /* already exists if created by RETURNING processing above */ if (mtstate->ps.ps_ExprContext == NULL) ExecAssignExprContext(estate, &mtstate->ps); econtext = mtstate->ps.ps_ExprContext; /* initialize slot for the existing tuple */ mtstate->mt_existing = ExecInitExtraTupleSlot(mtstate->ps.state); ExecSetSlotDescriptor(mtstate->mt_existing, resultRelInfo->ri_RelationDesc->rd_att); /* carried forward solely for the benefit of explain */ mtstate->mt_excludedtlist = node->exclRelTlist; /* create target slot for UPDATE SET projection */ tupDesc = ExecTypeFromTL((List *) node->onConflictSet, resultRelInfo->ri_RelationDesc->rd_rel->relhasoids); mtstate->mt_conflproj = ExecInitExtraTupleSlot(mtstate->ps.state); ExecSetSlotDescriptor(mtstate->mt_conflproj, tupDesc); /* build UPDATE SET projection state */ resultRelInfo->ri_onConflictSetProj = ExecBuildProjectionInfo(node->onConflictSet, econtext, mtstate->mt_conflproj, &mtstate->ps, resultRelInfo->ri_RelationDesc->rd_att); /* build DO UPDATE WHERE clause expression */ if (node->onConflictWhere) { ExprState *qualexpr; qualexpr = ExecInitQual((List *) node->onConflictWhere, &mtstate->ps); resultRelInfo->ri_onConflictSetWhere = qualexpr; } } /* * If we have any secondary relations in an UPDATE or DELETE, they need to * be treated like non-locked relations in SELECT FOR UPDATE, ie, the * EvalPlanQual mechanism needs to be told about them. Locate the * relevant ExecRowMarks. */ foreach(l, node->rowMarks) { PlanRowMark *rc = lfirst_node(PlanRowMark, l); ExecRowMark *erm; /* ignore "parent" rowmarks; they are irrelevant at runtime */ if (rc->isParent) continue; /* find ExecRowMark (same for all subplans) */ erm = ExecFindRowMark(estate, rc->rti, false); /* build ExecAuxRowMark for each subplan */ for (i = 0; i < nplans; i++) { ExecAuxRowMark *aerm; if (mtstate->mt_plans[i]) { subplan = mtstate->mt_plans[i]->plan; } else { subplan = mtstate->partplans[0]->plan; } aerm = ExecBuildAuxRowMark(erm, subplan->targetlist); mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm); } } /* select first subplan */ mtstate->mt_whichplan = 0; subplan = (Plan *) linitial(node->plans); EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, mtstate->mt_arowmarks[0]); /* * Initialize the junk filter(s) if needed. 
INSERT queries need a filter * if there are any junk attrs in the tlist. UPDATE and DELETE always * need a filter, since there's always at least one junk attribute present * --- no need to look first. Typically, this will be a 'ctid' or * 'wholerow' attribute, but in the case of a foreign data wrapper it * might be a set of junk attributes sufficient to identify the remote * row. * * If there are multiple result relations, each one needs its own junk * filter. Note multiple rels are only possible for UPDATE/DELETE, so we * can't be fooled by some needing a filter and some not. * * This section of code is also a convenient place to verify that the * output of an INSERT or UPDATE matches the target table(s). */ { bool junk_filter_needed = false; switch (operation) { case CMD_INSERT: foreach(l, subplan->targetlist) { TargetEntry *tle = (TargetEntry *) lfirst(l); if (tle->resjunk) { junk_filter_needed = true; break; } } break; case CMD_UPDATE: case CMD_DELETE: junk_filter_needed = true; break; default: elog(ERROR, "unknown operation"); break; } if (junk_filter_needed) { resultRelInfo = mtstate->resultRelInfo; for (i = 0; i < nplans; i++) { JunkFilter *j; #ifdef __TBASE__ if(resultRelInfo->ispartparent && (operation == CMD_UPDATE || operation == CMD_DELETE)) { int partidx = 0; while(partidx < mtstate->part_len && !mtstate->partplans[partidx]) partidx++; if(partidx == mtstate->part_len) { resultRelInfo++; continue; } subplan = mtstate->partplans[partidx]->plan; } else { #endif subplan = mtstate->mt_plans[i]->plan; #ifdef __TBASE__ } #endif if (operation == CMD_INSERT || operation == CMD_UPDATE) ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, subplan->targetlist); j = ExecInitJunkFilter(subplan->targetlist, resultRelInfo->ri_RelationDesc->rd_att->tdhasoid, ExecInitExtraTupleSlot(estate)); if (operation == CMD_UPDATE || operation == CMD_DELETE) { /* For UPDATE/DELETE, find the appropriate junk attr now */ char relkind; relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind; if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW || relkind == RELKIND_PARTITIONED_TABLE) { j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid"); if (!AttributeNumberIsValid(j->jf_junkAttNo)) elog(ERROR, "could not find junk ctid column"); #ifdef __TBASE__ if (remote_dml) { j->jf_xc_wholerow = ExecFindJunkAttribute(j, "wholerow"); if (!AttributeNumberIsValid(j->jf_xc_wholerow)) elog(ERROR, "could not find junk wholerow column"); j->jf_xc_node_id = ExecFindJunkAttribute(j, "xc_node_id"); if (!AttributeNumberIsValid(j->jf_xc_node_id)) elog(ERROR, "could not find junk xc_node_id column"); } #endif } else if (relkind == RELKIND_FOREIGN_TABLE) { /* * When there is a row-level trigger, there should be * a wholerow attribute. */ j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow"); } else { j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow"); if (!AttributeNumberIsValid(j->jf_junkAttNo)) elog(ERROR, "could not find junk wholerow column"); } } resultRelInfo->ri_junkFilter = j; #ifdef __TBASE__ /* init junkfiler for each interval partition child table */ if(resultRelInfo->ispartparent) { int partidx; for(partidx = 0; partidx < resultRelInfo->partarraysize; partidx++) { if(resultRelInfo->part_relinfo[partidx]) resultRelInfo->part_relinfo[partidx]->ri_junkFilter = j; } } #endif resultRelInfo++; } } else { if (operation == CMD_INSERT) ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc, subplan->targetlist); } } /* * Set up a tuple table slot for use for trigger output tuples. 
In a plan * containing multiple ModifyTable nodes, all can share one such slot, so * we keep it in the estate. */ if (estate->es_trig_tuple_slot == NULL) estate->es_trig_tuple_slot = ExecInitExtraTupleSlot(estate); /* * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it * to estate->es_auxmodifytables so that it will be run to completion by * ExecPostprocessPlan. (It'd actually work fine to add the primary * ModifyTable node too, but there's no need.) Note the use of lcons not * lappend: we need later-initialized ModifyTable nodes to be shut down * before earlier ones. This ensures that we don't throw away RETURNING * rows that need to be seen by a later CTE subplan. */ if (!mtstate->canSetTag) estate->es_auxmodifytables = lcons(mtstate, estate->es_auxmodifytables); return mtstate; } /* ---------------------------------------------------------------- * ExecEndModifyTable * * Shuts down the plan. * * Returns nothing of interest. * ---------------------------------------------------------------- */ void ExecEndModifyTable(ModifyTableState *node) {// #lizard forgives int i; /* Free transition tables */ if (node->mt_transition_capture != NULL) DestroyTransitionCaptureState(node->mt_transition_capture); /* * Allow any FDWs to shut down */ for (i = 0; i < node->mt_nplans; i++) { ResultRelInfo *resultRelInfo = node->resultRelInfo + i; if (!resultRelInfo->ri_usesFdwDirectModify && resultRelInfo->ri_FdwRoutine != NULL && resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL) resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state, resultRelInfo); } #ifdef __TBASE__ if (IS_PGXC_COORDINATOR) { EState *state = NULL; ResponseCombiner *combiner; ModifyTable *plan = (ModifyTable *)node->ps.plan; if (plan->remote_plans) { int nremote_plans = list_length(plan->remote_plans); for (i = 0; i < nremote_plans; i++) { RemoteQuery *rq = (RemoteQuery *)list_nth(plan->remote_plans, i); combiner = (ResponseCombiner *) node->mt_remoterels[i]; state = combiner->ss.ps.state; ExecEndNode(node->mt_remoterels[i]); DropRemoteDMLStatement(rq->statement, rq->update_cursor); } FreeExecutorState(state); } } #endif /* * Close all the partitioned tables, leaf partitions, and their indices * * Remember node->mt_partition_dispatch_info[0] corresponds to the root * partitioned table, which we must not try to close, because it is the * main target table of the query that will be closed by ExecEndPlan(). * Also, tupslot is NULL for the root partitioned table. 
*/ for (i = 1; i < node->mt_num_dispatch; i++) { PartitionDispatch pd = node->mt_partition_dispatch_info[i]; heap_close(pd->reldesc, NoLock); ExecDropSingleTupleTableSlot(pd->tupslot); } for (i = 0; i < node->mt_num_partitions; i++) { ResultRelInfo *resultRelInfo = node->mt_partitions + i; ExecCloseIndices(resultRelInfo); heap_close(resultRelInfo->ri_RelationDesc, NoLock); } /* Release the standalone partition tuple descriptor, if any */ if (node->mt_partition_tuple_slot) ExecDropSingleTupleTableSlot(node->mt_partition_tuple_slot); /* * Free the exprcontext */ ExecFreeExprContext(&node->ps); /* * clean out the tuple table */ ExecClearTuple(node->ps.ps_ResultTupleSlot); /* * Terminate EPQ execution if active */ EvalPlanQualEnd(&node->mt_epqstate); /* * shut down subplans */ for (i = 0; i < node->mt_nplans; i++) ExecEndNode(node->mt_plans[i]); #ifdef __TBASE__ for (i = 0; i < node->part_len; i++) { if(node->partplans[i]) ExecEndNode(node->partplans[i]); } #endif } void ExecReScanModifyTable(ModifyTableState *node) { /* * Currently, we don't need to support rescan on ModifyTable nodes. The * semantics of that would be a bit debatable anyway. */ elog(ERROR, "ExecReScanModifyTable is not implemented"); } #ifdef __TBASE__ TupleTableSlot * ExecRemoteUpdate(ModifyTableState *mtstate, ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot, TupleTableSlot *planSlot, EPQState *epqstate, EState *estate, bool canSetTag) { return ExecUpdate(mtstate, tupleid, oldtuple, slot, planSlot, epqstate, estate, canSetTag); } #endif
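/*
 * Illustrative sketch only (not part of the original file): a minimal,
 * self-contained analogue of the control flow ExecModifyTable uses above --
 * pull rows from the current subplan until it is exhausted, then advance to
 * the next subplan and switch the per-plan state (result relation, junk
 * filter, transition-capture conversion map) that belongs to it.  All names
 * below (Row, Subplan, next_row, current_map) are hypothetical stand-ins,
 * not PostgreSQL/TBase APIs.
 */
#ifdef MODIFYTABLE_LOOP_SKETCH
#include <stdio.h>
#include <stddef.h>

typedef struct Row { int val; } Row;

typedef struct Subplan {
    Row        *rows;    /* rows this subplan will return */
    int         nrows;
    int         next;    /* index of the next row to hand out */
    const char *map;     /* stand-in for the per-plan conversion map */
} Subplan;

/* Return the next row of the given subplan, or NULL when it is exhausted. */
static Row *next_row(Subplan *sp)
{
    if (sp->next >= sp->nrows)
        return NULL;
    return &sp->rows[sp->next++];
}

int main(void)
{
    Row a[] = {{1}, {2}}, b[] = {{10}};
    Subplan plans[] = {
        { a, 2, 0, "map-for-child-A" },
        { b, 1, 0, "map-for-child-B" },
    };
    int whichplan = 0, nplans = 2;
    const char *current_map = plans[0].map;   /* like tcs_map for plan 0 */

    for (;;)
    {
        Row *row = next_row(&plans[whichplan]);

        if (row == NULL)
        {
            /* advance to the next subplan if any, switching per-plan state */
            whichplan++;
            if (whichplan >= nplans)
                break;
            current_map = plans[whichplan].map;
            continue;
        }
        printf("row %d handled with %s\n", row->val, current_map);
    }
    return 0;
}
#endif /* MODIFYTABLE_LOOP_SKETCH */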
759457.c
/* ** 2016 September 10 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** This file contains test code to delete an SQLite database and all ** of its associated files. Associated files include: ** ** * The journal file. ** * The wal file. ** * The SQLITE_ENABLE_8_3_NAMES version of the db, journal or wal files. ** * Files created by the test_multiplex.c module to extend any of the ** above. */ #ifndef SQLITE_OS_WIN # include <unistd.h> # include <errno.h> #endif #include <string.h> #include <assert.h> #include "sqlite3.h" /* The following #defines are copied from test_multiplex.c */ #ifndef MX_CHUNK_NUMBER # define MX_CHUNK_NUMBER 299 #endif #ifndef SQLITE_MULTIPLEX_JOURNAL_8_3_OFFSET # define SQLITE_MULTIPLEX_JOURNAL_8_3_OFFSET 400 #endif #ifndef SQLITE_MULTIPLEX_WAL_8_3_OFFSET # define SQLITE_MULTIPLEX_WAL_8_3_OFFSET 700 #endif /* ** This routine is a copy of (most of) the code from SQLite function ** sqlite3FileSuffix3(). It modifies the filename in buffer z in the ** same way as SQLite does when in 8.3 filenames mode. */ static void sqlite3Delete83Name(char *z){ int i, sz; sz = (int)strlen(z); for(i=sz-1; i>0 && z[i]!='/' && z[i]!='.'; i--){} if( z[i]=='.' && (sz>i+4) ) memmove(&z[i+1], &z[sz-3], 4); } /* ** zFile is a filename. Assuming no error occurs, if this file exists, ** set *pbExists to true and unlink it. Or, if the file does not exist, ** set *pbExists to false before returning. ** ** If an error occurs, non-zero is returned. Or, if no error occurs, zero. */ static int sqlite3DeleteUnlinkIfExists( sqlite3_vfs *pVfs, const char *zFile, int *pbExists ){ int rc = SQLITE_ERROR; #if SQLITE_OS_WIN if( pVfs ){ if( pbExists ) *pbExists = 1; rc = pVfs->xDelete(pVfs, zFile, 0); if( rc==SQLITE_IOERR_DELETE_NOENT ){ if( pbExists ) *pbExists = 0; rc = SQLITE_OK; } } #else assert( pVfs==0 ); rc = access(zFile, F_OK); if( rc ){ if( errno==ENOENT ){ if( pbExists ) *pbExists = 0; rc = SQLITE_OK; } }else{ if( pbExists ) *pbExists = 1; rc = unlink(zFile); } #endif return rc; } /* ** Delete the database file identified by the string argument passed to this ** function. The string must contain a filename, not an SQLite URI. */ SQLITE_API int sqlite3_delete_database( const char *zFile /* File to delete */ ){ char *zBuf; /* Buffer to sprintf() filenames to */ int nBuf; /* Size of buffer in bytes */ int rc = 0; /* System error code */ int i; /* Iterate through azFmt[] and aMFile[] */ const char *azFmt[] = { "%s", "%s-journal", "%s-wal", "%s-shm" }; struct MFile { const char *zFmt; int iOffset; int b83; } aMFile[] = { { "%s%03d", 0, 0 }, { "%s-journal%03d", 0, 0 }, { "%s-wal%03d", 0, 0 }, { "%s%03d", 0, 1 }, { "%s-journal%03d", SQLITE_MULTIPLEX_JOURNAL_8_3_OFFSET, 1 }, { "%s-wal%03d", SQLITE_MULTIPLEX_WAL_8_3_OFFSET, 1 }, }; #ifdef SQLITE_OS_WIN sqlite3_vfs *pVfs = sqlite3_vfs_find("win32"); #else sqlite3_vfs *pVfs = 0; #endif /* Allocate a buffer large enough for any of the files that need to be ** deleted. */ nBuf = (int)strlen(zFile) + 100; zBuf = (char*)sqlite3_malloc(nBuf); if( zBuf==0 ) return SQLITE_NOMEM; /* Delete both the regular and 8.3 filenames versions of the database, ** journal, wal and shm files. 
*/ for(i=0; rc==0 && i<sizeof(azFmt)/sizeof(azFmt[0]); i++){ sqlite3_snprintf(nBuf, zBuf, azFmt[i], zFile); rc = sqlite3DeleteUnlinkIfExists(pVfs, zBuf, 0); if( rc==0 && i!=0 ){ sqlite3Delete83Name(zBuf); rc = sqlite3DeleteUnlinkIfExists(pVfs, zBuf, 0); } } /* Delete any multiplexor files */ for(i=0; rc==0 && i<sizeof(aMFile)/sizeof(aMFile[0]); i++){ struct MFile *p = &aMFile[i]; int iChunk; for(iChunk=1; iChunk<=MX_CHUNK_NUMBER; iChunk++){ int bExists; sqlite3_snprintf(nBuf, zBuf, p->zFmt, zFile, iChunk+p->iOffset); if( p->b83 ) sqlite3Delete83Name(zBuf); rc = sqlite3DeleteUnlinkIfExists(pVfs, zBuf, &bExists); if( bExists==0 || rc!=0 ) break; } } sqlite3_free(zBuf); return (rc ? SQLITE_ERROR : SQLITE_OK); }
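/*
 * Usage sketch (not part of the original file): create a throwaway
 * database, close it, then remove it and any companion journal/wal/shm
 * files with sqlite3_delete_database().  Assumes linking against the
 * SQLite library; the file name "demo.db" is arbitrary, and the demo is
 * guarded so it does not interfere with the test module itself.
 */
#ifdef SQLITE_DELETE_DATABASE_DEMO
#include <stdio.h>

int main(void){
  sqlite3 *db = 0;
  int rc;

  rc = sqlite3_open("demo.db", &db);
  if( rc==SQLITE_OK ){
    sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES(1);", 0, 0, 0);
  }
  sqlite3_close(db);

  rc = sqlite3_delete_database("demo.db");
  printf("sqlite3_delete_database: %s\n", rc==SQLITE_OK ? "ok" : "error");
  return rc;
}
#endif /* SQLITE_DELETE_DATABASE_DEMO */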
557889.c
/*
 * Copyright (c) 2013-2016 ARM Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the License); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an AS IS BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Driver_ETH_PHY.h"

#define ARM_ETH_PHY_DRV_VERSION ARM_DRIVER_VERSION_MAJOR_MINOR(2, 0) /* driver version */

/* Driver Version */
static const ARM_DRIVER_VERSION DriverVersion = {
    ARM_ETH_PHY_API_VERSION,
    ARM_ETH_PHY_DRV_VERSION
};

//
// Functions
//

ARM_DRIVER_VERSION ARM_ETH_PHY_GetVersion(void)
{
    return DriverVersion;
}

int32_t ARM_ETH_PHY_Initialize(ARM_ETH_PHY_Read_t fn_read, ARM_ETH_PHY_Write_t fn_write)
{
    /* A real driver would store the register accessors and reset the PHY here. */
    (void)fn_read;
    (void)fn_write;
    return ARM_DRIVER_OK; /* placeholder so the skeleton compiles cleanly */
}

int32_t ARM_ETH_PHY_Uninitialize(void)
{
    return ARM_DRIVER_OK; /* placeholder */
}

int32_t ARM_ETH_PHY_PowerControl(ARM_POWER_STATE state)
{
    switch (state)
    {
    case ARM_POWER_OFF:
        break;
    case ARM_POWER_LOW:
        break;
    case ARM_POWER_FULL:
        break;
    default:
        return ARM_DRIVER_ERROR_UNSUPPORTED;
    }
    return ARM_DRIVER_OK; /* placeholder */
}

int32_t ARM_ETH_PHY_SetInterface(uint32_t interface)
{
    switch (interface)
    {
    case ARM_ETH_INTERFACE_MII:
        break;
    case ARM_ETH_INTERFACE_RMII:
        break;
    default:
        return ARM_DRIVER_ERROR_UNSUPPORTED;
    }
    return ARM_DRIVER_OK; /* placeholder */
}

int32_t ARM_ETH_PHY_SetMode(uint32_t mode)
{
    switch (mode & ARM_ETH_PHY_SPEED_Msk)
    {
    case ARM_ETH_PHY_SPEED_10M:
        break;
    case ARM_ETH_PHY_SPEED_100M:
        break;
    default:
        return ARM_DRIVER_ERROR_UNSUPPORTED;
    }

    switch (mode & ARM_ETH_PHY_DUPLEX_Msk)
    {
    case ARM_ETH_PHY_DUPLEX_HALF:
        break;
    case ARM_ETH_PHY_DUPLEX_FULL:
        break;
    }

    if (mode & ARM_ETH_PHY_AUTO_NEGOTIATE)
    {
    }

    if (mode & ARM_ETH_PHY_LOOPBACK)
    {
    }

    if (mode & ARM_ETH_PHY_ISOLATE)
    {
    }
    return ARM_DRIVER_OK; /* placeholder */
}

ARM_ETH_LINK_STATE ARM_ETH_PHY_GetLinkState(void)
{
    /* Placeholder: a real driver reads the PHY status register here. */
    return ARM_ETH_LINK_DOWN;
}

ARM_ETH_LINK_INFO ARM_ETH_PHY_GetLinkInfo(void)
{
    /* Placeholder: a real driver reports the negotiated speed/duplex here. */
    ARM_ETH_LINK_INFO info = { 0 };
    return info;
}

ARM_DRIVER_ETH_PHY ARM_Driver_ETH_PHY_(ETH_PHY_NUM) = {
    ARM_ETH_PHY_GetVersion,
    ARM_ETH_PHY_Initialize,
    ARM_ETH_PHY_Uninitialize,
    ARM_ETH_PHY_PowerControl,
    ARM_ETH_PHY_SetInterface,
    ARM_ETH_PHY_SetMode,
    ARM_ETH_PHY_GetLinkState,
    ARM_ETH_PHY_GetLinkInfo,
};
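/*
 * Usage sketch (not part of the template): how an Ethernet MAC driver might
 * drive this PHY driver through its access structure.  Assumes ETH_PHY_NUM
 * expands to 0, so the instance is Driver_ETH_PHY0; PHY_Read/PHY_Write are
 * hypothetical MDIO accessors that the MAC driver would normally provide,
 * and eth_phy_bring_up is just an illustrative caller.
 */
#ifdef ETH_PHY_USAGE_SKETCH
extern ARM_DRIVER_ETH_PHY Driver_ETH_PHY0;

static int32_t PHY_Read(uint8_t phy_addr, uint8_t reg_addr, uint16_t *data)
{
    /* read a PHY register over MDIO here */
    (void)phy_addr; (void)reg_addr; *data = 0U;
    return ARM_DRIVER_OK;
}

static int32_t PHY_Write(uint8_t phy_addr, uint8_t reg_addr, uint16_t data)
{
    /* write a PHY register over MDIO here */
    (void)phy_addr; (void)reg_addr; (void)data;
    return ARM_DRIVER_OK;
}

void eth_phy_bring_up(void)
{
    ARM_DRIVER_ETH_PHY *phy = &Driver_ETH_PHY0;

    phy->Initialize(PHY_Read, PHY_Write);
    phy->PowerControl(ARM_POWER_FULL);
    phy->SetInterface(ARM_ETH_INTERFACE_RMII);
    phy->SetMode(ARM_ETH_PHY_AUTO_NEGOTIATE);

    if (phy->GetLinkState() == ARM_ETH_LINK_UP) {
        /* link is up; query the negotiated speed/duplex if needed */
        ARM_ETH_LINK_INFO info = phy->GetLinkInfo();
        (void)info;
    }
}
#endif /* ETH_PHY_USAGE_SKETCH */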
733346.c
#define RES_LEN 8 static char res[RES_LEN + 1]; char *toHex(int num) { if (num == 0) { // Set result to 0 and return. res[0] = '0'; res[1] = '\0'; return res; } // Build map from {0, 1, ..., e, f} to their code point values. int charMap[16] = {0}, i = 0; char *s = "0123456789abcdef", tmp[RES_LEN + 1]; while (*s) charMap[i++] = *s++; /* Fill the temp char array with the values. We work with 4 bit sequences each time (num & 15, num >>= 4) and use the charMap to look-up the appropriate value. With 2's complement, negative numbers appear as an infinite sequence of ones stretching to the left, so we cap it with size. (set to 8, the number of 4bit sequences for a 32bit number). */ int size = RES_LEN; while (num && size > 0) { tmp[RES_LEN - size] = charMap[num & 15]; num >>= 4; size--; } // Tmp has results reversed, reverse them. for (int i = RES_LEN - size - 1, j = 0; i >= 0; i--, j++) res[j] = tmp[i]; // Set marker at the end and return. res[RES_LEN - size] = '\0'; return res; }
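/*
 * Usage sketch (not part of the original solution): exercise toHex() with a
 * positive, a zero and a negative value.  Note that toHex() returns a pointer
 * to a static buffer, so each call overwrites the previous result.
 */
#ifdef TOHEX_DEMO
#include <stdio.h>

int main(void) {
    printf("%s\n", toHex(26));   /* expected: 1a */
    printf("%s\n", toHex(0));    /* expected: 0 */
    printf("%s\n", toHex(-1));   /* expected: ffffffff on two's complement */
    return 0;
}
#endif /* TOHEX_DEMO */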
232893.c
// Lean compiler output // Module: Lean.Meta.Match.CaseValues // Imports: Init Lean.Meta.Tactic.Subst Lean.Meta.Tactic.Clear #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" #pragma clang diagnostic ignored "-Wunused-label" #elif defined(__GNUC__) && !defined(__CLANG__) #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wunused-label" #pragma GCC diagnostic ignored "-Wunused-but-set-variable" #endif #ifdef __cplusplus extern "C" { #endif lean_object* l_Lean_Meta_caseValues_loop(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t l_USize_add(size_t, size_t); lean_object* l_Lean_Expr_mvarId_x21(lean_object*); lean_object* l_Lean_Meta_withMVarContext___at_Lean_Meta_admit___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_stringToMessageData(lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); lean_object* l_Lean_Meta_appendTagSuffix(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_name_mk_string(lean_object*, lean_object*); uint8_t l_USize_decEq(size_t, size_t); lean_object* lean_array_uget(lean_object*, size_t); lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__2; lean_object* l_Lean_Meta_caseValueAux___lambda__2___closed__3; lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__4; lean_object* l_Lean_Meta_caseValueAux___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Util_Trace_0__Lean_checkTraceOptionM___at_Lean_Meta_isLevelDefEqAux___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_12336____closed__4; extern lean_object* l_Array_empty___closed__1; lean_object* l_Lean_Meta_caseValues_loop_match__1(lean_object*); lean_object* l_Lean_Meta_instInhabitedCaseValueSubgoal___closed__1; lean_object* lean_st_ref_get(lean_object*, lean_object*); lean_object* l_Lean_Meta_CaseValuesSubgoal_newHs___default; lean_object* l_Lean_Meta_FVarSubst_domain(lean_object*); lean_object* l_Lean_Meta_caseValue___closed__4; extern lean_object* l___private_Lean_Meta_SynthInstance_0__Lean_Meta_SynthInstance_mkAnswer___closed__4; lean_object* lean_array_push(lean_object*, lean_object*); lean_object* lean_array_get_size(lean_object*); lean_object* l_Lean_MessageData_ofList(lean_object*); lean_object* l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux_match__1(lean_object*); lean_object* l_Lean_Meta_caseValue___closed__1; lean_object* l_Lean_Meta_instInhabitedCaseValueSubgoal; lean_object* l_Lean_Meta_mkAppOptM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_nat_add(lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues_loop_match__4(lean_object*); lean_object* 
l_Lean_Meta_caseValueAux___lambda__1___closed__2; lean_object* l_Lean_Meta_CaseValuesSubgoal_subst___default; lean_object* l_Lean_Meta_intro1Core(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues_loop___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues_loop___closed__4; lean_object* l_Lean_Meta_substCore(lean_object*, lean_object*, uint8_t, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux_match__3(lean_object*); lean_object* l_Lean_Meta_getMVarType(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__1(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__1___closed__1; lean_object* l_Lean_Meta_caseValueAux___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_addTrace___at_Lean_Meta_isLevelDefEqAux___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__2___closed__1; lean_object* l_Lean_Meta_tryClear(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_foldlMUnsafe___at_Lean_Meta_caseValues_loop___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValue___closed__2; lean_object* l_Lean_Expr_fvarId_x21(lean_object*); lean_object* l_Lean_Name_appendIndexAfter(lean_object*, lean_object*); extern lean_object* l_myMacro____x40_Init_Notation___hyg_8971____closed__4; lean_object* l_Lean_Meta_instInhabitedCaseValuesSubgoal; extern lean_object* l_Lean_Meta_initFn____x40_Lean_Meta_Basic___hyg_550____closed__2; lean_object* l_Lean_Meta_caseValueAux_match__1___rarg(lean_object*, lean_object*); lean_object* lean_array_to_list(lean_object*, lean_object*); lean_object* l_Lean_Meta_CaseValueSubgoal_subst___default; lean_object* l_Lean_Meta_caseValue(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValue___closed__6; lean_object* l_Lean_Meta_instInhabitedCaseValuesSubgoal___closed__1; extern lean_object* l_Lean_KernelException_toMessageData___closed__15; lean_object* l_Lean_Meta_caseValues_loop___closed__3; lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__3; lean_object* l_Lean_Meta_caseValues_loop___closed__5; lean_object* l_Lean_Meta_caseValues_loop_match__1___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValue___closed__5; lean_object* l_Lean_mkFVar(lean_object*); size_t lean_usize_of_nat(lean_object*); lean_object* l_Lean_Meta_caseValues_loop___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* 
l_Lean_Meta_caseValues_loop_match__4___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_throwTacticEx___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_FVarSubst_get(lean_object*, lean_object*); lean_object* l_Lean_Meta_assignExprMVar(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Meta_caseValues_loop___spec__2(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues_loop___closed__2; lean_object* l_Lean_Meta_caseValueAux(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_checkNotAssigned(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_getLocalDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues_loop___closed__6; uint8_t lean_nat_dec_le(lean_object*, lean_object*); lean_object* l_Lean_mkApp(lean_object*, lean_object*); lean_object* l_Array_foldlMUnsafe___at_Lean_Meta_caseValues_loop___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Parser_Tactic_case___closed__1; lean_object* l_Lean_Meta_caseValues_loop___closed__1; lean_object* l_Lean_Meta_caseValues_loop_match__3(lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__7; lean_object* l_Lean_Meta_caseValueAux___lambda__2___closed__2; lean_object* l_Lean_Meta_caseValue___closed__3; lean_object* l_Lean_Meta_caseValues_loop_match__2___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_mkForall(lean_object*, uint8_t, lean_object*, lean_object*); lean_object* l_List_map___at_Lean_Meta_substCore___spec__6(lean_object*); lean_object* l_Lean_Meta_caseValues_loop_match__2(lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__5; lean_object* l_Lean_Meta_caseValueAux_match__2___rarg(lean_object*, lean_object*); extern lean_object* l_Lean_mkOptionalNode___closed__2; lean_object* l_Lean_Meta_caseValueAux_match__2(lean_object*); lean_object* l_Lean_Meta_mkFreshExprSyntheticOpaqueMVar(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__2___closed__4; lean_object* l_Lean_Meta_mkEq(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__1; lean_object* l_Lean_Meta_getMVarTag___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Meta_caseValues_loop___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_mkConst(lean_object*, lean_object*); lean_object* 
l_Lean_Meta_caseValueAux_match__3___rarg(lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValueAux___lambda__3___closed__6; lean_object* l_Lean_Meta_withMVarContext___at_Lean_Meta_substCore___spec__5___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Meta_SynthInstance_0__Lean_Meta_SynthInstance_mkAnswer___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_caseValues_loop_match__3___rarg(lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); static lean_object* _init_l_Lean_Meta_CaseValueSubgoal_subst___default() { _start: { lean_object* x_1; x_1 = lean_box(0); return x_1; } } static lean_object* _init_l_Lean_Meta_instInhabitedCaseValueSubgoal___closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = lean_box(0); x_3 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_3, 0, x_2); lean_ctor_set(x_3, 1, x_2); lean_ctor_set(x_3, 2, x_1); return x_3; } } static lean_object* _init_l_Lean_Meta_instInhabitedCaseValueSubgoal() { _start: { lean_object* x_1; x_1 = l_Lean_Meta_instInhabitedCaseValueSubgoal___closed__1; return x_1; } } lean_object* l_Lean_Meta_caseValueAux_match__1___rarg(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; x_3 = lean_ctor_get(x_1, 0); lean_inc(x_3); x_4 = lean_ctor_get(x_1, 1); lean_inc(x_4); lean_dec(x_1); x_5 = lean_apply_2(x_2, x_3, x_4); return x_5; } } lean_object* l_Lean_Meta_caseValueAux_match__1(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux_match__1___rarg), 2, 0); return x_2; } } lean_object* l_Lean_Meta_caseValueAux_match__2___rarg(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; x_3 = lean_ctor_get(x_1, 0); lean_inc(x_3); x_4 = lean_ctor_get(x_1, 1); lean_inc(x_4); lean_dec(x_1); x_5 = lean_apply_2(x_2, x_3, x_4); return x_5; } } lean_object* l_Lean_Meta_caseValueAux_match__2(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux_match__2___rarg), 2, 0); return x_2; } } lean_object* l_Lean_Meta_caseValueAux_match__3___rarg(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; x_3 = lean_ctor_get(x_1, 0); lean_inc(x_3); x_4 = lean_ctor_get(x_1, 1); lean_inc(x_4); lean_dec(x_1); x_5 = lean_apply_2(x_2, x_3, x_4); return x_5; } } lean_object* l_Lean_Meta_caseValueAux_match__3(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux_match__3___rarg), 2, 0); return x_2; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__1___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("subst domain: "); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__1___closed__2() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_Meta_caseValueAux___lambda__1___closed__1; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } lean_object* l_Lean_Meta_caseValueAux___lambda__1(lean_object* x_1, lean_object* x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { if (x_3 == 0) { lean_object* x_9; lean_object* x_10; lean_dec(x_2); x_9 = lean_box(0); x_10 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_10, 0, x_9); 
lean_ctor_set(x_10, 1, x_8); return x_10; } else { lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; x_11 = l_Lean_Meta_FVarSubst_domain(x_1); x_12 = l_List_map___at_Lean_Meta_substCore___spec__6(x_11); x_13 = l_Lean_MessageData_ofList(x_12); lean_dec(x_12); x_14 = l_Lean_Meta_caseValueAux___lambda__1___closed__2; x_15 = lean_alloc_ctor(10, 2, 0); lean_ctor_set(x_15, 0, x_14); lean_ctor_set(x_15, 1, x_13); x_16 = l_Lean_KernelException_toMessageData___closed__15; x_17 = lean_alloc_ctor(10, 2, 0); lean_ctor_set(x_17, 0, x_15); lean_ctor_set(x_17, 1, x_16); x_18 = l_Lean_addTrace___at_Lean_Meta_isLevelDefEqAux___spec__1(x_2, x_17, x_4, x_5, x_6, x_7, x_8); return x_18; } } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__2___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("found decl"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__2___closed__2() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_Meta_caseValueAux___lambda__2___closed__1; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__2___closed__3() { _start: { lean_object* x_1; x_1 = lean_mk_string("searching for decl"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__2___closed__4() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_Meta_caseValueAux___lambda__2___closed__3; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } lean_object* l_Lean_Meta_caseValueAux___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_43; lean_object* x_44; lean_object* x_45; uint8_t x_46; x_10 = l_Lean_Meta_FVarSubst_get(x_1, x_2); x_11 = l_Lean_Expr_fvarId_x21(x_10); lean_dec(x_10); x_43 = lean_st_ref_get(x_8, x_9); x_44 = lean_ctor_get(x_43, 0); lean_inc(x_44); x_45 = lean_ctor_get(x_44, 3); lean_inc(x_45); lean_dec(x_44); x_46 = lean_ctor_get_uint8(x_45, sizeof(void*)*1); lean_dec(x_45); if (x_46 == 0) { lean_object* x_47; x_47 = lean_ctor_get(x_43, 1); lean_inc(x_47); lean_dec(x_43); x_12 = x_47; goto block_42; } else { lean_object* x_48; lean_object* x_49; lean_object* x_50; uint8_t x_51; x_48 = lean_ctor_get(x_43, 1); lean_inc(x_48); lean_dec(x_43); lean_inc(x_3); x_49 = l___private_Lean_Util_Trace_0__Lean_checkTraceOptionM___at_Lean_Meta_isLevelDefEqAux___spec__2(x_3, x_5, x_6, x_7, x_8, x_48); x_50 = lean_ctor_get(x_49, 0); lean_inc(x_50); x_51 = lean_unbox(x_50); lean_dec(x_50); if (x_51 == 0) { lean_object* x_52; x_52 = lean_ctor_get(x_49, 1); lean_inc(x_52); lean_dec(x_49); x_12 = x_52; goto block_42; } else { lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; x_53 = lean_ctor_get(x_49, 1); lean_inc(x_53); lean_dec(x_49); x_54 = l_Lean_Meta_caseValueAux___lambda__2___closed__4; lean_inc(x_3); x_55 = l_Lean_addTrace___at_Lean_Meta_isLevelDefEqAux___spec__1(x_3, x_54, x_5, x_6, x_7, x_8, x_53); x_56 = lean_ctor_get(x_55, 1); lean_inc(x_56); lean_dec(x_55); x_12 = x_56; goto block_42; } } block_42: { lean_object* x_13; lean_inc(x_5); x_13 = l_Lean_Meta_getLocalDecl(x_11, x_5, x_6, x_7, x_8, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; uint8_t x_18; x_14 = lean_ctor_get(x_13, 
1); lean_inc(x_14); lean_dec(x_13); x_15 = lean_st_ref_get(x_8, x_14); x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); x_17 = lean_ctor_get(x_16, 3); lean_inc(x_17); lean_dec(x_16); x_18 = lean_ctor_get_uint8(x_17, sizeof(void*)*1); lean_dec(x_17); if (x_18 == 0) { uint8_t x_19; lean_dec(x_5); lean_dec(x_3); x_19 = !lean_is_exclusive(x_15); if (x_19 == 0) { lean_object* x_20; lean_object* x_21; x_20 = lean_ctor_get(x_15, 0); lean_dec(x_20); x_21 = lean_box(0); lean_ctor_set(x_15, 0, x_21); return x_15; } else { lean_object* x_22; lean_object* x_23; lean_object* x_24; x_22 = lean_ctor_get(x_15, 1); lean_inc(x_22); lean_dec(x_15); x_23 = lean_box(0); x_24 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_24, 0, x_23); lean_ctor_set(x_24, 1, x_22); return x_24; } } else { lean_object* x_25; lean_object* x_26; lean_object* x_27; uint8_t x_28; x_25 = lean_ctor_get(x_15, 1); lean_inc(x_25); lean_dec(x_15); lean_inc(x_3); x_26 = l___private_Lean_Util_Trace_0__Lean_checkTraceOptionM___at_Lean_Meta_isLevelDefEqAux___spec__2(x_3, x_5, x_6, x_7, x_8, x_25); x_27 = lean_ctor_get(x_26, 0); lean_inc(x_27); x_28 = lean_unbox(x_27); lean_dec(x_27); if (x_28 == 0) { uint8_t x_29; lean_dec(x_5); lean_dec(x_3); x_29 = !lean_is_exclusive(x_26); if (x_29 == 0) { lean_object* x_30; lean_object* x_31; x_30 = lean_ctor_get(x_26, 0); lean_dec(x_30); x_31 = lean_box(0); lean_ctor_set(x_26, 0, x_31); return x_26; } else { lean_object* x_32; lean_object* x_33; lean_object* x_34; x_32 = lean_ctor_get(x_26, 1); lean_inc(x_32); lean_dec(x_26); x_33 = lean_box(0); x_34 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_34, 0, x_33); lean_ctor_set(x_34, 1, x_32); return x_34; } } else { lean_object* x_35; lean_object* x_36; lean_object* x_37; x_35 = lean_ctor_get(x_26, 1); lean_inc(x_35); lean_dec(x_26); x_36 = l_Lean_Meta_caseValueAux___lambda__2___closed__2; x_37 = l_Lean_addTrace___at_Lean_Meta_isLevelDefEqAux___spec__1(x_3, x_36, x_5, x_6, x_7, x_8, x_35); lean_dec(x_5); return x_37; } } } else { uint8_t x_38; lean_dec(x_5); lean_dec(x_3); x_38 = !lean_is_exclusive(x_13); if (x_38 == 0) { return x_13; } else { lean_object* x_39; lean_object* x_40; lean_object* x_41; x_39 = lean_ctor_get(x_13, 0); x_40 = lean_ctor_get(x_13, 1); lean_inc(x_40); lean_inc(x_39); lean_dec(x_13); x_41 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_41, 0, x_39); lean_ctor_set(x_41, 1, x_40); return x_41; } } } } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("caseValue"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Meta_caseValueAux___lambda__3___closed__1; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_myMacro____x40_Init_Notation___hyg_8971____closed__4; x_3 = l_Lean_mkConst(x_2, x_1); return x_3; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__4() { _start: { lean_object* x_1; lean_object* x_2; x_1 = lean_unsigned_to_nat(5u); x_2 = lean_mk_empty_array_with_capacity(x_1); return x_2; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Meta_caseValueAux___lambda__3___closed__4; x_3 = lean_array_push(x_2, 
x_1); return x_3; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__6() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_Meta_initFn____x40_Lean_Meta_Basic___hyg_550____closed__2; x_2 = lean_alloc_closure((void*)(l___private_Lean_Meta_SynthInstance_0__Lean_Meta_SynthInstance_mkAnswer___lambda__2___boxed), 7, 1); lean_closure_set(x_2, 0, x_1); return x_2; } } static lean_object* _init_l_Lean_Meta_caseValueAux___lambda__3___closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l___private_Lean_Meta_SynthInstance_0__Lean_Meta_SynthInstance_mkAnswer___closed__4; x_2 = l_Lean_Meta_caseValueAux___lambda__3___closed__6; x_3 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg), 7, 2); lean_closure_set(x_3, 0, x_1); lean_closure_set(x_3, 1, x_2); return x_3; } } lean_object* l_Lean_Meta_caseValueAux___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; lean_object* x_13; x_12 = l_Lean_Meta_caseValueAux___lambda__3___closed__2; lean_inc(x_1); x_13 = l_Lean_Meta_checkNotAssigned(x_1, x_12, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; x_14 = lean_ctor_get(x_13, 1); lean_inc(x_14); lean_dec(x_13); lean_inc(x_1); x_15 = l_Lean_Meta_getMVarType(x_1, x_7, x_8, x_9, x_10, x_14); if (lean_obj_tag(x_15) == 0) { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); x_17 = lean_ctor_get(x_15, 1); lean_inc(x_17); lean_dec(x_15); x_18 = l_Lean_mkFVar(x_2); lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); x_19 = l_Lean_Meta_mkEq(x_18, x_3, x_7, x_8, x_9, x_10, x_17); if (lean_obj_tag(x_19) == 0) { lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; x_20 = lean_ctor_get(x_19, 0); lean_inc(x_20); x_21 = lean_ctor_get(x_19, 1); lean_inc(x_21); lean_dec(x_19); x_22 = l_Lean_Meta_caseValueAux___lambda__3___closed__3; lean_inc(x_20); x_23 = l_Lean_mkApp(x_22, x_20); x_24 = 0; lean_inc(x_16); lean_inc(x_20); lean_inc(x_4); x_25 = l_Lean_mkForall(x_4, x_24, x_20, x_16); x_26 = l_Lean_mkForall(x_4, x_24, x_23, x_16); lean_inc(x_7); lean_inc(x_6); x_27 = l_Lean_Meta_mkFreshExprSyntheticOpaqueMVar(x_25, x_6, x_7, x_8, x_9, x_10, x_21); x_28 = lean_ctor_get(x_27, 0); lean_inc(x_28); x_29 = lean_ctor_get(x_27, 1); lean_inc(x_29); lean_dec(x_27); lean_inc(x_7); x_30 = l_Lean_Meta_mkFreshExprSyntheticOpaqueMVar(x_26, x_6, x_7, x_8, x_9, x_10, x_29); x_31 = lean_ctor_get(x_30, 0); lean_inc(x_31); x_32 = lean_ctor_get(x_30, 1); lean_inc(x_32); lean_dec(x_30); x_33 = lean_box(0); x_34 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_34, 0, x_20); lean_inc(x_28); x_35 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_35, 0, x_28); lean_inc(x_31); x_36 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_36, 0, x_31); x_37 = l_Lean_Meta_caseValueAux___lambda__3___closed__5; x_38 = lean_array_push(x_37, x_34); x_39 = 
lean_array_push(x_38, x_33); x_40 = lean_array_push(x_39, x_35); x_41 = lean_array_push(x_40, x_36); x_42 = l_myMacro____x40_Init_Notation___hyg_12336____closed__4; lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); x_43 = l_Lean_Meta_mkAppOptM(x_42, x_41, x_7, x_8, x_9, x_10, x_32); if (lean_obj_tag(x_43) == 0) { lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; uint8_t x_49; lean_object* x_50; x_44 = lean_ctor_get(x_43, 0); lean_inc(x_44); x_45 = lean_ctor_get(x_43, 1); lean_inc(x_45); lean_dec(x_43); x_46 = l_Lean_Meta_assignExprMVar(x_1, x_44, x_7, x_8, x_9, x_10, x_45); x_47 = lean_ctor_get(x_46, 1); lean_inc(x_47); lean_dec(x_46); x_48 = l_Lean_Expr_mvarId_x21(x_31); lean_dec(x_31); x_49 = 1; lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); x_50 = l_Lean_Meta_intro1Core(x_48, x_49, x_7, x_8, x_9, x_10, x_47); if (lean_obj_tag(x_50) == 0) { lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; x_51 = lean_ctor_get(x_50, 0); lean_inc(x_51); x_52 = lean_ctor_get(x_50, 1); lean_inc(x_52); lean_dec(x_50); x_53 = lean_ctor_get(x_51, 0); lean_inc(x_53); x_54 = lean_ctor_get(x_51, 1); lean_inc(x_54); lean_dec(x_51); lean_inc(x_5); x_55 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_55, 0, x_54); lean_ctor_set(x_55, 1, x_53); lean_ctor_set(x_55, 2, x_5); x_56 = l_Lean_Expr_mvarId_x21(x_28); lean_dec(x_28); lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); x_57 = l_Lean_Meta_intro1Core(x_56, x_49, x_7, x_8, x_9, x_10, x_52); if (lean_obj_tag(x_57) == 0) { lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; uint8_t x_62; lean_object* x_63; x_58 = lean_ctor_get(x_57, 0); lean_inc(x_58); x_59 = lean_ctor_get(x_57, 1); lean_inc(x_59); lean_dec(x_57); x_60 = lean_ctor_get(x_58, 0); lean_inc(x_60); x_61 = lean_ctor_get(x_58, 1); lean_inc(x_61); lean_dec(x_58); x_62 = 0; lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_60); x_63 = l_Lean_Meta_substCore(x_61, x_60, x_62, x_5, x_62, x_7, x_8, x_9, x_10, x_59); if (lean_obj_tag(x_63) == 0) { lean_object* x_64; lean_object* x_65; uint8_t x_66; x_64 = lean_ctor_get(x_63, 0); lean_inc(x_64); x_65 = lean_ctor_get(x_63, 1); lean_inc(x_65); lean_dec(x_63); x_66 = !lean_is_exclusive(x_64); if (x_66 == 0) { lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; x_67 = lean_ctor_get(x_64, 0); x_68 = lean_ctor_get(x_64, 1); x_69 = l_Lean_Meta_initFn____x40_Lean_Meta_Basic___hyg_550____closed__2; lean_inc(x_67); x_70 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux___lambda__1___boxed), 8, 2); lean_closure_set(x_70, 0, x_67); lean_closure_set(x_70, 1, x_69); x_71 = l_Lean_Meta_caseValueAux___lambda__3___closed__7; x_72 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg), 7, 2); lean_closure_set(x_72, 0, x_71); lean_closure_set(x_72, 1, x_70); lean_inc(x_60); lean_inc(x_67); x_73 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux___lambda__2___boxed), 9, 3); lean_closure_set(x_73, 0, x_67); lean_closure_set(x_73, 1, x_60); lean_closure_set(x_73, 2, x_69); x_74 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg), 7, 2); lean_closure_set(x_74, 0, x_72); lean_closure_set(x_74, 1, x_73); lean_inc(x_68); x_75 = 
l_Lean_Meta_withMVarContext___at_Lean_Meta_substCore___spec__5___rarg(x_68, x_74, x_7, x_8, x_9, x_10, x_65); if (lean_obj_tag(x_75) == 0) { uint8_t x_76; x_76 = !lean_is_exclusive(x_75); if (x_76 == 0) { lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; x_77 = lean_ctor_get(x_75, 0); lean_dec(x_77); x_78 = l_Lean_Meta_FVarSubst_get(x_67, x_60); x_79 = l_Lean_Expr_fvarId_x21(x_78); lean_dec(x_78); x_80 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_80, 0, x_68); lean_ctor_set(x_80, 1, x_79); lean_ctor_set(x_80, 2, x_67); lean_ctor_set(x_64, 1, x_55); lean_ctor_set(x_64, 0, x_80); lean_ctor_set(x_75, 0, x_64); return x_75; } else { lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; x_81 = lean_ctor_get(x_75, 1); lean_inc(x_81); lean_dec(x_75); x_82 = l_Lean_Meta_FVarSubst_get(x_67, x_60); x_83 = l_Lean_Expr_fvarId_x21(x_82); lean_dec(x_82); x_84 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_84, 0, x_68); lean_ctor_set(x_84, 1, x_83); lean_ctor_set(x_84, 2, x_67); lean_ctor_set(x_64, 1, x_55); lean_ctor_set(x_64, 0, x_84); x_85 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_85, 0, x_64); lean_ctor_set(x_85, 1, x_81); return x_85; } } else { uint8_t x_86; lean_free_object(x_64); lean_dec(x_68); lean_dec(x_67); lean_dec(x_60); lean_dec(x_55); x_86 = !lean_is_exclusive(x_75); if (x_86 == 0) { return x_75; } else { lean_object* x_87; lean_object* x_88; lean_object* x_89; x_87 = lean_ctor_get(x_75, 0); x_88 = lean_ctor_get(x_75, 1); lean_inc(x_88); lean_inc(x_87); lean_dec(x_75); x_89 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_89, 0, x_87); lean_ctor_set(x_89, 1, x_88); return x_89; } } } else { lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; x_90 = lean_ctor_get(x_64, 0); x_91 = lean_ctor_get(x_64, 1); lean_inc(x_91); lean_inc(x_90); lean_dec(x_64); x_92 = l_Lean_Meta_initFn____x40_Lean_Meta_Basic___hyg_550____closed__2; lean_inc(x_90); x_93 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux___lambda__1___boxed), 8, 2); lean_closure_set(x_93, 0, x_90); lean_closure_set(x_93, 1, x_92); x_94 = l_Lean_Meta_caseValueAux___lambda__3___closed__7; x_95 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg), 7, 2); lean_closure_set(x_95, 0, x_94); lean_closure_set(x_95, 1, x_93); lean_inc(x_60); lean_inc(x_90); x_96 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux___lambda__2___boxed), 9, 3); lean_closure_set(x_96, 0, x_90); lean_closure_set(x_96, 1, x_60); lean_closure_set(x_96, 2, x_92); x_97 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg), 7, 2); lean_closure_set(x_97, 0, x_95); lean_closure_set(x_97, 1, x_96); lean_inc(x_91); x_98 = l_Lean_Meta_withMVarContext___at_Lean_Meta_substCore___spec__5___rarg(x_91, x_97, x_7, x_8, x_9, x_10, x_65); if (lean_obj_tag(x_98) == 0) { lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; x_99 = lean_ctor_get(x_98, 1); lean_inc(x_99); if (lean_is_exclusive(x_98)) { lean_ctor_release(x_98, 0); lean_ctor_release(x_98, 1); x_100 = x_98; } else { lean_dec_ref(x_98); x_100 = lean_box(0); } x_101 = l_Lean_Meta_FVarSubst_get(x_90, x_60); x_102 = l_Lean_Expr_fvarId_x21(x_101); lean_dec(x_101); x_103 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_103, 0, x_91); lean_ctor_set(x_103, 1, x_102); lean_ctor_set(x_103, 
2, x_90); x_104 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_104, 0, x_103); lean_ctor_set(x_104, 1, x_55); if (lean_is_scalar(x_100)) { x_105 = lean_alloc_ctor(0, 2, 0); } else { x_105 = x_100; } lean_ctor_set(x_105, 0, x_104); lean_ctor_set(x_105, 1, x_99); return x_105; } else { lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_dec(x_91); lean_dec(x_90); lean_dec(x_60); lean_dec(x_55); x_106 = lean_ctor_get(x_98, 0); lean_inc(x_106); x_107 = lean_ctor_get(x_98, 1); lean_inc(x_107); if (lean_is_exclusive(x_98)) { lean_ctor_release(x_98, 0); lean_ctor_release(x_98, 1); x_108 = x_98; } else { lean_dec_ref(x_98); x_108 = lean_box(0); } if (lean_is_scalar(x_108)) { x_109 = lean_alloc_ctor(1, 2, 0); } else { x_109 = x_108; } lean_ctor_set(x_109, 0, x_106); lean_ctor_set(x_109, 1, x_107); return x_109; } } } else { uint8_t x_110; lean_dec(x_60); lean_dec(x_55); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); x_110 = !lean_is_exclusive(x_63); if (x_110 == 0) { return x_63; } else { lean_object* x_111; lean_object* x_112; lean_object* x_113; x_111 = lean_ctor_get(x_63, 0); x_112 = lean_ctor_get(x_63, 1); lean_inc(x_112); lean_inc(x_111); lean_dec(x_63); x_113 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_113, 0, x_111); lean_ctor_set(x_113, 1, x_112); return x_113; } } } else { uint8_t x_114; lean_dec(x_55); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_5); x_114 = !lean_is_exclusive(x_57); if (x_114 == 0) { return x_57; } else { lean_object* x_115; lean_object* x_116; lean_object* x_117; x_115 = lean_ctor_get(x_57, 0); x_116 = lean_ctor_get(x_57, 1); lean_inc(x_116); lean_inc(x_115); lean_dec(x_57); x_117 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_117, 0, x_115); lean_ctor_set(x_117, 1, x_116); return x_117; } } } else { uint8_t x_118; lean_dec(x_28); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_5); x_118 = !lean_is_exclusive(x_50); if (x_118 == 0) { return x_50; } else { lean_object* x_119; lean_object* x_120; lean_object* x_121; x_119 = lean_ctor_get(x_50, 0); x_120 = lean_ctor_get(x_50, 1); lean_inc(x_120); lean_inc(x_119); lean_dec(x_50); x_121 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_121, 0, x_119); lean_ctor_set(x_121, 1, x_120); return x_121; } } } else { uint8_t x_122; lean_dec(x_31); lean_dec(x_28); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_5); lean_dec(x_1); x_122 = !lean_is_exclusive(x_43); if (x_122 == 0) { return x_43; } else { lean_object* x_123; lean_object* x_124; lean_object* x_125; x_123 = lean_ctor_get(x_43, 0); x_124 = lean_ctor_get(x_43, 1); lean_inc(x_124); lean_inc(x_123); lean_dec(x_43); x_125 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_125, 0, x_123); lean_ctor_set(x_125, 1, x_124); return x_125; } } } else { uint8_t x_126; lean_dec(x_16); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_1); x_126 = !lean_is_exclusive(x_19); if (x_126 == 0) { return x_19; } else { lean_object* x_127; lean_object* x_128; lean_object* x_129; x_127 = lean_ctor_get(x_19, 0); x_128 = lean_ctor_get(x_19, 1); lean_inc(x_128); lean_inc(x_127); lean_dec(x_19); x_129 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_129, 0, x_127); lean_ctor_set(x_129, 1, x_128); return x_129; } } } else { uint8_t x_130; lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_130 = !lean_is_exclusive(x_15); if (x_130 == 0) { 
return x_15; } else { lean_object* x_131; lean_object* x_132; lean_object* x_133; x_131 = lean_ctor_get(x_15, 0); x_132 = lean_ctor_get(x_15, 1); lean_inc(x_132); lean_inc(x_131); lean_dec(x_15); x_133 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_133, 0, x_131); lean_ctor_set(x_133, 1, x_132); return x_133; } } } else { uint8_t x_134; lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_134 = !lean_is_exclusive(x_13); if (x_134 == 0) { return x_13; } else { lean_object* x_135; lean_object* x_136; lean_object* x_137; x_135 = lean_ctor_get(x_13, 0); x_136 = lean_ctor_get(x_13, 1); lean_inc(x_136); lean_inc(x_135); lean_dec(x_13); x_137 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_137, 0, x_135); lean_ctor_set(x_137, 1, x_136); return x_137; } } } } lean_object* l_Lean_Meta_caseValueAux(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_inc(x_1); x_11 = lean_alloc_closure((void*)(l_Lean_Meta_getMVarTag___boxed), 6, 1); lean_closure_set(x_11, 0, x_1); lean_inc(x_1); x_12 = lean_alloc_closure((void*)(l_Lean_Meta_caseValueAux___lambda__3), 11, 5); lean_closure_set(x_12, 0, x_1); lean_closure_set(x_12, 1, x_2); lean_closure_set(x_12, 2, x_3); lean_closure_set(x_12, 3, x_4); lean_closure_set(x_12, 4, x_5); x_13 = lean_alloc_closure((void*)(l_ReaderT_bind___at_Lean_Meta_instMonadLCtxMetaM___spec__2___rarg), 7, 2); lean_closure_set(x_13, 0, x_11); lean_closure_set(x_13, 1, x_12); x_14 = l_Lean_Meta_withMVarContext___at_Lean_Meta_admit___spec__1___rarg(x_1, x_13, x_6, x_7, x_8, x_9, x_10); return x_14; } } lean_object* l_Lean_Meta_caseValueAux___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { uint8_t x_9; lean_object* x_10; x_9 = lean_unbox(x_3); lean_dec(x_3); x_10 = l_Lean_Meta_caseValueAux___lambda__1(x_1, x_2, x_9, x_4, x_5, x_6, x_7, x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_1); return x_10; } } lean_object* l_Lean_Meta_caseValueAux___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { lean_object* x_10; x_10 = l_Lean_Meta_caseValueAux___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); lean_dec(x_1); return x_10; } } static lean_object* _init_l_Lean_Meta_caseValue___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("h"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValue___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Meta_caseValue___closed__1; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } static lean_object* _init_l_Lean_Meta_caseValue___closed__3() { _start: { lean_object* x_1; x_1 = lean_mk_string("thenBranch"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValue___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Meta_caseValue___closed__3; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } static lean_object* 
_init_l_Lean_Meta_caseValue___closed__5() { _start: { lean_object* x_1; x_1 = lean_mk_string("elseBranch"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValue___closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Meta_caseValue___closed__5; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } lean_object* l_Lean_Meta_caseValue(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { lean_object* x_9; lean_object* x_10; lean_object* x_11; x_9 = lean_box(0); x_10 = l_Lean_Meta_caseValue___closed__2; lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); x_11 = l_Lean_Meta_caseValueAux(x_1, x_2, x_3, x_10, x_9, x_4, x_5, x_6, x_7, x_8); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; x_12 = lean_ctor_get(x_11, 0); lean_inc(x_12); x_13 = lean_ctor_get(x_11, 1); lean_inc(x_13); lean_dec(x_11); x_14 = lean_ctor_get(x_12, 0); lean_inc(x_14); x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); lean_dec(x_14); x_16 = l_Lean_Meta_caseValue___closed__4; x_17 = l_Lean_Meta_appendTagSuffix(x_15, x_16, x_4, x_5, x_6, x_7, x_13); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; x_18 = lean_ctor_get(x_17, 1); lean_inc(x_18); lean_dec(x_17); x_19 = lean_ctor_get(x_12, 1); lean_inc(x_19); x_20 = lean_ctor_get(x_19, 0); lean_inc(x_20); lean_dec(x_19); x_21 = l_Lean_Meta_caseValue___closed__6; x_22 = l_Lean_Meta_appendTagSuffix(x_20, x_21, x_4, x_5, x_6, x_7, x_18); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); if (lean_obj_tag(x_22) == 0) { uint8_t x_23; x_23 = !lean_is_exclusive(x_22); if (x_23 == 0) { lean_object* x_24; x_24 = lean_ctor_get(x_22, 0); lean_dec(x_24); lean_ctor_set(x_22, 0, x_12); return x_22; } else { lean_object* x_25; lean_object* x_26; x_25 = lean_ctor_get(x_22, 1); lean_inc(x_25); lean_dec(x_22); x_26 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_26, 0, x_12); lean_ctor_set(x_26, 1, x_25); return x_26; } } else { uint8_t x_27; lean_dec(x_12); x_27 = !lean_is_exclusive(x_22); if (x_27 == 0) { return x_22; } else { lean_object* x_28; lean_object* x_29; lean_object* x_30; x_28 = lean_ctor_get(x_22, 0); x_29 = lean_ctor_get(x_22, 1); lean_inc(x_29); lean_inc(x_28); lean_dec(x_22); x_30 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_30, 0, x_28); lean_ctor_set(x_30, 1, x_29); return x_30; } } } else { uint8_t x_31; lean_dec(x_12); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); x_31 = !lean_is_exclusive(x_17); if (x_31 == 0) { return x_17; } else { lean_object* x_32; lean_object* x_33; lean_object* x_34; x_32 = lean_ctor_get(x_17, 0); x_33 = lean_ctor_get(x_17, 1); lean_inc(x_33); lean_inc(x_32); lean_dec(x_17); x_34 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_34, 0, x_32); lean_ctor_set(x_34, 1, x_33); return x_34; } } } else { uint8_t x_35; lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); x_35 = !lean_is_exclusive(x_11); if (x_35 == 0) { return x_11; } else { lean_object* x_36; lean_object* x_37; lean_object* x_38; x_36 = lean_ctor_get(x_11, 0); x_37 = lean_ctor_get(x_11, 1); lean_inc(x_37); lean_inc(x_36); lean_dec(x_11); x_38 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_38, 0, x_36); lean_ctor_set(x_38, 1, x_37); return x_38; } } } } static lean_object* _init_l_Lean_Meta_CaseValuesSubgoal_newHs___default() { _start: { 
lean_object* x_1; x_1 = l_Array_empty___closed__1; return x_1; } } static lean_object* _init_l_Lean_Meta_CaseValuesSubgoal_subst___default() { _start: { lean_object* x_1; x_1 = lean_box(0); return x_1; } } static lean_object* _init_l_Lean_Meta_instInhabitedCaseValuesSubgoal___closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(0); x_2 = lean_box(0); x_3 = l_Array_empty___closed__1; x_4 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_4, 0, x_2); lean_ctor_set(x_4, 1, x_3); lean_ctor_set(x_4, 2, x_1); return x_4; } } static lean_object* _init_l_Lean_Meta_instInhabitedCaseValuesSubgoal() { _start: { lean_object* x_1; x_1 = l_Lean_Meta_instInhabitedCaseValuesSubgoal___closed__1; return x_1; } } lean_object* l_Lean_Meta_caseValues_loop_match__1___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { if (lean_obj_tag(x_1) == 1) { lean_object* x_4; uint64_t x_5; lean_object* x_6; lean_object* x_7; lean_dec(x_3); x_4 = lean_ctor_get(x_1, 0); lean_inc(x_4); x_5 = lean_ctor_get_uint64(x_1, sizeof(void*)*1); lean_dec(x_1); x_6 = lean_box_uint64(x_5); x_7 = lean_apply_2(x_2, x_4, x_6); return x_7; } else { lean_object* x_8; lean_dec(x_2); x_8 = lean_apply_1(x_3, x_1); return x_8; } } } lean_object* l_Lean_Meta_caseValues_loop_match__1(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValues_loop_match__1___rarg), 3, 0); return x_2; } } lean_object* l_Lean_Meta_caseValues_loop_match__2___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { if (lean_obj_tag(x_1) == 0) { lean_object* x_4; lean_object* x_5; lean_dec(x_3); x_4 = lean_box(0); x_5 = lean_apply_1(x_2, x_4); return x_5; } else { lean_object* x_6; lean_dec(x_2); x_6 = lean_apply_1(x_3, x_1); return x_6; } } } lean_object* l_Lean_Meta_caseValues_loop_match__2(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValues_loop_match__2___rarg), 3, 0); return x_2; } } lean_object* l_Lean_Meta_caseValues_loop_match__3___rarg(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; x_3 = lean_ctor_get(x_1, 0); lean_inc(x_3); x_4 = lean_ctor_get(x_1, 1); lean_inc(x_4); lean_dec(x_1); x_5 = lean_apply_2(x_2, x_3, x_4); return x_5; } } lean_object* l_Lean_Meta_caseValues_loop_match__3(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValues_loop_match__3___rarg), 2, 0); return x_2; } } lean_object* l_Lean_Meta_caseValues_loop_match__4___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { if (lean_obj_tag(x_3) == 0) { lean_object* x_8; lean_dec(x_7); x_8 = lean_apply_4(x_6, x_1, x_2, x_4, x_5); return x_8; } else { lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_dec(x_6); x_9 = lean_ctor_get(x_3, 0); lean_inc(x_9); x_10 = lean_ctor_get(x_3, 1); lean_inc(x_10); lean_dec(x_3); x_11 = lean_apply_6(x_7, x_1, x_2, x_9, x_10, x_4, x_5); return x_11; } } } lean_object* l_Lean_Meta_caseValues_loop_match__4(lean_object* x_1) { _start: { lean_object* x_2; x_2 = lean_alloc_closure((void*)(l_Lean_Meta_caseValues_loop_match__4___rarg), 7, 0); return x_2; } } lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Meta_caseValues_loop___spec__2(lean_object* x_1, lean_object* x_2, size_t x_3, size_t x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* 
x_10) { _start: { uint8_t x_11; x_11 = x_3 == x_4; if (x_11 == 0) { lean_object* x_12; lean_object* x_13; x_12 = lean_array_uget(x_2, x_3); lean_inc(x_1); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); x_13 = lean_apply_7(x_1, x_5, x_12, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; size_t x_16; size_t x_17; x_14 = lean_ctor_get(x_13, 0); lean_inc(x_14); x_15 = lean_ctor_get(x_13, 1); lean_inc(x_15); lean_dec(x_13); x_16 = 1; x_17 = x_3 + x_16; x_3 = x_17; x_5 = x_14; x_10 = x_15; goto _start; } else { uint8_t x_19; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_1); x_19 = !lean_is_exclusive(x_13); if (x_19 == 0) { return x_13; } else { lean_object* x_20; lean_object* x_21; lean_object* x_22; x_20 = lean_ctor_get(x_13, 0); x_21 = lean_ctor_get(x_13, 1); lean_inc(x_21); lean_inc(x_20); lean_dec(x_13); x_22 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_22, 0, x_20); lean_ctor_set(x_22, 1, x_21); return x_22; } } } else { lean_object* x_23; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_1); x_23 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_23, 0, x_5); lean_ctor_set(x_23, 1, x_10); return x_23; } } } lean_object* l_Array_foldlMUnsafe___at_Lean_Meta_caseValues_loop___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { uint8_t x_11; x_11 = lean_nat_dec_lt(x_4, x_5); if (x_11 == 0) { lean_object* x_12; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_1); x_12 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_12, 0, x_2); lean_ctor_set(x_12, 1, x_10); return x_12; } else { lean_object* x_13; uint8_t x_14; x_13 = lean_array_get_size(x_3); x_14 = lean_nat_dec_le(x_5, x_13); lean_dec(x_13); if (x_14 == 0) { lean_object* x_15; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_1); x_15 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_15, 0, x_2); lean_ctor_set(x_15, 1, x_10); return x_15; } else { size_t x_16; size_t x_17; lean_object* x_18; x_16 = lean_usize_of_nat(x_4); x_17 = lean_usize_of_nat(x_5); x_18 = l_Array_foldlMUnsafe_fold___at_Lean_Meta_caseValues_loop___spec__2(x_1, x_3, x_16, x_17, x_2, x_6, x_7, x_8, x_9, x_10); return x_18; } } } } lean_object* l_Lean_Meta_caseValues_loop___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { lean_object* x_9; x_9 = l_Lean_Meta_FVarSubst_get(x_1, x_3); if (lean_obj_tag(x_9) == 1) { lean_object* x_10; lean_object* x_11; x_10 = lean_ctor_get(x_9, 0); lean_inc(x_10); lean_dec(x_9); x_11 = l_Lean_Meta_tryClear(x_2, x_10, x_4, x_5, x_6, x_7, x_8); return x_11; } else { lean_object* x_12; lean_dec(x_9); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); x_12 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_12, 0, x_2); lean_ctor_set(x_12, 1, x_8); return x_12; } } } static lean_object* _init_l_Lean_Meta_caseValues_loop___closed__1() { _start: { lean_object* x_1; x_1 = lean_mk_string("caseValues"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValues_loop___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Meta_caseValues_loop___closed__1; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } static lean_object* _init_l_Lean_Meta_caseValues_loop___closed__3() { _start: { lean_object* x_1; 
x_1 = lean_mk_string("list of values must not be empty"); return x_1; } } static lean_object* _init_l_Lean_Meta_caseValues_loop___closed__4() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_Meta_caseValues_loop___closed__3; x_2 = lean_alloc_ctor(2, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } static lean_object* _init_l_Lean_Meta_caseValues_loop___closed__5() { _start: { lean_object* x_1; lean_object* x_2; x_1 = l_Lean_Meta_caseValues_loop___closed__4; x_2 = lean_alloc_ctor(0, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } static lean_object* _init_l_Lean_Meta_caseValues_loop___closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_Parser_Tactic_case___closed__1; x_3 = lean_name_mk_string(x_1, x_2); return x_3; } } lean_object* l_Lean_Meta_caseValues_loop(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { if (lean_obj_tag(x_5) == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_dec(x_7); lean_dec(x_6); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_13 = l_Lean_Meta_caseValues_loop___closed__2; x_14 = l_Lean_Meta_caseValues_loop___closed__5; x_15 = lean_box(0); x_16 = l_Lean_Meta_throwTacticEx___rarg(x_13, x_4, x_14, x_15, x_8, x_9, x_10, x_11, x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); return x_16; } else { lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; x_17 = lean_ctor_get(x_5, 0); lean_inc(x_17); x_18 = lean_ctor_get(x_5, 1); lean_inc(x_18); lean_dec(x_5); lean_inc(x_3); lean_inc(x_2); x_19 = l_Lean_Name_appendIndexAfter(x_2, x_3); x_20 = lean_box(0); lean_inc(x_11); lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_1); x_21 = l_Lean_Meta_caseValueAux(x_4, x_1, x_17, x_19, x_20, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_21) == 0) { lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; x_22 = lean_ctor_get(x_21, 0); lean_inc(x_22); x_23 = lean_ctor_get(x_22, 0); lean_inc(x_23); x_24 = lean_ctor_get(x_21, 1); lean_inc(x_24); lean_dec(x_21); x_25 = lean_ctor_get(x_22, 1); lean_inc(x_25); lean_dec(x_22); x_26 = lean_ctor_get(x_23, 0); lean_inc(x_26); x_27 = lean_ctor_get(x_23, 1); lean_inc(x_27); x_28 = lean_ctor_get(x_23, 2); lean_inc(x_28); lean_dec(x_23); x_29 = l_Lean_Meta_caseValues_loop___closed__6; lean_inc(x_3); x_30 = l_Lean_Name_appendIndexAfter(x_29, x_3); lean_inc(x_26); x_31 = l_Lean_Meta_appendTagSuffix(x_26, x_30, x_8, x_9, x_10, x_11, x_24); if (lean_obj_tag(x_31) == 0) { lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; x_32 = lean_ctor_get(x_31, 1); lean_inc(x_32); lean_dec(x_31); lean_inc(x_28); x_33 = lean_alloc_closure((void*)(l_Lean_Meta_caseValues_loop___lambda__1___boxed), 8, 1); lean_closure_set(x_33, 0, x_28); x_34 = lean_array_get_size(x_6); x_35 = lean_unsigned_to_nat(0u); lean_inc(x_11); lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); x_36 = l_Array_foldlMUnsafe___at_Lean_Meta_caseValues_loop___spec__1(x_33, x_26, x_6, x_35, x_34, x_8, x_9, x_10, x_11, x_32); lean_dec(x_34); if (lean_obj_tag(x_36) == 0) { lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* 
x_42; x_37 = lean_ctor_get(x_36, 0); lean_inc(x_37); x_38 = lean_ctor_get(x_36, 1); lean_inc(x_38); lean_dec(x_36); x_39 = l_Lean_mkOptionalNode___closed__2; x_40 = lean_array_push(x_39, x_27); x_41 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_41, 0, x_37); lean_ctor_set(x_41, 1, x_40); lean_ctor_set(x_41, 2, x_28); x_42 = lean_array_push(x_7, x_41); if (lean_obj_tag(x_18) == 0) { lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_dec(x_2); lean_dec(x_1); x_43 = lean_ctor_get(x_25, 0); lean_inc(x_43); x_44 = lean_ctor_get(x_25, 1); lean_inc(x_44); lean_dec(x_25); x_45 = lean_unsigned_to_nat(1u); x_46 = lean_nat_add(x_3, x_45); lean_dec(x_3); x_47 = l_Lean_Name_appendIndexAfter(x_29, x_46); lean_inc(x_43); x_48 = l_Lean_Meta_appendTagSuffix(x_43, x_47, x_8, x_9, x_10, x_11, x_38); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); if (lean_obj_tag(x_48) == 0) { uint8_t x_49; x_49 = !lean_is_exclusive(x_48); if (x_49 == 0) { lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; x_50 = lean_ctor_get(x_48, 0); lean_dec(x_50); x_51 = lean_array_push(x_6, x_44); x_52 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_52, 0, x_43); lean_ctor_set(x_52, 1, x_51); lean_ctor_set(x_52, 2, x_20); x_53 = lean_array_push(x_42, x_52); lean_ctor_set(x_48, 0, x_53); return x_48; } else { lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; x_54 = lean_ctor_get(x_48, 1); lean_inc(x_54); lean_dec(x_48); x_55 = lean_array_push(x_6, x_44); x_56 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_56, 0, x_43); lean_ctor_set(x_56, 1, x_55); lean_ctor_set(x_56, 2, x_20); x_57 = lean_array_push(x_42, x_56); x_58 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_58, 0, x_57); lean_ctor_set(x_58, 1, x_54); return x_58; } } else { uint8_t x_59; lean_dec(x_44); lean_dec(x_43); lean_dec(x_42); lean_dec(x_6); x_59 = !lean_is_exclusive(x_48); if (x_59 == 0) { return x_48; } else { lean_object* x_60; lean_object* x_61; lean_object* x_62; x_60 = lean_ctor_get(x_48, 0); x_61 = lean_ctor_get(x_48, 1); lean_inc(x_61); lean_inc(x_60); lean_dec(x_48); x_62 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_62, 0, x_60); lean_ctor_set(x_62, 1, x_61); return x_62; } } } else { lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; x_63 = lean_unsigned_to_nat(1u); x_64 = lean_nat_add(x_3, x_63); lean_dec(x_3); x_65 = lean_ctor_get(x_25, 0); lean_inc(x_65); x_66 = lean_ctor_get(x_25, 1); lean_inc(x_66); lean_dec(x_25); x_67 = lean_array_push(x_6, x_66); x_3 = x_64; x_4 = x_65; x_5 = x_18; x_6 = x_67; x_7 = x_42; x_12 = x_38; goto _start; } } else { uint8_t x_69; lean_dec(x_28); lean_dec(x_27); lean_dec(x_25); lean_dec(x_18); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_69 = !lean_is_exclusive(x_36); if (x_69 == 0) { return x_36; } else { lean_object* x_70; lean_object* x_71; lean_object* x_72; x_70 = lean_ctor_get(x_36, 0); x_71 = lean_ctor_get(x_36, 1); lean_inc(x_71); lean_inc(x_70); lean_dec(x_36); x_72 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_72, 0, x_70); lean_ctor_set(x_72, 1, x_71); return x_72; } } } else { uint8_t x_73; lean_dec(x_28); lean_dec(x_27); lean_dec(x_26); lean_dec(x_25); lean_dec(x_18); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_73 = !lean_is_exclusive(x_31); if (x_73 == 
0) { return x_31; } else { lean_object* x_74; lean_object* x_75; lean_object* x_76; x_74 = lean_ctor_get(x_31, 0); x_75 = lean_ctor_get(x_31, 1); lean_inc(x_75); lean_inc(x_74); lean_dec(x_31); x_76 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_76, 0, x_74); lean_ctor_set(x_76, 1, x_75); return x_76; } } } else { uint8_t x_77; lean_dec(x_18); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_77 = !lean_is_exclusive(x_21); if (x_77 == 0) { return x_21; } else { lean_object* x_78; lean_object* x_79; lean_object* x_80; x_78 = lean_ctor_get(x_21, 0); x_79 = lean_ctor_get(x_21, 1); lean_inc(x_79); lean_inc(x_78); lean_dec(x_21); x_80 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_80, 0, x_78); lean_ctor_set(x_80, 1, x_79); return x_80; } } } } } lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Meta_caseValues_loop___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { size_t x_11; size_t x_12; lean_object* x_13; x_11 = lean_unbox_usize(x_3); lean_dec(x_3); x_12 = lean_unbox_usize(x_4); lean_dec(x_4); x_13 = l_Array_foldlMUnsafe_fold___at_Lean_Meta_caseValues_loop___spec__2(x_1, x_2, x_11, x_12, x_5, x_6, x_7, x_8, x_9, x_10); lean_dec(x_2); return x_13; } } lean_object* l_Array_foldlMUnsafe___at_Lean_Meta_caseValues_loop___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; x_11 = l_Array_foldlMUnsafe___at_Lean_Meta_caseValues_loop___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); return x_11; } } lean_object* l_Lean_Meta_caseValues_loop___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { lean_object* x_9; x_9 = l_Lean_Meta_caseValues_loop___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); lean_dec(x_1); return x_9; } } lean_object* l_Lean_Meta_caseValues(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; x_10 = lean_array_to_list(lean_box(0), x_3); x_11 = lean_unsigned_to_nat(1u); x_12 = l_Array_empty___closed__1; x_13 = l_Lean_Meta_caseValues_loop(x_2, x_4, x_11, x_1, x_10, x_12, x_12, x_5, x_6, x_7, x_8, x_9); return x_13; } } lean_object* initialize_Init(lean_object*); lean_object* initialize_Lean_Meta_Tactic_Subst(lean_object*); lean_object* initialize_Lean_Meta_Tactic_Clear(lean_object*); static bool _G_initialized = false; lean_object* initialize_Lean_Meta_Match_CaseValues(lean_object* w) { lean_object * res; if (_G_initialized) return lean_io_result_mk_ok(lean_box(0)); _G_initialized = true; res = initialize_Init(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = initialize_Lean_Meta_Tactic_Subst(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); res = initialize_Lean_Meta_Tactic_Clear(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); l_Lean_Meta_CaseValueSubgoal_subst___default = 
_init_l_Lean_Meta_CaseValueSubgoal_subst___default(); lean_mark_persistent(l_Lean_Meta_CaseValueSubgoal_subst___default); l_Lean_Meta_instInhabitedCaseValueSubgoal___closed__1 = _init_l_Lean_Meta_instInhabitedCaseValueSubgoal___closed__1(); lean_mark_persistent(l_Lean_Meta_instInhabitedCaseValueSubgoal___closed__1); l_Lean_Meta_instInhabitedCaseValueSubgoal = _init_l_Lean_Meta_instInhabitedCaseValueSubgoal(); lean_mark_persistent(l_Lean_Meta_instInhabitedCaseValueSubgoal); l_Lean_Meta_caseValueAux___lambda__1___closed__1 = _init_l_Lean_Meta_caseValueAux___lambda__1___closed__1(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__1___closed__1); l_Lean_Meta_caseValueAux___lambda__1___closed__2 = _init_l_Lean_Meta_caseValueAux___lambda__1___closed__2(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__1___closed__2); l_Lean_Meta_caseValueAux___lambda__2___closed__1 = _init_l_Lean_Meta_caseValueAux___lambda__2___closed__1(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__2___closed__1); l_Lean_Meta_caseValueAux___lambda__2___closed__2 = _init_l_Lean_Meta_caseValueAux___lambda__2___closed__2(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__2___closed__2); l_Lean_Meta_caseValueAux___lambda__2___closed__3 = _init_l_Lean_Meta_caseValueAux___lambda__2___closed__3(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__2___closed__3); l_Lean_Meta_caseValueAux___lambda__2___closed__4 = _init_l_Lean_Meta_caseValueAux___lambda__2___closed__4(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__2___closed__4); l_Lean_Meta_caseValueAux___lambda__3___closed__1 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__1(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__1); l_Lean_Meta_caseValueAux___lambda__3___closed__2 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__2(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__2); l_Lean_Meta_caseValueAux___lambda__3___closed__3 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__3(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__3); l_Lean_Meta_caseValueAux___lambda__3___closed__4 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__4(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__4); l_Lean_Meta_caseValueAux___lambda__3___closed__5 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__5(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__5); l_Lean_Meta_caseValueAux___lambda__3___closed__6 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__6(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__6); l_Lean_Meta_caseValueAux___lambda__3___closed__7 = _init_l_Lean_Meta_caseValueAux___lambda__3___closed__7(); lean_mark_persistent(l_Lean_Meta_caseValueAux___lambda__3___closed__7); l_Lean_Meta_caseValue___closed__1 = _init_l_Lean_Meta_caseValue___closed__1(); lean_mark_persistent(l_Lean_Meta_caseValue___closed__1); l_Lean_Meta_caseValue___closed__2 = _init_l_Lean_Meta_caseValue___closed__2(); lean_mark_persistent(l_Lean_Meta_caseValue___closed__2); l_Lean_Meta_caseValue___closed__3 = _init_l_Lean_Meta_caseValue___closed__3(); lean_mark_persistent(l_Lean_Meta_caseValue___closed__3); l_Lean_Meta_caseValue___closed__4 = _init_l_Lean_Meta_caseValue___closed__4(); lean_mark_persistent(l_Lean_Meta_caseValue___closed__4); l_Lean_Meta_caseValue___closed__5 = _init_l_Lean_Meta_caseValue___closed__5(); lean_mark_persistent(l_Lean_Meta_caseValue___closed__5); l_Lean_Meta_caseValue___closed__6 = 
_init_l_Lean_Meta_caseValue___closed__6(); lean_mark_persistent(l_Lean_Meta_caseValue___closed__6); l_Lean_Meta_CaseValuesSubgoal_newHs___default = _init_l_Lean_Meta_CaseValuesSubgoal_newHs___default(); lean_mark_persistent(l_Lean_Meta_CaseValuesSubgoal_newHs___default); l_Lean_Meta_CaseValuesSubgoal_subst___default = _init_l_Lean_Meta_CaseValuesSubgoal_subst___default(); lean_mark_persistent(l_Lean_Meta_CaseValuesSubgoal_subst___default); l_Lean_Meta_instInhabitedCaseValuesSubgoal___closed__1 = _init_l_Lean_Meta_instInhabitedCaseValuesSubgoal___closed__1(); lean_mark_persistent(l_Lean_Meta_instInhabitedCaseValuesSubgoal___closed__1); l_Lean_Meta_instInhabitedCaseValuesSubgoal = _init_l_Lean_Meta_instInhabitedCaseValuesSubgoal(); lean_mark_persistent(l_Lean_Meta_instInhabitedCaseValuesSubgoal); l_Lean_Meta_caseValues_loop___closed__1 = _init_l_Lean_Meta_caseValues_loop___closed__1(); lean_mark_persistent(l_Lean_Meta_caseValues_loop___closed__1); l_Lean_Meta_caseValues_loop___closed__2 = _init_l_Lean_Meta_caseValues_loop___closed__2(); lean_mark_persistent(l_Lean_Meta_caseValues_loop___closed__2); l_Lean_Meta_caseValues_loop___closed__3 = _init_l_Lean_Meta_caseValues_loop___closed__3(); lean_mark_persistent(l_Lean_Meta_caseValues_loop___closed__3); l_Lean_Meta_caseValues_loop___closed__4 = _init_l_Lean_Meta_caseValues_loop___closed__4(); lean_mark_persistent(l_Lean_Meta_caseValues_loop___closed__4); l_Lean_Meta_caseValues_loop___closed__5 = _init_l_Lean_Meta_caseValues_loop___closed__5(); lean_mark_persistent(l_Lean_Meta_caseValues_loop___closed__5); l_Lean_Meta_caseValues_loop___closed__6 = _init_l_Lean_Meta_caseValues_loop___closed__6(); lean_mark_persistent(l_Lean_Meta_caseValues_loop___closed__6); return lean_io_result_mk_ok(lean_box(0)); } #ifdef __cplusplus } #endif
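/* Note (added for readability, not part of the generated output): the code in this
 * file appears to be C emitted by the Lean 4 compiler for the `Lean.Meta.Match.CaseValues`
 * module (the `caseValue`/`caseValues` tactics).  The `_init_*` functions build cached
 * closed constants, the `*___boxed` wrappers adapt boxed/unboxed arguments, and
 * `initialize_Lean_Meta_Match_CaseValues` initializes the module's dependencies and
 * persistent objects exactly once.
 *
 * A minimal sketch of how another translation unit might initialize this module,
 * mirroring the pattern this file itself uses for its imports.  The
 * CASEVALUES_INIT_EXAMPLE guard and the helper name are assumptions added purely
 * for illustration. */
#ifdef CASEVALUES_INIT_EXAMPLE
static int example_init_casevalues_module(void)
{
    /* Run the module initializer with a fresh IO world token. */
    lean_object * res = initialize_Lean_Meta_Match_CaseValues(lean_io_mk_world());
    if (lean_io_result_is_error(res))
        return -1;      /* initialization failed */
    lean_dec_ref(res);  /* drop the successful IO result */
    return 0;
}
#endif /* CASEVALUES_INIT_EXAMPLE */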
714400.c
/* $Id: getroute.c,v 1.4 2013/02/06 10:50:04 nanard Exp $ */
/* MiniUPnP project
 * http://miniupnp.free.fr/ or http://miniupnp.tuxfamily.org/
 * (c) 2006-2013 Thomas Bernard
 * This software is subject to the conditions detailed
 * in the LICENCE file provided within the distribution */

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <syslog.h>
#include <linux/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
/*#include <linux/in_route.h>*/
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <libnfnetlink/libnfnetlink.h>

#include "../getroute.h"
#include "../upnputils.h"

int get_src_for_route_to(const struct sockaddr * dst,
                         void * src, size_t * src_len,
                         int * index)
{
    int fd = -1;
    struct nlmsghdr *h;
    int status;
    struct {
        struct nlmsghdr n;
        struct rtmsg r;
        char buf[1024];
    } req;
    struct sockaddr_nl nladdr;
    struct iovec iov = {
        .iov_base = (void*) &req.n,
    };
    struct msghdr msg = {
        .msg_name = &nladdr,
        .msg_namelen = sizeof(nladdr),
        .msg_iov = &iov,
        .msg_iovlen = 1,
    };
    const struct sockaddr_in * dst4;
    const struct sockaddr_in6 * dst6;

    memset(&req, 0, sizeof(req));
    req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
    req.n.nlmsg_flags = NLM_F_REQUEST;
    req.n.nlmsg_type = RTM_GETROUTE;
    req.r.rtm_family = dst->sa_family;
    req.r.rtm_table = 0;
    req.r.rtm_protocol = 0;
    req.r.rtm_scope = 0;
    req.r.rtm_type = 0;
    req.r.rtm_src_len = 0;
    req.r.rtm_dst_len = 0;
    req.r.rtm_tos = 0;
    {
        char dst_str[128];
        sockaddr_to_string(dst, dst_str, sizeof(dst_str));
        syslog(LOG_DEBUG, "get_src_for_route_to (%s)", dst_str);
    }
    /* add address */
    if(dst->sa_family == AF_INET) {
        dst4 = (const struct sockaddr_in *)dst;
        nfnl_addattr_l(&req.n, sizeof(req), RTA_DST, &dst4->sin_addr, 4);
        req.r.rtm_dst_len = 32;
    } else {
        dst6 = (const struct sockaddr_in6 *)dst;
        nfnl_addattr_l(&req.n, sizeof(req), RTA_DST, &dst6->sin6_addr, 16);
        req.r.rtm_dst_len = 128;
    }

    fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    if (fd < 0) {
        syslog(LOG_ERR, "socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE) : %m");
        return -1;
    }
    memset(&nladdr, 0, sizeof(nladdr));
    nladdr.nl_family = AF_NETLINK;

    req.n.nlmsg_seq = 1;
    iov.iov_len = req.n.nlmsg_len;

    status = sendmsg(fd, &msg, 0);
    if (status < 0) {
        syslog(LOG_ERR, "sendmsg(rtnetlink) : %m");
        goto error;
    }

    memset(&req, 0, sizeof(req));

    for(;;) {
        iov.iov_len = sizeof(req);
        status = recvmsg(fd, &msg, 0);
        if(status < 0) {
            if (errno == EINTR || errno == EAGAIN)
                continue;
            syslog(LOG_ERR, "recvmsg(rtnetlink) %m");
            goto error;
        }
        if(status == 0) {
            syslog(LOG_ERR, "recvmsg(rtnetlink) EOF");
            goto error;
        }
        for (h = (struct nlmsghdr*)&req.n; status >= (int)sizeof(*h); ) {
            int len = h->nlmsg_len;
            int l = len - sizeof(*h);

            if (l<0 || len>status) {
                if (msg.msg_flags & MSG_TRUNC) {
                    syslog(LOG_ERR, "Truncated message");
                }
                syslog(LOG_ERR, "malformed message: len=%d", len);
                goto error;
            }

            if(nladdr.nl_pid != 0 || h->nlmsg_seq != 1/*seq*/) {
                syslog(LOG_ERR, "wrong seq = %d\n", h->nlmsg_seq);
                /* Don't forget to skip that message. */
                status -= NLMSG_ALIGN(len);
                h = (struct nlmsghdr*)((char*)h + NLMSG_ALIGN(len));
                continue;
            }

            if(h->nlmsg_type == NLMSG_ERROR) {
                struct nlmsgerr *err = (struct nlmsgerr*)NLMSG_DATA(h);
                syslog(LOG_ERR, "NLMSG_ERROR %d : %s",
                       err->error, strerror(-err->error));
                goto error;
            }
            if(h->nlmsg_type == RTM_NEWROUTE) {
                struct rtattr * rta;
                int len = h->nlmsg_len;
                len -= NLMSG_LENGTH(sizeof(struct rtmsg));
                for(rta = RTM_RTA(NLMSG_DATA((h))); RTA_OK(rta, len); rta = RTA_NEXT(rta,len)) {
                    unsigned char * data = RTA_DATA(rta);
                    if(rta->rta_type == RTA_PREFSRC) {
                        if(src_len && src) {
                            if(*src_len < RTA_PAYLOAD(rta)) {
                                syslog(LOG_WARNING, "cannot copy src: %u<%lu",
                                       (unsigned)*src_len, RTA_PAYLOAD(rta));
                                goto error;
                            }
                            *src_len = RTA_PAYLOAD(rta);
                            memcpy(src, data, RTA_PAYLOAD(rta));
                        }
                    } else if(rta->rta_type == RTA_OIF) {
                        if(index)
                            memcpy(index, data, sizeof(int));
                    }
                }
                close(fd);
                return 0;
            }
            status -= NLMSG_ALIGN(len);
            h = (struct nlmsghdr*)((char*)h + NLMSG_ALIGN(len));
        }
    }
    syslog(LOG_WARNING, "get_src_for_route_to() : src not found");
error:
    if(fd >= 0)
        close(fd);
    return -1;
}
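
/* Illustrative usage sketch (not part of the original miniupnpd source): it shows
 * how get_src_for_route_to() above might be called to ask the kernel which local
 * IPv4 address and interface index it would use to reach a given destination.
 * The destination string, the helper name and the GETROUTE_USAGE_EXAMPLE guard
 * are assumptions added for illustration only. */
#ifdef GETROUTE_USAGE_EXAMPLE
#include <arpa/inet.h>

static void example_print_src_for(const char * dst_ip)
{
    struct sockaddr_in dst;
    struct in_addr src;
    size_t src_len = sizeof(src);   /* in: buffer size, out: bytes written */
    int ifindex = -1;
    char buf[INET_ADDRSTRLEN];

    memset(&dst, 0, sizeof(dst));
    dst.sin_family = AF_INET;
    inet_pton(AF_INET, dst_ip, &dst.sin_addr);

    if (get_src_for_route_to((const struct sockaddr *)&dst,
                             &src, &src_len, &ifindex) == 0) {
        printf("src=%s ifindex=%d\n",
               inet_ntop(AF_INET, &src, buf, sizeof(buf)), ifindex);
    }
}
#endif /* GETROUTE_USAGE_EXAMPLE */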
554652.c
/* * Copyright (C) 2011 Cary R. ([email protected]) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "sys_priv.h" #include <assert.h> #include <inttypes.h> #include <stdlib.h> /* * The two queue types. */ #define IVL_QUEUE_FIFO 1 #define IVL_QUEUE_LIFO 2 /* * The statistical codes that can be passed to $q_exam(). */ #define IVL_QUEUE_LENGTH 1 #define IVL_QUEUE_MEAN 2 #define IVL_QUEUE_MAX_LENGTH 3 #define IVL_QUEUE_SHORTEST 4 #define IVL_QUEUE_LONGEST 5 #define IVL_QUEUE_AVERAGE 6 /* * All the values that can be returned by the queue tasks/function. */ #define IVL_QUEUE_OK 0 #define IVL_QUEUE_FULL 1 #define IVL_QUEUE_UNDEFINED_ID 2 #define IVL_QUEUE_EMPTY 3 #define IVL_QUEUE_UNSUPPORTED_TYPE 4 #define IVL_QUEUE_INVALID_LENGTH 5 #define IVL_QUEUE_DUPLICATE_ID 6 #define IVL_QUEUE_OUT_OF_MEMORY 7 /* Icarus specific status codes. */ #define IVL_QUEUE_UNDEFINED_STAT_CODE 8 #define IVL_QUEUE_VALUE_OVERFLOWED 9 #define IVL_QUEUE_NO_STATISTICS 10 /* * Routine to add the given time to the the total time (high/low). */ void add_to_wait_time(uint64_t *high, uint64_t *low, uint64_t c_time) { uint64_t carry = 0U; if ((UINT64_MAX - *low) < c_time) carry = 1U; *low += c_time; assert((carry == 0U) || (*high < UINT64_MAX)); *high += carry; } /* * Routine to divide the given total time (high/low) by the number of * items to get the average. */ uint64_t calc_average_wait_time(uint64_t high, uint64_t low, uint64_t total) { int bit = 64; uint64_t result = 0U; assert(total != 0U); if (high == 0U) return (low/total); /* This is true by design, but since we can only return 64 bits * make sure nothing went wrong. */ assert(high < total); /* It's a big value so calculate the average the long way. */ do { unsigned carry = 0U; /* Copy bits from low to high until we have a bit to place * in the result or there are no bits left. */ while ((bit >= 0) && (high < total) && !carry) { /* If the MSB is set then we will have a carry. */ if (high > (UINT64_MAX >> 1)) carry = 1U; high <<= 1; high |= (low & 0x8000000000000000) != 0; low <<= 1; bit -= 1; } /* If this is a valid bit, set the appropriate bit in the * result and subtract the total from the current value. */ if (bit >= 0) { result |= UINT64_C(1) << bit; high = high - total; } /* Loop until there are no bits left. */ } while (bit > 0); return result; } /* * The data structure used for an individual queue element. It hold four * state result for the jobs and inform fields along with the time that * the element was added in base time units. */ typedef struct t_ivl_queue_elem { uint64_t time; s_vpi_vecval job; s_vpi_vecval inform; } s_ivl_queue_elem, *p_ivl_queue_elem; /* * This structure is used to represent a specific queue. The time * information is in base simulation units. 
*/ typedef struct t_ivl_queue_base { uint64_t shortest_wait_time; uint64_t first_add_time; uint64_t latest_add_time; uint64_t wait_time_high; uint64_t wait_time_low; uint64_t number_of_adds; p_ivl_queue_elem queue; PLI_INT32 id; PLI_INT32 length; PLI_INT32 type; PLI_INT32 head; PLI_INT32 elems; PLI_INT32 max_len; PLI_INT32 have_shortest_statistic; } s_ivl_queue_base, *p_ivl_queue_base; /* * For now we keep the queues in a vector since there are likely not too many * of them. We may need something more efficient later. */ static p_ivl_queue_base base = NULL; static int64_t base_len = 0; /* * This routine is called at the end of simulation to free the queue memory. */ static PLI_INT32 cleanup_queue(p_cb_data cause) { PLI_INT32 idx; (void) cause; /* Unused argument. */ for (idx = 0; idx < base_len; idx += 1) free(base[idx].queue); free(base); base = NULL; base_len = 0; return 0; } /* * Add a new queue to the list, return 1 if there is not enough memory, * otherwise return 0. */ static unsigned create_queue(PLI_INT32 id, PLI_INT32 type, PLI_INT32 length) { p_ivl_queue_base new_base; p_ivl_queue_elem queue; /* Allocate space for the new queue base. */ base_len += 1; new_base = (p_ivl_queue_base) realloc(base, base_len*sizeof(s_ivl_queue_base)); /* If we ran out of memory then fix the length and return a fail. */ if (new_base == NULL) { base_len -= 1; return 1; } base = new_base; /* Allocate space for the queue elements. */ queue = (p_ivl_queue_elem) malloc(length*sizeof(s_ivl_queue_elem)); /* If we ran out of memory then fix the length and return a fail. */ if (queue == NULL) { base_len -= 1; return 1; } /* The memory was allocated so configure it. */ base[base_len-1].queue = queue; base[base_len-1].id = id; base[base_len-1].length = length; base[base_len-1].type = type; base[base_len-1].head = 0; base[base_len-1].elems = 0; base[base_len-1].max_len = 0; base[base_len-1].shortest_wait_time = UINT64_MAX; base[base_len-1].first_add_time = 0U; base[base_len-1].latest_add_time = 0U; base[base_len-1].wait_time_high = 0U; base[base_len-1].wait_time_low = 0U; base[base_len-1].number_of_adds = 0U; base[base_len-1].have_shortest_statistic = 0; return 0; } /* * Check to see if the given queue is full. */ static unsigned is_queue_full(int64_t idx) { if (base[idx].elems >= base[idx].length) return 1; return 0; } /* * Add the job and inform to the queue. Return 1 if the queue is full, * otherwise return 0. */ static unsigned add_to_queue(int64_t idx, p_vpi_vecval job, p_vpi_vecval inform) { PLI_INT32 length = base[idx].length; PLI_INT32 type = base[idx].type; PLI_INT32 head = base[idx].head; PLI_INT32 elems = base[idx].elems; PLI_INT32 loc; s_vpi_time cur_time; uint64_t c_time; assert(elems <= length); /* If the queue is full we can't add anything. */ if (elems == length) return 1; /* Increment the number of element since one will be added.*/ base[idx].elems += 1; /* Save the job and inform to the queue. */ if (type == IVL_QUEUE_LIFO) { assert(head == 0); /* For a LIFO head must always be zero. */ loc = elems; } else { assert(type == IVL_QUEUE_FIFO); loc = head + elems; if (loc >= length) loc -= length; } base[idx].queue[loc].job.aval = job->aval; base[idx].queue[loc].job.bval = job->bval; base[idx].queue[loc].inform.aval = inform->aval; base[idx].queue[loc].inform.bval = inform->bval; /* Save the current time with this entry for the statistics. 
*/ cur_time.type = vpiSimTime; vpi_get_time(NULL, &cur_time); c_time = cur_time.high; c_time <<= 32; c_time |= cur_time.low; base[idx].queue[loc].time = c_time; /* Increment the maximum length if needed. */ if (base[idx].max_len == elems) base[idx].max_len += 1; /* Update the inter-arrival statistics. */ assert(base[idx].number_of_adds < UINT64_MAX); base[idx].number_of_adds += 1; if (base[idx].number_of_adds == 1) base[idx].first_add_time = c_time; base[idx].latest_add_time = c_time; return 0; } /* * Get the job and inform values from the queue. Return 1 if the queue is * empty, otherwise return 0. */ static unsigned remove_from_queue(int64_t idx, p_vpi_vecval job, p_vpi_vecval inform) { PLI_INT32 type = base[idx].type; PLI_INT32 head = base[idx].head; PLI_INT32 elems = base[idx].elems - 1; PLI_INT32 loc; s_vpi_time cur_time; uint64_t c_time; assert(elems >= -1); /* If the queue is empty we can't remove anything. */ if (elems < 0) return 1; /* Decrement the number of element in the queue structure since one * will be removed.*/ base[idx].elems -= 1; /* Remove the job and inform from the queue. */ if (type == IVL_QUEUE_LIFO) { assert(head == 0); /* For a LIFO head must always be zero. */ loc = elems; } else { assert(type == IVL_QUEUE_FIFO); loc = head; if (head + 1 == base[idx].length) base[idx].head = 0; else base[idx].head += 1; } job->aval = base[idx].queue[loc].job.aval; job->bval = base[idx].queue[loc].job.bval; inform->aval = base[idx].queue[loc].inform.aval; inform->bval = base[idx].queue[loc].inform.bval; /* Get the current simulation time. */ cur_time.type = vpiSimTime; vpi_get_time(NULL, &cur_time); c_time = cur_time.high; c_time <<= 32; c_time |= cur_time.low; /* Set the shortest wait time if needed. */ assert(c_time >= base[idx].queue[loc].time); c_time -= base[idx].queue[loc].time; if (c_time < base[idx].shortest_wait_time) { base[idx].shortest_wait_time = c_time; } base[idx].have_shortest_statistic = 1; /* Add the current element wait time to the total wait time. */ add_to_wait_time(&(base[idx].wait_time_high), &(base[idx].wait_time_low), c_time); return 0; } /* * Return the current queue length. */ static PLI_INT32 get_current_queue_length(int64_t idx) { return base[idx].elems; } /* * Return the maximum queue length. */ static PLI_INT32 get_maximum_queue_length(int64_t idx) { return base[idx].max_len; } /* * Return the longest wait time in the queue in base simulation units. * Make sure to check that there are elements in the queue before calling * this routine. The caller will need to scale the time as appropriate. */ static uint64_t get_longest_queue_time(int64_t idx) { s_vpi_time cur_time; uint64_t c_time; /* Get the current simulation time. */ cur_time.type = vpiSimTime; vpi_get_time(NULL, &cur_time); c_time = cur_time.high; c_time <<= 32; c_time |= cur_time.low; /* Subtract the element with the longest time (the head) from the * current time. */ assert(c_time >= base[idx].queue[base[idx].head].time); c_time -= base[idx].queue[base[idx].head].time; return c_time; } /* * Check to see if there are inter-arrival time statistics. */ static unsigned have_interarrival_statistic(int64_t idx) { return (base[idx].number_of_adds >= 2U); } /* * Return the mean inter-arrival time for the queue. This is just the * latest add time minus the first add time divided be the number of time * deltas (the number of adds - 1). 
*/ static uint64_t get_mean_interarrival_time(int64_t idx) { return ((base[idx].latest_add_time - base[idx].first_add_time) / (base[idx].number_of_adds - 1U)); } /* * Check to see if there are shortest wait time statistics. */ static unsigned have_shortest_wait_statistic(int64_t idx) { return (base[idx].have_shortest_statistic != 0); } /* * Return the shortest amount of time an element has waited in the queue. */ static uint64_t get_shortest_wait_time(int64_t idx) { return base[idx].shortest_wait_time; } /* * Check to see if we have an average wait time statistics. */ static unsigned have_average_wait_statistic(int64_t idx) { return (base[idx].number_of_adds >= 1U); } /* * Return the average wait time in the queue. */ static uint64_t get_average_wait_time(int64_t idx) { PLI_INT32 length = base[idx].length; PLI_INT32 loc = base[idx].head; PLI_INT32 elems = base[idx].elems; PLI_INT32 count; /* Initialize the high and low time with the current total time. */ uint64_t high = base[idx].wait_time_high; uint64_t low = base[idx].wait_time_low; s_vpi_time cur_time; uint64_t c_time, add_time; /* Get the current simulation time. */ cur_time.type = vpiSimTime; vpi_get_time(NULL, &cur_time); c_time = cur_time.high; c_time <<= 32; c_time |= cur_time.low; /* For each element still in the queue, add its wait time to the * total wait time. */ for (count = 0; count < elems; count += 1) { add_time = base[idx].queue[loc].time; assert(c_time >= add_time); add_to_wait_time(&high, &low, c_time-add_time); /* Move to the next element. */ loc += 1; if (loc == length) loc = 0; } /* Return the average wait time. */ return calc_average_wait_time(high, low, base[idx].number_of_adds); } /* * Check to see if the given id already exists. Return the index for the * queue if it exists, otherwise return -1. */ static int64_t get_id_index(PLI_INT32 id) { int64_t idx; for (idx = 0; idx < base_len; idx += 1) { if (id == base[idx].id) return idx; } return -1; } /* * Check to see if the given value is bit based and has 32 or fewer bits. */ static unsigned is_32_or_smaller_obj(vpiHandle obj) { PLI_INT32 const_type; unsigned rtn = 0; assert(obj); switch(vpi_get(vpiType, obj)) { case vpiConstant: case vpiParameter: const_type = vpi_get(vpiConstType, obj); if ((const_type != vpiRealConst) && (const_type != vpiStringConst)) rtn = 1; break; /* These can have valid 32 bit or smaller numeric values. */ case vpiIntegerVar: case vpiMemoryWord: case vpiNet: case vpiPartSelect: case vpiReg: rtn = 1; break; } /* The object must be 32 bits or smaller. */ if (vpi_get(vpiSize, obj) > 32) rtn = 0; return rtn; } /* * Check to see if the argument is a variable that is exactly 32 bits in size. */ static void check_var_arg_32(vpiHandle arg, vpiHandle callh, const char *name, const char *desc) { assert(arg); switch (vpi_get(vpiType, arg)) { case vpiMemoryWord: case vpiPartSelect: case vpiReg: // Check that we have exactly 32 bits. if (vpi_get(vpiSize, arg) != 32) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's %s (variable) argument must be 32 bits.\n", name, desc); vpi_control(vpiFinish, 1); } case vpiIntegerVar: break; default: vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's %s argument must be a 32 bit variable.\n", name, desc); vpi_control(vpiFinish, 1); } } /* * Check to see if the argument is a variable of at least 32 bits. 
*/ static void check_var_arg_large(vpiHandle arg, vpiHandle callh, const char *name, const char *desc) { assert(arg); switch (vpi_get(vpiType, arg)) { case vpiMemoryWord: case vpiPartSelect: case vpiReg: // Check that we have at least 32 bits. if (vpi_get(vpiSize, arg) < 32) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's %s (variable) argument must have at least " "32 bits.\n", name, desc); vpi_control(vpiFinish, 1); } case vpiIntegerVar: case vpiTimeVar: break; default: vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's %s argument must be a variable.\n", name, desc); vpi_control(vpiFinish, 1); } } /* * Check that the given number of arguments are numeric. */ static unsigned check_numeric_args(vpiHandle argv, unsigned count, vpiHandle callh, const char *name) { unsigned idx; /* Check that the first count arguments are numeric. Currently * only three are needed/supported. */ for (idx = 0; idx < count; idx += 1) { char *loc; vpiHandle arg = vpi_scan(argv); /* Get the name for this argument. */ switch (idx) { case 0: loc = "first"; break; case 1: loc = "second"; break; case 2: loc = "third"; break; default: assert(0); } /* Check that there actually is an argument. */ if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a %s (<= 32 bit numeric) argument.\n", name, loc); vpi_control(vpiFinish, 1); return 1; } /* Check that it is no more than 32 bits. */ if (! is_32_or_smaller_obj(arg)) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's %s argument must be numeric (<= 32 bits).\n", name, loc); vpi_control(vpiFinish, 1); } } return 0; } /* * Check to see if the given argument is valid (does not have any X/Z bits). * Return zero if it is valid and a positive value if it is invalid. */ static unsigned get_valid_32(vpiHandle arg, PLI_INT32 *value) { PLI_INT32 size, mask; s_vpi_value val; size = vpi_get(vpiSize, arg); /* The compiletf routine should have already verified that this is * <= 32 bits. */ assert((size <= 32) && (size > 0)); /* Create a mask so that we only check the appropriate bits. */ mask = UINT32_MAX >> (32 - size); /* Get the value and return the possible integer value in the value * variable. Return the b-value bits to indicate if the value is * undefined (has X/Z bit). */ val.format = vpiVectorVal; vpi_get_value(arg, &val); *value = val.value.vector->aval & mask; /* If the argument is signed and less than 32 bit we need to sign * extend the value. */ if (vpi_get(vpiSigned, arg) && (size < 32)) { if ((*value) & (1 << (size - 1))) *value |= ~mask; } return (val.value.vector->bval & mask); } static void get_four_state(vpiHandle arg, p_vpi_vecval vec) { PLI_INT32 size, mask; s_vpi_value val; size = vpi_get(vpiSize, arg); /* The compiletf routine should have already verified that this is * <= 32 bits. */ assert((size <= 32) && (size > 0)); /* Create a mask so that we only use the appropriate bits. */ mask = UINT32_MAX >> (32 - size); /* Get the bits for the argument and save them in the return value. */ val.format = vpiVectorVal; vpi_get_value(arg, &val); vec->aval = val.value.vector->aval & mask; vec->bval = val.value.vector->bval & mask; /* If the argument is signed and less than 32 bit we need to sign * extend the value. 
*/ if (vpi_get(vpiSigned, arg) && (size < 32)) { if (vec->aval & (1 << (size - 1))) vec->aval |= ~mask; if (vec->bval & (1 << (size - 1))) vec->bval |= ~mask; } } /* * Fill the passed variable with x. */ static void fill_variable_with_x(vpiHandle var) { s_vpi_value val; PLI_INT32 words = ((vpi_get(vpiSize, var) - 1) / 32) + 1; PLI_INT32 idx; p_vpi_vecval val_ptr = (p_vpi_vecval) malloc(words*sizeof(s_vpi_vecval)); assert(val_ptr); /* Fill the vector with X. */ for (idx = 0; idx < words; idx += 1) { val_ptr[idx].aval = 0xffffffff; val_ptr[idx].bval = 0xffffffff; } /* Put the vector to the variable. */ val.format = vpiVectorVal; val.value.vector = val_ptr; vpi_put_value(var, &val, 0, vpiNoDelay); free(val_ptr); } /* * Fill the passed variable with the passed value if it fits. If it doesn't * fit then set all bits to one and return that the value is too big instead * of the normal OK. The value is a time and needs to be scaled to the * calling module's timescale. */ static PLI_INT32 fill_variable_with_scaled_time(vpiHandle var, uint64_t c_time) { s_vpi_value val; PLI_INT32 size = vpi_get(vpiSize, var); PLI_INT32 is_signed = vpi_get(vpiSigned, var); PLI_INT32 words = ((size - 1) / 32) + 1; uint64_t max_val = 0; uint64_t scale = 1; uint64_t frac; PLI_INT32 rtn, idx, units, prec; p_vpi_vecval val_ptr = (p_vpi_vecval) malloc(words*sizeof(s_vpi_vecval)); assert(val_ptr); assert(size >= 32); assert(words > 0); /* Scale the variable to match the calling module's timescale. */ prec = vpi_get(vpiTimePrecision, 0); units = vpi_get(vpiTimeUnit, vpi_handle(vpiModule, var)); assert(units >= prec); while (units > prec) { scale *= 10; units -= 1; } frac = c_time % scale; c_time /= scale; if ((scale > 1) && (frac >= scale/2)) c_time += 1; /* Find the maximum value + 1 that can be put into the variable. */ if (size < 64) { max_val = 1; max_val <<= (size - is_signed); } /* If the time is too big to fit then return the maximum positive * value and that the value overflowed. Otherwise, return the time * and OK. */ if (max_val && (c_time >= max_val)) { /* For a single word only the MSB is cleared if signed. */ if (words == 1) { if (is_signed) { val_ptr[0].aval = 0x7fffffff; } else { val_ptr[0].aval = 0xffffffff; } val_ptr[0].bval = 0x00000000; /* For two words the lower word is filled with 1 and the top * word has a size dependent fill if signed. */ } else { assert(words == 2); val_ptr[0].aval = 0xffffffff; val_ptr[0].bval = 0x00000000; if (is_signed) { val_ptr[1].aval = ~(UINT32_MAX >> (size - 32)); } else { val_ptr[1].aval = 0xffffffff; } val_ptr[1].bval = 0x00000000; } rtn = IVL_QUEUE_VALUE_OVERFLOWED; } else { /* Fill the vector with 0. */ for (idx = 0; idx < words; idx += 1) { val_ptr[idx].aval = 0x00000000; val_ptr[idx].bval = 0x00000000; } /* Add the time to the vector. */ switch (words) { default: val_ptr[1].aval = (c_time >> 32) & 0xffffffff; case 1: val_ptr[0].aval = c_time & 0xffffffff; } rtn = IVL_QUEUE_OK; } /* Put the vector to the variable. */ val.format = vpiVectorVal; val.value.vector = val_ptr; vpi_put_value(var, &val, 0, vpiNoDelay); free(val_ptr); return rtn; } /* * Check that the given $q_initialize() call has valid arguments. */ static PLI_INT32 sys_q_initialize_compiletf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle arg; /* Check that there are arguments. 
*/ if (argv == 0) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires four arguments.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the first three arguments (the id, type and maximum * length) are numeric. */ if (check_numeric_args(argv, 3, callh, name)) return 0; /* The fourth argument (the status) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a fourth (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the status argument is a 32 bit variable. */ check_var_arg_32(arg, callh, name, "fourth"); /* Make sure there are no extra arguments. */ check_for_extra_args(argv, callh, name, "four arguments", 0); return 0; } /* * The runtime code for $q_initialize(). */ static PLI_INT32 sys_q_initialize_calltf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle status; PLI_INT32 id, type, length; s_vpi_value val; unsigned invalid_id, invalid_type, invalid_length; /* Get the id. */ invalid_id = get_valid_32(vpi_scan(argv), &id); /* Get the queue type. */ invalid_type = get_valid_32(vpi_scan(argv), &type); /* Get the queue maximum length. */ invalid_length = get_valid_32(vpi_scan(argv), &length); /* Get the status variable. */ status = vpi_scan(argv); /* We are done with the argument iterator so free it. */ vpi_free_object(argv); /* If the id is invalid then return. */ if (invalid_id) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNDEFINED_ID; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Verify that the type is valid. */ if (invalid_type || ((type != IVL_QUEUE_FIFO) && (type != IVL_QUEUE_LIFO))) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNSUPPORTED_TYPE; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Verify that the queue length is greater than zero. */ if (invalid_length || (length <= 0)) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_INVALID_LENGTH; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Check that this is not a duplicate queue id. */ if (get_id_index(id) >= 0) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_DUPLICATE_ID; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Create the queue and fail if we do not have enough memory. */ if (create_queue(id, type, length)) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_OUT_OF_MEMORY; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* The queue was initialized correctly so return OK. */ val.format = vpiIntVal; val.value.integer = IVL_QUEUE_OK; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* * Check that the given $q_add() call has valid arguments. */ static PLI_INT32 sys_q_add_compiletf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle arg; /* Check that there are arguments. */ if (argv == 0) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires four arguments.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the first three arguments (the id, job and information) * are numeric. */ if (check_numeric_args(argv, 3, callh, name)) return 0; /* The fourth argument (the status) must be a variable. */ arg = vpi_scan(argv); if (! 
arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a fourth (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the status argument is a 32 bit variable. */ check_var_arg_32(arg, callh, name, "fourth"); /* Make sure there are no extra arguments. */ check_for_extra_args(argv, callh, name, "four arguments", 0); return 0; } /* * The runtime code for $q_add(). */ static PLI_INT32 sys_q_add_calltf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle status; PLI_INT32 id; int64_t idx; s_vpi_vecval job, inform; s_vpi_value val; unsigned invalid_id; /* Get the id. */ invalid_id = get_valid_32(vpi_scan(argv), &id); /* Get the job. */ get_four_state(vpi_scan(argv), &job); /* Get the value. */ get_four_state(vpi_scan(argv), &inform); /* Get the status variable. */ status = vpi_scan(argv); /* We are done with the argument iterator so free it. */ vpi_free_object(argv); /* Verify that the id is valid. */ idx = get_id_index(id); if (invalid_id || (idx < 0)) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNDEFINED_ID; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Add the data to the queue if it is not already full. */ if (add_to_queue(idx, &job, &inform)) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_FULL; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* The data was added to the queue so return OK. */ val.format = vpiIntVal; val.value.integer = IVL_QUEUE_OK; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* * Check that the given $q_remove() call has valid arguments. */ static PLI_INT32 sys_q_remove_compiletf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle arg; /* Check that there are arguments. */ if (argv == 0) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires four arguments.\n", name); vpi_control(vpiFinish, 1); return 0; } /* The first argument (the id) must be numeric. */ if (! is_32_or_smaller_obj(vpi_scan(argv))) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's first argument must be numeric (<= 32 bits).\n", name); vpi_control(vpiFinish, 1); } /* The second argument (the job id) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a second (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the job id argument is a 32 bit variable. */ check_var_arg_32(arg, callh, name, "second"); /* The third argument (the information id) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a third (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the information id argument is a 32 bit variable. */ check_var_arg_32(arg, callh, name, "third"); /* The fourth argument (the status) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a fourth (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the status argument is a 32 bit variable. 
*/ check_var_arg_32(arg, callh, name, "fourth"); /* Make sure there are no extra arguments. */ check_for_extra_args(argv, callh, name, "four arguments", 0); return 0; } /* * The runtime code for $q_remove(). */ static PLI_INT32 sys_q_remove_calltf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle job, inform, status; PLI_INT32 id, idx; s_vpi_vecval job_val, inform_val; s_vpi_value val; unsigned invalid_id; /* Get the id. */ invalid_id = get_valid_32(vpi_scan(argv), &id); /* Get the job variable. */ job = vpi_scan(argv); /* Get the inform variable. */ inform = vpi_scan(argv); /* Get the status variable. */ status = vpi_scan(argv); /* We are done with the argument iterator so free it. */ vpi_free_object(argv); /* Verify that the id is valid. */ idx = get_id_index(id); if (invalid_id || (idx < 0)) { fill_variable_with_x(job); fill_variable_with_x(inform); val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNDEFINED_ID; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Remove the data from the queue if it is not already empty. */ if (remove_from_queue(idx, &job_val, &inform_val)) { fill_variable_with_x(job); fill_variable_with_x(inform); val.format = vpiIntVal; val.value.integer = IVL_QUEUE_EMPTY; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } val.format = vpiVectorVal; val.value.vector = &job_val; vpi_put_value(job, &val, 0, vpiNoDelay); val.format = vpiVectorVal; val.value.vector = &inform_val; vpi_put_value(inform, &val, 0, vpiNoDelay); /* The data was added to the queue so return OK. */ val.format = vpiIntVal; val.value.integer = IVL_QUEUE_OK; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* * Check that the given $q_full() call has valid arguments. */ static PLI_INT32 sys_q_full_compiletf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle arg; /* Check that there are arguments. */ if (argv == 0) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires two arguments.\n", name); vpi_control(vpiFinish, 1); return 0; } /* The first argument (the id) must be numeric. */ if (! is_32_or_smaller_obj(vpi_scan(argv))) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s's first argument must be numeric (<= 32 bits).\n", name); vpi_control(vpiFinish, 1); } /* The second argument (the status) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a second (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the status argument is a 32 bit variable. */ check_var_arg_32(arg, callh, name, "second"); /* Make sure there are no extra arguments. */ check_for_extra_args(argv, callh, name, "two arguments", 0); return 0; } /* * The runtime code for $q_full(). */ static PLI_INT32 sys_q_full_calltf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle status; PLI_INT32 id, idx; s_vpi_value val; unsigned invalid_id; /* Get the id. */ invalid_id = get_valid_32(vpi_scan(argv), &id); /* Get the status variable. */ status = vpi_scan(argv); /* We are done with the argument iterator so free it. */ vpi_free_object(argv); /* Verify that the id is valid. 
*/ idx = get_id_index(id); if (invalid_id || (idx < 0)) { val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNDEFINED_ID; vpi_put_value(status, &val, 0, vpiNoDelay); fill_variable_with_x(callh); return 0; } /* Get the queue state and return it. */ val.format = vpiIntVal; if (is_queue_full(idx)) val.value.integer = 1; else val.value.integer = 0; vpi_put_value(callh, &val, 0, vpiNoDelay); /* The queue state was passed back so return OK. */ val.format = vpiIntVal; val.value.integer = IVL_QUEUE_OK; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* * Check that the given $q_exam() call has valid arguments. */ static PLI_INT32 sys_q_exam_compiletf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle arg; /* Check that there are arguments. */ if (argv == 0) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires four arguments.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the first two arguments (the id and code) are numeric. */ if (check_numeric_args(argv, 2, callh, name)) return 0; /* The third argument (the value) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a third (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the value argument is a variable with at least * 32 bits. */ check_var_arg_large(arg, callh, name, "third"); /* The fourth argument (the status) must be a variable. */ arg = vpi_scan(argv); if (! arg) { vpi_printf("ERROR: %s:%d: ", vpi_get_str(vpiFile, callh), (int)vpi_get(vpiLineNo, callh)); vpi_printf("%s requires a fourth (variable) argument.\n", name); vpi_control(vpiFinish, 1); return 0; } /* Check that the status argument is a 32 bit variable. */ check_var_arg_32(arg, callh, name, "fourth"); /* Make sure there are no extra arguments. */ check_for_extra_args(argv, callh, name, "two arguments", 0); return 0; } /* * The runtime code for $q_exam(). */ static PLI_INT32 sys_q_exam_calltf(PLI_BYTE8 *name) { vpiHandle callh = vpi_handle(vpiSysTfCall, 0); vpiHandle argv = vpi_iterate(vpiArgument, callh); vpiHandle value, status; PLI_INT32 id, code, idx, rtn; s_vpi_value val; unsigned invalid_id, invalid_code; /* Get the id. */ invalid_id = get_valid_32(vpi_scan(argv), &id); /* Get the code. */ invalid_code = get_valid_32(vpi_scan(argv), &code); /* Get the value variable. */ value = vpi_scan(argv); /* Get the status variable. */ status = vpi_scan(argv); /* We are done with the argument iterator so free it. */ vpi_free_object(argv); /* Verify that the id is valid. */ idx = get_id_index(id); if (invalid_id || (idx < 0)) { fill_variable_with_x(value); val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNDEFINED_ID; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* Verify that the code is valid. */ if (invalid_code || (code <= 0) || (code > 6)) { fill_variable_with_x(value); val.format = vpiIntVal; val.value.integer = IVL_QUEUE_UNDEFINED_STAT_CODE; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } rtn = IVL_QUEUE_OK; /* Calculate the requested queue information. */ switch (code) { /* The current queue length. */ case IVL_QUEUE_LENGTH: val.format = vpiIntVal; val.value.integer = get_current_queue_length(idx); vpi_put_value(value, &val, 0, vpiNoDelay); break; /* The mean inter-arrival time. 
*/ case IVL_QUEUE_MEAN: if (have_interarrival_statistic(idx) == 0) { fill_variable_with_x(value); rtn = IVL_QUEUE_NO_STATISTICS; } else { uint64_t ia_time = get_mean_interarrival_time(idx); rtn = fill_variable_with_scaled_time(value, ia_time); } break; /* The maximum queue length. */ case IVL_QUEUE_MAX_LENGTH: val.format = vpiIntVal; val.value.integer = get_maximum_queue_length(idx); vpi_put_value(value, &val, 0, vpiNoDelay); break; /* The shortest queue wait time ever. */ case IVL_QUEUE_SHORTEST: if (have_shortest_wait_statistic(idx) == 0) { fill_variable_with_x(value); rtn = IVL_QUEUE_NO_STATISTICS; } else { uint64_t sw_time = get_shortest_wait_time(idx); rtn = fill_variable_with_scaled_time(value, sw_time); } break; /* The longest wait time for elements still in the queue. */ case IVL_QUEUE_LONGEST: if (get_current_queue_length(idx) == 0) { fill_variable_with_x(value); rtn = IVL_QUEUE_NO_STATISTICS; } else { uint64_t lq_time = get_longest_queue_time(idx); rtn = fill_variable_with_scaled_time(value, lq_time); } break; /* The average queue wait time. */ case IVL_QUEUE_AVERAGE: if (have_average_wait_statistic(idx) == 0) { fill_variable_with_x(value); rtn = IVL_QUEUE_NO_STATISTICS; } else { uint64_t aw_time = get_average_wait_time(idx); rtn = fill_variable_with_scaled_time(value, aw_time); } break; default: assert(0); } /* The queue information was passed back so now return the status. */ val.format = vpiIntVal; val.value.integer = rtn; vpi_put_value(status, &val, 0, vpiNoDelay); return 0; } /* * Routine to register the system tasks/functions provided in this file. */ void sys_queue_register() { s_vpi_systf_data tf_data; s_cb_data cb; tf_data.type = vpiSysTask; tf_data.tfname = "$q_initialize"; tf_data.calltf = sys_q_initialize_calltf; tf_data.compiletf = sys_q_initialize_compiletf; tf_data.sizetf = 0; tf_data.user_data = "$q_initialize"; vpi_register_systf(&tf_data); tf_data.type = vpiSysTask; tf_data.tfname = "$q_add"; tf_data.calltf = sys_q_add_calltf; tf_data.compiletf = sys_q_add_compiletf; tf_data.sizetf = 0; tf_data.user_data = "$q_add"; vpi_register_systf(&tf_data); tf_data.type = vpiSysTask; tf_data.tfname = "$q_remove"; tf_data.calltf = sys_q_remove_calltf; tf_data.compiletf = sys_q_remove_compiletf; tf_data.sizetf = 0; tf_data.user_data = "$q_remove"; vpi_register_systf(&tf_data); tf_data.type = vpiSysFunc; tf_data.sysfunctype = vpiSysFuncInt; tf_data.tfname = "$q_full"; tf_data.calltf = sys_q_full_calltf; tf_data.compiletf = sys_q_full_compiletf; tf_data.sizetf = 0; /* Not needed for a vpiSysFuncInt. */ tf_data.user_data = "$q_full"; vpi_register_systf(&tf_data); tf_data.type = vpiSysTask; tf_data.tfname = "$q_exam"; tf_data.calltf = sys_q_exam_calltf; tf_data.compiletf = sys_q_exam_compiletf; tf_data.sizetf = 0; tf_data.user_data = "$q_exam"; vpi_register_systf(&tf_data); /* Create a callback to clear all the queue memory when the * simulator finishes. */ cb.time = NULL; cb.reason = cbEndOfSimulation; cb.cb_rtn = cleanup_queue; cb.user_data = 0x0; cb.obj = 0x0; vpi_register_cb(&cb); }
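/*
 * Minimal standalone sketch, separate from the file above (all sketch_* names
 * are this sketch's own): it isolates the index arithmetic used by
 * add_to_queue()/remove_from_queue(). A LIFO keeps head at 0 and pushes/pops
 * at index elems, while a FIFO writes at (head + elems) wrapped to the buffer
 * length and removal advances head. Plain ints stand in for the
 * s_ivl_queue_elem entries; timing and statistics are elided.
 */
#include <assert.h>
#include <stdio.h>

#define SKETCH_LEN 4

struct sketch_queue {
	int data[SKETCH_LEN];
	int head;   /* index of the oldest element (used by the FIFO case) */
	int elems;  /* number of stored elements */
	int fifo;   /* non-zero for FIFO behaviour, zero for LIFO */
};

static int sketch_add(struct sketch_queue *q, int value)
{
	int loc;
	if (q->elems == SKETCH_LEN)
		return 1;                           /* full, mirrors IVL_QUEUE_FULL */
	loc = q->fifo ? (q->head + q->elems) % SKETCH_LEN : q->elems;
	q->data[loc] = value;
	q->elems += 1;
	return 0;
}

static int sketch_remove(struct sketch_queue *q, int *value)
{
	int loc;
	if (q->elems == 0)
		return 1;                           /* empty, mirrors IVL_QUEUE_EMPTY */
	q->elems -= 1;
	if (q->fifo) {
		loc = q->head;                      /* FIFO pops the oldest entry */
		q->head = (q->head + 1) % SKETCH_LEN;
	} else {
		loc = q->elems;                     /* LIFO pops the most recent entry */
	}
	*value = q->data[loc];
	return 0;
}

int main(void)
{
	struct sketch_queue fifo = { {0}, 0, 0, 1 };
	struct sketch_queue lifo = { {0}, 0, 0, 0 };
	int v;

	sketch_add(&fifo, 1); sketch_add(&fifo, 2);
	sketch_remove(&fifo, &v); assert(v == 1);   /* FIFO: oldest out first */

	sketch_add(&lifo, 1); sketch_add(&lifo, 2);
	sketch_remove(&lifo, &v); assert(v == 2);   /* LIFO: newest out first */

	printf("queue index arithmetic behaves as described above\n");
	return 0;
}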
23622.c
/****************************************************************************** * * Copyright © International Business Machines Corp., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * * NAME * periodic_cpu_load_single.c * * DESCRIPTION * Measure variation in computational execution time * at the specified period, priority, and loops. * * USAGE: * Use run_auto.sh script in current directory to build and run test. * * AUTHOR * Darren Hart <[email protected]> * * HISTORY * 2007-May-2: Initial version by Darren Hart <[email protected]> * * This line has to be added to avoid a stupid CVS problem *****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <librttest.h> #include <libstats.h> #define HIST_BUCKETS 100 // define sane defaults #define DEFAULT_ITERATIONS 10000 /* 1000 is the min for 3 nines */ #define DEFAULT_PERIOD 5 #define DEFAULT_PRIO 90 #define DEFAULT_CALC_LOOPS 1000 #define LOOPS_MULTIPLIER 4.2 #define DEFAULT_FILENAME_PREFIX "pcl" static int prio; static int period; static int calc_loops; static int ret = 0; static char *filename_prefix = DEFAULT_FILENAME_PREFIX; static int iterations = DEFAULT_ITERATIONS; void usage(void) { rt_help(); printf("periodic_cpu_load_single specific options:\n"); printf(" -lCALC_LOOPS loops per iteration\n"); printf(" -fFILENAME_PREFIX filename prefix for plot output\n"); printf (" -iITERATIONS number of iterations to calculate the average over\n"); printf(" -r[0-99] real-time priority\n"); printf(" -tPERIOD period in ms\n"); } void *calc(int loops) { int i, j; for (i = 0; i < loops * LOOPS_MULTIPLIER; i++) { for (j = 0; j < 125; j++) { // Sum of the numbers up to J volatile int temp = j * (j + 1) / 2; (void)temp; } } return NULL; } int periodic_thread(nsec_t period, int iterations, int loops) { stats_container_t dat; stats_container_t hist; stats_quantiles_t quantiles; stats_record_t rec; int i = 0; int fail = 0; nsec_t next, now; nsec_t exe_start, exe_end, exe_time; char *samples_filename; char *hist_filename; stats_container_init(&dat, iterations); stats_container_init(&hist, HIST_BUCKETS); stats_quantiles_init(&quantiles, (int)log10(iterations)); if (asprintf(&samples_filename, "%s-samples", filename_prefix) == -1) { fprintf(stderr, "Failed to allocate string for samples filename\n"); return -1; } if (asprintf(&hist_filename, "%s-hist", filename_prefix) == -1) { fprintf(stderr, "Failed to allocate string for samples filename\n"); return -1; } next = rt_gettime(); while (i < iterations) { next += period; now = rt_gettime(); if (now > next) { printf ("Missed period, aborting (didn't get scheduled in time)\n"); fail = 1; break; } exe_start = rt_gettime(); calc(loops); exe_end = rt_gettime(); exe_time = exe_end - exe_start; rec.x = i; rec.y = exe_time / NS_PER_US; stats_container_append(&dat, rec); i++; now = 
rt_gettime(); if (now > next) { printf ("Missed period, aborting (calc took too long)\n"); fail = 1; break; } rt_nanosleep(next - now); } stats_container_save(samples_filename, "Periodic CPU Load Scatter Plot", "Iteration", "Runtime (us)", &dat, "points"); stats_container_save(hist_filename, "Periodic CPU Load Histogram", "Runtime (us)", "Samples", &hist, "steps"); printf(" Execution Time Statistics:\n"); printf("Min: %ld us\n", stats_min(&dat)); printf("Max: %ld us\n", stats_max(&dat)); printf("Avg: %.4f us\n", stats_avg(&dat)); printf("StdDev: %.4f us\n", stats_stddev(&dat)); printf("Quantiles:\n"); stats_quantiles_calc(&dat, &quantiles); stats_quantiles_print(&quantiles); printf("Criteria: no missed periods\n"); printf("Result: %s\n", fail ? "FAIL" : "PASS"); free(samples_filename); free(hist_filename); return fail; } int parse_args(int c, char *v) { int handled = 1; switch (c) { case 'l': calc_loops = atoi(v); break; case 'f': filename_prefix = v; break; case 'h': usage(); exit(0); case 'i': iterations = atoi(v); break; case 'r': prio = atoi(v); break; case 't': period = atoi(v) * NS_PER_MS; break; default: handled = 0; break; } return handled; } int main(int argc, char *argv[]) { period = DEFAULT_PERIOD * NS_PER_MS; prio = DEFAULT_PRIO; calc_loops = DEFAULT_CALC_LOOPS; setup(); rt_init("f:hi:r:t:l:", parse_args, argc, argv); if (iterations < 100) { printf("Number of iterations cannot be less than 100\n"); exit(1); } if (!period || !prio || !calc_loops) { usage(); exit(1); } set_priority(prio); printf("------------------------------------\n"); printf("Periodic CPU Load Execution Variance\n"); printf("------------------------------------\n\n"); printf("Running %d iterations\n", iterations); printf("priority: %d\n", prio); printf(" period: %d ms\n", period / NS_PER_MS); printf(" loops: %d\n", calc_loops); printf(" logs: %s*\n", filename_prefix); ret = periodic_thread(period, iterations, calc_loops); return ret; }
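/*
 * Minimal standalone sketch, separate from the file above: periodic_thread()
 * keeps an absolute `next` deadline, flags a miss when the current time passes
 * it, and sleeps the relative remainder via rt_nanosleep(next - now). The
 * sketch below expresses the same keep-an-absolute-deadline pattern with plain
 * POSIX clock_nanosleep(TIMER_ABSTIME); librttest's rt_gettime()/rt_nanosleep()
 * are assumed to wrap comparable CLOCK_MONOTONIC primitives, and the workload
 * plus statistics gathering are elided. All sketch_* names are hypothetical.
 */
#define _POSIX_C_SOURCE 200809L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define SKETCH_NS_PER_SEC 1000000000ULL

static uint64_t sketch_now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * SKETCH_NS_PER_SEC + (uint64_t)ts.tv_nsec;
}

static int sketch_periodic_loop(uint64_t period_ns, int iterations)
{
	uint64_t next = sketch_now_ns();
	int i;

	for (i = 0; i < iterations; i++) {
		struct timespec deadline;

		next += period_ns;
		/* The per-period work would run (and be timed) here, as calc() is above. */
		if (sketch_now_ns() > next)
			return 1;               /* missed the period: work took too long */

		deadline.tv_sec = (time_t)(next / SKETCH_NS_PER_SEC);
		deadline.tv_nsec = (long)(next % SKETCH_NS_PER_SEC);
		/* Sleep until the absolute deadline rather than for a relative delta,
		 * so scheduling jitter does not accumulate across iterations. */
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
	}
	return 0;
}

int main(void)
{
	int missed = sketch_periodic_loop(5 * 1000 * 1000ULL /* 5 ms */, 100);
	printf("Result: %s\n", missed ? "FAIL" : "PASS");
	return missed;
}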
573114.c
/* * Exceptions for specific devices. Usually work-arounds for fatal design flaws. */ #include <linux/delay.h> #include <linux/dmi.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/vgaarb.h> #include <asm/pci_x86.h> static void pci_fixup_i450nx(struct pci_dev *d) { /* * i450NX -- Find and scan all secondary buses on all PXB's. */ int pxb, reg; u8 busno, suba, subb; dev_warn(&d->dev, "Searching for i450NX host bridges\n"); reg = 0xd0; for(pxb = 0; pxb < 2; pxb++) { pci_read_config_byte(d, reg++, &busno); pci_read_config_byte(d, reg++, &suba); pci_read_config_byte(d, reg++, &subb); dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); if (busno) pci_scan_bus_with_sysdata(busno); /* Bus A */ if (suba < subb) pci_scan_bus_with_sysdata(suba+1); /* Bus B */ } pcibios_last_bus = -1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx); static void pci_fixup_i450gx(struct pci_dev *d) { /* * i450GX and i450KX -- Find and scan all secondary buses. * (called separately for each PCI bridge found) */ u8 busno; pci_read_config_byte(d, 0x4a, &busno); dev_info(&d->dev, "i440KX/GX host bridge; secondary bus %02x\n", busno); pci_scan_bus_with_sysdata(busno); pcibios_last_bus = -1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx); static void pci_fixup_umc_ide(struct pci_dev *d) { /* * UM8886BF IDE controller sets region type bits incorrectly, * therefore they look like memory despite of them being I/O. */ int i; dev_warn(&d->dev, "Fixing base address flags\n"); for(i = 0; i < 4; i++) d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); static void pci_fixup_ncr53c810(struct pci_dev *d) { /* * NCR 53C810 returns class code 0 (at least on some systems). * Fix class to be PCI_CLASS_STORAGE_SCSI */ if (!d->class) { dev_warn(&d->dev, "Fixing NCR 53C810 class code\n"); d->class = PCI_CLASS_STORAGE_SCSI << 8; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810); static void pci_fixup_latency(struct pci_dev *d) { /* * SiS 5597 and 5598 chipsets require latency timer set to * at most 32 to avoid lockups. */ dev_dbg(&d->dev, "Setting max latency to 32\n"); pcibios_max_latency = 32; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); static void pci_fixup_piix4_acpi(struct pci_dev *d) { /* * PIIX4 ACPI device: hardwired IRQ9 */ d->irq = 9; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi); /* * Addresses issues with problems in the memory write queue timer in * certain VIA Northbridges. This bugfix is per VIA's specifications, * except for the KL133/KM133: clearing bit 5 on those Northbridges seems * to trigger a bug in its integrated ProSavage video card, which * causes screen corruption. We only clear bits 6 and 7 for that chipset, * until VIA can provide us with definitive information on why screen * corruption occurs, and what exactly those bits do. 
* * VIA 8363,8622,8361 Northbridges: * - bits 5, 6, 7 at offset 0x55 need to be turned off * VIA 8367 (KT266x) Northbridges: * - bits 5, 6, 7 at offset 0x95 need to be turned off * VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges: * - bits 6, 7 at offset 0x55 need to be turned off */ #define VIA_8363_KL133_REVISION_ID 0x81 #define VIA_8363_KM133_REVISION_ID 0x84 static void pci_fixup_via_northbridge_bug(struct pci_dev *d) { u8 v; int where = 0x55; int mask = 0x1f; /* clear bits 5, 6, 7 by default */ if (d->device == PCI_DEVICE_ID_VIA_8367_0) { /* fix pci bus latency issues resulted by NB bios error it appears on bug free^Wreduced kt266x's bios forces NB latency to zero */ pci_write_config_byte(d, PCI_LATENCY_TIMER, 0); where = 0x95; /* the memory write queue timer register is different for the KT266x's: 0x95 not 0x55 */ } else if (d->device == PCI_DEVICE_ID_VIA_8363_0 && (d->revision == VIA_8363_KL133_REVISION_ID || d->revision == VIA_8363_KM133_REVISION_ID)) { mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5 causes screen corruption on the KL133/KM133 */ } pci_read_config_byte(d, where, &v); if (v & ~mask) { dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n", \ d->device, d->revision, where, v, mask, v & mask); v &= mask; pci_write_config_byte(d, where, v); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug); /* * For some reasons Intel decided that certain parts of their * 815, 845 and some other chipsets must look like PCI-to-PCI bridges * while they are obviously not. The 82801 family (AA, AB, BAM/CAM, * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according * to Intel terminology. These devices do forward all addresses from * system to PCI bus no matter what are their window settings, so they are * "transparent" (or subtractive decoding) from programmers point of view. */ static void pci_fixup_transparent_bridge(struct pci_dev *dev) { if ((dev->device & 0xff00) == 0x2400) dev->transparent = 1; } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge); /* * Fixup for C1 Halt Disconnect problem on nForce2 systems. * * From information provided by "Allen Martin" <[email protected]>: * * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle * sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns. * This allows the state-machine and timer to return to a proper state within * 80 ns of the CONNECT and probe appearing together. Since the CPU will not * issue another HALT within 80 ns of the initial HALT, the failure condition * is avoided. 
*/ static void pci_fixup_nforce2(struct pci_dev *dev) { u32 val; /* * Chip Old value New value * C17 0x1F0FFF01 0x1F01FF01 * C18D 0x9F0FFF01 0x9F01FF01 * * Northbridge chip version may be determined by * reading the PCI revision ID (0xC1 or greater is C18D). */ pci_read_config_dword(dev, 0x6c, &val); /* * Apply fixup if needed, but don't touch disconnect state */ if ((val & 0x00FF0000) != 0x00010000) { dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n"); pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2); /* Max PCI Express root ports */ #define MAX_PCIEROOT 6 static int quirk_aspm_offset[MAX_PCIEROOT << 3]; #define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7)) static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { return raw_pci_read(pci_domain_nr(bus), bus->number, devfn, where, size, value); } /* * Replace the original pci bus ops for write with a new one that will filter * the request to insure ASPM cannot be enabled. */ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { u8 offset; offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)]; if ((offset) && (where == offset)) value = value & 0xfffffffc; return raw_pci_write(pci_domain_nr(bus), bus->number, devfn, where, size, value); } static struct pci_ops quirk_pcie_aspm_ops = { .read = quirk_pcie_aspm_read, .write = quirk_pcie_aspm_write, }; /* * Prevents PCI Express ASPM (Active State Power Management) being enabled. * * Save the register offset, where the ASPM control bits are located, * for each PCI Express device that is in the device list of * the root port in an array for fast indexing. Replace the bus ops * with the modified one. */ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev) { int cap_base, i; struct pci_bus *pbus; struct pci_dev *dev; if ((pbus = pdev->subordinate) == NULL) return; /* * Check if the DID of pdev matches one of the six root ports. This * check is needed in the case this function is called directly by the * hot-plug driver. */ if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) || (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1)) return; if (list_empty(&pbus->devices)) { /* * If no device is attached to the root port at power-up or * after hot-remove, the pbus->devices is empty and this code * will set the offsets to zero and the bus ops to parent's bus * ops, which is unmodified. */ for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i) quirk_aspm_offset[i] = 0; pbus->ops = pbus->parent->ops; } else { /* * If devices are attached to the root port at power-up or * after hot-add, the code loops through the device list of * each root port to save the register offsets and replace the * bus ops. 
*/ list_for_each_entry(dev, &pbus->devices, bus_list) { /* There are 0 to 8 devices attached to this bus */ cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP); quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10; } pbus->ops = &quirk_pcie_aspm_ops; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk); /* * Fixup to mark boot BIOS video selected by BIOS before it changes * * From information provided by "Jon Smirl" <[email protected]> * * The standard boot ROM sequence for an x86 machine uses the BIOS * to select an initial video card for boot display. This boot video * card will have it's BIOS copied to C0000 in system RAM. * IORESOURCE_ROM_SHADOW is used to associate the boot video * card with this copy. On laptops this copy has to be used since * the main ROM may be compressed or combined with another image. * See pci_map_rom() for use of this flag. IORESOURCE_ROM_SHADOW * is marked here since the boot video device will be the only enabled * video device at this point. */ static void pci_fixup_video(struct pci_dev *pdev) { struct pci_dev *bridge; struct pci_bus *bus; u16 config; /* Is VGA routed to us? */ bus = pdev->bus; while (bus) { bridge = bus->self; /* * From information provided by * "David Miller" <[email protected]> * The bridge control register is valid for PCI header * type BRIDGE, or CARDBUS. Host to PCI controllers use * PCI header type NORMAL. */ if (bridge && ((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) || (bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); if (!(config & PCI_BRIDGE_CTL_VGA)) return; } bus = bus->parent; } pci_read_config_word(pdev, PCI_COMMAND, &config); if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); if (!vga_default_device()) vga_set_default_device(pdev); } } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video); static const struct dmi_system_id msi_k8t_dmi_table[] = { { .ident = "MSI-K8T-Neo2Fir", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "MSI"), DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"), }, }, {} }; /* * The AMD-Athlon64 board MSI "K8T Neo2-FIR" disables the onboard sound * card if a PCI-soundcard is added. * * The BIOS only gives options "DISABLED" and "AUTO". This code sets * the corresponding register-value to enable the soundcard. * * The soundcard is only enabled, if the mainborad is identified * via DMI-tables and the soundcard is detected to be off. 
*/ static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev) { unsigned char val; if (!dmi_check_system(msi_k8t_dmi_table)) return; /* only applies to MSI K8T Neo2-FIR */ pci_read_config_byte(dev, 0x50, &val); if (val & 0x40) { pci_write_config_byte(dev, 0x50, val & (~0x40)); /* verify the change for status output */ pci_read_config_byte(dev, 0x50, &val); if (val & 0x40) dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; " "can't enable onboard soundcard!\n"); else dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; " "enabled onboard soundcard\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, pci_fixup_msi_k8t_onboard_sound); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, pci_fixup_msi_k8t_onboard_sound); /* * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A. * * We pretend to bring them out of full D3 state, and restore the proper * IRQ, PCI cache line size, and BARs, otherwise the device won't function * properly. In some cases, the device will generate an interrupt on * the wrong IRQ line, causing any devices sharing the line it's * *supposed* to use to be disabled by the kernel's IRQ debug code. */ static u16 toshiba_line_size; static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = { { .ident = "Toshiba PS5 based laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"), }, }, { .ident = "Toshiba PSM4 based laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"), }, }, { .ident = "Toshiba A40 based laptop", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), }, }, { } }; static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) { if (!dmi_check_system(toshiba_ohci1394_dmi_table)) return; /* only applies to certain Toshibas (so far) */ dev->current_state = PCI_D3cold; pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032, pci_pre_fixup_toshiba_ohci1394); static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev) { if (!dmi_check_system(toshiba_ohci1394_dmi_table)) return; /* only applies to certain Toshibas (so far) */ /* Restore config space on Toshiba laptops */ pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size); pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq); pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, pci_resource_start(dev, 0)); pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, pci_resource_start(dev, 1)); } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032, pci_post_fixup_toshiba_ohci1394); /* * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device * configuration space. */ static void pci_early_fixup_cyrix_5530(struct pci_dev *dev) { u8 r; /* clear 'F4 Video Configuration Trap' bit */ pci_read_config_byte(dev, 0x42, &r); r &= 0xfd; pci_write_config_byte(dev, 0x42, r); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, pci_early_fixup_cyrix_5530); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, pci_early_fixup_cyrix_5530); /* * Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller: * prevent update of the BAR0, which doesn't look like a normal BAR. 
*/ static void pci_siemens_interrupt_controller(struct pci_dev *dev) { dev->resource[0].flags |= IORESOURCE_PCI_FIXED; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015, pci_siemens_interrupt_controller); /* * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from * confusing the PCI engine: */ static void sb600_disable_hpet_bar(struct pci_dev *dev) { u8 val; /* * The SB600 and SB700 both share the same device * ID, but the PM register 0x55 does something different * for the SB700, so make sure we are dealing with the * SB600 before touching the bit: */ pci_read_config_byte(dev, 0x08, &val); if (val < 0x2F) { outb(0x55, 0xCD6); val = inb(0xCD7); /* Set bit 7 in PM register 0x55 */ outb(0x55, 0xCD6); outb(val | 0x80, 0xCD7); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar); /* * Twinhead H12Y needs us to block out a region otherwise we map devices * there and any access kills the box. * * See: https://bugzilla.kernel.org/show_bug.cgi?id=10231 * * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor) */ static void twinhead_reserve_killing_zone(struct pci_dev *dev) { if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) { pr_info("Reserving memory on Twinhead H12Y\n"); request_mem_region(0xFFB00000, 0x100000, "twinhead"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
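/*
 * Minimal standalone userspace sketch, separate from the file above: the
 * DECLARE_PCI_FIXUP_* macros used throughout register per-pass quirk entries
 * keyed on vendor/device (with PCI_ANY_ID as a wildcard) that the kernel later
 * matches against each device and calls. This sketch only illustrates that
 * match-and-call pattern; it is not the kernel's implementation, and all
 * sketch_* names and the example table entries are this sketch's own.
 */
#include <stddef.h>
#include <stdio.h>

#define SKETCH_ANY_ID 0xffffU

struct sketch_pci_dev {
	unsigned short vendor;
	unsigned short device;
};

struct sketch_fixup {
	unsigned short vendor;
	unsigned short device;
	void (*hook)(struct sketch_pci_dev *dev);
};

static void sketch_fixup_latency(struct sketch_pci_dev *dev)
{
	printf("applying latency quirk to %04x:%04x\n", dev->vendor, dev->device);
}

/* One table per fixup pass (early/header/final/resume/...) in the real kernel. */
static const struct sketch_fixup sketch_header_fixups[] = {
	{ 0x1039, 0x5597, sketch_fixup_latency },        /* SiS 5597, as above */
	{ 0x1039, 0x5598, sketch_fixup_latency },        /* SiS 5598, as above */
	{ 0x8086, SKETCH_ANY_ID, sketch_fixup_latency }, /* wildcard example only */
};

static void sketch_fixup_device(struct sketch_pci_dev *dev)
{
	size_t i;
	for (i = 0; i < sizeof(sketch_header_fixups) / sizeof(sketch_header_fixups[0]); i++) {
		const struct sketch_fixup *f = &sketch_header_fixups[i];
		if ((f->vendor == dev->vendor || f->vendor == SKETCH_ANY_ID) &&
		    (f->device == dev->device || f->device == SKETCH_ANY_ID))
			f->hook(dev);
	}
}

int main(void)
{
	struct sketch_pci_dev sis = { 0x1039, 0x5597 };
	struct sketch_pci_dev intel = { 0x8086, 0x1234 };
	sketch_fixup_device(&sis);
	sketch_fixup_device(&intel);
	return 0;
}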
135438.c
/* Copyright (C) 1996-1997 Id Software, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ // gl_warp.c -- sky and water polygons #include "quakedef.h" extern model_t *loadmodel; int skytexturenum; int solidskytexture; int alphaskytexture; float speedscale; // for top sky and bottom sky msurface_t *warpface; extern cvar_t gl_subdivide_size; void BoundPoly (int numverts, float *verts, vec3_t mins, vec3_t maxs) { int i, j; float *v; mins[0] = mins[1] = mins[2] = 9999; maxs[0] = maxs[1] = maxs[2] = -9999; v = verts; for (i=0 ; i<numverts ; i++) for (j=0 ; j<3 ; j++, v++) { if (*v < mins[j]) mins[j] = *v; if (*v > maxs[j]) maxs[j] = *v; } } void SubdividePolygon (int numverts, float *verts) { int i, j, k; vec3_t mins, maxs; float m; float *v; vec3_t front[64], back[64]; int f, b; float dist[64]; float frac; glpoly_t *poly; float s, t; if (numverts > 60) Sys_Error ("numverts = %i", numverts); BoundPoly (numverts, verts, mins, maxs); for (i=0 ; i<3 ; i++) { m = (mins[i] + maxs[i]) * 0.5; m = gl_subdivide_size.value * floor (m/gl_subdivide_size.value + 0.5); if (maxs[i] - m < 8) continue; if (m - mins[i] < 8) continue; // cut it v = verts + i; for (j=0 ; j<numverts ; j++, v+= 3) dist[j] = *v - m; // wrap cases dist[j] = dist[0]; v-=i; VectorCopy (verts, v); f = b = 0; v = verts; for (j=0 ; j<numverts ; j++, v+= 3) { if (dist[j] >= 0) { VectorCopy (v, front[f]); f++; } if (dist[j] <= 0) { VectorCopy (v, back[b]); b++; } if (dist[j] == 0 || dist[j+1] == 0) continue; if ( (dist[j] > 0) != (dist[j+1] > 0) ) { // clip point frac = dist[j] / (dist[j] - dist[j+1]); for (k=0 ; k<3 ; k++) front[f][k] = back[b][k] = v[k] + frac*(v[3+k] - v[k]); f++; b++; } } SubdividePolygon (f, front[0]); SubdividePolygon (b, back[0]); return; } // poly = Hunk_Alloc (sizeof(glpoly_t) + (numverts-4) * VERTEXSIZE*sizeof(float)); poly = Hunk_Alloc (sizeof(glpoly_t) + (numverts-4) * VERTEXSIZE * sizeof(qgl_hfloat)); poly->next = warpface->polys; warpface->polys = poly; poly->numverts = numverts; for (i=0 ; i<numverts ; i++, verts+= 3) { VectorCopy (verts, poly->verts[i]); s = DotProduct (verts, warpface->texinfo->vecs[0]); t = DotProduct (verts, warpface->texinfo->vecs[1]); poly->verts[i][3] = s; poly->verts[i][4] = t; } } mvertex_t *r_pcurrentvertbase; model_t *currentmodel; /* ================ GL_SubdivideSurface Breaks a polygon up along axial 64 unit boundaries so that turbulent and sky warps can be done reasonably. 
================ */ void GL_SubdivideSurface (msurface_t *fa) { vec3_t verts[64]; int numverts; int i; int lindex; float *vec; texture_t *t; // return; currentmodel = loadmodel; r_pcurrentvertbase = currentmodel->vertexes; BoundSurface(fa); warpface = fa; // // convert edges back to a normal polygon // numverts = 0; for (i=0 ; i<fa->numedges ; i++) { lindex = loadmodel->surfedges[fa->firstedge + i]; if (lindex > 0) vec = loadmodel->vertexes[loadmodel->edges[lindex].v[0]].position; else vec = loadmodel->vertexes[loadmodel->edges[-lindex].v[1]].position; VectorCopy (vec, verts[numverts]); numverts++; } SubdividePolygon (numverts, verts[0]); } //========================================================= // speed up sin calculations - Ed float turbsin[] = { #include "gl_warp_sin.h" }; #define TURBSCALE (256.0 / (2 * M_PI)) float qgl_sintab[256]; float qgl_fastsin(float ra) { int ix; ix=ra*(128.0/M_PI)+0.5; ix=ix&255; return(qgl_sintab[ix]); } float qgl_fastcos(float ra) { int ix; ix=ra*(128.0/M_PI)+0.5; ix=(ix+64)&255; return(qgl_sintab[ix]); } #ifdef __BJX2__ u64 __float64_getbits(double v); double __float64_frombits(u64 v); double qgl_fastsqrt(double ra) { u64 v; v=__float64_getbits(ra); // v=((v-0x3FF0000000000000ULL)>>1)+0x3FF0000000000000ULL; v=(v>>1)+0x1FF8000000000000ULL; return(__float64_frombits(v)); } #else float qgl_fastsqrt(float ra) { return(sqrt(ra)); } #endif void qgl_initsin() { float f; int ix; for(ix=0; ix<256; ix++) { f = ix * (M_PI / 128.0); qgl_sintab[ix] = sin(f); } } #define TURBSCALE_1_8 (TURBSCALE * 0.125) /* ============= EmitWaterPolys Does a water warp on the pre-fragmented glpoly_t chain ============= */ void EmitWaterPolys (msurface_t *fa) { glpoly_t *p; // float *v; qgl_hfloat *v; int i; float s, t, os, ot, ttsc, sofs, tofs; // return; ttsc = realtime * TURBSCALE; // sofs = turbsin[(int)(ttsc) & 255]; // tofs = turbsin[(int)(ttsc) & 255]; for (p=fa->polys ; p ; p=p->next) { qglBegin (GL_POLYGON); for (i=0,v=p->verts[0] ; i<p->numverts ; i++, v+=VERTEXSIZE) { os = v[3]; ot = v[4]; // s = os + turbsin[(int)((ot*0.125+realtime) * TURBSCALE) & 255]; s = os + turbsin[(int)(ot*TURBSCALE_1_8+ttsc) & 255]; // s = os + sofs; s *= (1.0/64); // t = ot + turbsin[(int)((os*0.125+realtime) * TURBSCALE) & 255]; t = ot + turbsin[(int)(os*TURBSCALE_1_8+ttsc) & 255]; // t = ot + tofs; t *= (1.0/64); // s=os; // t=ot; qglTexCoord2f (s, t); // qglVertex3fv (v); qglVertex3f (v[0], v[1], v[2]); } qglEnd (); } } /* ============= EmitSkyPolys ============= */ void EmitSkyPolys (msurface_t *fa) { glpoly_t *p; // float *v; qgl_hfloat *v; int i; float s, t; vec3_t dir; float length; for (p=fa->polys ; p ; p=p->next) { qglBegin (GL_POLYGON); for (i=0,v=p->verts[0] ; i<p->numverts ; i++, v+=VERTEXSIZE) { VectorSubtract (v, r_origin, dir); dir[2] *= 3; // flatten the sphere length = dir[0]*dir[0] + dir[1]*dir[1] + dir[2]*dir[2]; // length = sqrt (length); length = qgl_fastsqrt (length); length = 6*63/length; dir[0] *= length; dir[1] *= length; s = (speedscale + dir[0]) * (1.0/128); t = (speedscale + dir[1]) * (1.0/128); qglTexCoord2f (s, t); // qglVertex3fv (v); qglVertex3f (v[0], v[1], v[2]); } qglEnd (); } } /* =============== EmitBothSkyLayers Does a sky warp on the pre-fragmented glpoly_t chain This will be called for brushmodels, the world will have them chained together. 
=============== */ void EmitBothSkyLayers (msurface_t *fa) { int i; int lindex; // float *vec; GL_DisableMultitexture(); GL_Bind (solidskytexture); speedscale = realtime*8; speedscale -= (int)speedscale & ~127 ; EmitSkyPolys (fa); qglEnable (GL_BLEND); GL_Bind (alphaskytexture); speedscale = realtime*16; speedscale -= (int)speedscale & ~127 ; EmitSkyPolys (fa); qglDisable (GL_BLEND); } #ifndef QUAKE2 /* ================= R_DrawSkyChain ================= */ void R_DrawSkyChain (msurface_t *s) { msurface_t *fa; GL_DisableMultitexture(); // used when gl_texsort is on GL_Bind(solidskytexture); speedscale = realtime*8; speedscale -= (int)speedscale & ~127 ; for (fa=s ; fa ; fa=fa->texturechain) EmitSkyPolys (fa); qglEnable (GL_BLEND); GL_Bind (alphaskytexture); speedscale = realtime*16; speedscale -= (int)speedscale & ~127 ; for (fa=s ; fa ; fa=fa->texturechain) EmitSkyPolys (fa); qglDisable (GL_BLEND); } #endif /* ================================================================= Quake 2 environment sky ================================================================= */ #ifdef QUAKE2 #define SKY_TEX 2000 /* ================================================================= PCX Loading ================================================================= */ typedef struct { char manufacturer; char version; char encoding; char bits_per_pixel; unsigned short xmin,ymin,xmax,ymax; unsigned short hres,vres; unsigned char palette[48]; char reserved; char color_planes; unsigned short bytes_per_line; unsigned short palette_type; char filler[58]; unsigned data; // unbounded } pcx_t; byte *pcx_rgb; /* ============ LoadPCX ============ */ void LoadPCX (FILE *f) { pcx_t *pcx, pcxbuf; byte palette[768]; byte *pix; int x, y; int dataByte, runLength; int count; // // parse the PCX file // fread (&pcxbuf, 1, sizeof(pcxbuf), f); pcx = &pcxbuf; if (pcx->manufacturer != 0x0a || pcx->version != 5 || pcx->encoding != 1 || pcx->bits_per_pixel != 8 || pcx->xmax >= 320 || pcx->ymax >= 256) { Con_Printf ("Bad pcx file\n"); return; } // seek to palette fseek (f, -768, SEEK_END); fread (palette, 1, 768, f); fseek (f, sizeof(pcxbuf) - 4, SEEK_SET); count = (pcx->xmax+1) * (pcx->ymax+1); pcx_rgb = malloc( count * 4); for (y=0 ; y<=pcx->ymax ; y++) { pix = pcx_rgb + 4*y*(pcx->xmax+1); for (x=0 ; x<=pcx->ymax ; ) { dataByte = fgetc(f); if((dataByte & 0xC0) == 0xC0) { runLength = dataByte & 0x3F; dataByte = fgetc(f); } else runLength = 1; while(runLength-- > 0) { pix[0] = palette[dataByte*3]; pix[1] = palette[dataByte*3+1]; pix[2] = palette[dataByte*3+2]; pix[3] = 255; pix += 4; x++; } } } } /* ========================================================= TARGA LOADING ========================================================= */ typedef struct _TargaHeader { unsigned char id_length, colormap_type, image_type; unsigned short colormap_index, colormap_length; unsigned char colormap_size; unsigned short x_origin, y_origin, width, height; unsigned char pixel_size, attributes; } TargaHeader; TargaHeader targa_header; byte *targa_rgba; int fgetLittleShort (FILE *f) { byte b1, b2; b1 = fgetc(f); b2 = fgetc(f); return (short)(b1 + b2*256); } int fgetLittleLong (FILE *f) { byte b1, b2, b3, b4; b1 = fgetc(f); b2 = fgetc(f); b3 = fgetc(f); b4 = fgetc(f); return b1 + (b2<<8) + (b3<<16) + (b4<<24); } /* ============= LoadTGA ============= */ void LoadTGA (FILE *fin) { int columns, rows, numPixels; byte *pixbuf; int row, column; targa_header.id_length = fgetc(fin); targa_header.colormap_type = fgetc(fin); targa_header.image_type = fgetc(fin); 
targa_header.colormap_index = fgetLittleShort(fin); targa_header.colormap_length = fgetLittleShort(fin); targa_header.colormap_size = fgetc(fin); targa_header.x_origin = fgetLittleShort(fin); targa_header.y_origin = fgetLittleShort(fin); targa_header.width = fgetLittleShort(fin); targa_header.height = fgetLittleShort(fin); targa_header.pixel_size = fgetc(fin); targa_header.attributes = fgetc(fin); if (targa_header.image_type!=2 && targa_header.image_type!=10) Sys_Error ("LoadTGA: Only type 2 and 10 targa RGB images supported\n"); if (targa_header.colormap_type !=0 || (targa_header.pixel_size!=32 && targa_header.pixel_size!=24)) Sys_Error ("Texture_LoadTGA: Only 32 or 24 bit images supported (no colormaps)\n"); columns = targa_header.width; rows = targa_header.height; numPixels = columns * rows; targa_rgba = malloc (numPixels*4); if (targa_header.id_length != 0) fseek(fin, targa_header.id_length, SEEK_CUR); // skip TARGA image comment if (targa_header.image_type==2) { // Uncompressed, RGB images for(row=rows-1; row>=0; row--) { pixbuf = targa_rgba + row*columns*4; for(column=0; column<columns; column++) { unsigned char red,green,blue,alphabyte; switch (targa_header.pixel_size) { case 24: blue = getc(fin); green = getc(fin); red = getc(fin); *pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = 255; break; case 32: blue = getc(fin); green = getc(fin); red = getc(fin); alphabyte = getc(fin); *pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = alphabyte; break; } } } } else if (targa_header.image_type==10) { // Runlength encoded RGB images unsigned char red,green,blue,alphabyte,packetHeader,packetSize,j; for(row=rows-1; row>=0; row--) { pixbuf = targa_rgba + row*columns*4; for(column=0; column<columns; ) { packetHeader=getc(fin); packetSize = 1 + (packetHeader & 0x7f); if (packetHeader & 0x80) { // run-length packet switch (targa_header.pixel_size) { case 24: blue = getc(fin); green = getc(fin); red = getc(fin); alphabyte = 255; break; case 32: blue = getc(fin); green = getc(fin); red = getc(fin); alphabyte = getc(fin); break; } for(j=0;j<packetSize;j++) { *pixbuf++=red; *pixbuf++=green; *pixbuf++=blue; *pixbuf++=alphabyte; column++; if (column==columns) { // run spans across rows column=0; if (row>0) row--; else goto breakOut; pixbuf = targa_rgba + row*columns*4; } } } else { // non run-length packet for(j=0;j<packetSize;j++) { switch (targa_header.pixel_size) { case 24: blue = getc(fin); green = getc(fin); red = getc(fin); *pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = 255; break; case 32: blue = getc(fin); green = getc(fin); red = getc(fin); alphabyte = getc(fin); *pixbuf++ = red; *pixbuf++ = green; *pixbuf++ = blue; *pixbuf++ = alphabyte; break; } column++; if (column==columns) { // pixel packet run spans across rows column=0; if (row>0) row--; else goto breakOut; pixbuf = targa_rgba + row*columns*4; } } } } breakOut:; } } fclose(fin); } /* ================== R_LoadSkys ================== */ char *suf[6] = {"rt", "bk", "lf", "ft", "up", "dn"}; void R_LoadSkys (void) { int i; FILE *f; char name[64]; for (i=0 ; i<6 ; i++) { GL_Bind (SKY_TEX + i); sprintf (name, "gfx/env/bkgtst%s.tga", suf[i]); COM_FOpenFile (name, &f); if (!f) { Con_Printf ("Couldn't load %s\n", name); continue; } LoadTGA (f); // LoadPCX (f); qglTexImage2D (GL_TEXTURE_2D, 0, gl_solid_format, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, targa_rgba); // qglTexImage2D (GL_TEXTURE_2D, 0, gl_solid_format, 256, 256, 0, GL_RGBA, GL_UNSIGNED_BYTE, pcx_rgb); free (targa_rgba); // free 
(pcx_rgb); qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); } } vec3_t skyclip[6] = { {1,1,0}, {1,-1,0}, {0,-1,1}, {0,1,1}, {1,0,1}, {-1,0,1} }; int c_sky; // 1 = s, 2 = t, 3 = 2048 int st_to_vec[6][3] = { {3,-1,2}, {-3,1,2}, {1,3,2}, {-1,-3,2}, {-2,-1,3}, // 0 degrees yaw, look straight up {2,-1,-3} // look straight down // {-1,2,3}, // {1,2,-3} }; // s = [0]/[2], t = [1]/[2] int vec_to_st[6][3] = { {-2,3,1}, {2,3,-1}, {1,3,2}, {-1,3,-2}, {-2,-1,3}, {-2,1,-3} // {-1,2,3}, // {1,2,-3} }; float skymins[2][6], skymaxs[2][6]; void DrawSkyPolygon (int nump, vec3_t vecs) { int i,j; vec3_t v, av; float s, t, dv; int axis; float *vp; c_sky++; #if 0 glBegin (GL_POLYGON); for (i=0 ; i<nump ; i++, vecs+=3) { VectorAdd(vecs, r_origin, v); qglVertex3fv (v); } glEnd(); return; #endif // decide which face it maps to VectorCopy (vec3_origin, v); for (i=0, vp=vecs ; i<nump ; i++, vp+=3) { VectorAdd (vp, v, v); } av[0] = fabs(v[0]); av[1] = fabs(v[1]); av[2] = fabs(v[2]); if (av[0] > av[1] && av[0] > av[2]) { if (v[0] < 0) axis = 1; else axis = 0; } else if (av[1] > av[2] && av[1] > av[0]) { if (v[1] < 0) axis = 3; else axis = 2; } else { if (v[2] < 0) axis = 5; else axis = 4; } // project new texture coords for (i=0 ; i<nump ; i++, vecs+=3) { j = vec_to_st[axis][2]; if (j > 0) dv = vecs[j - 1]; else dv = -vecs[-j - 1]; j = vec_to_st[axis][0]; if (j < 0) s = -vecs[-j -1] / dv; else s = vecs[j-1] / dv; j = vec_to_st[axis][1]; if (j < 0) t = -vecs[-j -1] / dv; else t = vecs[j-1] / dv; if (s < skymins[0][axis]) skymins[0][axis] = s; if (t < skymins[1][axis]) skymins[1][axis] = t; if (s > skymaxs[0][axis]) skymaxs[0][axis] = s; if (t > skymaxs[1][axis]) skymaxs[1][axis] = t; } } #define MAX_CLIP_VERTS 64 void ClipSkyPolygon (int nump, vec3_t vecs, int stage) { float *norm; float *v; qboolean front, back; float d, e; float dists[MAX_CLIP_VERTS]; int sides[MAX_CLIP_VERTS]; vec3_t newv[2][MAX_CLIP_VERTS]; int newc[2]; int i, j; if (nump > MAX_CLIP_VERTS-2) Sys_Error ("ClipSkyPolygon: MAX_CLIP_VERTS"); if (stage == 6) { // fully clipped, so draw it DrawSkyPolygon (nump, vecs); return; } front = back = false; norm = skyclip[stage]; for (i=0, v = vecs ; i<nump ; i++, v+=3) { d = DotProduct (v, norm); if (d > ON_EPSILON) { front = true; sides[i] = SIDE_FRONT; } else if (d < ON_EPSILON) { back = true; sides[i] = SIDE_BACK; } else sides[i] = SIDE_ON; dists[i] = d; } if (!front || !back) { // not clipped ClipSkyPolygon (nump, vecs, stage+1); return; } // clip it sides[i] = sides[0]; dists[i] = dists[0]; VectorCopy (vecs, (vecs+(i*3)) ); newc[0] = newc[1] = 0; for (i=0, v = vecs ; i<nump ; i++, v+=3) { switch (sides[i]) { case SIDE_FRONT: VectorCopy (v, newv[0][newc[0]]); newc[0]++; break; case SIDE_BACK: VectorCopy (v, newv[1][newc[1]]); newc[1]++; break; case SIDE_ON: VectorCopy (v, newv[0][newc[0]]); newc[0]++; VectorCopy (v, newv[1][newc[1]]); newc[1]++; break; } if (sides[i] == SIDE_ON || sides[i+1] == SIDE_ON || sides[i+1] == sides[i]) continue; d = dists[i] / (dists[i] - dists[i+1]); for (j=0 ; j<3 ; j++) { e = v[j] + d*(v[j+3] - v[j]); newv[0][newc[0]][j] = e; newv[1][newc[1]][j] = e; } newc[0]++; newc[1]++; } // continue ClipSkyPolygon (newc[0], newv[0][0], stage+1); ClipSkyPolygon (newc[1], newv[1][0], stage+1); } /* ================= R_DrawSkyChain ================= */ void R_DrawSkyChain (msurface_t *s) { msurface_t *fa; int i; vec3_t verts[MAX_CLIP_VERTS]; glpoly_t *p; c_sky = 0; GL_Bind(solidskytexture); // calculate vertex 
values for sky box for (fa=s ; fa ; fa=fa->texturechain) { for (p=fa->polys ; p ; p=p->next) { for (i=0 ; i<p->numverts ; i++) { VectorSubtract (p->verts[i], r_origin, verts[i]); } ClipSkyPolygon (p->numverts, verts[0], 0); } } } /* ============== R_ClearSkyBox ============== */ void R_ClearSkyBox (void) { int i; for (i=0 ; i<6 ; i++) { skymins[0][i] = skymins[1][i] = 9999; skymaxs[0][i] = skymaxs[1][i] = -9999; } } void MakeSkyVec (float s, float t, int axis) { vec3_t v, b; int j, k; b[0] = s*2048; b[1] = t*2048; b[2] = 2048; for (j=0 ; j<3 ; j++) { k = st_to_vec[axis][j]; if (k < 0) v[j] = -b[-k - 1]; else v[j] = b[k - 1]; v[j] += r_origin[j]; } // avoid bilerp seam s = (s+1)*0.5; t = (t+1)*0.5; if (s < 1.0/512) s = 1.0/512; else if (s > 511.0/512) s = 511.0/512; if (t < 1.0/512) t = 1.0/512; else if (t > 511.0/512) t = 511.0/512; t = 1.0 - t; qglTexCoord2f (s, t); qglVertex3fv (v); } /* ============== R_DrawSkyBox ============== */ int skytexorder[6] = {0,2,1,3,4,5}; void R_DrawSkyBox (void) { int i, j, k; vec3_t v; float s, t; #if 0 glEnable (GL_BLEND); glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE); glColor4f (1,1,1,0.5); glDisable (GL_DEPTH_TEST); #endif for (i=0 ; i<6 ; i++) { if (skymins[0][i] >= skymaxs[0][i] || skymins[1][i] >= skymaxs[1][i]) continue; GL_Bind (SKY_TEX+skytexorder[i]); #if 0 skymins[0][i] = -1; skymins[1][i] = -1; skymaxs[0][i] = 1; skymaxs[1][i] = 1; #endif qglBegin (GL_QUADS); MakeSkyVec (skymins[0][i], skymins[1][i], i); MakeSkyVec (skymins[0][i], skymaxs[1][i], i); MakeSkyVec (skymaxs[0][i], skymaxs[1][i], i); MakeSkyVec (skymaxs[0][i], skymins[1][i], i); qglEnd (); } #if 0 glDisable (GL_BLEND); glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); glColor4f (1,1,1,0.5); glEnable (GL_DEPTH_TEST); #endif } #endif //=============================================================== /* ============= R_InitSky A sky texture is 256*128, with the right side being a masked overlay ============== */ void R_InitSky (texture_t *mt) { int i, j, p; byte *src; unsigned trans[128*128]; unsigned transpix; int r, g, b; unsigned *rgba; extern int skytexturenum; src = (byte *)mt + mt->offsets[0]; // make an average value for the back to avoid // a fringe on the top level r = g = b = 0; for (i=0 ; i<128 ; i++) for (j=0 ; j<128 ; j++) { p = src[i*256 + j + 128]; rgba = &d_8to24table[p]; trans[(i*128) + j] = *rgba; r += ((byte *)rgba)[0]; g += ((byte *)rgba)[1]; b += ((byte *)rgba)[2]; } ((byte *)&transpix)[0] = r/(128*128); ((byte *)&transpix)[1] = g/(128*128); ((byte *)&transpix)[2] = b/(128*128); ((byte *)&transpix)[3] = 0; if (!solidskytexture) solidskytexture = texture_extension_number++; GL_Bind (solidskytexture ); qglTexImage2D (GL_TEXTURE_2D, 0, gl_solid_format, 128, 128, 0, GL_RGBA, GL_UNSIGNED_BYTE, trans); qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); for (i=0 ; i<128 ; i++) for (j=0 ; j<128 ; j++) { p = src[i*256 + j]; if (p == 0) trans[(i*128) + j] = transpix; else trans[(i*128) + j] = d_8to24table[p]; } if (!alphaskytexture) alphaskytexture = texture_extension_number++; GL_Bind(alphaskytexture); qglTexImage2D (GL_TEXTURE_2D, 0, gl_alpha_format, 128, 128, 0, GL_RGBA, GL_UNSIGNED_BYTE, trans); qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); qglTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); }
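/*
 * Standalone sketch (not part of the engine) of the idea behind the
 * __BJX2__ qgl_fastsqrt() above: reinterpreting a positive IEEE-754 double
 * as an integer and shifting it right by one roughly halves the exponent,
 * which approximates a square root.  Adding 0x1FF8000000000000ULL restores
 * the exponent bias, matching the commented-out form shown there,
 * ((v - 0x3FF0000000000000) >> 1) + 0x3FF0000000000000.  The result is only
 * a rough first guess; the loop compares it against libm's sqrt().
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double approx_sqrt_bits(double x)
{
	uint64_t v;
	memcpy(&v, &x, sizeof(v));             /* reinterpret the double as raw bits */
	v = (v >> 1) + 0x1FF8000000000000ULL;  /* halve exponent, re-bias */
	memcpy(&x, &v, sizeof(x));
	return x;
}

static void demo_fastsqrt(void)
{
	double x;
	for (x = 0.25; x <= 4096.0; x *= 4.0)
		printf("x=%8.2f  sqrt=%9.4f  approx=%9.4f\n",
			x, sqrt(x), approx_sqrt_bits(x));
}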
929491.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "graph_tb.h"
#include "reader_tb.h"
#include "reader.h"
#include "phenotype.h"
#include "ga_tb.h"

int main(int argc, char **argv)
{
	int code = 0;

	printf("Running Testbench\n");

	if (argc < 2) {
		printf("usage: test <argument>\n");
		printf(" If <argument> is 0, print SUCCESS. Otherwise print FAIL.\n");
		exit(1);
	}

	if (!strcmp(argv[1], "GRAPHLINK")) {
		printf("Graphlink test\n");
		if (graph_init_test()) {
			printf("INITFAIL");
			exit(1);
		}
		if ((code = graph_link_test()) == 0) {
			printf("SUCCESS\n");
		} else {
			printf("FAIL %d\n", code);
		}
	}
	else if (!strcmp(argv[1], "GRAPHTRAVERSE")) {
		printf("Graphtraverse test\n");
		graph_t *tsp_graph = malloc(sizeof(graph_t));

		if (open_csv("../assets/european_cities.csv")) {
			printf("INITFAIL");
			exit(1);
		}

		/*
		 * Read Header
		 */
		int len = 0;
		char **entries = line_csv(&len);
		if (len) {
			create_graph_nodes(tsp_graph, entries, len);
		}

		/*
		 * Read Data
		 */
		int i = 0;
		while ((entries = line_csv(&len))) {
			if (len) {
				link_graph_node(tsp_graph, i++, entries, len);
			}
		}

		if ((code = graph_test_traverse(tsp_graph)) == 0) {
			printf("SUCCESS\n");
		} else {
			printf("FAIL %d\n", code);
		}
	}
	else if (!strcmp(argv[1], "CSVREADER")) {
		printf("CSVREADER test\n");
		if ((code = reader_read_test()) == 0) {
			printf("SUCCESS\n");
		} else {
			printf("FAIL %d\n", code);
		}
	}
	else if (!strcmp(argv[1], "PERMUTEGENOME")) {
		int sum = 0;
		genome_t* g = malloc(sizeof(genome_t));
		int tseq[24];
		for (int i = 0; i < 24; i++) {
			tseq[i] = i;
		}
		g->sequence = tseq;
		g->length = 23;

		if ((g->sequence[12] == 12) && (g->sequence[23] == 23)) {
			for (int i = 0; i < 10000; i++) {
				permute(g->sequence, g->length);
			}
			for (int i = 0; i < 24; i++) {
				sum += tseq[i];
			}
			/* If sum matches triangle number of 23 */
			if (sum == (23 * ((23 + 1) / 2))) {
				printf("SUCCESS\n");
			} else {
				printf("FAIL permute\n");
			}
		} else {
			printf("FAIL genome\n");
		}
	}
	else if (!strcmp(argv[1], "GAPMX")) {
		if (!ga_pmx_test()) {
			printf("SUCCESS\n");
		} else {
			printf("FAIL PMX\n");
		}
	}
}
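/*
 * The permute() exercised by the PERMUTEGENOME test above is declared in the
 * GA headers and not shown here.  Its calling convention (sequence pointer
 * plus element count) is inferred from the call site; an in-place
 * Fisher-Yates shuffle with that interface would satisfy the test's
 * invariant that the element sum is preserved.  This is a sketch of that
 * assumption, not the project's implementation.
 */
#include <stdlib.h>

void permute_sketch(int *sequence, int length)
{
	for (int i = length - 1; i > 0; i--) {
		int j = rand() % (i + 1);   /* pick a position in [0, i]; modulo bias ignored for brevity */
		int tmp = sequence[i];      /* swap elements i and j */
		sequence[i] = sequence[j];
		sequence[j] = tmp;
	}
}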
715788.c
/* Copyright 2014. The Regents of the University of California.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2014 Martin Uecker <[email protected]>
 */

#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <complex.h>

#include "num/multind.h"
#include "num/flpmath.h"
#include "num/init.h"

#include "misc/mmio.h"
#include "misc/misc.h"

#ifndef DIMS
#define DIMS 16
#endif

static const char usage_str[] = "<input> <output>";
static const char help_str[] = "Real value.\n";

int main_creal(int argc, char* argv[])
{
	mini_cmdline(&argc, argv, 2, usage_str, help_str);

	num_init();

	long dims[DIMS];

	complex float* in_data = load_cfl(argv[1], DIMS, dims);
	complex float* out_data = create_cfl(argv[2], DIMS, dims);

	md_zreal(DIMS, dims, out_data, in_data);

	unmap_cfl(DIMS, dims, out_data);
	unmap_cfl(DIMS, dims, in_data);

	return 0;
}
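/*
 * md_zreal() is declared in num/flpmath.h (included above) and operates on
 * strided multi-dimensional arrays.  For the contiguous case used by this
 * tool it conceptually reduces to an element-wise real-part copy; the loop
 * below is a simplified sketch of that assumed behaviour, not the library
 * routine itself.
 */
#include <complex.h>

static void zreal_sketch(long n, complex float* dst, const complex float* src)
{
	for (long i = 0; i < n; i++)
		dst[i] = crealf(src[i]);  /* keep the real part, imaginary part becomes zero */
}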
593452.c
/* pngpread.c - read a png file in push mode * * Last changed in libpng 1.2.26 [April 2, 2008] * For conditions of distribution and use, see copyright notice in png.h * Copyright (c) 1998-2008 Glenn Randers-Pehrson * (Version 0.96 Copyright (c) 1996, 1997 Andreas Dilger) * (Version 0.88 Copyright (c) 1995, 1996 Guy Eric Schalnat, Group 42, Inc.) */ #define PNG_INTERNAL #include "png.h" #ifdef PNG_PROGRESSIVE_READ_SUPPORTED /* push model modes */ #define PNG_READ_SIG_MODE 0 #define PNG_READ_CHUNK_MODE 1 #define PNG_READ_IDAT_MODE 2 #define PNG_SKIP_MODE 3 #define PNG_READ_tEXt_MODE 4 #define PNG_READ_zTXt_MODE 5 #define PNG_READ_DONE_MODE 6 #define PNG_READ_iTXt_MODE 7 #define PNG_ERROR_MODE 8 void PNGAPI png_process_data(png_structp png_ptr, png_infop info_ptr, png_bytep buffer, png_size_t buffer_size) { if(png_ptr == NULL || info_ptr == NULL) return; png_push_restore_buffer(png_ptr, buffer, buffer_size); while (png_ptr->buffer_size) { png_process_some_data(png_ptr, info_ptr); } } /* What we do with the incoming data depends on what we were previously * doing before we ran out of data... */ void /* PRIVATE */ png_process_some_data(png_structp png_ptr, png_infop info_ptr) { if(png_ptr == NULL) return; switch (png_ptr->process_mode) { case PNG_READ_SIG_MODE: { png_push_read_sig(png_ptr, info_ptr); break; } case PNG_READ_CHUNK_MODE: { png_push_read_chunk(png_ptr, info_ptr); break; } case PNG_READ_IDAT_MODE: { png_push_read_IDAT(png_ptr); break; } #if defined(PNG_READ_tEXt_SUPPORTED) case PNG_READ_tEXt_MODE: { png_push_read_tEXt(png_ptr, info_ptr); break; } #endif #if defined(PNG_READ_zTXt_SUPPORTED) case PNG_READ_zTXt_MODE: { png_push_read_zTXt(png_ptr, info_ptr); break; } #endif #if defined(PNG_READ_iTXt_SUPPORTED) case PNG_READ_iTXt_MODE: { png_push_read_iTXt(png_ptr, info_ptr); break; } #endif case PNG_SKIP_MODE: { png_push_crc_finish(png_ptr); break; } default: { png_ptr->buffer_size = 0; break; } } } /* Read any remaining signature bytes from the stream and compare them with * the correct PNG signature. It is possible that this routine is called * with bytes already read from the signature, either because they have been * checked by the calling application, or because of multiple calls to this * routine. 
*/ void /* PRIVATE */ png_push_read_sig(png_structp png_ptr, png_infop info_ptr) { png_size_t num_checked = png_ptr->sig_bytes, num_to_check = 8 - num_checked; if (png_ptr->buffer_size < num_to_check) { num_to_check = png_ptr->buffer_size; } png_push_fill_buffer(png_ptr, &(info_ptr->signature[num_checked]), num_to_check); png_ptr->sig_bytes = (png_byte)(png_ptr->sig_bytes+num_to_check); if (png_sig_cmp(info_ptr->signature, num_checked, num_to_check)) { if (num_checked < 4 && png_sig_cmp(info_ptr->signature, num_checked, num_to_check - 4)) png_error(png_ptr, "Not a PNG file"); else png_error(png_ptr, "PNG file corrupted by ASCII conversion"); } else { if (png_ptr->sig_bytes >= 8) { png_ptr->process_mode = PNG_READ_CHUNK_MODE; } } } void /* PRIVATE */ png_push_read_chunk(png_structp png_ptr, png_infop info_ptr) { #ifdef PNG_USE_LOCAL_ARRAYS PNG_CONST PNG_IHDR; PNG_CONST PNG_IDAT; PNG_CONST PNG_IEND; PNG_CONST PNG_PLTE; #if defined(PNG_READ_bKGD_SUPPORTED) PNG_CONST PNG_bKGD; #endif #if defined(PNG_READ_cHRM_SUPPORTED) PNG_CONST PNG_cHRM; #endif #if defined(PNG_READ_gAMA_SUPPORTED) PNG_CONST PNG_gAMA; #endif #if defined(PNG_READ_hIST_SUPPORTED) PNG_CONST PNG_hIST; #endif #if defined(PNG_READ_iCCP_SUPPORTED) PNG_CONST PNG_iCCP; #endif #if defined(PNG_READ_iTXt_SUPPORTED) PNG_CONST PNG_iTXt; #endif #if defined(PNG_READ_oFFs_SUPPORTED) PNG_CONST PNG_oFFs; #endif #if defined(PNG_READ_pCAL_SUPPORTED) PNG_CONST PNG_pCAL; #endif #if defined(PNG_READ_pHYs_SUPPORTED) PNG_CONST PNG_pHYs; #endif #if defined(PNG_READ_sBIT_SUPPORTED) PNG_CONST PNG_sBIT; #endif #if defined(PNG_READ_sCAL_SUPPORTED) PNG_CONST PNG_sCAL; #endif #if defined(PNG_READ_sRGB_SUPPORTED) PNG_CONST PNG_sRGB; #endif #if defined(PNG_READ_sPLT_SUPPORTED) PNG_CONST PNG_sPLT; #endif #if defined(PNG_READ_tEXt_SUPPORTED) PNG_CONST PNG_tEXt; #endif #if defined(PNG_READ_tIME_SUPPORTED) PNG_CONST PNG_tIME; #endif #if defined(PNG_READ_tRNS_SUPPORTED) PNG_CONST PNG_tRNS; #endif #if defined(PNG_READ_zTXt_SUPPORTED) PNG_CONST PNG_zTXt; #endif #endif /* PNG_USE_LOCAL_ARRAYS */ /* First we make sure we have enough data for the 4 byte chunk name * and the 4 byte chunk length before proceeding with decoding the * chunk data. To fully decode each of these chunks, we also make * sure we have enough data in the buffer for the 4 byte CRC at the * end of every chunk (except IDAT, which is handled separately). 
*/ if (!(png_ptr->mode & PNG_HAVE_CHUNK_HEADER)) { png_byte chunk_length[4]; if (png_ptr->buffer_size < 8) { png_push_save_buffer(png_ptr); return; } png_push_fill_buffer(png_ptr, chunk_length, 4); png_ptr->push_length = png_get_uint_31(png_ptr,chunk_length); png_reset_crc(png_ptr); png_crc_read(png_ptr, png_ptr->chunk_name, 4); png_ptr->mode |= PNG_HAVE_CHUNK_HEADER; } if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) if(png_ptr->mode & PNG_AFTER_IDAT) png_ptr->mode |= PNG_HAVE_CHUNK_AFTER_IDAT; if (!png_memcmp(png_ptr->chunk_name, png_IHDR, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { if (png_ptr->push_length != 13) png_error(png_ptr, "Invalid IHDR length"); png_push_save_buffer(png_ptr); return; } png_handle_IHDR(png_ptr, info_ptr, png_ptr->push_length); } else if (!png_memcmp(png_ptr->chunk_name, png_IEND, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_IEND(png_ptr, info_ptr, png_ptr->push_length); png_ptr->process_mode = PNG_READ_DONE_MODE; png_push_have_end(png_ptr, info_ptr); } #ifdef PNG_HANDLE_AS_UNKNOWN_SUPPORTED else if (png_handle_as_unknown(png_ptr, png_ptr->chunk_name)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) png_ptr->mode |= PNG_HAVE_IDAT; png_handle_unknown(png_ptr, info_ptr, png_ptr->push_length); if (!png_memcmp(png_ptr->chunk_name, png_PLTE, 4)) png_ptr->mode |= PNG_HAVE_PLTE; else if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) { if (!(png_ptr->mode & PNG_HAVE_IHDR)) png_error(png_ptr, "Missing IHDR before IDAT"); else if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE && !(png_ptr->mode & PNG_HAVE_PLTE)) png_error(png_ptr, "Missing PLTE before IDAT"); } } #endif else if (!png_memcmp(png_ptr->chunk_name, png_PLTE, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_PLTE(png_ptr, info_ptr, png_ptr->push_length); } else if (!png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) { /* If we reach an IDAT chunk, this means we have read all of the * header chunks, and we can start reading the image (or if this * is called after the image has been read - we have an error). 
*/ if (!(png_ptr->mode & PNG_HAVE_IHDR)) png_error(png_ptr, "Missing IHDR before IDAT"); else if (png_ptr->color_type == PNG_COLOR_TYPE_PALETTE && !(png_ptr->mode & PNG_HAVE_PLTE)) png_error(png_ptr, "Missing PLTE before IDAT"); if (png_ptr->mode & PNG_HAVE_IDAT) { if (!(png_ptr->mode & PNG_HAVE_CHUNK_AFTER_IDAT)) if (png_ptr->push_length == 0) return; if (png_ptr->mode & PNG_AFTER_IDAT) png_error(png_ptr, "Too many IDAT's found"); } png_ptr->idat_size = png_ptr->push_length; png_ptr->mode |= PNG_HAVE_IDAT; png_ptr->process_mode = PNG_READ_IDAT_MODE; png_push_have_info(png_ptr, info_ptr); png_ptr->zstream.avail_out = (uInt)png_ptr->irowbytes; png_ptr->zstream.next_out = png_ptr->row_buf; return; } #if defined(PNG_READ_gAMA_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_gAMA, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_gAMA(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_sBIT_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_sBIT, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_sBIT(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_cHRM_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_cHRM, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_cHRM(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_sRGB_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_sRGB, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_sRGB(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_iCCP_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_iCCP, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_iCCP(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_sPLT_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_sPLT, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_sPLT(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_tRNS_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_tRNS, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_tRNS(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_bKGD_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_bKGD, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_bKGD(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_hIST_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_hIST, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_hIST(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_pHYs_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_pHYs, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_pHYs(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_oFFs_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_oFFs, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_oFFs(png_ptr, info_ptr, png_ptr->push_length); } #endif #if 
defined(PNG_READ_pCAL_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_pCAL, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_pCAL(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_sCAL_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_sCAL, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_sCAL(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_tIME_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_tIME, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_handle_tIME(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_tEXt_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_tEXt, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_push_handle_tEXt(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_zTXt_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_zTXt, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_push_handle_zTXt(png_ptr, info_ptr, png_ptr->push_length); } #endif #if defined(PNG_READ_iTXt_SUPPORTED) else if (!png_memcmp(png_ptr->chunk_name, png_iTXt, 4)) { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_push_handle_iTXt(png_ptr, info_ptr, png_ptr->push_length); } #endif else { if (png_ptr->push_length + 4 > png_ptr->buffer_size) { png_push_save_buffer(png_ptr); return; } png_push_handle_unknown(png_ptr, info_ptr, png_ptr->push_length); } png_ptr->mode &= ~PNG_HAVE_CHUNK_HEADER; } void /* PRIVATE */ png_push_crc_skip(png_structp png_ptr, png_uint_32 skip) { png_ptr->process_mode = PNG_SKIP_MODE; png_ptr->skip_length = skip; } void /* PRIVATE */ png_push_crc_finish(png_structp png_ptr) { if (png_ptr->skip_length && png_ptr->save_buffer_size) { png_size_t save_size; if (png_ptr->skip_length < (png_uint_32)png_ptr->save_buffer_size) save_size = (png_size_t)png_ptr->skip_length; else save_size = png_ptr->save_buffer_size; png_calculate_crc(png_ptr, png_ptr->save_buffer_ptr, save_size); png_ptr->skip_length -= save_size; png_ptr->buffer_size -= save_size; png_ptr->save_buffer_size -= save_size; png_ptr->save_buffer_ptr += save_size; } if (png_ptr->skip_length && png_ptr->current_buffer_size) { png_size_t save_size; if (png_ptr->skip_length < (png_uint_32)png_ptr->current_buffer_size) save_size = (png_size_t)png_ptr->skip_length; else save_size = png_ptr->current_buffer_size; png_calculate_crc(png_ptr, png_ptr->current_buffer_ptr, save_size); png_ptr->skip_length -= save_size; png_ptr->buffer_size -= save_size; png_ptr->current_buffer_size -= save_size; png_ptr->current_buffer_ptr += save_size; } if (!png_ptr->skip_length) { if (png_ptr->buffer_size < 4) { png_push_save_buffer(png_ptr); return; } png_crc_finish(png_ptr, 0); png_ptr->process_mode = PNG_READ_CHUNK_MODE; } } void PNGAPI png_push_fill_buffer(png_structp png_ptr, png_bytep buffer, png_size_t length) { png_bytep ptr; if(png_ptr == NULL) return; ptr = buffer; if (png_ptr->save_buffer_size) { png_size_t save_size; if (length < png_ptr->save_buffer_size) save_size = length; else save_size = png_ptr->save_buffer_size; png_memcpy(ptr, png_ptr->save_buffer_ptr, save_size); length -= save_size; ptr += save_size; png_ptr->buffer_size -= save_size; png_ptr->save_buffer_size -= 
save_size; png_ptr->save_buffer_ptr += save_size; } if (length && png_ptr->current_buffer_size) { png_size_t save_size; if (length < png_ptr->current_buffer_size) save_size = length; else save_size = png_ptr->current_buffer_size; png_memcpy(ptr, png_ptr->current_buffer_ptr, save_size); png_ptr->buffer_size -= save_size; png_ptr->current_buffer_size -= save_size; png_ptr->current_buffer_ptr += save_size; } } void /* PRIVATE */ png_push_save_buffer(png_structp png_ptr) { if (png_ptr->save_buffer_size) { if (png_ptr->save_buffer_ptr != png_ptr->save_buffer) { png_size_t i,istop; png_bytep sp; png_bytep dp; istop = png_ptr->save_buffer_size; for (i = 0, sp = png_ptr->save_buffer_ptr, dp = png_ptr->save_buffer; i < istop; i++, sp++, dp++) { *dp = *sp; } } } if (png_ptr->save_buffer_size + png_ptr->current_buffer_size > png_ptr->save_buffer_max) { png_size_t new_max; png_bytep old_buffer; if (png_ptr->save_buffer_size > PNG_SIZE_MAX - (png_ptr->current_buffer_size + 256)) { png_error(png_ptr, "Potential overflow of save_buffer"); } new_max = png_ptr->save_buffer_size + png_ptr->current_buffer_size + 256; old_buffer = png_ptr->save_buffer; png_ptr->save_buffer = (png_bytep)png_malloc(png_ptr, (png_uint_32)new_max); png_memcpy(png_ptr->save_buffer, old_buffer, png_ptr->save_buffer_size); png_free(png_ptr, old_buffer); png_ptr->save_buffer_max = new_max; } if (png_ptr->current_buffer_size) { png_memcpy(png_ptr->save_buffer + png_ptr->save_buffer_size, png_ptr->current_buffer_ptr, png_ptr->current_buffer_size); png_ptr->save_buffer_size += png_ptr->current_buffer_size; png_ptr->current_buffer_size = 0; } png_ptr->save_buffer_ptr = png_ptr->save_buffer; png_ptr->buffer_size = 0; } void /* PRIVATE */ png_push_restore_buffer(png_structp png_ptr, png_bytep buffer, png_size_t buffer_length) { png_ptr->current_buffer = buffer; png_ptr->current_buffer_size = buffer_length; png_ptr->buffer_size = buffer_length + png_ptr->save_buffer_size; png_ptr->current_buffer_ptr = png_ptr->current_buffer; } void /* PRIVATE */ png_push_read_IDAT(png_structp png_ptr) { #ifdef PNG_USE_LOCAL_ARRAYS PNG_CONST PNG_IDAT; #endif if (!(png_ptr->mode & PNG_HAVE_CHUNK_HEADER)) { png_byte chunk_length[4]; if (png_ptr->buffer_size < 8) { png_push_save_buffer(png_ptr); return; } png_push_fill_buffer(png_ptr, chunk_length, 4); png_ptr->push_length = png_get_uint_31(png_ptr,chunk_length); png_reset_crc(png_ptr); png_crc_read(png_ptr, png_ptr->chunk_name, 4); png_ptr->mode |= PNG_HAVE_CHUNK_HEADER; if (png_memcmp(png_ptr->chunk_name, png_IDAT, 4)) { png_ptr->process_mode = PNG_READ_CHUNK_MODE; if (!(png_ptr->flags & PNG_FLAG_ZLIB_FINISHED)) png_error(png_ptr, "Not enough compressed data"); return; } png_ptr->idat_size = png_ptr->push_length; } if (png_ptr->idat_size && png_ptr->save_buffer_size) { png_size_t save_size; if (png_ptr->idat_size < (png_uint_32)png_ptr->save_buffer_size) { save_size = (png_size_t)png_ptr->idat_size; /* check for overflow */ if((png_uint_32)save_size != png_ptr->idat_size) png_error(png_ptr, "save_size overflowed in pngpread"); } else save_size = png_ptr->save_buffer_size; png_calculate_crc(png_ptr, png_ptr->save_buffer_ptr, save_size); if (!(png_ptr->flags & PNG_FLAG_ZLIB_FINISHED)) png_process_IDAT_data(png_ptr, png_ptr->save_buffer_ptr, save_size); png_ptr->idat_size -= save_size; png_ptr->buffer_size -= save_size; png_ptr->save_buffer_size -= save_size; png_ptr->save_buffer_ptr += save_size; } if (png_ptr->idat_size && png_ptr->current_buffer_size) { png_size_t save_size; if (png_ptr->idat_size < 
(png_uint_32)png_ptr->current_buffer_size) { save_size = (png_size_t)png_ptr->idat_size; /* check for overflow */ if((png_uint_32)save_size != png_ptr->idat_size) png_error(png_ptr, "save_size overflowed in pngpread"); } else save_size = png_ptr->current_buffer_size; png_calculate_crc(png_ptr, png_ptr->current_buffer_ptr, save_size); if (!(png_ptr->flags & PNG_FLAG_ZLIB_FINISHED)) png_process_IDAT_data(png_ptr, png_ptr->current_buffer_ptr, save_size); png_ptr->idat_size -= save_size; png_ptr->buffer_size -= save_size; png_ptr->current_buffer_size -= save_size; png_ptr->current_buffer_ptr += save_size; } if (!png_ptr->idat_size) { if (png_ptr->buffer_size < 4) { png_push_save_buffer(png_ptr); return; } png_crc_finish(png_ptr, 0); png_ptr->mode &= ~PNG_HAVE_CHUNK_HEADER; png_ptr->mode |= PNG_AFTER_IDAT; } } void /* PRIVATE */ png_process_IDAT_data(png_structp png_ptr, png_bytep buffer, png_size_t buffer_length) { int ret; if ((png_ptr->flags & PNG_FLAG_ZLIB_FINISHED) && buffer_length) png_error(png_ptr, "Extra compression data"); png_ptr->zstream.next_in = buffer; png_ptr->zstream.avail_in = (uInt)buffer_length; for(;;) { ret = inflate(&png_ptr->zstream, Z_PARTIAL_FLUSH); if (ret != Z_OK) { if (ret == Z_STREAM_END) { if (png_ptr->zstream.avail_in) png_error(png_ptr, "Extra compressed data"); if (!(png_ptr->zstream.avail_out)) { png_push_process_row(png_ptr); } png_ptr->mode |= PNG_AFTER_IDAT; png_ptr->flags |= PNG_FLAG_ZLIB_FINISHED; break; } else if (ret == Z_BUF_ERROR) break; else png_error(png_ptr, "Decompression Error"); } if (!(png_ptr->zstream.avail_out)) { if (( #if defined(PNG_READ_INTERLACING_SUPPORTED) png_ptr->interlaced && png_ptr->pass > 6) || (!png_ptr->interlaced && #endif png_ptr->row_number == png_ptr->num_rows)) { if (png_ptr->zstream.avail_in) png_warning(png_ptr, "Too much data in IDAT chunks"); png_ptr->flags |= PNG_FLAG_ZLIB_FINISHED; break; } png_push_process_row(png_ptr); png_ptr->zstream.avail_out = (uInt)png_ptr->irowbytes; png_ptr->zstream.next_out = png_ptr->row_buf; } else break; } } void /* PRIVATE */ png_push_process_row(png_structp png_ptr) { png_ptr->row_info.color_type = png_ptr->color_type; png_ptr->row_info.width = png_ptr->iwidth; png_ptr->row_info.channels = png_ptr->channels; png_ptr->row_info.bit_depth = png_ptr->bit_depth; png_ptr->row_info.pixel_depth = png_ptr->pixel_depth; png_ptr->row_info.rowbytes = PNG_ROWBYTES(png_ptr->row_info.pixel_depth, png_ptr->row_info.width); png_read_filter_row(png_ptr, &(png_ptr->row_info), png_ptr->row_buf + 1, png_ptr->prev_row + 1, (int)(png_ptr->row_buf[0])); png_memcpy_check(png_ptr, png_ptr->prev_row, png_ptr->row_buf, png_ptr->rowbytes + 1); if (png_ptr->transformations || (png_ptr->flags&PNG_FLAG_STRIP_ALPHA)) png_do_read_transformations(png_ptr); #if defined(PNG_READ_INTERLACING_SUPPORTED) /* blow up interlaced rows to full size */ if (png_ptr->interlaced && (png_ptr->transformations & PNG_INTERLACE)) { if (png_ptr->pass < 6) /* old interface (pre-1.0.9): png_do_read_interlace(&(png_ptr->row_info), png_ptr->row_buf + 1, png_ptr->pass, png_ptr->transformations); */ png_do_read_interlace(png_ptr); switch (png_ptr->pass) { case 0: { int i; for (i = 0; i < 8 && png_ptr->pass == 0; i++) { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); /* updates png_ptr->pass */ } if (png_ptr->pass == 2) /* pass 1 might be empty */ { for (i = 0; i < 4 && png_ptr->pass == 2; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } } if (png_ptr->pass == 4 && 
png_ptr->height <= 4) { for (i = 0; i < 2 && png_ptr->pass == 4; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } } if (png_ptr->pass == 6 && png_ptr->height <= 4) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } break; } case 1: { int i; for (i = 0; i < 8 && png_ptr->pass == 1; i++) { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); } if (png_ptr->pass == 2) /* skip top 4 generated rows */ { for (i = 0; i < 4 && png_ptr->pass == 2; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } } break; } case 2: { int i; for (i = 0; i < 4 && png_ptr->pass == 2; i++) { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); } for (i = 0; i < 4 && png_ptr->pass == 2; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } if (png_ptr->pass == 4) /* pass 3 might be empty */ { for (i = 0; i < 2 && png_ptr->pass == 4; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } } break; } case 3: { int i; for (i = 0; i < 4 && png_ptr->pass == 3; i++) { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); } if (png_ptr->pass == 4) /* skip top two generated rows */ { for (i = 0; i < 2 && png_ptr->pass == 4; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } } break; } case 4: { int i; for (i = 0; i < 2 && png_ptr->pass == 4; i++) { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); } for (i = 0; i < 2 && png_ptr->pass == 4; i++) { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } if (png_ptr->pass == 6) /* pass 5 might be empty */ { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } break; } case 5: { int i; for (i = 0; i < 2 && png_ptr->pass == 5; i++) { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); } if (png_ptr->pass == 6) /* skip top generated row */ { png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } break; } case 6: { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); if (png_ptr->pass != 6) break; png_push_have_row(png_ptr, png_bytep_NULL); png_read_push_finish_row(png_ptr); } } } else #endif { png_push_have_row(png_ptr, png_ptr->row_buf + 1); png_read_push_finish_row(png_ptr); } } void /* PRIVATE */ png_read_push_finish_row(png_structp png_ptr) { #ifdef PNG_USE_LOCAL_ARRAYS /* arrays to facilitate easy interlacing - use pass (0 - 6) as index */ /* start of interlace block */ PNG_CONST int FARDATA png_pass_start[] = {0, 4, 0, 2, 0, 1, 0}; /* offset to next interlace block */ PNG_CONST int FARDATA png_pass_inc[] = {8, 8, 4, 4, 2, 2, 1}; /* start of interlace block in the y direction */ PNG_CONST int FARDATA png_pass_ystart[] = {0, 0, 4, 0, 2, 0, 1}; /* offset to next interlace block in the y direction */ PNG_CONST int FARDATA png_pass_yinc[] = {8, 8, 8, 4, 4, 2, 2}; /* Height of interlace block. 
This is not currently used - if you need * it, uncomment it here and in png.h PNG_CONST int FARDATA png_pass_height[] = {8, 8, 4, 4, 2, 2, 1}; */ #endif png_ptr->row_number++; if (png_ptr->row_number < png_ptr->num_rows) return; if (png_ptr->interlaced) { png_ptr->row_number = 0; png_memset_check(png_ptr, png_ptr->prev_row, 0, png_ptr->rowbytes + 1); do { int pass; pass = png_ptr->pass; pass++; if ((pass == 1 && png_ptr->width < 5) || (pass == 3 && png_ptr->width < 3) || (pass == 5 && png_ptr->width < 2)) pass++; if (pass > 7) pass--; png_ptr->pass = (png_byte) pass; if (pass < 7) { png_ptr->iwidth = (png_ptr->width + png_pass_inc[pass] - 1 - png_pass_start[pass]) / png_pass_inc[pass]; png_ptr->irowbytes = PNG_ROWBYTES(png_ptr->pixel_depth, png_ptr->iwidth) + 1; if (png_ptr->transformations & PNG_INTERLACE) break; png_ptr->num_rows = (png_ptr->height + png_pass_yinc[pass] - 1 - png_pass_ystart[pass]) / png_pass_yinc[pass]; } else break; } while (png_ptr->iwidth == 0 || png_ptr->num_rows == 0); } } #if defined(PNG_READ_tEXt_SUPPORTED) void /* PRIVATE */ png_push_handle_tEXt(png_structp png_ptr, png_infop info_ptr, png_uint_32 length) { if (!(png_ptr->mode & PNG_HAVE_IHDR) || (png_ptr->mode & PNG_HAVE_IEND)) { png_error(png_ptr, "Out of place tEXt"); info_ptr = info_ptr; /* to quiet some compiler warnings */ } #ifdef PNG_MAX_MALLOC_64K png_ptr->skip_length = 0; /* This may not be necessary */ if (length > (png_uint_32)65535L) /* Can't hold entire string in memory */ { png_warning(png_ptr, "tEXt chunk too large to fit in memory"); png_ptr->skip_length = length - (png_uint_32)65535L; length = (png_uint_32)65535L; } #endif png_ptr->current_text = (png_charp)png_malloc(png_ptr, (png_uint_32)(length+1)); png_ptr->current_text[length] = '\0'; png_ptr->current_text_ptr = png_ptr->current_text; png_ptr->current_text_size = (png_size_t)length; png_ptr->current_text_left = (png_size_t)length; png_ptr->process_mode = PNG_READ_tEXt_MODE; } void /* PRIVATE */ png_push_read_tEXt(png_structp png_ptr, png_infop info_ptr) { if (png_ptr->buffer_size && png_ptr->current_text_left) { png_size_t text_size; if (png_ptr->buffer_size < png_ptr->current_text_left) text_size = png_ptr->buffer_size; else text_size = png_ptr->current_text_left; png_crc_read(png_ptr, (png_bytep)png_ptr->current_text_ptr, text_size); png_ptr->current_text_left -= text_size; png_ptr->current_text_ptr += text_size; } if (!(png_ptr->current_text_left)) { png_textp text_ptr; png_charp text; png_charp key; int ret; if (png_ptr->buffer_size < 4) { png_push_save_buffer(png_ptr); return; } png_push_crc_finish(png_ptr); #if defined(PNG_MAX_MALLOC_64K) if (png_ptr->skip_length) return; #endif key = png_ptr->current_text; for (text = key; *text; text++) /* empty loop */ ; if (text < key + png_ptr->current_text_size) text++; text_ptr = (png_textp)png_malloc(png_ptr, (png_uint_32)png_sizeof(png_text)); text_ptr->compression = PNG_TEXT_COMPRESSION_NONE; text_ptr->key = key; #ifdef PNG_iTXt_SUPPORTED text_ptr->lang = NULL; text_ptr->lang_key = NULL; #endif text_ptr->text = text; ret = png_set_text_2(png_ptr, info_ptr, text_ptr, 1); png_free(png_ptr, key); png_free(png_ptr, text_ptr); png_ptr->current_text = NULL; if (ret) png_warning(png_ptr, "Insufficient memory to store text chunk."); } } #endif #if defined(PNG_READ_zTXt_SUPPORTED) void /* PRIVATE */ png_push_handle_zTXt(png_structp png_ptr, png_infop info_ptr, png_uint_32 length) { if (!(png_ptr->mode & PNG_HAVE_IHDR) || (png_ptr->mode & PNG_HAVE_IEND)) { png_error(png_ptr, "Out of place zTXt"); 
info_ptr = info_ptr; /* to quiet some compiler warnings */ } #ifdef PNG_MAX_MALLOC_64K /* We can't handle zTXt chunks > 64K, since we don't have enough space * to be able to store the uncompressed data. Actually, the threshold * is probably around 32K, but it isn't as definite as 64K is. */ if (length > (png_uint_32)65535L) { png_warning(png_ptr, "zTXt chunk too large to fit in memory"); png_push_crc_skip(png_ptr, length); return; } #endif png_ptr->current_text = (png_charp)png_malloc(png_ptr, (png_uint_32)(length+1)); png_ptr->current_text[length] = '\0'; png_ptr->current_text_ptr = png_ptr->current_text; png_ptr->current_text_size = (png_size_t)length; png_ptr->current_text_left = (png_size_t)length; png_ptr->process_mode = PNG_READ_zTXt_MODE; } void /* PRIVATE */ png_push_read_zTXt(png_structp png_ptr, png_infop info_ptr) { if (png_ptr->buffer_size && png_ptr->current_text_left) { png_size_t text_size; if (png_ptr->buffer_size < (png_uint_32)png_ptr->current_text_left) text_size = png_ptr->buffer_size; else text_size = png_ptr->current_text_left; png_crc_read(png_ptr, (png_bytep)png_ptr->current_text_ptr, text_size); png_ptr->current_text_left -= text_size; png_ptr->current_text_ptr += text_size; } if (!(png_ptr->current_text_left)) { png_textp text_ptr; png_charp text; png_charp key; int ret; png_size_t text_size, key_size; if (png_ptr->buffer_size < 4) { png_push_save_buffer(png_ptr); return; } png_push_crc_finish(png_ptr); key = png_ptr->current_text; for (text = key; *text; text++) /* empty loop */ ; /* zTXt can't have zero text */ if (text >= key + png_ptr->current_text_size) { png_ptr->current_text = NULL; png_free(png_ptr, key); return; } text++; if (*text != PNG_TEXT_COMPRESSION_zTXt) /* check compression byte */ { png_ptr->current_text = NULL; png_free(png_ptr, key); return; } text++; png_ptr->zstream.next_in = (png_bytep )text; png_ptr->zstream.avail_in = (uInt)(png_ptr->current_text_size - (text - key)); png_ptr->zstream.next_out = png_ptr->zbuf; png_ptr->zstream.avail_out = (uInt)png_ptr->zbuf_size; key_size = text - key; text_size = 0; text = NULL; ret = Z_STREAM_END; while (png_ptr->zstream.avail_in) { ret = inflate(&png_ptr->zstream, Z_PARTIAL_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) { inflateReset(&png_ptr->zstream); png_ptr->zstream.avail_in = 0; png_ptr->current_text = NULL; png_free(png_ptr, key); png_free(png_ptr, text); return; } if (!(png_ptr->zstream.avail_out) || ret == Z_STREAM_END) { if (text == NULL) { text = (png_charp)png_malloc(png_ptr, (png_uint_32)(png_ptr->zbuf_size - png_ptr->zstream.avail_out + key_size + 1)); png_memcpy(text + key_size, png_ptr->zbuf, png_ptr->zbuf_size - png_ptr->zstream.avail_out); png_memcpy(text, key, key_size); text_size = key_size + png_ptr->zbuf_size - png_ptr->zstream.avail_out; *(text + text_size) = '\0'; } else { png_charp tmp; tmp = text; text = (png_charp)png_malloc(png_ptr, text_size + (png_uint_32)(png_ptr->zbuf_size - png_ptr->zstream.avail_out + 1)); png_memcpy(text, tmp, text_size); png_free(png_ptr, tmp); png_memcpy(text + text_size, png_ptr->zbuf, png_ptr->zbuf_size - png_ptr->zstream.avail_out); text_size += png_ptr->zbuf_size - png_ptr->zstream.avail_out; *(text + text_size) = '\0'; } if (ret != Z_STREAM_END) { png_ptr->zstream.next_out = png_ptr->zbuf; png_ptr->zstream.avail_out = (uInt)png_ptr->zbuf_size; } } else { break; } if (ret == Z_STREAM_END) break; } inflateReset(&png_ptr->zstream); png_ptr->zstream.avail_in = 0; if (ret != Z_STREAM_END) { png_ptr->current_text = NULL; png_free(png_ptr, key); 
png_free(png_ptr, text); return; } png_ptr->current_text = NULL; png_free(png_ptr, key); key = text; text += key_size; text_ptr = (png_textp)png_malloc(png_ptr, (png_uint_32)png_sizeof(png_text)); text_ptr->compression = PNG_TEXT_COMPRESSION_zTXt; text_ptr->key = key; #ifdef PNG_iTXt_SUPPORTED text_ptr->lang = NULL; text_ptr->lang_key = NULL; #endif text_ptr->text = text; ret = png_set_text_2(png_ptr, info_ptr, text_ptr, 1); png_free(png_ptr, key); png_free(png_ptr, text_ptr); if (ret) png_warning(png_ptr, "Insufficient memory to store text chunk."); } } #endif #if defined(PNG_READ_iTXt_SUPPORTED) void /* PRIVATE */ png_push_handle_iTXt(png_structp png_ptr, png_infop info_ptr, png_uint_32 length) { if (!(png_ptr->mode & PNG_HAVE_IHDR) || (png_ptr->mode & PNG_HAVE_IEND)) { png_error(png_ptr, "Out of place iTXt"); info_ptr = info_ptr; /* to quiet some compiler warnings */ } #ifdef PNG_MAX_MALLOC_64K png_ptr->skip_length = 0; /* This may not be necessary */ if (length > (png_uint_32)65535L) /* Can't hold entire string in memory */ { png_warning(png_ptr, "iTXt chunk too large to fit in memory"); png_ptr->skip_length = length - (png_uint_32)65535L; length = (png_uint_32)65535L; } #endif png_ptr->current_text = (png_charp)png_malloc(png_ptr, (png_uint_32)(length+1)); png_ptr->current_text[length] = '\0'; png_ptr->current_text_ptr = png_ptr->current_text; png_ptr->current_text_size = (png_size_t)length; png_ptr->current_text_left = (png_size_t)length; png_ptr->process_mode = PNG_READ_iTXt_MODE; } void /* PRIVATE */ png_push_read_iTXt(png_structp png_ptr, png_infop info_ptr) { if (png_ptr->buffer_size && png_ptr->current_text_left) { png_size_t text_size; if (png_ptr->buffer_size < png_ptr->current_text_left) text_size = png_ptr->buffer_size; else text_size = png_ptr->current_text_left; png_crc_read(png_ptr, (png_bytep)png_ptr->current_text_ptr, text_size); png_ptr->current_text_left -= text_size; png_ptr->current_text_ptr += text_size; } if (!(png_ptr->current_text_left)) { png_textp text_ptr; png_charp key; int comp_flag; png_charp lang; png_charp lang_key; png_charp text; int ret; if (png_ptr->buffer_size < 4) { png_push_save_buffer(png_ptr); return; } png_push_crc_finish(png_ptr); #if defined(PNG_MAX_MALLOC_64K) if (png_ptr->skip_length) return; #endif key = png_ptr->current_text; for (lang = key; *lang; lang++) /* empty loop */ ; if (lang < key + png_ptr->current_text_size - 3) lang++; comp_flag = *lang++; lang++; /* skip comp_type, always zero */ for (lang_key = lang; *lang_key; lang_key++) /* empty loop */ ; lang_key++; /* skip NUL separator */ text=lang_key; if (lang_key < key + png_ptr->current_text_size - 1) { for (; *text; text++) /* empty loop */ ; } if (text < key + png_ptr->current_text_size) text++; text_ptr = (png_textp)png_malloc(png_ptr, (png_uint_32)png_sizeof(png_text)); text_ptr->compression = comp_flag + 2; text_ptr->key = key; text_ptr->lang = lang; text_ptr->lang_key = lang_key; text_ptr->text = text; text_ptr->text_length = 0; text_ptr->itxt_length = png_strlen(text); ret = png_set_text_2(png_ptr, info_ptr, text_ptr, 1); png_ptr->current_text = NULL; png_free(png_ptr, text_ptr); if (ret) png_warning(png_ptr, "Insufficient memory to store iTXt chunk."); } } #endif /* This function is called when we haven't found a handler for this * chunk. If there isn't a problem with the chunk itself (ie a bad chunk * name or a critical chunk), the chunk is (currently) silently ignored. 
*/ void /* PRIVATE */ png_push_handle_unknown(png_structp png_ptr, png_infop info_ptr, png_uint_32 length) { png_uint_32 skip=0; png_check_chunk_name(png_ptr, png_ptr->chunk_name); if (!(png_ptr->chunk_name[0] & 0x20)) { #if defined(PNG_READ_UNKNOWN_CHUNKS_SUPPORTED) if(png_handle_as_unknown(png_ptr, png_ptr->chunk_name) != PNG_HANDLE_CHUNK_ALWAYS #if defined(PNG_READ_USER_CHUNKS_SUPPORTED) && png_ptr->read_user_chunk_fn == NULL #endif ) #endif png_chunk_error(png_ptr, "unknown critical chunk"); info_ptr = info_ptr; /* to quiet some compiler warnings */ } #if defined(PNG_READ_UNKNOWN_CHUNKS_SUPPORTED) if (png_ptr->flags & PNG_FLAG_KEEP_UNKNOWN_CHUNKS) { #ifdef PNG_MAX_MALLOC_64K if (length > (png_uint_32)65535L) { png_warning(png_ptr, "unknown chunk too large to fit in memory"); skip = length - (png_uint_32)65535L; length = (png_uint_32)65535L; } #endif png_memcpy((png_charp)png_ptr->unknown_chunk.name, (png_charp)png_ptr->chunk_name, png_sizeof(png_ptr->unknown_chunk.name)); png_ptr->unknown_chunk.name[png_sizeof(png_ptr->unknown_chunk.name)-1]='\0'; png_ptr->unknown_chunk.data = (png_bytep)png_malloc(png_ptr, length); png_ptr->unknown_chunk.size = (png_size_t)length; png_crc_read(png_ptr, (png_bytep)png_ptr->unknown_chunk.data, length); #if defined(PNG_READ_USER_CHUNKS_SUPPORTED) if(png_ptr->read_user_chunk_fn != NULL) { /* callback to user unknown chunk handler */ int ret; ret = (*(png_ptr->read_user_chunk_fn)) (png_ptr, &png_ptr->unknown_chunk); if (ret < 0) png_chunk_error(png_ptr, "error in user chunk"); if (ret == 0) { if (!(png_ptr->chunk_name[0] & 0x20)) if(png_handle_as_unknown(png_ptr, png_ptr->chunk_name) != PNG_HANDLE_CHUNK_ALWAYS) png_chunk_error(png_ptr, "unknown critical chunk"); png_set_unknown_chunks(png_ptr, info_ptr, &png_ptr->unknown_chunk, 1); } } else #endif png_set_unknown_chunks(png_ptr, info_ptr, &png_ptr->unknown_chunk, 1); png_free(png_ptr, png_ptr->unknown_chunk.data); png_ptr->unknown_chunk.data = NULL; } else #endif skip=length; png_push_crc_skip(png_ptr, skip); } void /* PRIVATE */ png_push_have_info(png_structp png_ptr, png_infop info_ptr) { if (png_ptr->info_fn != NULL) (*(png_ptr->info_fn))(png_ptr, info_ptr); } void /* PRIVATE */ png_push_have_end(png_structp png_ptr, png_infop info_ptr) { if (png_ptr->end_fn != NULL) (*(png_ptr->end_fn))(png_ptr, info_ptr); } void /* PRIVATE */ png_push_have_row(png_structp png_ptr, png_bytep row) { if (png_ptr->row_fn != NULL) (*(png_ptr->row_fn))(png_ptr, row, png_ptr->row_number, (int)png_ptr->pass); } void PNGAPI png_progressive_combine_row (png_structp png_ptr, png_bytep old_row, png_bytep new_row) { #ifdef PNG_USE_LOCAL_ARRAYS PNG_CONST int FARDATA png_pass_dsp_mask[7] = {0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff}; #endif if(png_ptr == NULL) return; if (new_row != NULL) /* new_row must == png_ptr->row_buf here. */ png_combine_row(png_ptr, old_row, png_pass_dsp_mask[png_ptr->pass]); } void PNGAPI png_set_progressive_read_fn(png_structp png_ptr, png_voidp progressive_ptr, png_progressive_info_ptr info_fn, png_progressive_row_ptr row_fn, png_progressive_end_ptr end_fn) { if(png_ptr == NULL) return; png_ptr->info_fn = info_fn; png_ptr->row_fn = row_fn; png_ptr->end_fn = end_fn; png_set_read_fn(png_ptr, progressive_ptr, png_push_fill_buffer); } png_voidp PNGAPI png_get_progressive_ptr(png_structp png_ptr) { if(png_ptr == NULL) return (NULL); return png_ptr->io_ptr; } #endif /* PNG_PROGRESSIVE_READ_SUPPORTED */
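/*
 * Editor's note -- illustrative sketch, not part of pngpread.c:
 * the progressive (push) API exported above (png_set_progressive_read_fn,
 * png_get_progressive_ptr, png_progressive_combine_row) is easiest to see
 * from the caller's side.  The sketch below shows how an application might
 * drive it; all my_* / init_push_reader / feed_png_bytes names are invented
 * for illustration, only the png_* calls are real libpng API.
 */
#include <setjmp.h>
#include <png.h>

typedef struct my_state
{
   png_structp png_ptr;
   png_infop info_ptr;
} my_state;

static void
my_info_cb(png_structp png_ptr, png_infop info)
{
   /* All header chunks have arrived: request transformations here, then
    * let libpng recompute the row layout. */
   png_set_expand(png_ptr);
   png_read_update_info(png_ptr, info);
}

static void
my_row_cb(png_structp png_ptr, png_bytep new_row, png_uint_32 row_num, int pass)
{
   my_state *state = (my_state *)png_get_progressive_ptr(png_ptr);

   /* For interlaced images the application keeps its own copy of each row
    * and merges the sparse update into it, e.g.:
    *    png_progressive_combine_row(png_ptr, my_old_row, new_row);
    * Row storage is application-specific and omitted here. */
   (void)state; (void)new_row; (void)row_num; (void)pass;
}

static void
my_end_cb(png_structp png_ptr, png_infop info)
{
   /* The whole image, including the text chunks handled above, is done. */
   (void)png_ptr; (void)info;
}

static int
init_push_reader(my_state *state)
{
   state->png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING,
      NULL, NULL, NULL);
   if (state->png_ptr == NULL)
      return -1;
   state->info_ptr = png_create_info_struct(state->png_ptr);
   if (state->info_ptr == NULL)
   {
      png_destroy_read_struct(&state->png_ptr, NULL, NULL);
      return -1;
   }
   /* Registers the three callbacks and installs png_push_fill_buffer as the
    * read function, as done by png_set_progressive_read_fn above. */
   png_set_progressive_read_fn(state->png_ptr, state,
      my_info_cb, my_row_cb, my_end_cb);
   return 0;
}

/* Feed one buffer of bytes (from a socket, file, etc.) into the push engine;
 * call this repeatedly until my_end_cb fires. */
static int
feed_png_bytes(my_state *state, png_bytep buf, png_size_t len)
{
   if (setjmp(png_jmpbuf(state->png_ptr)))
      return -1;   /* libpng longjmp'd back with an error */
   png_process_data(state->png_ptr, state->info_ptr, buf, len);
   return 0;
}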
928719.c
/** \file policy_video.c
 * \brief Policy for video streaming
 *
 * \copyright Copyright 2013-2019 Philipp Tiesel, Theresa Enghardt, and Mirko Palmer.
 * All rights reserved. This project is released under the New BSD License.
 *
 * This policy is optimist by default (i.e., it is the "Optimist Policy" unless specified otherwise in the config file).
 * To get the Pessimist Policy, add the option "set pessimist = 1" to the configuration file (or just use pessimist_policy.conf where this is already done).
 *
 * Intents: Category -- QUERY for small files, like manifest files or initial segments (that just contain information about the segments, no actual video data)
 *                      BULK for large files, like video segments
 *                      CONTROLTRAFFIC for "fake audio" (generated by modified wget)
 *
 * Policy_info: Data structure for each prefix (roughly, each access network)
 *              In this policy: Whether this prefix is default,
 *              Whether this prefix has connections available for reuse
 *              Predicted load time estimates for each time scale
 *              Count of how often this prefix was NOT picked for video segment
 *
 * Behavior:
 * Resolve Request - (Not supported)
 * Connect - (Not supported)
 * Socketconnect - Choose network based on capacity estimates and optimism or pessimism
 * Socketchoose - Choose network based on capacity estimates and optimism or pessimism, reuse connections if possible
 *
 * Pseudocode of basic decision-making algorithm -- see details on optimist and pessimist below

	If (category = query) {
		Return(network with shortest minrtt)
	}
	Else If (category = controltraffic) {
		Return(network that was not recently used for video segment (i.e., with highest "count"))
	}
	Else If (category = bulk) {
		rsize = bitrate * segmentduration
		ForEach(network) {
			shorttermestimate = predictloadtime(minrtt, shorttermcapacity, reuse, usetls, rsize) // worst case load time based on shorttermcapacity of last 1 second
			midtermestimate = predictloadtime(minrtt, midtermcapacity, reuse, usetls, rsize) // expected load time based on midtermcapacity of last 10 seconds
			longtermestimate = predictloadtime(minrtt, longtermcapacity, reuse, usetls, rsize) // long term estimate based on longtermcapacity of last 1 minute
			verylongtermestimate = predictloadtime(minrtt, verylongtermcapacity, reuse, usetls, rsize) // best case based on verylongtermcapacity of last 10 minutes
		}
		candidate = network with shortest midtermestimate
		If (Optimist Policy) {
			Return(optimistalternative(candidate, segmentduration, bufferlevel, candidatelongtermestimate, candidateverylongtermestimate))
		}
		Else If (Pessimist Policy) {
			Return(pessimistalternative(candidate, segmentduration, bufferlevel, candidateshorttermestimate, candidatemidtermestimate, candidatelongtermestimate))
		}
	}
 *
 * --- Optimist Policy ---
 *
	optimistalternative(candidate, segmentduration, bufferlevel, longtermestimate, verylongtermestimate){
		alternative = network with shortest verylongtermestimate // Compare to candidate with shortest midtermestimate
		If (bufferlevel = 0 and alternative not used for last segment) {
			Return alternative // Playout not started yet -- safe
		}
		If (alternative not used for last 3 segments){
			If (alternativelongtermestimate < (2/3) * bufferlevel) {
				Return alternative // Safe
			}
			Else If (candidatelongtermestimate > (2/3) * bufferlevel and alternativelongtermestimate < candidatelongtermestimate) {
				Return alternative // Not safe, but better
			}
			Else If(alternative not used last 10 segments) {
				Return alternative // Not used recently, try it
			}
		}
		Return(candidate) // If we have not switched
 *
 * --- Pessimist Policy ---
 *
pessimistalternative(candidate, segmentduration, bufferlevel, shorttermestimate, midtermestimate, longtermestimate){ // candidate is network with shortest midtermestimate worstcaseloadtime = candidateshorttermestimate // If not available, use candidatemidtermestimate If (worstcaseloadtime > bufferlevel or segmentduration) { // Be concerned alternative = Network with shortest shorttermestimate If (alternativeshorttermestimate < bufferlevel) { Return (alternative) // safer to use } Else If (candidate used for last segment and worstcaseloadtime > (4/3) * bufferlevel) { // worstcaseloadtime likely accurate: Use alternative if faster for either estimate} If (alternativeshorttermestimate < candidateshorttermestimate) { Return (alternative) } Else If (alternativelongtermestimate < candidatelongtermestimate) { Return (alternative) } } Return (candidate) // We are not concerned or found no alternative } */ #include "policy.h" #include "policy_util.h" #define LATENCY_ESTIMATE "srtt_minimum_recent" #define SHORTTERM_MAX_CAPACITY_ESTIMATE "download_sma_max" #define MIN_CAPACITY_ESTIMATE "download_sma_nonzero_10q" #define LONGTERM_MAX_CAPACITY_ESTIMATE "download_sma_max_long" #define LONGTERM_MIN_CAPACITY_ESTIMATE "download_sma_nonzero_10q_long" #define MIDTERM_MAX_CAPACITY_ESTIMATE "download_sma_max_mid" #define MIDTERM_MIN_CAPACITY_ESTIMATE "download_sma_nonzero_10q_mid" #define LONGLONGTERM_MAX_CAPACITY_ESTIMATE "download_sma_max_longlong" // Segment duration in seconds -- needed to estimate file size from bitrate #define SEGMENT_DURATION 4 /** Policy-specific per-prefix data structure that contains additional information */ struct video_info { int is_default; int reuse; double predicted_time_longlongterm; double predicted_time_longterm; double predicted_time_midterm; double predicted_time_shortterm; int not_picked_count; }; /** List of enabled addresses for each address family */ GSList *in4_enabled = NULL; GSList *in6_enabled = NULL; enum time { LONGLONGTERM, LONGTERM, MIDTERM, SHORTTERM }; static const char *logfile = NULL; struct src_prefix_list *choose_prefix(request_context_t *rctx, strbuf_t *sb, intent_category_t category, int bitrate, int duration); struct src_prefix_list *get_default_prefix(request_context_t *rctx, strbuf_t *sb); struct src_prefix_list *get_shortest_time_prefix(GSList *spl, enum time what_term, strbuf_t *sb); void predict_all_load_times(GSList *spl, int filesize, request_context_t *rctx, strbuf_t *sb); void increment_non_chosen(GSList *spl, struct src_prefix_list *chosen); struct src_prefix_list *get_not_recently_picked(GSList *spl, strbuf_t *sb); struct src_prefix_list *consider_pessimist_switch(struct src_prefix_list *candidate, GSList *spl, strbuf_t *sb, int duration); struct src_prefix_list *consider_optimist_switch(struct src_prefix_list *candidate, GSList *spl, strbuf_t *sb, int duration); int resolve_name(request_context_t *rctx); int be_pessimist = 0; int be_optimist = 1; /** Helper to set the policy information for each prefix * Here, set is_default if prefix has been set as default in the config file */ void set_policy_info(gpointer elem, gpointer data) { struct src_prefix_list *spl = elem; struct video_info *new = malloc(sizeof(struct video_info)); new->is_default = 0; // Query the config dictionary for this prefix if (spl->policy_set_dict != NULL) { gpointer value = NULL; if (((value = g_hash_table_lookup(spl->policy_set_dict, "default")) != NULL) && value ) new->is_default = 1; } spl->policy_info = new; } /** Helper to print policy info */ void 
print_policy_info(void *policy_info) { struct video_info *info = policy_info; if (info->is_default) printf(" (default)"); } /** Helper to free policy info at cleanup time */ void freepolicyinfo(gpointer elem, gpointer data) { struct src_prefix_list *spl = elem; if (spl->policy_info != NULL) free(spl->policy_info); spl->policy_info = NULL; } /** Helper function * Returns the default prefix, if any exists, otherwise NULL */ struct src_prefix_list *get_default_prefix(request_context_t *rctx, strbuf_t *sb) { GSList *spl = NULL; struct src_prefix_list *cur = NULL; struct video_info *info = NULL; // If address family is specified, only look in its list, else look in both (v4 first) if (rctx->ctx->domain == AF_INET) spl = in4_enabled; else if (rctx->ctx->domain == AF_INET6) spl = in6_enabled; else spl = g_slist_concat(in4_enabled, in6_enabled); // Go through list of src prefixes while (spl != NULL) { // Look at per-prefix policy information cur = spl->data; info = (struct video_info *)cur->policy_info; if (info != NULL && info->is_default) { /* This prefix is configured as default. Return it */ strbuf_printf(sb, "\tFound default prefix "); _muacc_print_sockaddr(sb, cur->if_addrs->addr, cur->if_addrs->addr_len); strbuf_printf(sb, "\n"); return cur; } spl = spl->next; } strbuf_printf(sb, "\tDid not find a default prefix %s%s\n", (rctx->ctx->domain == AF_INET) ? "for IPv4" : "", (rctx->ctx->domain == AF_INET6) ? "for IPv6" : ""); return NULL; } /** get_not_recently_picked * -- get the network with higher not_picked_count (without resetting it) */ struct src_prefix_list *get_not_recently_picked(GSList *spl, strbuf_t *sb) { struct src_prefix_list *cur = NULL; struct video_info *info = NULL; int not_picked_max = 0; struct src_prefix_list *candidate = NULL; // Go through list of src prefixes while (spl != NULL) { // Look at per-prefix policy information cur = spl->data; info = (struct video_info *)cur->policy_info; if (info != NULL && info->not_picked_count > not_picked_max) { candidate = cur; not_picked_max = info->not_picked_count; } spl = spl->next; } return(candidate); } // Assuming object is capacity dominated, return prefix with lowest expected load time void predict_all_load_times(GSList *spl, int filesize, request_context_t *rctx, strbuf_t *sb) { // Go through list of possible source prefixes while (spl != NULL) { struct src_prefix_list *cur = spl->data; struct video_info *pfxinfo = cur->policy_info; strbuf_printf(sb, "\tEstimating load times on %s for resource of size %d\n", cur->if_name, filesize); // Predict FOUR completion times on this prefix: // longlong term max capacity (10 minutes) // longterm max capacity (1 minute) // midterm max capacity (10 seconds) // shortterm max capacity (1 second) // with midterm max capacity, longterm max capacity, and short term min capacity double capacity_longlongterm = lookup_value(cur, LONGLONGTERM_MAX_CAPACITY_ESTIMATE, NULL); strbuf_printf(sb, "\t\tLONGLONG = %.3f", capacity_longlongterm); double capacity_longterm = lookup_value(cur, LONGTERM_MAX_CAPACITY_ESTIMATE, NULL); strbuf_printf(sb, "\tLONG = %.3f", capacity_longterm); double capacity_midterm = lookup_value(cur, MIDTERM_MAX_CAPACITY_ESTIMATE, NULL); strbuf_printf(sb, "\tMID = %.3f", capacity_midterm); double capacity_shortterm = lookup_value(cur, SHORTTERM_MAX_CAPACITY_ESTIMATE, NULL); strbuf_printf(sb, "\tSHORT = %.3f\n", capacity_shortterm); double srtt = lookup_value(cur, "srtt_minimum_recent", NULL); strbuf_printf(sb, "\t\t(srtt_minimum_recent = %.3f)\n", (srtt < DBL_MAX ? 
srtt : -1)); pfxinfo->predicted_time_longlongterm = predict_completion_time(cur, filesize, pfxinfo->reuse, NULL, (strncmp(rctx->ctx->remote_service, "443", 4) == 0 ? 1 : 0), capacity_longlongterm, "srtt_minimum_recent"); pfxinfo->predicted_time_longterm = predict_completion_time(cur, filesize, pfxinfo->reuse, NULL, (strncmp(rctx->ctx->remote_service, "443", 4) == 0 ? 1 : 0), capacity_longterm, "srtt_minimum_recent"); pfxinfo->predicted_time_midterm = predict_completion_time(cur, filesize, pfxinfo->reuse, NULL, (strncmp(rctx->ctx->remote_service, "443", 4) == 0 ? 1 : 0), capacity_midterm, "srtt_minimum_recent"); pfxinfo->predicted_time_shortterm = predict_completion_time(cur, filesize, pfxinfo->reuse, NULL, (strncmp(rctx->ctx->remote_service, "443", 4) == 0 ? 1 : 0), capacity_shortterm, "srtt_minimum_recent"); strbuf_printf(sb, "\t\tLonglong term: %.3f\n", (DBL_MAX - pfxinfo->predicted_time_longlongterm < EPSILON? -1 : pfxinfo->predicted_time_longlongterm)); strbuf_printf(sb, "\t\tLong term: %.3f\n", (DBL_MAX - pfxinfo->predicted_time_longterm < EPSILON? -1 : pfxinfo->predicted_time_longterm)); strbuf_printf(sb, "\t\tMid term: %.3f\n", (DBL_MAX - pfxinfo->predicted_time_midterm < EPSILON? -1 : pfxinfo->predicted_time_midterm)); strbuf_printf(sb, "\t\tShort term: %.3f\n", (DBL_MAX - pfxinfo->predicted_time_shortterm < EPSILON? -1 : pfxinfo->predicted_time_shortterm)); spl = spl->next; } } /** Pessimism: If worst case load time is too high, be concerned. * Consider switching to another network which has an acceptable worst case * load time. */ struct src_prefix_list *consider_pessimist_switch(struct src_prefix_list *candidate, GSList *spl, strbuf_t *sb, int duration) { if (duration < EPSILON) { // No current buffer level -- cannot compare worst case load time with it strbuf_printf(sb, "\t\tPlayout not started yet - skip being pessimist\n"); return candidate; } strbuf_printf(sb, "\t\tBeing pessimist: "); // Determine worst case load time on candidate int how_concerned_are_we = 0; struct video_info *pxinfo = candidate->policy_info; double worst_case_load_time = pxinfo->predicted_time_shortterm; double duration_or_segment = (double) duration; if (duration_or_segment < 1000 * SEGMENT_DURATION) { duration_or_segment = 1000 * SEGMENT_DURATION; } if (DBL_MAX - pxinfo->predicted_time_shortterm < EPSILON) { worst_case_load_time = pxinfo->predicted_time_midterm; strbuf_printf(sb, "Short term estimate on this interface is N/A, so using mid term estimate %.2f", worst_case_load_time); } else { strbuf_printf(sb, "Worst case load time on this interface is %.2f ms", worst_case_load_time); } strbuf_printf(sb, " -- comparing to segment duration %d and buffer status duration %d (not_picked_count = %d)\n", SEGMENT_DURATION * 1000, duration, pxinfo->not_picked_count); // Determine how concerned we are: // 0 means "not concerned" // 1 means "concerned" // 2 means "very concerned" if (worst_case_load_time > (1.5 * duration_or_segment) && pxinfo->not_picked_count == 0) { // We have recently used this prefix and it gave us a bad worst case estimate how_concerned_are_we = 2; strbuf_printf(sb, "\t\tBe VERY concerned\n"); } else if (worst_case_load_time > duration_or_segment) { if (pxinfo->not_picked_count > 0 && pxinfo->predicted_time_midterm < SEGMENT_DURATION * 1000) { strbuf_printf(sb, "\t\tPerhaps that estimate is not recent and midterm looks okay -- do not be concerned\n"); return(candidate); } else { how_concerned_are_we = 1; strbuf_printf(sb, "\t\tBe concerned\n"); } } else if (worst_case_load_time > 
SEGMENT_DURATION * 1000) { if (pxinfo->not_picked_count == 0 && pxinfo->predicted_time_midterm > duration_or_segment * 1.5) { how_concerned_are_we = 2; strbuf_printf(sb, "\t\tWe recently picked this and it has a terrible worst case estimate -- be VERY concerned\n"); } else { how_concerned_are_we = 1; strbuf_printf(sb, "\t\tNot sure if I should be concerned, but I am\n"); } } else { //strbuf_printf(sb, "\t\tSeems okay -- be optimist\n"); strbuf_printf(sb, "\t\tSeems okay -- but DO NOT be optimist\n"); return(candidate); } if (how_concerned_are_we > 0) { // See if there is an alternative with better short term load time struct src_prefix_list *alt_better_shortterm = get_shortest_time_prefix(spl, SHORTTERM, sb); if (alt_better_shortterm != NULL && alt_better_shortterm != candidate) { struct video_info *px2info = alt_better_shortterm->policy_info; strbuf_printf(sb, "\t\tConsidering to use %s with short term load time %.3f", alt_better_shortterm->if_name, px2info->predicted_time_shortterm); if (px2info->predicted_time_shortterm < duration) { strbuf_printf(sb, " -- is shorter than our buffer, picking it\n"); return(alt_better_shortterm); } else if (px2info->predicted_time_shortterm < pxinfo->predicted_time_shortterm) { strbuf_printf(sb, " -- both longer than buffer, but alternative is less bad, picking it\n"); return(alt_better_shortterm); } else { strbuf_printf(sb, " -- not picking it\n"); } } // See if there is an alternative prefix with better mid term struct src_prefix_list *alt_better_midterm = get_shortest_time_prefix(spl, MIDTERM, sb); if (alt_better_midterm != NULL && alt_better_midterm != candidate) { struct video_info *px2info = alt_better_midterm->policy_info; strbuf_printf(sb, "\t\tConsidering to use %s with mid term load time %.3f", alt_better_midterm->if_name, px2info->predicted_time_midterm); if (px2info->not_picked_count == 0 && (px2info->predicted_time_midterm < SEGMENT_DURATION * 1000)) { // This prefix has recently been used -- its short and midterm // estimates should be about accurate, so only accept it if // the mid estimate is below segment duration strbuf_printf(sb, " -- is shorter than segment duration, picking it\n"); return(alt_better_midterm); } else if (px2info->not_picked_count >= 1 && (px2info->predicted_time_midterm * 1.5 < duration)) { // This prefix has an acceptable midterm estimate strbuf_printf(sb, " -- is shorter than buffer, picking it\n"); return(alt_better_midterm); } else { strbuf_printf(sb, " -- not picking it\n"); } } // See if there is an alternative prefix with shorter long term load time struct src_prefix_list *alt_better_longterm = get_shortest_time_prefix(spl, LONGTERM, sb); if (alt_better_longterm != NULL && alt_better_longterm != candidate) { struct video_info *px2info = alt_better_longterm->policy_info; strbuf_printf(sb, "\t\tConsidering to use %s with long term load time %.3f", alt_better_longterm->if_name, px2info->predicted_time_longterm); /*if (px2info->predicted_time_longterm * 1.5 < duration) { strbuf_printf(sb, " -- is shorter than our buffer and has not been picked %d times, picking it\n", px2info->not_picked_count); return(alt_better_longterm); } else if (px2info->predicted_time_longterm * 1.5 < SEGMENT_DURATION * 1000) { strbuf_printf(sb, " -- is shorter than our segment duration and has not been picked %d times, picking it\n", px2info->not_picked_count); return(alt_better_longterm); } else { strbuf_printf(sb, " -- not picking it\n"); }*/ } // If worst case load time is more than 1.5 times the current buffer duration // we may be in 
trouble and are even more ready to switch if (how_concerned_are_we > 1) { strbuf_printf(sb, "\t\tvery concerned -- reconsidering alternatives\n"); if (alt_better_shortterm != candidate && ((struct video_info *)alt_better_shortterm)->predicted_time_shortterm < pxinfo->predicted_time_shortterm) { strbuf_printf(sb, "\t\t%s has better worst case load time -- picking it\n", alt_better_shortterm->if_name); return(alt_better_shortterm); } if (alt_better_midterm != candidate) { struct video_info *px3info = alt_better_midterm->policy_info; if (px3info->predicted_time_shortterm < pxinfo->predicted_time_shortterm && px3info->predicted_time_midterm < pxinfo->predicted_time_midterm) { strbuf_printf(sb, "\t\t%s has better mid term load time -- picking it\n", alt_better_midterm->if_name); return(alt_better_midterm); } } if (alt_better_longterm != candidate) { struct video_info *px4info = alt_better_longterm->policy_info; if (px4info->predicted_time_shortterm < pxinfo->predicted_time_shortterm && px4info->predicted_time_longterm < pxinfo->predicted_time_longterm) { strbuf_printf(sb, "\t\t%s still has better longterm load time -- picking it\n", alt_better_longterm->if_name); return(alt_better_longterm); } } } strbuf_printf(sb, "\t\tcould not find an alternative -- staying with %s\n", candidate->if_name); } // If we did not find an alternative, // return the same candidate we got initially return candidate; } /** Optimism: If there is an alternative prefix with a better long term load time * which has not been tried in a while, try it -- it might have gotten better! */ struct src_prefix_list *consider_optimist_switch(struct src_prefix_list *candidate, GSList *spl, strbuf_t *sb, int duration) { strbuf_printf(sb, "\t\tBeing optimist:"); double duration_to_use = (double) duration; struct video_info *pxinfo = candidate->policy_info; // Get interface with lowest longterm best case load time as alternative struct src_prefix_list *alternative = get_shortest_time_prefix(spl, LONGLONGTERM, sb); if (alternative != NULL && alternative != candidate) { // This only makes sense if the alternative is different from the candidate struct video_info *px2info = alternative->policy_info; strbuf_printf(sb, " Considering to use %s with longlongterm load time %.3f and long term load time %.3f / mid term load time %.3f, not picked %d times\n", alternative->if_name, px2info->predicted_time_longlongterm, px2info->predicted_time_longterm, px2info->predicted_time_midterm, px2info->not_picked_count); if (duration < EPSILON && px2info->not_picked_count > 0) { // Give alternative, which has not been recently used, a chance strbuf_printf(sb, "\t\tPlayout hasn't started yet -- trying this"); return(alternative); } if (px2info->not_picked_count > 3) { // Only consider optimist switch if alternative has not been used // recently, i.e., at least 3 times // If alternative is "safe to use", i.e., longterm says it will // be finished within buffer duration, try it if ((px2info->predicted_time_longterm * 1.5) < duration_to_use) { strbuf_printf(sb, "\t\tlong term predicted time * 1.5 = %f is acceptable with buffer = %.0f -- picking it\n", px2info->predicted_time_longterm * 1.5, duration_to_use); return(alternative); /*} else if ((px2info->predicted_time_longterm * 1.2) < duration_to_use) { // OPTIONAL: Consider using factor 1.2 if 1.5 means // we don't switch enough strbuf_printf(sb, " -- long term predicted time %f is acceptable compared to buffer * 1.2 = %.0f -- picking it\n", px2info->predicted_time_longterm, duration_to_use * 1.2); 
return(alternative);*/ } else if ((pxinfo->predicted_time_longterm * 1.5) > duration_to_use && px2info->predicted_time_longterm < pxinfo->predicted_time_longterm) { // alternative may not be safe to use, but // candidate is not safe to use either // --> if alternative has better longterm estimate, try it strbuf_printf(sb, "\t\thas a better long term predicted time %f -- picking it\n", px2info->predicted_time_longterm); return(alternative); } else if (px2info->not_picked_count > 10) { strbuf_printf(sb, "\t\tlonglong term predicted time %f is better than candidate longlongterm and hasn't been tried %d times -- picking it\n", px2info->predicted_time_longlongterm, px2info->not_picked_count); return(alternative); } } // Found no reason to switch to alternative strbuf_printf(sb, "\t\tstaying with %s\n", candidate->if_name); } else { strbuf_printf(sb, "\t\tNo alternative found -- staying with %s\n", candidate->if_name); } // If we did not find an alternative, // return the same candidate we got initially return candidate; } void increment_non_chosen(GSList *spl, struct src_prefix_list *chosen) { // Go through list of possible source prefixes while (spl != NULL) { struct src_prefix_list *cur = spl->data; struct video_info *pfxinfo = cur->policy_info; if (spl->data != chosen) { pfxinfo->not_picked_count++; } spl = spl->next; } } struct src_prefix_list *get_shortest_time_prefix(GSList *spl, enum time what_term, strbuf_t *sb) { // Get prefix with shortest predicted completion time struct src_prefix_list *cur = NULL; struct src_prefix_list *fastest = NULL; double min_completion_time = DBL_MAX; while (spl != NULL) { cur = spl->data; if (cur->policy_info == NULL) { continue; } double time_to_look_at = DBL_MAX; if (what_term == LONGLONGTERM) { time_to_look_at = ((struct video_info *)cur->policy_info)->predicted_time_longlongterm; } else if (what_term == LONGTERM) { time_to_look_at = ((struct video_info *)cur->policy_info)->predicted_time_longterm; } else if (what_term == MIDTERM) { time_to_look_at = ((struct video_info *)cur->policy_info)->predicted_time_midterm; } else if (what_term == SHORTTERM) { time_to_look_at = ((struct video_info *)cur->policy_info)->predicted_time_shortterm; } if (time_to_look_at > 0 && time_to_look_at < min_completion_time) { fastest = cur; min_completion_time = time_to_look_at; } spl = spl->next; } return fastest; } /** * Chooses the source prefix */ struct src_prefix_list *choose_prefix(request_context_t *rctx, strbuf_t *sb, intent_category_t category, int bitrate, int duration) { GSList *spl = NULL; // If address family is specified, only look in its list, else look in both (v4 first) if (rctx->ctx->domain == AF_INET) spl = in4_enabled; else if (rctx->ctx->domain == AF_INET6) spl = in6_enabled; else spl = g_slist_concat(in4_enabled, in6_enabled); struct src_prefix_list *chosenpfx = NULL; if (category == INTENT_QUERY) { // Initial Query -- choose lowest latency prefix chosenpfx = get_lowest_srtt_pfx(spl, LATENCY_ESTIMATE, sb); strbuf_printf(sb, "\tLowest latency (%s) interface: %s\n", LATENCY_ESTIMATE, (chosenpfx == NULL) ? "none" : chosenpfx->if_name); if (chosenpfx != NULL) { _muacc_logtofile(logfile, "%s,lowlatency\n", chosenpfx->if_name); } } else if (category == INTENT_CONTROLTRAFFIC) { // "Control traffic" is our audio stream -- get not recently picked network chosenpfx = get_not_recently_picked(spl, sb); strbuf_printf(sb, "\tNot recently picked (%d) interface: %s\n", (chosenpfx == NULL) ? 
-1 : ((struct video_info *)chosenpfx->policy_info)->not_picked_count, (chosenpfx == NULL) ? "none" : chosenpfx->if_name); /*if (chosenpfx != NULL) { _muacc_logtofile(logfile, "%s,control\n", chosenpfx->if_name); }*/ } else if (category == INTENT_BULKTRANSFER) { // Video segments -- estimate load time predict_all_load_times(spl, bitrate / 8 * SEGMENT_DURATION, rctx, sb); chosenpfx = get_shortest_time_prefix(spl, MIDTERM, sb); if (chosenpfx == NULL) { chosenpfx = get_shortest_time_prefix(spl, LONGTERM, sb); } if (chosenpfx == NULL) { chosenpfx = get_shortest_time_prefix(spl, LONGLONGTERM, sb); } strbuf_printf(sb, "\tShortest load time interface: %s\n", (chosenpfx == NULL) ? "none" : chosenpfx->if_name); if (chosenpfx != NULL && be_pessimist) { // See if we are concerned -- if not, consider optimist switch too chosenpfx = consider_pessimist_switch(chosenpfx, spl, sb, duration); } else if (chosenpfx != NULL) { // Only consider optimist switches chosenpfx = consider_optimist_switch(chosenpfx, spl, sb, duration); } if (chosenpfx != NULL) { struct video_info *chosen_info = chosenpfx->policy_info; chosen_info->not_picked_count = 0; increment_non_chosen(spl, chosenpfx); _muacc_logtofile(logfile, "%s,lowloadtime\n", chosenpfx->if_name); } } if (chosenpfx == NULL) { // No category given or no load time estimates -- return default prefix chosenpfx = get_default_prefix(rctx, sb); strbuf_printf(sb, "\tDefault interface: %s\n", (chosenpfx == NULL) ? "none" : chosenpfx->if_name); if (chosenpfx != NULL) { _muacc_logtofile(logfile, "%s,default\n", chosenpfx->if_name); } else { _muacc_logtofile(logfile, ",none\n"); } } return chosenpfx; } /** Initializer function (mandatory) * Is called once the policy is loaded and every time it is reloaded * Typically sets the policy_info and initializes the lists of candidate addresses */ int init(mam_context_t *mctx) { printf("Policy module \"video\" is loading.\n"); g_slist_foreach(mctx->prefixes, &set_policy_info, NULL); make_v4v6_enabled_lists (mctx->prefixes, &in4_enabled, &in6_enabled); GSList *spl = in4_enabled; while (spl != NULL) { struct video_info *pfxinfo = ((struct src_prefix_list *)spl->data)->policy_info; pfxinfo->not_picked_count = 0; spl = spl->next; } spl = in6_enabled; while (spl != NULL) { struct video_info *pfxinfo = ((struct src_prefix_list *)spl->data)->policy_info; pfxinfo->not_picked_count = 0; spl = spl->next; } logfile = g_hash_table_lookup(mctx->policy_set_dict, "logfile"); if (logfile != NULL) { printf("\nLogging to %s\n", logfile); } gpointer value = NULL; if ((value = g_hash_table_lookup(mctx->policy_set_dict, "be_pessimist")) != NULL) { be_pessimist = 1; printf("\nBeing pessimist\n"); } else { be_pessimist = 0; printf("\nNot being pessimist\n"); } printf("\nPolicy module \"video\" has been loaded.\n"); return 0; } /** Cleanup function (mandatory) * Is called once the policy is torn down, e.g. 
if MAM is terminates * Tear down lists of candidate addresses (no deep free) and policy infos */ int cleanup(mam_context_t *mctx) { g_slist_free(in4_enabled); g_slist_free(in6_enabled); g_slist_foreach(mctx->prefixes, &freepolicyinfo, NULL); in4_enabled = NULL; in6_enabled = NULL; printf("Policy video cleaned up.\n"); return 0; } /** Asynchronous callback function for resolve_name * Invoked once a response to the resolver query has been received * Sends back a reply to the client with the received answer */ static void resolve_request_result(int errcode, struct evutil_addrinfo *addr, void *ptr) { request_context_t *rctx = ptr; strbuf_t sb; strbuf_init(&sb); if (errcode) { strbuf_printf(&sb, "\t[%.6f] Error resolving: %s -> %s\n", gettimestamp(), rctx->ctx->remote_hostname, evutil_gai_strerror(errcode)); rctx->action = muacc_error_resolve; } else { // Successfully resolved name strbuf_printf(&sb, "\t[%.6f] Got resolver response for %s %s\n", gettimestamp(), rctx->ctx->remote_hostname, addr->ai_canonname ? addr->ai_canonname : ""); strbuf_printf(&sb, "\t"); _muacc_print_addrinfo(&sb, addr); strbuf_printf(&sb, "\n"); // Clone result into the request context assert(addr != NULL); assert(rctx->ctx->remote_addrinfo_res == NULL); rctx->ctx->remote_addrinfo_res = _muacc_clone_addrinfo(addr); // Choose first result as the remote address rctx->ctx->domain = addr->ai_family; rctx->ctx->type = addr->ai_socktype; rctx->ctx->protocol = addr->ai_protocol; rctx->ctx->remote_sa_len = addr->ai_addrlen; rctx->ctx->remote_sa = _muacc_clone_sockaddr(addr->ai_addr, addr->ai_addrlen); // Print remote address strbuf_printf(&sb, "\n\tSet remote address ="); _muacc_print_sockaddr(&sb, rctx->ctx->remote_sa, rctx->ctx->remote_sa_len); strbuf_printf(&sb, "\n"); //strbuf_release(&sb); evutil_freeaddrinfo(addr); } // send reply to client strbuf_printf(&sb, "\n\t[%.6f] Sending reply\n", gettimestamp()); _muacc_send_ctx_event(rctx, rctx->action); //printf("%s\n", strbuf_export(&sb)); strbuf_release(&sb); //printf("\n\t[%.6f] Returning resolve result callback\n\n", gettimestamp()); } /* Helper function that issues a DNS request and registers the callback resolve_request_result */ int resolve_name(request_context_t *rctx) { strbuf_t sb; strbuf_init(&sb); struct evdns_base *evdns_base = rctx->evdns_base; // If no dns base is given for the chosen source prefix, use default dns base if (evdns_base == NULL) { strbuf_printf(&sb, "\tNo prefix-specific DNS base found - using default DNS base\n"); evdns_base = rctx->mctx->evdns_default_base; } // Set hints to resolve name for our chosen address family if (rctx->ctx->remote_addrinfo_hint != NULL) { rctx->ctx->remote_addrinfo_hint->ai_family = rctx->ctx->domain; } else { // Initialize hints for address resolution rctx->ctx->remote_addrinfo_hint = malloc(sizeof(struct addrinfo)); memset(rctx->ctx->remote_addrinfo_hint, 0, sizeof(struct addrinfo)); rctx->ctx->remote_addrinfo_hint->ai_family = rctx->ctx->domain; rctx->ctx->remote_addrinfo_hint->ai_socktype = rctx->ctx->type; rctx->ctx->remote_addrinfo_hint->ai_protocol = rctx->ctx->protocol; } if (evdns_base_set_option(evdns_base, "timeout", "1") < 0) { strbuf_printf(&sb, "Setting DNS timeout failed\n"); } strbuf_printf(&sb, "\t[%.6f] Resolving: %s:%s with hint: ", gettimestamp(), (rctx->ctx->remote_hostname == NULL ? "" : rctx->ctx->remote_hostname), (rctx->ctx->remote_service == NULL ? 
"" : rctx->ctx->remote_service)); _muacc_print_addrinfo(&sb, rctx->ctx->remote_addrinfo_hint); strbuf_printf(&sb, "\n"); /* Try to resolve this request using asynchronous lookup */ assert(evdns_base != NULL); evdns_getaddrinfo( evdns_base, rctx->ctx->remote_hostname, rctx->ctx->remote_service, rctx->ctx->remote_addrinfo_hint, &resolve_request_result, rctx); //printf("%s\n", strbuf_export(&sb)); strbuf_release(&sb); //printf("\t[%.6f] Returning resolve_name.\n\n", gettimestamp()); return 0; } /** Resolve request function (mandatory) * Is called upon each getaddrinfo request from a client * Must send a reply back using _muacc_sent_ctx_event or register a callback that does so */ int on_resolve_request(request_context_t *rctx, struct event_base *base) { //printf("\n\t[%.6f] Resolve request: %s:%s\n\n", gettimestamp(), (rctx->ctx->remote_hostname == NULL ? "" : rctx->ctx->remote_hostname), (rctx->ctx->remote_service == NULL ? "" : rctx->ctx->remote_service)); if(rctx->ctx->bind_sa_req != NULL) { // already bound //printf("\tBind interface already specified\n"); rctx->ctx->domain = rctx->ctx->bind_sa_req->sa_family; struct src_prefix_list *bind_pfx = get_pfx_with_addr(rctx, rctx->ctx->bind_sa_req); if (bind_pfx != NULL) { // Set DNS base to this prefix's rctx->evdns_base = bind_pfx->evdns_base; //printf("\tSet DNS base\n"); } } rctx->action = muacc_act_getaddrinfo_resolve_resp; //printf("\n\t[%.6f] Calling resolve_name\n", gettimestamp()); return resolve_name(rctx); } /** Connect request function (mandatory) * Is called upon each connect request from a client * Must send a reply back using _muacc_sent_ctx_event or register a callback that does so */ int on_connect_request(request_context_t *rctx, struct event_base *base) { strbuf_t sb; strbuf_init(&sb); strbuf_printf(&sb, "\t[%.6f] Connect request: dest=", gettimestamp()); _muacc_print_sockaddr(&sb, rctx->ctx->remote_sa, rctx->ctx->remote_sa_len); // Print Intents intent_category_t category = -1; socklen_t categorylen = sizeof(intent_category_t); if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_CATEGORY, &categorylen, &category) == 0) { //printf("\t\twith category %d\n", category); } int fs = -1; socklen_t fslen = sizeof(int); if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_FILESIZE, &fslen, &fs) == 0) { //printf("\t\twith file size %d\n", fs); } int bitrate = -1; if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_BITRATE, &fslen, &bitrate) == 0) { //printf("\t\twith bitrate %d\n", bitrate); } int duration = -1; if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_DURATION, &fslen, &duration) == 0) { //printf("\t\twith duration %d\n", duration); } // Check if client has already chosen a source address to bind to if(rctx->ctx->bind_sa_req != NULL) { // already bound strbuf_printf(&sb, "\tAlready bound to src="); _muacc_print_sockaddr(&sb, rctx->ctx->bind_sa_req, rctx->ctx->bind_sa_req_len); rctx->ctx->domain = rctx->ctx->bind_sa_req->sa_family; } else { struct src_prefix_list *bind_pfx = choose_prefix(rctx, &sb, category, bitrate, duration); if (bind_pfx != NULL) { set_bind_sa(rctx, bind_pfx, &sb); } } // send response back strbuf_printf(&sb, "\n\t[%.6f] Sending reply\n", gettimestamp()); _muacc_send_ctx_event(rctx, muacc_act_connect_resp); //printf("%s\n", strbuf_export(&sb)); strbuf_release(&sb); //printf("\t[%.6f] Returning\n\n", gettimestamp()); return 0; } /** Socketconnect request function * Is called upon each socketconnect request from a client 
* Chooses a source prefix/address and then resolves the name * Must send a reply back using _muacc_sent_ctx_event or register a callback that does so */ int on_socketconnect_request(request_context_t *rctx, struct event_base *base) { strbuf_t sb; strbuf_init(&sb); printf("\n\t[%.6f] Socketconnect request: %s:%s\n", gettimestamp(), (rctx->ctx->remote_hostname == NULL ? "" : rctx->ctx->remote_hostname), (rctx->ctx->remote_service == NULL ? "" : rctx->ctx->remote_service)); // Print Intents intent_category_t category = -1; socklen_t categorylen = sizeof(intent_category_t); if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_CATEGORY, &categorylen, &category) == 0) { printf("\t\twith category %d\n", category); } int fs = -1; socklen_t fslen = sizeof(int); if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_FILESIZE, &fslen, &fs) == 0) { printf("\t\twith file size %d\n", fs); } int bitrate = -1; if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_BITRATE, &fslen, &bitrate) == 0) { printf("\t\twith bitrate %d\n", bitrate); } int duration = -1; if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_DURATION, &fslen, &duration) == 0) { printf("\t\twith duration %d\n", duration); } printf("\n"); double timestamp = gettimestamp(); _muacc_logtofile(logfile, "%.6f,,,,,,,,%d,%d,%d,%d,", timestamp, fs, category, bitrate, duration); // Check if client has already chosen a source address to bind to if(rctx->ctx->bind_sa_req != NULL) { // already bound strbuf_printf(&sb, "\tAlready bound to src="); _muacc_print_sockaddr(&sb, rctx->ctx->bind_sa_req, rctx->ctx->bind_sa_req_len); rctx->ctx->domain = rctx->ctx->bind_sa_req->sa_family; struct src_prefix_list *bind_pfx = get_pfx_with_addr(rctx, rctx->ctx->bind_sa_req); if (bind_pfx != NULL) { // Set DNS base to this prefix's rctx->evdns_base = bind_pfx->evdns_base; strbuf_printf(&sb, ", set DNS base. "); } } else { struct src_prefix_list *bind_pfx = choose_prefix(rctx, &sb, category, bitrate, duration); if (bind_pfx != NULL) { set_bind_sa(rctx, bind_pfx, &sb); // Set this prefix' evdns base for name resolution rctx->evdns_base = bind_pfx->evdns_base; } else { rctx->evdns_base = NULL; } } printf("%s\n\n", strbuf_export(&sb)); strbuf_release(&sb); rctx->action = muacc_act_socketconnect_resp; return resolve_name(rctx); } /** Socketchoose request function * Is called upon each socketchoose request from a client * Chooses from a set of existing sockets, or if none exists, does the same as socketconnect * Must send a reply back using _muacc_sent_ctx_event or register a callback that does so */ int on_socketchoose_request(request_context_t *rctx, struct event_base *base) { strbuf_t sb; strbuf_init(&sb); printf("\n\t[%.6f] Socketchoose request: %s:%s", gettimestamp(), (rctx->ctx->remote_hostname == NULL ? "" : rctx->ctx->remote_hostname), (rctx->ctx->remote_service == NULL ? 
"" : rctx->ctx->remote_service)); if (rctx->sockets != NULL) { printf(" with socketset: "); print_sockets(rctx->sockets); } printf("\n"); // Print Intents intent_category_t category = -1; socklen_t categorylen = sizeof(intent_category_t); if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_CATEGORY, &categorylen, &category) == 0) { printf("\t\twith category %d\n", category); } int fs = -1; socklen_t fslen = sizeof(int); if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_FILESIZE, &fslen, &fs) == 0) { printf("\t\twith file size %d\n", fs); } int bitrate = -1; if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_BITRATE, &fslen, &bitrate) == 0) { printf("\t\twith bitrate %d\n", bitrate); } int duration = -1; if (mampol_get_socketopt(rctx->ctx->sockopts_current, SOL_INTENTS, INTENT_DURATION, &fslen, &duration) == 0) { printf("\t\twith duration %d\n", duration); } printf("\n"); double timestamp = gettimestamp(); _muacc_logtofile(logfile, "%.6f,", timestamp); GSList *spl = in4_enabled; while (spl != NULL) { struct src_prefix_list *cur = spl->data; struct video_info *pfxinfo = cur->policy_info; pfxinfo->reuse = count_sockets_on_prefix(rctx->sockets, cur, logfile); _muacc_logtofile(logfile, "%d,", pfxinfo->reuse); spl = spl->next; } _muacc_logtofile(logfile, ",,,%d,%d,%d,%d,", category, fs, bitrate, duration); struct src_prefix_list *bind_pfx = NULL; // Check if source address was already chosen if(rctx->ctx->bind_sa_req == NULL) { // No source address chosen yet - choose best prefix bind_pfx = choose_prefix(rctx, &sb, category, bitrate, duration); if (bind_pfx != NULL) { set_bind_sa(rctx, bind_pfx, &sb); // Set this prefix' evdns base for name resolution rctx->evdns_base = bind_pfx->evdns_base; } } else { strbuf_printf(&sb, "\tAlready bound to src="); _muacc_print_sockaddr(&sb, rctx->ctx->bind_sa_req, rctx->ctx->bind_sa_req_len); bind_pfx = get_pfx_with_addr(rctx, rctx->ctx->bind_sa_req); } // Attempt to suggest an existing socket on the preferred prefix if (bind_pfx != NULL && rctx->sockets != NULL) { strbuf_printf(&sb, "\n\tPicking a socket on prefix with address "); _muacc_print_sockaddr(&sb, bind_pfx->if_addrs->addr, bind_pfx->if_addrs->addr_len); strbuf_printf(&sb, "\n"); // Filter the request context's socket list, only leaving sockets on our preferred prefix pick_sockets_on_prefix(rctx, bind_pfx); if (rctx-> sockets != NULL) { // At least one matching socket was found strbuf_printf(&sb, "\tFirst candidate socket: %d\n", rctx->sockets->file); /* Provide the information to open a new similar socket, in case the suggested socket cannot be used */ uuid_t context_id; __uuid_copy(context_id, rctx->ctx->ctxid); _muacc_free_ctx(rctx->ctx); rctx->ctx = _muacc_clone_ctx(rctx->sockets->ctx); __uuid_copy(rctx->ctx->ctxid, context_id); printf("%s\n\n", strbuf_export(&sb)); int ret = strbuf_release(&sb); if (ret > 0) { fprintf(stderr, "Strbuf could not be freed! %d\n", ret); } // Send reply back to client _muacc_send_ctx_event(rctx, muacc_act_socketchoose_resp_existing); return 0; } else { strbuf_printf(&sb, "\tDid not find a socket on this prefix\n"); } } else { strbuf_printf(&sb, "\tSocketchoose with empty set or no preferred prefix found\n"); } strbuf_printf(&sb, "\tSocketchoose - suggesting creation of a new socket, resolving %s\n", (rctx->ctx->remote_hostname == NULL ? 
"" : rctx->ctx->remote_hostname)); rctx->action = muacc_act_socketchoose_resp_new; printf("%s\n\n", strbuf_export(&sb)); int ret = strbuf_release(&sb); if (ret > 0) { fprintf(stderr, "Strbuf could not be freed! %d\n", ret); } return resolve_name(rctx); } int on_new_subflow_request(mam_context_t *mctx, struct mptcp_flow_info *flow) { return 0; }
652971.c
// SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/file.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/stat.h> #include <linux/buffer_head.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/falloc.h> #include <linux/types.h> #include <linux/compat.h> #include <linux/uaccess.h> #include <linux/mount.h> #include <linux/pagevec.h> #include <linux/uio.h> #include <linux/uuid.h> #include <linux/file.h> #include <linux/nls.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "xattr.h" #include "acl.h" #include "gc.h" #include "trace.h" #include <trace/events/f2fs.h> static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) { struct inode *inode = file_inode(vmf->vma->vm_file); vm_fault_t ret; down_read(&F2FS_I(inode)->i_mmap_sem); ret = filemap_fault(vmf); up_read(&F2FS_I(inode)->i_mmap_sem); trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret); return ret; } static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) { struct page *page = vmf->page; struct inode *inode = file_inode(vmf->vma->vm_file); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn = { .node_changed = false }; int err; if (unlikely(f2fs_cp_error(sbi))) { err = -EIO; goto err; } if (!f2fs_is_checkpoint_ready(sbi)) { err = -ENOSPC; goto err; } sb_start_pagefault(inode->i_sb); f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); file_update_time(vmf->vma->vm_file); down_read(&F2FS_I(inode)->i_mmap_sem); lock_page(page); if (unlikely(page->mapping != inode->i_mapping || page_offset(page) > i_size_read(inode) || !PageUptodate(page))) { unlock_page(page); err = -EFAULT; goto out_sem; } /* block allocation */ __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_block(&dn, page->index); f2fs_put_dnode(&dn); __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); if (err) { unlock_page(page); goto out_sem; } /* fill the page */ f2fs_wait_on_page_writeback(page, DATA, false, true); /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); /* * check to see if the page is mapped already (no holes) */ if (PageMappedToDisk(page)) goto out_sem; /* page is wholly or partially inside EOF */ if (((loff_t)(page->index + 1) << PAGE_SHIFT) > i_size_read(inode)) { loff_t offset; offset = i_size_read(inode) & ~PAGE_MASK; zero_user_segment(page, offset, PAGE_SIZE); } set_page_dirty(page); if (!PageUptodate(page)) SetPageUptodate(page); f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE); f2fs_update_time(sbi, REQ_TIME); trace_f2fs_vm_page_mkwrite(page, DATA); out_sem: up_read(&F2FS_I(inode)->i_mmap_sem); f2fs_balance_fs(sbi, dn.node_changed); sb_end_pagefault(inode->i_sb); err: return block_page_mkwrite_return(err); } static const struct vm_operations_struct f2fs_file_vm_ops = { .fault = f2fs_filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = f2fs_vm_page_mkwrite, }; static int get_parent_ino(struct inode *inode, nid_t *pino) { struct dentry *dentry; inode = igrab(inode); dentry = d_find_any_alias(inode); iput(inode); if (!dentry) return 0; *pino = parent_ino(dentry); dput(dentry); return 1; } static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); enum cp_reason_type cp_reason = CP_NO_NEEDED; if (!S_ISREG(inode->i_mode)) cp_reason = CP_NON_REGULAR; else if (inode->i_nlink != 1) cp_reason = CP_HARDLINK; else if 
(is_sbi_flag_set(sbi, SBI_NEED_CP)) cp_reason = CP_SB_NEED_CP; else if (file_wrong_pino(inode)) cp_reason = CP_WRONG_PINO; else if (!f2fs_space_for_roll_forward(sbi)) cp_reason = CP_NO_SPC_ROLL; else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) cp_reason = CP_NODE_NEED_CP; else if (test_opt(sbi, FASTBOOT)) cp_reason = CP_FASTBOOT_MODE; else if (F2FS_OPTION(sbi).active_logs == 2) cp_reason = CP_SPEC_LOG_NUM; else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && f2fs_need_dentry_mark(sbi, inode->i_ino) && f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO)) cp_reason = CP_RECOVER_DIR; return cp_reason; } static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) { struct page *i = find_get_page(NODE_MAPPING(sbi), ino); bool ret = false; /* But we need to avoid that there are some inode updates */ if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino)) ret = true; f2fs_put_page(i, 0); return ret; } static void try_to_fix_pino(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); nid_t pino; down_write(&fi->i_sem); if (file_wrong_pino(inode) && inode->i_nlink == 1 && get_parent_ino(inode, &pino)) { f2fs_i_pino_write(inode, pino); file_got_pino(inode); } up_write(&fi->i_sem); } static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, int datasync, bool atomic) { struct inode *inode = file->f_mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t ino = inode->i_ino; int ret = 0; enum cp_reason_type cp_reason = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .for_reclaim = 0, }; unsigned int seq_id = 0; if (unlikely(f2fs_readonly(inode->i_sb) || is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return 0; trace_f2fs_sync_file_enter(inode); if (S_ISDIR(inode->i_mode)) goto go_write; /* if fdatasync is triggered, let's do in-place-update */ if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) set_inode_flag(inode, FI_NEED_IPU); ret = file_write_and_wait_range(file, start, end); clear_inode_flag(inode, FI_NEED_IPU); if (ret) { trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); return ret; } /* if the inode is dirty, let's recover all the time */ if (!f2fs_skip_inode_update(inode, datasync)) { f2fs_write_inode(inode, NULL); goto go_write; } /* * if there is no written data, don't waste time to write recovery info. */ if (!is_inode_flag_set(inode, FI_APPEND_WRITE) && !f2fs_exist_written_data(sbi, ino, APPEND_INO)) { /* it may call write_inode just prior to fsync */ if (need_inode_page_update(sbi, ino)) goto go_write; if (is_inode_flag_set(inode, FI_UPDATE_WRITE) || f2fs_exist_written_data(sbi, ino, UPDATE_INO)) goto flush_out; goto out; } go_write: /* * Both of fdatasync() and fsync() are able to be recovered from * sudden-power-off. */ down_read(&F2FS_I(inode)->i_sem); cp_reason = need_do_checkpoint(inode); up_read(&F2FS_I(inode)->i_sem); if (cp_reason) { /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); /* * We've secured consistency through sync_fs. Following pino * will be used only for fsynced inodes after checkpoint. 
*/ try_to_fix_pino(inode); clear_inode_flag(inode, FI_APPEND_WRITE); clear_inode_flag(inode, FI_UPDATE_WRITE); goto out; } sync_nodes: atomic_inc(&sbi->wb_sync_req[NODE]); ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id); atomic_dec(&sbi->wb_sync_req[NODE]); if (ret) goto out; /* if cp_error was enabled, we should avoid infinite loop */ if (unlikely(f2fs_cp_error(sbi))) { ret = -EIO; goto out; } if (f2fs_need_inode_block_update(sbi, ino)) { f2fs_mark_inode_dirty_sync(inode, true); f2fs_write_inode(inode, NULL); goto sync_nodes; } /* * If it's atomic_write, it's just fine to keep write ordering. So * here we don't need to wait for node write completion, since we use * node chain which serializes node blocks. If one of node writes are * reordered, we can see simply broken chain, resulting in stopping * roll-forward recovery. It means we'll recover all or none node blocks * given fsync mark. */ if (!atomic) { ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id); if (ret) goto out; } /* once recovery info is written, don't need to tack this */ f2fs_remove_ino_entry(sbi, ino, APPEND_INO); clear_inode_flag(inode, FI_APPEND_WRITE); flush_out: if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ret = f2fs_issue_flush(sbi, inode->i_ino); if (!ret) { f2fs_remove_ino_entry(sbi, ino, UPDATE_INO); clear_inode_flag(inode, FI_UPDATE_WRITE); f2fs_remove_ino_entry(sbi, ino, FLUSH_INO); } f2fs_update_time(sbi, REQ_TIME); out: trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); f2fs_trace_ios(NULL, 1); return ret; } int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) return -EIO; return f2fs_do_sync_file(file, start, end, datasync, false); } static pgoff_t __get_first_dirty_index(struct address_space *mapping, pgoff_t pgofs, int whence) { struct page *page; int nr_pages; if (whence != SEEK_DATA) return 0; /* find first dirty page index */ nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY, 1, &page); if (!nr_pages) return ULONG_MAX; pgofs = page->index; put_page(page); return pgofs; } static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr, pgoff_t dirty, pgoff_t pgofs, int whence) { switch (whence) { case SEEK_DATA: if ((blkaddr == NEW_ADDR && dirty == pgofs) || __is_valid_data_blkaddr(blkaddr)) return true; break; case SEEK_HOLE: if (blkaddr == NULL_ADDR) return true; break; } return false; } static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = inode->i_sb->s_maxbytes; struct dnode_of_data dn; pgoff_t pgofs, end_offset, dirty; loff_t data_ofs = offset; loff_t isize; int err = 0; inode_lock(inode); isize = i_size_read(inode); if (offset >= isize) goto fail; /* handle inline data case */ if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) { if (whence == SEEK_HOLE) data_ofs = isize; goto found; } pgofs = (pgoff_t)(offset >> PAGE_SHIFT); dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence); for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) { set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE); if (err && err != -ENOENT) { goto fail; } else if (err == -ENOENT) { /* direct node does not exists */ if (whence == SEEK_DATA) { pgofs = f2fs_get_next_page_offset(&dn, pgofs); continue; } else { goto found; } } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); /* find data/hole in dnode block */ for 
(; dn.ofs_in_node < end_offset; dn.ofs_in_node++, pgofs++, data_ofs = (loff_t)pgofs << PAGE_SHIFT) { block_t blkaddr; blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node); if (__is_valid_data_blkaddr(blkaddr) && !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), blkaddr, DATA_GENERIC_ENHANCE)) { f2fs_put_dnode(&dn); goto fail; } if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty, pgofs, whence)) { f2fs_put_dnode(&dn); goto found; } } f2fs_put_dnode(&dn); } if (whence == SEEK_DATA) goto fail; found: if (whence == SEEK_HOLE && data_ofs > isize) data_ofs = isize; inode_unlock(inode); return vfs_setpos(file, data_ofs, maxbytes); fail: inode_unlock(inode); return -ENXIO; } static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = inode->i_sb->s_maxbytes; switch (whence) { case SEEK_SET: case SEEK_CUR: case SEEK_END: return generic_file_llseek_size(file, offset, whence, maxbytes, i_size_read(inode)); case SEEK_DATA: case SEEK_HOLE: if (offset < 0) return -ENXIO; return f2fs_seek_block(file, offset, whence); } return -EINVAL; } static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); int err; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; /* we don't need to use inline_data strictly */ err = f2fs_convert_inline_inode(inode); if (err) return err; file_accessed(file); vma->vm_ops = &f2fs_file_vm_ops; return 0; } static int f2fs_file_open(struct inode *inode, struct file *filp) { int err = fscrypt_file_open(inode, filp); if (err) return err; err = fsverity_file_open(inode, filp); if (err) return err; filp->f_mode |= FMODE_NOWAIT; return dquot_file_open(inode, filp); } void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); struct f2fs_node *raw_node; int nr_free = 0, ofs = dn->ofs_in_node, len = count; __le32 *addr; int base = 0; if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) base = get_extra_isize(dn->inode); raw_node = F2FS_NODE(dn->node_page); addr = blkaddr_in_node(raw_node) + base + ofs; for (; count > 0; count--, addr++, dn->ofs_in_node++) { block_t blkaddr = le32_to_cpu(*addr); if (blkaddr == NULL_ADDR) continue; dn->data_blkaddr = NULL_ADDR; f2fs_set_data_blkaddr(dn); if (__is_valid_data_blkaddr(blkaddr) && !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) continue; f2fs_invalidate_blocks(sbi, blkaddr); if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page)) clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN); nr_free++; } if (nr_free) { pgoff_t fofs; /* * once we invalidate valid blkaddr in range [ofs, ofs + count], * we will invalidate all blkaddr in the whole range. 
*/ fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + ofs; f2fs_update_extent_cache_range(dn, fofs, 0, len); dec_valid_block_count(sbi, dn->inode, nr_free); } dn->ofs_in_node = ofs; f2fs_update_time(sbi, REQ_TIME); trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid, dn->ofs_in_node, nr_free); } void f2fs_truncate_data_blocks(struct dnode_of_data *dn) { f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode)); } static int truncate_partial_data_page(struct inode *inode, u64 from, bool cache_only) { loff_t offset = from & (PAGE_SIZE - 1); pgoff_t index = from >> PAGE_SHIFT; struct address_space *mapping = inode->i_mapping; struct page *page; if (!offset && !cache_only) return 0; if (cache_only) { page = find_lock_page(mapping, index); if (page && PageUptodate(page)) goto truncate_out; f2fs_put_page(page, 1); return 0; } page = f2fs_get_lock_data_page(inode, index, true); if (IS_ERR(page)) return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page); truncate_out: f2fs_wait_on_page_writeback(page, DATA, true, true); zero_user(page, offset, PAGE_SIZE - offset); /* An encrypted inode should have a key and truncate the last page. */ f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode)); if (!cache_only) set_page_dirty(page); f2fs_put_page(page, 1); return 0; } int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; pgoff_t free_from; int count = 0, err = 0; struct page *ipage; bool truncate_page = false; trace_f2fs_truncate_blocks_enter(inode, from); free_from = (pgoff_t)F2FS_BLK_ALIGN(from); if (free_from >= sbi->max_file_blocks) goto free_partial; if (lock) f2fs_lock_op(sbi); ipage = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(ipage)) { err = PTR_ERR(ipage); goto out; } if (f2fs_has_inline_data(inode)) { f2fs_truncate_inline_inode(inode, ipage, from); f2fs_put_page(ipage, 1); truncate_page = true; goto out; } set_new_dnode(&dn, inode, ipage, NULL, 0); err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA); if (err) { if (err == -ENOENT) goto free_next; goto out; } count = ADDRS_PER_PAGE(dn.node_page, inode); count -= dn.ofs_in_node; f2fs_bug_on(sbi, count < 0); if (dn.ofs_in_node || IS_INODE(dn.node_page)) { f2fs_truncate_data_blocks_range(&dn, count); free_from += count; } f2fs_put_dnode(&dn); free_next: err = f2fs_truncate_inode_blocks(inode, free_from); out: if (lock) f2fs_unlock_op(sbi); free_partial: /* lastly zero out the first data page */ if (!err) err = truncate_partial_data_page(inode, from, truncate_page); trace_f2fs_truncate_blocks_exit(inode, err); return err; } int f2fs_truncate(struct inode *inode) { int err; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return 0; trace_f2fs_truncate(inode); if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) { f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE); return -EIO; } /* we should check inline_data size */ if (!f2fs_may_inline_data(inode)) { err = f2fs_convert_inline_inode(inode); if (err) return err; } err = f2fs_truncate_blocks(inode, i_size_read(inode), true); if (err) return err; inode->i_mtime = inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, false); return 0; } int f2fs_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode 
*ri; unsigned int flags; if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) && F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) { stat->result_mask |= STATX_BTIME; stat->btime.tv_sec = fi->i_crtime.tv_sec; stat->btime.tv_nsec = fi->i_crtime.tv_nsec; } flags = fi->i_flags; if (flags & F2FS_APPEND_FL) stat->attributes |= STATX_ATTR_APPEND; if (IS_ENCRYPTED(inode)) stat->attributes |= STATX_ATTR_ENCRYPTED; if (flags & F2FS_IMMUTABLE_FL) stat->attributes |= STATX_ATTR_IMMUTABLE; if (flags & F2FS_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; if (IS_VERITY(inode)) stat->attributes |= STATX_ATTR_VERITY; stat->attributes_mask |= (STATX_ATTR_APPEND | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP | STATX_ATTR_VERITY); generic_fillattr(inode, stat); /* we need to show initial sectors used for inline_data/dentries */ if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || f2fs_has_inline_dentry(inode)) stat->blocks += (stat->size + 511) >> 9; return 0; } #ifdef CONFIG_F2FS_FS_POSIX_ACL static void __setattr_copy(struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; if (ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; if (ia_valid & ATTR_ATIME) { inode->i_atime = timestamp_truncate(attr->ia_atime, inode); } if (ia_valid & ATTR_MTIME) { inode->i_mtime = timestamp_truncate(attr->ia_mtime, inode); } if (ia_valid & ATTR_CTIME) { inode->i_ctime = timestamp_truncate(attr->ia_ctime, inode); } if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) mode &= ~S_ISGID; set_acl_inode(inode, mode); } } #else #define __setattr_copy setattr_copy #endif int f2fs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); int err; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; err = setattr_prepare(dentry, attr); if (err) return err; err = fscrypt_prepare_setattr(dentry, attr); if (err) return err; err = fsverity_prepare_setattr(dentry, attr); if (err) return err; if (is_quota_modification(inode, attr)) { err = dquot_initialize(inode); if (err) return err; } if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) || (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) { f2fs_lock_op(F2FS_I_SB(inode)); err = dquot_transfer(inode, attr); if (err) { set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR); f2fs_unlock_op(F2FS_I_SB(inode)); return err; } /* * update uid/gid under lock_op(), so that dquot and inode can * be updated atomically. */ if (attr->ia_valid & ATTR_UID) inode->i_uid = attr->ia_uid; if (attr->ia_valid & ATTR_GID) inode->i_gid = attr->ia_gid; f2fs_mark_inode_dirty_sync(inode, true); f2fs_unlock_op(F2FS_I_SB(inode)); } if (attr->ia_valid & ATTR_SIZE) { loff_t old_size = i_size_read(inode); if (attr->ia_size > MAX_INLINE_DATA(inode)) { /* * should convert inline inode before i_size_write to * keep smaller than inline_data size with inline flag. */ err = f2fs_convert_inline_inode(inode); if (err) return err; } down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); truncate_setsize(inode, attr->ia_size); if (attr->ia_size <= old_size) err = f2fs_truncate(inode); /* * do not trim all blocks after i_size if target size is * larger than i_size. 
*/ up_write(&F2FS_I(inode)->i_mmap_sem); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (err) return err; down_write(&F2FS_I(inode)->i_sem); inode->i_mtime = inode->i_ctime = current_time(inode); F2FS_I(inode)->last_disk_size = i_size_read(inode); up_write(&F2FS_I(inode)->i_sem); } __setattr_copy(inode, attr); if (attr->ia_valid & ATTR_MODE) { err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode)); if (err || is_inode_flag_set(inode, FI_ACL_MODE)) { inode->i_mode = F2FS_I(inode)->i_acl_mode; clear_inode_flag(inode, FI_ACL_MODE); } } /* file size may changed here */ f2fs_mark_inode_dirty_sync(inode, true); /* inode change will produce dirty node pages flushed by checkpoint */ f2fs_balance_fs(F2FS_I_SB(inode), true); return err; } const struct inode_operations f2fs_file_inode_operations = { .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, #ifdef CONFIG_F2FS_FS_XATTR .listxattr = f2fs_listxattr, #endif .fiemap = f2fs_fiemap, }; static int fill_zero(struct inode *inode, pgoff_t index, loff_t start, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *page; if (!len) return 0; f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); page = f2fs_get_new_data_page(inode, NULL, index, false); f2fs_unlock_op(sbi); if (IS_ERR(page)) return PTR_ERR(page); f2fs_wait_on_page_writeback(page, DATA, true, true); zero_user(page, start, len); set_page_dirty(page); f2fs_put_page(page, 1); return 0; } int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end) { int err; while (pg_start < pg_end) { struct dnode_of_data dn; pgoff_t end_offset, count; set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE); if (err) { if (err == -ENOENT) { pg_start = f2fs_get_next_page_offset(&dn, pg_start); continue; } return err; } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, pg_end - pg_start); f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset); f2fs_truncate_data_blocks_range(&dn, count); f2fs_put_dnode(&dn); pg_start += count; } return 0; } static int punch_hole(struct inode *inode, loff_t offset, loff_t len) { pgoff_t pg_start, pg_end; loff_t off_start, off_end; int ret; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; off_start = offset & (PAGE_SIZE - 1); off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, off_end - off_start); if (ret) return ret; } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, PAGE_SIZE - off_start); if (ret) return ret; } if (off_end) { ret = fill_zero(inode, pg_end, 0, off_end); if (ret) return ret; } if (pg_start < pg_end) { struct address_space *mapping = inode->i_mapping; loff_t blk_start, blk_end; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); f2fs_balance_fs(sbi, true); blk_start = (loff_t)pg_start << PAGE_SHIFT; blk_end = (loff_t)pg_end << PAGE_SHIFT; down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); truncate_inode_pages_range(mapping, blk_start, blk_end - 1); f2fs_lock_op(sbi); ret = f2fs_truncate_hole(inode, pg_start, pg_end); f2fs_unlock_op(sbi); up_write(&F2FS_I(inode)->i_mmap_sem); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } } return ret; } static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr, int *do_replace, pgoff_t off, pgoff_t len) { struct 
f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; int ret, done, i; next_dnode: set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA); if (ret && ret != -ENOENT) { return ret; } else if (ret == -ENOENT) { if (dn.max_level == 0) return -ENOENT; done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node, len); blkaddr += done; do_replace += done; goto next; } done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) - dn.ofs_in_node, len); for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { *blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node); if (__is_valid_data_blkaddr(*blkaddr) && !f2fs_is_valid_blkaddr(sbi, *blkaddr, DATA_GENERIC_ENHANCE)) { f2fs_put_dnode(&dn); return -EFSCORRUPTED; } if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { if (test_opt(sbi, LFS)) { f2fs_put_dnode(&dn); return -EOPNOTSUPP; } /* do not invalidate this block address */ f2fs_update_data_blkaddr(&dn, NULL_ADDR); *do_replace = 1; } } f2fs_put_dnode(&dn); next: len -= done; off += done; if (len) goto next_dnode; return 0; } static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr, int *do_replace, pgoff_t off, int len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; int ret, i; for (i = 0; i < len; i++, do_replace++, blkaddr++) { if (*do_replace == 0) continue; set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA); if (ret) { dec_valid_block_count(sbi, inode, 1); f2fs_invalidate_blocks(sbi, *blkaddr); } else { f2fs_update_data_blkaddr(&dn, *blkaddr); } f2fs_put_dnode(&dn); } return 0; } static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, block_t *blkaddr, int *do_replace, pgoff_t src, pgoff_t dst, pgoff_t len, bool full) { struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode); pgoff_t i = 0; int ret; while (i < len) { if (blkaddr[i] == NULL_ADDR && !full) { i++; continue; } if (do_replace[i] || blkaddr[i] == NULL_ADDR) { struct dnode_of_data dn; struct node_info ni; size_t new_size; pgoff_t ilen; set_new_dnode(&dn, dst_inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE); if (ret) return ret; ret = f2fs_get_node_info(sbi, dn.nid, &ni); if (ret) { f2fs_put_dnode(&dn); return ret; } ilen = min((pgoff_t) ADDRS_PER_PAGE(dn.node_page, dst_inode) - dn.ofs_in_node, len - i); do { dn.data_blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node); f2fs_truncate_data_blocks_range(&dn, 1); if (do_replace[i]) { f2fs_i_blocks_write(src_inode, 1, false, false); f2fs_i_blocks_write(dst_inode, 1, true, false); f2fs_replace_block(sbi, &dn, dn.data_blkaddr, blkaddr[i], ni.version, true, false); do_replace[i] = 0; } dn.ofs_in_node++; i++; new_size = (loff_t)(dst + i) << PAGE_SHIFT; if (dst_inode->i_size < new_size) f2fs_i_size_write(dst_inode, new_size); } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR)); f2fs_put_dnode(&dn); } else { struct page *psrc, *pdst; psrc = f2fs_get_lock_data_page(src_inode, src + i, true); if (IS_ERR(psrc)) return PTR_ERR(psrc); pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i, true); if (IS_ERR(pdst)) { f2fs_put_page(psrc, 1); return PTR_ERR(pdst); } f2fs_copy_page(psrc, pdst); set_page_dirty(pdst); f2fs_put_page(pdst, 1); f2fs_put_page(psrc, 1); ret = f2fs_truncate_hole(src_inode, src + i, src + i + 1); if (ret) return ret; i++; } } return 0; } static int __exchange_data_block(struct inode *src_inode, struct inode *dst_inode, pgoff_t src, pgoff_t dst, pgoff_t len, bool 
full) { block_t *src_blkaddr; int *do_replace; pgoff_t olen; int ret; while (len) { olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len); src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode), array_size(olen, sizeof(block_t)), GFP_KERNEL); if (!src_blkaddr) return -ENOMEM; do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode), array_size(olen, sizeof(int)), GFP_KERNEL); if (!do_replace) { kvfree(src_blkaddr); return -ENOMEM; } ret = __read_out_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen); if (ret) goto roll_back; ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr, do_replace, src, dst, olen, full); if (ret) goto roll_back; src += olen; dst += olen; len -= olen; kvfree(src_blkaddr); kvfree(do_replace); } return 0; roll_back: __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen); kvfree(src_blkaddr); kvfree(do_replace); return ret; } static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); pgoff_t start = offset >> PAGE_SHIFT; pgoff_t end = (offset + len) >> PAGE_SHIFT; int ret; f2fs_balance_fs(sbi, true); /* avoid gc operation during block exchange */ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); truncate_pagecache(inode, offset); ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true); f2fs_unlock_op(sbi); up_write(&F2FS_I(inode)->i_mmap_sem); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); return ret; } static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) { loff_t new_size; int ret; if (offset + len >= i_size_read(inode)) return -EINVAL; /* collapse range should be aligned to block size of f2fs. */ if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) return -EINVAL; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); if (ret) return ret; ret = f2fs_do_collapse(inode, offset, len); if (ret) return ret; /* write out all moved pages, if possible */ down_write(&F2FS_I(inode)->i_mmap_sem); filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); truncate_pagecache(inode, offset); new_size = i_size_read(inode) - len; truncate_pagecache(inode, new_size); ret = f2fs_truncate_blocks(inode, new_size, true); up_write(&F2FS_I(inode)->i_mmap_sem); if (!ret) f2fs_i_size_write(inode, new_size); return ret; } static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, pgoff_t end) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); pgoff_t index = start; unsigned int ofs_in_node = dn->ofs_in_node; blkcnt_t count = 0; int ret; for (; index < end; index++, dn->ofs_in_node++) { if (datablock_addr(dn->inode, dn->node_page, dn->ofs_in_node) == NULL_ADDR) count++; } dn->ofs_in_node = ofs_in_node; ret = f2fs_reserve_new_blocks(dn, count); if (ret) return ret; dn->ofs_in_node = ofs_in_node; for (index = start; index < end; index++, dn->ofs_in_node++) { dn->data_blkaddr = datablock_addr(dn->inode, dn->node_page, dn->ofs_in_node); /* * f2fs_reserve_new_blocks will not guarantee entire block * allocation. 
*/ if (dn->data_blkaddr == NULL_ADDR) { ret = -ENOSPC; break; } if (dn->data_blkaddr != NEW_ADDR) { f2fs_invalidate_blocks(sbi, dn->data_blkaddr); dn->data_blkaddr = NEW_ADDR; f2fs_set_data_blkaddr(dn); } } f2fs_update_extent_cache_range(dn, start, 0, index - start); return ret; } static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct address_space *mapping = inode->i_mapping; pgoff_t index, pg_start, pg_end; loff_t new_size = i_size_read(inode); loff_t off_start, off_end; int ret = 0; ret = inode_newsize_ok(inode, (len + offset)); if (ret) return ret; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); if (ret) return ret; pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; off_start = offset & (PAGE_SIZE - 1); off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, off_end - off_start); if (ret) return ret; new_size = max_t(loff_t, new_size, offset + len); } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, PAGE_SIZE - off_start); if (ret) return ret; new_size = max_t(loff_t, new_size, (loff_t)pg_start << PAGE_SHIFT); } for (index = pg_start; index < pg_end;) { struct dnode_of_data dn; unsigned int end_offset; pgoff_t end; down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); truncate_pagecache_range(inode, (loff_t)index << PAGE_SHIFT, ((loff_t)pg_end << PAGE_SHIFT) - 1); f2fs_lock_op(sbi); set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE); if (ret) { f2fs_unlock_op(sbi); up_write(&F2FS_I(inode)->i_mmap_sem); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); goto out; } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); end = min(pg_end, end_offset - dn.ofs_in_node + index); ret = f2fs_do_zero_range(&dn, index, end); f2fs_put_dnode(&dn); f2fs_unlock_op(sbi); up_write(&F2FS_I(inode)->i_mmap_sem); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_balance_fs(sbi, dn.node_changed); if (ret) goto out; index = end; new_size = max_t(loff_t, new_size, (loff_t)index << PAGE_SHIFT); } if (off_end) { ret = fill_zero(inode, pg_end, 0, off_end); if (ret) goto out; new_size = max_t(loff_t, new_size, offset + len); } } out: if (new_size > i_size_read(inode)) { if (mode & FALLOC_FL_KEEP_SIZE) file_set_keep_isize(inode); else f2fs_i_size_write(inode, new_size); } return ret; } static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); pgoff_t nr, pg_start, pg_end, delta, idx; loff_t new_size; int ret = 0; new_size = i_size_read(inode) + len; ret = inode_newsize_ok(inode, new_size); if (ret) return ret; if (offset >= i_size_read(inode)) return -EINVAL; /* insert range should be aligned to block size of f2fs. 
*/ if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) return -EINVAL; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; f2fs_balance_fs(sbi, true); down_write(&F2FS_I(inode)->i_mmap_sem); ret = f2fs_truncate_blocks(inode, i_size_read(inode), true); up_write(&F2FS_I(inode)->i_mmap_sem); if (ret) return ret; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); if (ret) return ret; pg_start = offset >> PAGE_SHIFT; pg_end = (offset + len) >> PAGE_SHIFT; delta = pg_end - pg_start; idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); /* avoid gc operation during block exchange */ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); truncate_pagecache(inode, offset); while (!ret && idx > pg_start) { nr = idx - pg_start; if (nr > delta) nr = delta; idx -= nr; f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); ret = __exchange_data_block(inode, inode, idx, idx + delta, nr, false); f2fs_unlock_op(sbi); } up_write(&F2FS_I(inode)->i_mmap_sem); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); /* write out all moved pages, if possible */ down_write(&F2FS_I(inode)->i_mmap_sem); filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); truncate_pagecache(inode, offset); up_write(&F2FS_I(inode)->i_mmap_sem); if (!ret) f2fs_i_size_write(inode, new_size); return ret; } static int expand_inode_data(struct inode *inode, loff_t offset, loff_t len, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_map_blocks map = { .m_next_pgofs = NULL, .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, .m_may_create = true }; pgoff_t pg_end; loff_t new_size = i_size_read(inode); loff_t off_end; int err; err = inode_newsize_ok(inode, (len + offset)); if (err) return err; err = f2fs_convert_inline_inode(inode); if (err) return err; f2fs_balance_fs(sbi, true); pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT; off_end = (offset + len) & (PAGE_SIZE - 1); map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT; map.m_len = pg_end - map.m_lblk; if (off_end) map.m_len++; if (!map.m_len) return 0; if (f2fs_is_pinned_file(inode)) { block_t len = (map.m_len >> sbi->log_blocks_per_seg) << sbi->log_blocks_per_seg; block_t done = 0; if (map.m_len % sbi->blocks_per_seg) len += sbi->blocks_per_seg; map.m_len = sbi->blocks_per_seg; next_alloc: if (has_not_enough_free_secs(sbi, 0, GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { mutex_lock(&sbi->gc_mutex); err = f2fs_gc(sbi, true, false, NULL_SEGNO); if (err && err != -ENODATA && err != -EAGAIN) goto out_err; } down_write(&sbi->pin_sem); map.m_seg_type = CURSEG_COLD_DATA_PINNED; f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA); err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); up_write(&sbi->pin_sem); done += map.m_len; len -= map.m_len; map.m_lblk += map.m_len; if (!err && len) goto next_alloc; map.m_len = done; } else { err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); } out_err: if (err) { pgoff_t last_off; if (!map.m_len) return err; last_off = map.m_lblk + map.m_len - 1; /* update new size to the failed position */ new_size = (last_off == pg_end) ? 
			offset + len :
			(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = current_time(inode);
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);

	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close. So we should not drop
	 * any in-memory pages because of a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
	}
	return 0;
}

static int f2fs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);

	/*
	 * If the process doing a transaction crashes, we should roll back.
	 * Otherwise, other readers/writers can see a corrupted database until
	 * all the writers close their files. Since this should be done before
	 * dropping the file lock, it needs to happen in ->flush.
	 */
	if (f2fs_is_atomic_file(inode) &&
			F2FS_I(inode)->inmem_task == current)
		f2fs_drop_inmem_pages(inode);
	return 0;
}

static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	/* Is it quota file?
Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return -EPERM; if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) { if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) return -EOPNOTSUPP; if (!f2fs_empty_dir(inode)) return -ENOTEMPTY; } fi->i_flags = iflags | (fi->i_flags & ~mask); if (fi->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); else clear_inode_flag(inode, FI_PROJ_INHERIT); inode->i_ctime = current_time(inode); f2fs_set_inode_flags(inode); f2fs_mark_inode_dirty_sync(inode, true); return 0; } /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */ /* * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL. */ static const struct { u32 iflag; u32 fsflag; } f2fs_fsflags_map[] = { { F2FS_SYNC_FL, FS_SYNC_FL }, { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL }, { F2FS_APPEND_FL, FS_APPEND_FL }, { F2FS_NODUMP_FL, FS_NODUMP_FL }, { F2FS_NOATIME_FL, FS_NOATIME_FL }, { F2FS_INDEX_FL, FS_INDEX_FL }, { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL }, }; #define F2FS_GETTABLE_FS_FL ( \ FS_SYNC_FL | \ FS_IMMUTABLE_FL | \ FS_APPEND_FL | \ FS_NODUMP_FL | \ FS_NOATIME_FL | \ FS_INDEX_FL | \ FS_DIRSYNC_FL | \ FS_PROJINHERIT_FL | \ FS_ENCRYPT_FL | \ FS_INLINE_DATA_FL | \ FS_NOCOW_FL | \ FS_VERITY_FL | \ FS_CASEFOLD_FL) #define F2FS_SETTABLE_FS_FL ( \ FS_SYNC_FL | \ FS_IMMUTABLE_FL | \ FS_APPEND_FL | \ FS_NODUMP_FL | \ FS_NOATIME_FL | \ FS_DIRSYNC_FL | \ FS_PROJINHERIT_FL | \ FS_CASEFOLD_FL) /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ static inline u32 f2fs_iflags_to_fsflags(u32 iflags) { u32 fsflags = 0; int i; for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) if (iflags & f2fs_fsflags_map[i].iflag) fsflags |= f2fs_fsflags_map[i].fsflag; return fsflags; } /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */ static inline u32 f2fs_fsflags_to_iflags(u32 fsflags) { u32 iflags = 0; int i; for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) if (fsflags & f2fs_fsflags_map[i].fsflag) iflags |= f2fs_fsflags_map[i].iflag; return iflags; } static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags); if (IS_ENCRYPTED(inode)) fsflags |= FS_ENCRYPT_FL; if (IS_VERITY(inode)) fsflags |= FS_VERITY_FL; if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) fsflags |= FS_INLINE_DATA_FL; if (is_inode_flag_set(inode, FI_PIN_FILE)) fsflags |= FS_NOCOW_FL; fsflags &= F2FS_GETTABLE_FS_FL; return put_user(fsflags, (int __user *)arg); } static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); u32 fsflags, old_fsflags; u32 iflags; int ret; if (!inode_owner_or_capable(inode)) return -EACCES; if (get_user(fsflags, (int __user *)arg)) return -EFAULT; if (fsflags & ~F2FS_GETTABLE_FS_FL) return -EOPNOTSUPP; fsflags &= F2FS_SETTABLE_FS_FL; iflags = f2fs_fsflags_to_iflags(fsflags); if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) return -EOPNOTSUPP; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags); ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags); if (ret) goto out; ret = 
f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL)); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_getversion(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); return put_user(inode->i_generation, (int __user *)arg); } static int f2fs_ioc_start_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int ret; if (!inode_owner_or_capable(inode)) return -EACCES; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (filp->f_flags & O_DIRECT) return -EINVAL; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (f2fs_is_atomic_file(inode)) { if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) ret = -EINVAL; goto out; } ret = f2fs_convert_inline_inode(inode); if (ret) goto out; down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); /* * Should wait end_io to count F2FS_WB_CP_DATA correctly by * f2fs_is_atomic_file. */ if (get_dirty_pages(inode)) f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u", inode->i_ino, get_dirty_pages(inode)); ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) { up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); goto out; } spin_lock(&sbi->inode_lock[ATOMIC_FILE]); if (list_empty(&fi->inmem_ilist)) list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); sbi->atomic_files++; spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); /* add inode in inmem_list first and set atomic_file */ set_inode_flag(inode, FI_ATOMIC_FILE); clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); F2FS_I(inode)->inmem_task = current; stat_inc_atomic_write(inode); stat_update_max_atomic_write(inode); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_commit_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); int ret; if (!inode_owner_or_capable(inode)) return -EACCES; ret = mnt_want_write_file(filp); if (ret) return ret; f2fs_balance_fs(F2FS_I_SB(inode), true); inode_lock(inode); if (f2fs_is_volatile_file(inode)) { ret = -EINVAL; goto err_out; } if (f2fs_is_atomic_file(inode)) { ret = f2fs_commit_inmem_pages(inode); if (ret) goto err_out; ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); if (!ret) f2fs_drop_inmem_pages(inode); } else { ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false); } err_out: if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) { clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); ret = -EINVAL; } inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_start_volatile_write(struct file *filp) { struct inode *inode = file_inode(filp); int ret; if (!inode_owner_or_capable(inode)) return -EACCES; if (!S_ISREG(inode->i_mode)) return -EINVAL; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (f2fs_is_volatile_file(inode)) goto out; ret = f2fs_convert_inline_inode(inode); if (ret) goto out; stat_inc_volatile_write(inode); stat_update_max_volatile_write(inode); set_inode_flag(inode, FI_VOLATILE_FILE); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_release_volatile_write(struct file *filp) { struct inode *inode = file_inode(filp); int ret; if (!inode_owner_or_capable(inode)) return -EACCES; ret = mnt_want_write_file(filp); if 
(ret) return ret; inode_lock(inode); if (!f2fs_is_volatile_file(inode)) goto out; if (!f2fs_is_first_block_written(inode)) { ret = truncate_partial_data_page(inode, 0, true); goto out; } ret = punch_hole(inode, 0, F2FS_BLKSIZE); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_abort_volatile_write(struct file *filp) { struct inode *inode = file_inode(filp); int ret; if (!inode_owner_or_capable(inode)) return -EACCES; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (f2fs_is_atomic_file(inode)) f2fs_drop_inmem_pages(inode); if (f2fs_is_volatile_file(inode)) { clear_inode_flag(inode, FI_VOLATILE_FILE); stat_dec_volatile_write(inode); ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); } clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); inode_unlock(inode); mnt_drop_write_file(filp); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return ret; } static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct super_block *sb = sbi->sb; __u32 in; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(in, (__u32 __user *)arg)) return -EFAULT; if (in != F2FS_GOING_DOWN_FULLSYNC) { ret = mnt_want_write_file(filp); if (ret) return ret; } switch (in) { case F2FS_GOING_DOWN_FULLSYNC: sb = freeze_bdev(sb->s_bdev); if (IS_ERR(sb)) { ret = PTR_ERR(sb); goto out; } if (sb) { f2fs_stop_checkpoint(sbi, false); set_sbi_flag(sbi, SBI_IS_SHUTDOWN); thaw_bdev(sb->s_bdev, sb); } break; case F2FS_GOING_DOWN_METASYNC: /* do checkpoint only */ ret = f2fs_sync_fs(sb, 1); if (ret) goto out; f2fs_stop_checkpoint(sbi, false); set_sbi_flag(sbi, SBI_IS_SHUTDOWN); break; case F2FS_GOING_DOWN_NOSYNC: f2fs_stop_checkpoint(sbi, false); set_sbi_flag(sbi, SBI_IS_SHUTDOWN); break; case F2FS_GOING_DOWN_METAFLUSH: f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO); f2fs_stop_checkpoint(sbi, false); set_sbi_flag(sbi, SBI_IS_SHUTDOWN); break; case F2FS_GOING_DOWN_NEED_FSCK: set_sbi_flag(sbi, SBI_NEED_FSCK); set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); set_sbi_flag(sbi, SBI_IS_DIRTY); /* do checkpoint only */ ret = f2fs_sync_fs(sb, 1); goto out; default: ret = -EINVAL; goto out; } f2fs_stop_gc_thread(sbi); f2fs_stop_discard_thread(sbi); f2fs_drop_discard_cmd(sbi); clear_opt(sbi, DISCARD); f2fs_update_time(sbi, REQ_TIME); out: if (in != F2FS_GOING_DOWN_FULLSYNC) mnt_drop_write_file(filp); trace_f2fs_shutdown(sbi, in, ret); return ret; } static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; struct request_queue *q = bdev_get_queue(sb->s_bdev); struct fstrim_range range; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!f2fs_hw_support_discard(F2FS_SB(sb))) return -EOPNOTSUPP; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; ret = mnt_want_write_file(filp); if (ret) return ret; range.minlen = max((unsigned int)range.minlen, q->limits.discard_granularity); ret = f2fs_trim_fs(F2FS_SB(sb), &range); mnt_drop_write_file(filp); if (ret < 0) return ret; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return 0; } static bool uuid_is_nonzero(__u8 u[16]) { int i; for (i = 0; i < 16; i++) if (u[i]) return true; return false; } static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) { struct inode *inode = 
file_inode(filp); if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode))) return -EOPNOTSUPP; f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); } static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); } static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int err; if (!f2fs_sb_has_encrypt(sbi)) return -EOPNOTSUPP; err = mnt_want_write_file(filp); if (err) return err; down_write(&sbi->sb_lock); if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) goto got_it; /* update superblock with uuid */ generate_random_uuid(sbi->raw_super->encrypt_pw_salt); err = f2fs_commit_super(sbi, false); if (err) { /* undo new data */ memset(sbi->raw_super->encrypt_pw_salt, 0, 16); goto out_err; } got_it: if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt, 16)) err = -EFAULT; out_err: up_write(&sbi->sb_lock); mnt_drop_write_file(filp); return err; } static int f2fs_ioc_get_encryption_policy_ex(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg); } static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_add_key(filp, (void __user *)arg); } static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_remove_key(filp, (void __user *)arg); } static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg); } static int f2fs_ioc_get_encryption_key_status(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); } static int f2fs_ioc_gc(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); __u32 sync; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(sync, (__u32 __user *)arg)) return -EFAULT; if (f2fs_readonly(sbi->sb)) return -EROFS; ret = mnt_want_write_file(filp); if (ret) return ret; if (!sync) { if (!mutex_trylock(&sbi->gc_mutex)) { ret = -EBUSY; goto out; } } else { mutex_lock(&sbi->gc_mutex); } ret = f2fs_gc(sbi, sync, true, NULL_SEGNO); out: mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_gc_range range; u64 end; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg, sizeof(range))) return -EFAULT; if (f2fs_readonly(sbi->sb)) return -EROFS; end = range.start + range.len; if (end < range.start || range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) return -EINVAL; ret = mnt_want_write_file(filp); if (ret) return ret; do_more: if (!range.sync) { if (!mutex_trylock(&sbi->gc_mutex)) { ret = -EBUSY; goto out; } } else { 
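		/*
		 * Synchronous GC request: block here until the GC mutex is
		 * available instead of bailing out with -EBUSY as in the
		 * non-sync path above.
		 */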
mutex_lock(&sbi->gc_mutex); } ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start)); range.start += BLKS_PER_SEC(sbi); if (range.start <= end) goto do_more; out: mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled."); return -EINVAL; } ret = mnt_want_write_file(filp); if (ret) return ret; ret = f2fs_sync_fs(sbi->sb, 1); mnt_drop_write_file(filp); return ret; } static int f2fs_defragment_range(struct f2fs_sb_info *sbi, struct file *filp, struct f2fs_defragment *range) { struct inode *inode = file_inode(filp); struct f2fs_map_blocks map = { .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE , .m_may_create = false }; struct extent_info ei = {0, 0, 0}; pgoff_t pg_start, pg_end, next_pgofs; unsigned int blk_per_seg = sbi->blocks_per_seg; unsigned int total = 0, sec_num; block_t blk_end = 0; bool fragmented = false; int err; /* if in-place-update policy is enabled, don't waste time here */ if (f2fs_should_update_inplace(inode, NULL)) return -EINVAL; pg_start = range->start >> PAGE_SHIFT; pg_end = (range->start + range->len) >> PAGE_SHIFT; f2fs_balance_fs(sbi, true); inode_lock(inode); /* writeback all dirty pages in the range */ err = filemap_write_and_wait_range(inode->i_mapping, range->start, range->start + range->len - 1); if (err) goto out; /* * lookup mapping info in extent cache, skip defragmenting if physical * block addresses are continuous. */ if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) { if (ei.fofs + ei.len >= pg_end) goto out; } map.m_lblk = pg_start; map.m_next_pgofs = &next_pgofs; /* * lookup mapping info in dnode page cache, skip defragmenting if all * physical block addresses are continuous even if there are hole(s) * in logical blocks. 
*/ while (map.m_lblk < pg_end) { map.m_len = pg_end - map.m_lblk; err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); if (err) goto out; if (!(map.m_flags & F2FS_MAP_FLAGS)) { map.m_lblk = next_pgofs; continue; } if (blk_end && blk_end != map.m_pblk) fragmented = true; /* record total count of block that we're going to move */ total += map.m_len; blk_end = map.m_pblk + map.m_len; map.m_lblk += map.m_len; } if (!fragmented) { total = 0; goto out; } sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi)); /* * make sure there are enough free section for LFS allocation, this can * avoid defragment running in SSR mode when free section are allocated * intensively */ if (has_not_enough_free_secs(sbi, 0, sec_num)) { err = -EAGAIN; goto out; } map.m_lblk = pg_start; map.m_len = pg_end - pg_start; total = 0; while (map.m_lblk < pg_end) { pgoff_t idx; int cnt = 0; do_map: map.m_len = pg_end - map.m_lblk; err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); if (err) goto clear_out; if (!(map.m_flags & F2FS_MAP_FLAGS)) { map.m_lblk = next_pgofs; goto check; } set_inode_flag(inode, FI_DO_DEFRAG); idx = map.m_lblk; while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) { struct page *page; page = f2fs_get_lock_data_page(inode, idx, true); if (IS_ERR(page)) { err = PTR_ERR(page); goto clear_out; } set_page_dirty(page); f2fs_put_page(page, 1); idx++; cnt++; total++; } map.m_lblk = idx; check: if (map.m_lblk < pg_end && cnt < blk_per_seg) goto do_map; clear_inode_flag(inode, FI_DO_DEFRAG); err = filemap_fdatawrite(inode->i_mapping); if (err) goto out; } clear_out: clear_inode_flag(inode, FI_DO_DEFRAG); out: inode_unlock(inode); if (!err) range->len = (u64)total << PAGE_SHIFT; return err; } static int f2fs_ioc_defragment(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_defragment range; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode)) return -EINVAL; if (f2fs_readonly(sbi->sb)) return -EROFS; if (copy_from_user(&range, (struct f2fs_defragment __user *)arg, sizeof(range))) return -EFAULT; /* verify alignment of offset & size */ if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) return -EINVAL; if (unlikely((range.start + range.len) >> PAGE_SHIFT > sbi->max_file_blocks)) return -EINVAL; err = mnt_want_write_file(filp); if (err) return err; err = f2fs_defragment_range(sbi, filp, &range); mnt_drop_write_file(filp); f2fs_update_time(sbi, REQ_TIME); if (err < 0) return err; if (copy_to_user((struct f2fs_defragment __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len) { struct inode *src = file_inode(file_in); struct inode *dst = file_inode(file_out); struct f2fs_sb_info *sbi = F2FS_I_SB(src); size_t olen = len, dst_max_i_size = 0; size_t dst_osize; int ret; if (file_in->f_path.mnt != file_out->f_path.mnt || src->i_sb != dst->i_sb) return -EXDEV; if (unlikely(f2fs_readonly(src->i_sb))) return -EROFS; if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) return -EINVAL; if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst)) return -EOPNOTSUPP; if (src == dst) { if (pos_in == pos_out) return 0; if (pos_out > pos_in && pos_out < pos_in + len) return -EINVAL; } inode_lock(src); if (src != dst) { ret = -EBUSY; if (!inode_trylock(dst)) goto out; } ret = -EINVAL; if (pos_in + len > src->i_size || pos_in + len < 
pos_in) goto out_unlock; if (len == 0) olen = len = src->i_size - pos_in; if (pos_in + len == src->i_size) len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in; if (len == 0) { ret = 0; goto out_unlock; } dst_osize = dst->i_size; if (pos_out + olen > dst->i_size) dst_max_i_size = pos_out + olen; /* verify the end result is block aligned */ if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) || !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) || !IS_ALIGNED(pos_out, F2FS_BLKSIZE)) goto out_unlock; ret = f2fs_convert_inline_inode(src); if (ret) goto out_unlock; ret = f2fs_convert_inline_inode(dst); if (ret) goto out_unlock; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(src->i_mapping, pos_in, pos_in + len); if (ret) goto out_unlock; ret = filemap_write_and_wait_range(dst->i_mapping, pos_out, pos_out + len); if (ret) goto out_unlock; f2fs_balance_fs(sbi, true); down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); if (src != dst) { ret = -EBUSY; if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) goto out_src; } f2fs_lock_op(sbi); ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS, pos_out >> F2FS_BLKSIZE_BITS, len >> F2FS_BLKSIZE_BITS, false); if (!ret) { if (dst_max_i_size) f2fs_i_size_write(dst, dst_max_i_size); else if (dst_osize != dst->i_size) f2fs_i_size_write(dst, dst_osize); } f2fs_unlock_op(sbi); if (src != dst) up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); out_src: up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); out_unlock: if (src != dst) inode_unlock(dst); out: inode_unlock(src); return ret; } static int f2fs_ioc_move_range(struct file *filp, unsigned long arg) { struct f2fs_move_range range; struct fd dst; int err; if (!(filp->f_mode & FMODE_READ) || !(filp->f_mode & FMODE_WRITE)) return -EBADF; if (copy_from_user(&range, (struct f2fs_move_range __user *)arg, sizeof(range))) return -EFAULT; dst = fdget(range.dst_fd); if (!dst.file) return -EBADF; if (!(dst.file->f_mode & FMODE_WRITE)) { err = -EBADF; goto err_out; } err = mnt_want_write_file(filp); if (err) goto err_out; err = f2fs_move_file_range(filp, range.pos_in, dst.file, range.pos_out, range.len); mnt_drop_write_file(filp); if (err) goto err_out; if (copy_to_user((struct f2fs_move_range __user *)arg, &range, sizeof(range))) err = -EFAULT; err_out: fdput(dst); return err; } static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct sit_info *sm = SIT_I(sbi); unsigned int start_segno = 0, end_segno = 0; unsigned int dev_start_segno = 0, dev_end_segno = 0; struct f2fs_flush_device range; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return -EINVAL; if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg, sizeof(range))) return -EFAULT; if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || __is_large_section(sbi)) { f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1", range.dev_num, sbi->s_ndevs, sbi->segs_per_sec); return -EINVAL; } ret = mnt_want_write_file(filp); if (ret) return ret; if (range.dev_num != 0) dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk); dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk); start_segno = sm->last_victim[FLUSH_DEVICE]; if (start_segno < dev_start_segno || start_segno >= dev_end_segno) start_segno = dev_start_segno; end_segno = min(start_segno + range.segments, dev_end_segno); while (start_segno < end_segno) { if 
(!mutex_trylock(&sbi->gc_mutex)) { ret = -EBUSY; goto out; } sm->last_victim[GC_CB] = end_segno + 1; sm->last_victim[GC_GREEDY] = end_segno + 1; sm->last_victim[ALLOC_NEXT] = end_segno + 1; ret = f2fs_gc(sbi, true, true, start_segno); if (ret == -EAGAIN) ret = 0; else if (ret < 0) break; start_segno++; } out: mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature); /* Must validate to set it with SQLite behavior in Android. */ sb_feature |= F2FS_FEATURE_ATOMIC_WRITE; return put_user(sb_feature, (u32 __user *)arg); } #ifdef CONFIG_QUOTA int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) { struct dquot *transfer_to[MAXQUOTAS] = {}; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct super_block *sb = sbi->sb; int err = 0; transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); if (!IS_ERR(transfer_to[PRJQUOTA])) { err = __dquot_transfer(inode, transfer_to); if (err) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); dqput(transfer_to[PRJQUOTA]); } return err; } static int f2fs_ioc_setproject(struct file *filp, __u32 projid) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *ipage; kprojid_t kprojid; int err; if (!f2fs_sb_has_project_quota(sbi)) { if (projid != F2FS_DEF_PROJID) return -EOPNOTSUPP; else return 0; } if (!f2fs_has_extra_attr(inode)) return -EOPNOTSUPP; kprojid = make_kprojid(&init_user_ns, (projid_t)projid); if (projid_eq(kprojid, F2FS_I(inode)->i_projid)) return 0; err = -EPERM; /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return err; ipage = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(ipage)) return PTR_ERR(ipage); if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize, i_projid)) { err = -EOVERFLOW; f2fs_put_page(ipage, 1); return err; } f2fs_put_page(ipage, 1); err = dquot_initialize(inode); if (err) return err; f2fs_lock_op(sbi); err = f2fs_transfer_project_quota(inode, kprojid); if (err) goto out_unlock; F2FS_I(inode)->i_projid = kprojid; inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, true); out_unlock: f2fs_unlock_op(sbi); return err; } #else int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) { return 0; } static int f2fs_ioc_setproject(struct file *filp, __u32 projid) { if (projid != F2FS_DEF_PROJID) return -EOPNOTSUPP; return 0; } #endif /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */ /* * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS. 
*/ static const struct { u32 iflag; u32 xflag; } f2fs_xflags_map[] = { { F2FS_SYNC_FL, FS_XFLAG_SYNC }, { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE }, { F2FS_APPEND_FL, FS_XFLAG_APPEND }, { F2FS_NODUMP_FL, FS_XFLAG_NODUMP }, { F2FS_NOATIME_FL, FS_XFLAG_NOATIME }, { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT }, }; #define F2FS_SUPPORTED_XFLAGS ( \ FS_XFLAG_SYNC | \ FS_XFLAG_IMMUTABLE | \ FS_XFLAG_APPEND | \ FS_XFLAG_NODUMP | \ FS_XFLAG_NOATIME | \ FS_XFLAG_PROJINHERIT) /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */ static inline u32 f2fs_iflags_to_xflags(u32 iflags) { u32 xflags = 0; int i; for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) if (iflags & f2fs_xflags_map[i].iflag) xflags |= f2fs_xflags_map[i].xflag; return xflags; } /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */ static inline u32 f2fs_xflags_to_iflags(u32 xflags) { u32 iflags = 0; int i; for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++) if (xflags & f2fs_xflags_map[i].xflag) iflags |= f2fs_xflags_map[i].iflag; return iflags; } static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa) { struct f2fs_inode_info *fi = F2FS_I(inode); simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags)); if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); } static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct fsxattr fa; f2fs_fill_fsxattr(inode, &fa); if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa))) return -EFAULT; return 0; } static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct fsxattr fa, old_fa; u32 iflags; int err; if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) return -EFAULT; /* Make sure caller has proper permission */ if (!inode_owner_or_capable(inode)) return -EACCES; if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS) return -EOPNOTSUPP; iflags = f2fs_xflags_to_iflags(fa.fsx_xflags); if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) return -EOPNOTSUPP; err = mnt_want_write_file(filp); if (err) return err; inode_lock(inode); f2fs_fill_fsxattr(inode, &old_fa); err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa); if (err) goto out; err = f2fs_setflags_common(inode, iflags, f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS)); if (err) goto out; err = f2fs_ioc_setproject(filp, fa.fsx_projid); out: inode_unlock(inode); mnt_drop_write_file(filp); return err; } int f2fs_pin_file_control(struct inode *inode, bool inc) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); /* Use i_gc_failures for normal file as a risk signal. 
*/ if (inc) f2fs_i_gc_failures_write(inode, fi->i_gc_failures[GC_FAILURE_PIN] + 1); if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials", __func__, inode->i_ino, fi->i_gc_failures[GC_FAILURE_PIN]); clear_inode_flag(inode, FI_PIN_FILE); return -EAGAIN; } return 0; } static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); __u32 pin; int ret = 0; if (get_user(pin, (__u32 __user *)arg)) return -EFAULT; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (f2fs_readonly(F2FS_I_SB(inode)->sb)) return -EROFS; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (f2fs_should_update_outplace(inode, NULL)) { ret = -EINVAL; goto out; } if (!pin) { clear_inode_flag(inode, FI_PIN_FILE); f2fs_i_gc_failures_write(inode, 0); goto done; } if (f2fs_pin_file_control(inode, false)) { ret = -EAGAIN; goto out; } ret = f2fs_convert_inline_inode(inode); if (ret) goto out; set_inode_flag(inode, FI_PIN_FILE); ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; done: f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); __u32 pin = 0; if (is_inode_flag_set(inode, FI_PIN_FILE)) pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]; return put_user(pin, (u32 __user *)arg); } int f2fs_precache_extents(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_map_blocks map; pgoff_t m_next_extent; loff_t end; int err; if (is_inode_flag_set(inode, FI_NO_EXTENT)) return -EOPNOTSUPP; map.m_lblk = 0; map.m_next_pgofs = NULL; map.m_next_extent = &m_next_extent; map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; end = F2FS_I_SB(inode)->max_file_blocks; while (map.m_lblk < end) { map.m_len = end - map.m_lblk; down_write(&fi->i_gc_rwsem[WRITE]); err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE); up_write(&fi->i_gc_rwsem[WRITE]); if (err) return err; map.m_lblk = m_next_extent; } return err; } static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg) { return f2fs_precache_extents(file_inode(filp)); } static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); __u64 block_count; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; if (copy_from_user(&block_count, (void __user *)arg, sizeof(block_count))) return -EFAULT; ret = f2fs_resize_fs(sbi, block_count); return ret; } static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) { f2fs_warn(F2FS_I_SB(inode), "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n", inode->i_ino); return -EOPNOTSUPP; } return fsverity_ioctl_enable(filp, (const void __user *)arg); } static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fsverity_ioctl_measure(filp, (void __user *)arg); } static int f2fs_get_volume_name(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); char *vbuf; int count; int err = 0; vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL); if 
(!vbuf) return -ENOMEM; down_read(&sbi->sb_lock); count = utf16s_to_utf8s(sbi->raw_super->volume_name, ARRAY_SIZE(sbi->raw_super->volume_name), UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); up_read(&sbi->sb_lock); if (copy_to_user((char __user *)arg, vbuf, min(FSLABEL_MAX, count))) err = -EFAULT; kvfree(vbuf); return err; } static int f2fs_set_volume_name(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); char *vbuf; int err = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX); if (IS_ERR(vbuf)) return PTR_ERR(vbuf); err = mnt_want_write_file(filp); if (err) goto out; down_write(&sbi->sb_lock); memset(sbi->raw_super->volume_name, 0, sizeof(sbi->raw_super->volume_name)); utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN, sbi->raw_super->volume_name, ARRAY_SIZE(sbi->raw_super->volume_name)); err = f2fs_commit_super(sbi, false); up_write(&sbi->sb_lock); mnt_drop_write_file(filp); out: kfree(vbuf); return err; } long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) return -EIO; if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)))) return -ENOSPC; switch (cmd) { case F2FS_IOC_GETFLAGS: return f2fs_ioc_getflags(filp, arg); case F2FS_IOC_SETFLAGS: return f2fs_ioc_setflags(filp, arg); case F2FS_IOC_GETVERSION: return f2fs_ioc_getversion(filp, arg); case F2FS_IOC_START_ATOMIC_WRITE: return f2fs_ioc_start_atomic_write(filp); case F2FS_IOC_COMMIT_ATOMIC_WRITE: return f2fs_ioc_commit_atomic_write(filp); case F2FS_IOC_START_VOLATILE_WRITE: return f2fs_ioc_start_volatile_write(filp); case F2FS_IOC_RELEASE_VOLATILE_WRITE: return f2fs_ioc_release_volatile_write(filp); case F2FS_IOC_ABORT_VOLATILE_WRITE: return f2fs_ioc_abort_volatile_write(filp); case F2FS_IOC_SHUTDOWN: return f2fs_ioc_shutdown(filp, arg); case FITRIM: return f2fs_ioc_fitrim(filp, arg); case F2FS_IOC_SET_ENCRYPTION_POLICY: return f2fs_ioc_set_encryption_policy(filp, arg); case F2FS_IOC_GET_ENCRYPTION_POLICY: return f2fs_ioc_get_encryption_policy(filp, arg); case F2FS_IOC_GET_ENCRYPTION_PWSALT: return f2fs_ioc_get_encryption_pwsalt(filp, arg); case FS_IOC_GET_ENCRYPTION_POLICY_EX: return f2fs_ioc_get_encryption_policy_ex(filp, arg); case FS_IOC_ADD_ENCRYPTION_KEY: return f2fs_ioc_add_encryption_key(filp, arg); case FS_IOC_REMOVE_ENCRYPTION_KEY: return f2fs_ioc_remove_encryption_key(filp, arg); case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: return f2fs_ioc_remove_encryption_key_all_users(filp, arg); case FS_IOC_GET_ENCRYPTION_KEY_STATUS: return f2fs_ioc_get_encryption_key_status(filp, arg); case F2FS_IOC_GARBAGE_COLLECT: return f2fs_ioc_gc(filp, arg); case F2FS_IOC_GARBAGE_COLLECT_RANGE: return f2fs_ioc_gc_range(filp, arg); case F2FS_IOC_WRITE_CHECKPOINT: return f2fs_ioc_write_checkpoint(filp, arg); case F2FS_IOC_DEFRAGMENT: return f2fs_ioc_defragment(filp, arg); case F2FS_IOC_MOVE_RANGE: return f2fs_ioc_move_range(filp, arg); case F2FS_IOC_FLUSH_DEVICE: return f2fs_ioc_flush_device(filp, arg); case F2FS_IOC_GET_FEATURES: return f2fs_ioc_get_features(filp, arg); case F2FS_IOC_FSGETXATTR: return f2fs_ioc_fsgetxattr(filp, arg); case F2FS_IOC_FSSETXATTR: return f2fs_ioc_fssetxattr(filp, arg); case F2FS_IOC_GET_PIN_FILE: return f2fs_ioc_get_pin_file(filp, arg); case F2FS_IOC_SET_PIN_FILE: return f2fs_ioc_set_pin_file(filp, arg); case F2FS_IOC_PRECACHE_EXTENTS: return f2fs_ioc_precache_extents(filp, arg); case F2FS_IOC_RESIZE_FS: 
return f2fs_ioc_resize_fs(filp, arg); case FS_IOC_ENABLE_VERITY: return f2fs_ioc_enable_verity(filp, arg); case FS_IOC_MEASURE_VERITY: return f2fs_ioc_measure_verity(filp, arg); case F2FS_IOC_GET_VOLUME_NAME: return f2fs_get_volume_name(filp, arg); case F2FS_IOC_SET_VOLUME_NAME: return f2fs_set_volume_name(filp, arg); default: return -ENOTTY; } } static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); ssize_t ret; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { ret = -EIO; goto out; } if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock(inode)) { ret = -EAGAIN; goto out; } } else { inode_lock(inode); } ret = generic_write_checks(iocb, from); if (ret > 0) { bool preallocated = false; size_t target_size = 0; int err; if (iov_iter_fault_in_readable(from, iov_iter_count(from))) set_inode_flag(inode, FI_NO_PREALLOC); if ((iocb->ki_flags & IOCB_NOWAIT)) { if (!f2fs_overwrite_io(inode, iocb->ki_pos, iov_iter_count(from)) || f2fs_has_inline_data(inode) || f2fs_force_buffered_io(inode, iocb, from)) { clear_inode_flag(inode, FI_NO_PREALLOC); inode_unlock(inode); ret = -EAGAIN; goto out; } } else { preallocated = true; target_size = iocb->ki_pos + iov_iter_count(from); err = f2fs_preallocate_blocks(iocb, from); if (err) { clear_inode_flag(inode, FI_NO_PREALLOC); inode_unlock(inode); ret = err; goto out; } } ret = __generic_file_write_iter(iocb, from); clear_inode_flag(inode, FI_NO_PREALLOC); /* if we couldn't write data, we should deallocate blocks. */ if (preallocated && i_size_read(inode) < target_size) f2fs_truncate(inode); if (ret > 0) f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret); } inode_unlock(inode); out: trace_f2fs_file_write_iter(inode, iocb->ki_pos, iov_iter_count(from), ret); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } #ifdef CONFIG_COMPAT long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { case F2FS_IOC32_GETFLAGS: cmd = F2FS_IOC_GETFLAGS; break; case F2FS_IOC32_SETFLAGS: cmd = F2FS_IOC_SETFLAGS; break; case F2FS_IOC32_GETVERSION: cmd = F2FS_IOC_GETVERSION; break; case F2FS_IOC_START_ATOMIC_WRITE: case F2FS_IOC_COMMIT_ATOMIC_WRITE: case F2FS_IOC_START_VOLATILE_WRITE: case F2FS_IOC_RELEASE_VOLATILE_WRITE: case F2FS_IOC_ABORT_VOLATILE_WRITE: case F2FS_IOC_SHUTDOWN: case FITRIM: case F2FS_IOC_SET_ENCRYPTION_POLICY: case F2FS_IOC_GET_ENCRYPTION_PWSALT: case F2FS_IOC_GET_ENCRYPTION_POLICY: case FS_IOC_GET_ENCRYPTION_POLICY_EX: case FS_IOC_ADD_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: case FS_IOC_GET_ENCRYPTION_KEY_STATUS: case F2FS_IOC_GARBAGE_COLLECT: case F2FS_IOC_GARBAGE_COLLECT_RANGE: case F2FS_IOC_WRITE_CHECKPOINT: case F2FS_IOC_DEFRAGMENT: case F2FS_IOC_MOVE_RANGE: case F2FS_IOC_FLUSH_DEVICE: case F2FS_IOC_GET_FEATURES: case F2FS_IOC_FSGETXATTR: case F2FS_IOC_FSSETXATTR: case F2FS_IOC_GET_PIN_FILE: case F2FS_IOC_SET_PIN_FILE: case F2FS_IOC_PRECACHE_EXTENTS: case F2FS_IOC_RESIZE_FS: case FS_IOC_ENABLE_VERITY: case FS_IOC_MEASURE_VERITY: case F2FS_IOC_GET_VOLUME_NAME: case F2FS_IOC_SET_VOLUME_NAME: break; default: return -ENOIOCTLCMD; } return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif const struct file_operations f2fs_file_operations = { .llseek = f2fs_llseek, .read_iter = generic_file_read_iter, .write_iter = f2fs_file_write_iter, .open = f2fs_file_open, .release = f2fs_release_file, .mmap = f2fs_file_mmap, .flush = f2fs_file_flush, 
.fsync = f2fs_sync_file, .fallocate = f2fs_fallocate, .unlocked_ioctl = f2fs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = f2fs_compat_ioctl, #endif .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, };
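/*
 * Illustrative userspace sketch (not part of the f2fs kernel code above): how
 * an application might read the flags that f2fs_ioc_fsgetxattr() fills in.
 * Assumptions: the file lives on an f2fs mount, the path comes from the
 * caller, and F2FS_IOC_FSGETXATTR is an alias for the generic
 * FS_IOC_FSGETXATTR from <linux/fs.h>, as the ioctl dispatch above suggests.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int fsgetxattr_example(const char *path)
{
	struct fsxattr fa;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	/* Routed to f2fs_ioc_fsgetxattr() via f2fs_ioctl() for f2fs files */
	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) == 0)
		printf("xflags=0x%x projid=%u\n", (unsigned int)fa.fsx_xflags,
		       (unsigned int)fa.fsx_projid);
	close(fd);
	return 0;
}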
966995.c
/*
 * Copyright (c) 2004, Bull S.A.. All rights reserved.
 * Created by: Sebastien Decugis
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * This file is a stress test for the pthread_mutex_lock function.
 * The steps are:
 * -> For each kind of mutex, we create 10*F threads (F is a scalability factor)
 * -> we number those threads 1 to 10.
 * -> thread 1 sends signal USR2 to the other 9 threads (which have a handler for it)
 * -> threads 2 to 6 run the loop
 *      {
 *        mutex_lock
 *        if (ctrl) exit
 *        ctrl = 1
 *        yield
 *        ctrl = 0
 *        mutex unlock
 *      }
 * -> threads 7 & 8 use a timedlock instead of lock
 * -> threads 9 & 10 use a trylock instead of lock
 *
 * -> the whole process stops when receiving signal SIGUSR1.
 *    This goal is achieved with a "do_it" variable.
 *
 * NOTE: With gcc/linux, the flag "-lrt" must be specified at link time.
 */

/********************************************************************************************/
/****************************** standard includes *****************************************/
/********************************************************************************************/
#include <pthread.h>
#include <errno.h>
#include <semaphore.h>
#include <signal.h>
#include <unistd.h>

#if _POSIX_TIMEOUTS < 0
#error "This sample needs POSIX TIMEOUTS option support"
#endif
#if _POSIX_TIMEOUTS == 0
#warning "This sample needs POSIX TIMEOUTS option support"
#endif
#if _POSIX_TIMERS < 0
#error "This sample needs POSIX TIMERS option support"
#endif
#if _POSIX_TIMERS == 0
#warning "This sample needs POSIX TIMERS option support"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <time.h>	/* required for the pthread_mutex_timedlock() function */

/********************************************************************************************/
/****************************** Test framework *****************************************/
/********************************************************************************************/
#include "testfrmw.h"
#include "testfrmw.c"
/* This header is responsible for defining the following macros:
 * UNRESOLVED(ret, descr);
 *    where descr is a description of the error and ret is an int (error code for example)
 * FAILED(descr);
 *    where descr is a short text saying why the test has failed.
 * PASSED();
 *    No parameter.
 *
 * All three macros shall terminate the calling process.
 * The testcase shall not terminate in any other manner.
 *
 * The other file defines the functions
 * void output_init()
 * void output(char * string, ...)
 *
 * Those may be used to output information.
*/ /********************************************************************************************/ /********************************** Configuration ******************************************/ /********************************************************************************************/ #ifndef SCALABILITY_FACTOR #define SCALABILITY_FACTOR 1 #endif #ifndef VERBOSE #define VERBOSE 2 #endif #define N 2 /* N * 10 * 6 * SCALABILITY_FACTOR threads will be created */ /********************************************************************************************/ /*********************************** Test case *****************************************/ /********************************************************************************************/ char do_it = 1; #ifndef WITHOUT_XOPEN int types[] = { PTHREAD_MUTEX_NORMAL, PTHREAD_MUTEX_ERRORCHECK, PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_DEFAULT }; #endif /* The following type represents the data * for one group of ten threads */ typedef struct { pthread_t threads[10]; /* The 10 threads */ pthread_mutex_t mtx; /* The mutex those threads work on */ char ctrl; /* The value used to check the behavior */ char sigok; /* Used to tell the threads they can return */ sem_t semsig; /* Semaphore for synchronizing the signal handler */ int id; /* An identifier for the threads group */ int tcnt; /* we need to make sure the threads are started before killing 'em */ pthread_mutex_t tmtx; unsigned long long sigcnt, opcnt; /* We count every iteration */ } cell_t; pthread_key_t _c; /* this key will always contain a pointer to the thread's cell */ /***** The next function is in charge of sending signal USR2 to * all the other threads in its cell, until the end of the test. */ void *sigthr(void *arg) { int ret; int i = 0; cell_t *c = (cell_t *) arg; do { sched_yield(); ret = pthread_mutex_lock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to lock the mutex"); } i = c->tcnt; ret = pthread_mutex_unlock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to unlock the mutex"); } } while (i < 9); /* Until we must stop, do */ while (do_it) { /* Wait for the semaphore */ ret = sem_wait(&(c->semsig)); if (ret != 0) { UNRESOLVED(errno, "Sem wait failed in signal thread"); } /* Kill the next thread */ i %= 9; ret = pthread_kill(c->threads[++i], SIGUSR2); if (ret != 0) { UNRESOLVED(ret, "Thread kill failed in signal thread"); } /* Increment the signal counter */ c->sigcnt++; } /* Tell the other threads they can now stop */ do { c->sigok = 1; } while (c->sigok == 0); return NULL; } /***** The next function is the signal handler * for all the others threads in the cell */ void sighdl(int sig) { int ret; cell_t *c = (cell_t *) pthread_getspecific(_c); ret = sem_post(&(c->semsig)); if (ret != 0) { UNRESOLVED(errno, "Unable to post semaphore in signal handler"); } } /***** The next function can return only when the sigthr has terminated. * This avoids the signal thread try to kill a terminated thread. 
*/ void waitsigend(cell_t * c) { while (c->sigok == 0) { sched_yield(); } } /***** The next function aims to control that no other thread * owns the mutex at the same time */ void control(cell_t * c, char *loc) { *loc++; /* change the local control value */ if (c->ctrl != 0) { FAILED("Got a non-zero value - two threads owns the mutex"); } c->ctrl = *loc; sched_yield(); if (c->ctrl != *loc) { FAILED ("Got a different value - another thread touched protected data"); } c->ctrl = 0; /* Avoid some values for the next control */ if (*loc == 120) *loc = -120; if (*loc == -1) *loc = 1; } /***** The next 3 functions are the worker threads */ void *lockthr(void *arg) { int ret; char loc; /* Local value for control */ cell_t *c = (cell_t *) arg; /* Set the thread local data key value (used in the signal handler) */ ret = pthread_setspecific(_c, arg); if (ret != 0) { UNRESOLVED(ret, "Unable to assign the thread-local-data key"); } /* Signal we're started */ ret = pthread_mutex_lock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to lock the mutex"); } c->tcnt += 1; ret = pthread_mutex_unlock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to unlock the mutex"); } do { /* Lock, control, then unlock */ ret = pthread_mutex_lock(&(c->mtx)); if (ret != 0) { UNRESOLVED(ret, "Mutex lock failed in worker thread"); } control(c, &loc); ret = pthread_mutex_unlock(&(c->mtx)); if (ret != 0) { UNRESOLVED(ret, "Mutex unlock failed in worker thread"); } /* Increment the operation counter */ c->opcnt++; } while (do_it); /* Wait for the signal thread to terminate before we can exit */ waitsigend(c); return NULL; } void *timedlockthr(void *arg) { int ret; char loc; /* Local value for control */ struct timespec ts; cell_t *c = (cell_t *) arg; /* Set the thread local data key value (used in the signal handler) */ ret = pthread_setspecific(_c, arg); if (ret != 0) { UNRESOLVED(ret, "Unable to assign the thread-local-data key"); } /* Signal we're started */ ret = pthread_mutex_lock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to lock the mutex"); } c->tcnt += 1; ret = pthread_mutex_unlock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to unlock the mutex"); } do { /* Lock, control, then unlock */ do { ret = clock_gettime(CLOCK_REALTIME, &ts); if (ret != 0) { UNRESOLVED(errno, "Unable to get time for timeout"); } ts.tv_sec++; /* We will wait for 1 second */ ret = pthread_mutex_timedlock(&(c->mtx), &ts); } while (ret == ETIMEDOUT); if (ret != 0) { UNRESOLVED(ret, "Timed mutex lock failed in worker thread"); } control(c, &loc); ret = pthread_mutex_unlock(&(c->mtx)); if (ret != 0) { UNRESOLVED(ret, "Mutex unlock failed in worker thread"); } /* Increment the operation counter */ c->opcnt++; } while (do_it); /* Wait for the signal thread to terminate before we can exit */ waitsigend(c); return NULL; } void *trylockthr(void *arg) { int ret; char loc; /* Local value for control */ cell_t *c = (cell_t *) arg; /* Set the thread local data key value (used in the signal handler) */ ret = pthread_setspecific(_c, arg); if (ret != 0) { UNRESOLVED(ret, "Unable to assign the thread-local-data key"); } /* Signal we're started */ ret = pthread_mutex_lock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to lock the mutex"); } c->tcnt += 1; ret = pthread_mutex_unlock(&(c->tmtx)); if (ret != 0) { UNRESOLVED(ret, "Failed to unlock the mutex"); } do { /* Lock, control, then unlock */ do { ret = pthread_mutex_trylock(&(c->mtx)); } while (ret == EBUSY); if (ret != 0) { UNRESOLVED(ret, "Mutex lock try failed in worker 
thread"); } control(c, &loc); ret = pthread_mutex_unlock(&(c->mtx)); if (ret != 0) { UNRESOLVED(ret, "Mutex unlock failed in worker thread"); } /* Increment the operation counter */ c->opcnt++; } while (do_it); /* Wait for the signal thread to terminate before we can exit */ waitsigend(c); return NULL; } /***** The next function initializes a cell_t object * This includes running the threads */ void cell_init(int id, cell_t * c, pthread_mutexattr_t * pma) { int ret, i; pthread_attr_t pa; /* We will specify a minimal stack size */ /* mark this group with its ID */ c->id = id; /* Initialize some other values */ c->sigok = 0; c->ctrl = 0; c->sigcnt = 0; c->opcnt = 0; c->tcnt = 0; /* Initialize the mutex */ ret = pthread_mutex_init(&(c->tmtx), NULL); if (ret != 0) { UNRESOLVED(ret, "Mutex init failed"); } ret = pthread_mutex_init(&(c->mtx), pma); if (ret != 0) { UNRESOLVED(ret, "Mutex init failed"); } #if VERBOSE > 1 output("Mutex initialized in cell %i\n", id); #endif /* Initialize the semaphore */ ret = sem_init(&(c->semsig), 0, 0); if (ret != 0) { UNRESOLVED(errno, "Sem init failed"); } #if VERBOSE > 1 output("Semaphore initialized in cell %i\n", id); #endif /* Create the thread attribute with the minimal size */ ret = pthread_attr_init(&pa); if (ret != 0) { UNRESOLVED(ret, "Unable to create pthread attribute object"); } ret = pthread_attr_setstacksize(&pa, sysconf(_SC_THREAD_STACK_MIN)); if (ret != 0) { UNRESOLVED(ret, "Unable to specify minimal stack size"); } /* Create the signal thread */ ret = pthread_create(&(c->threads[0]), &pa, sigthr, (void *)c); if (ret != 0) { UNRESOLVED(ret, "Unable to create the signal thread"); } /* Create 5 "lock" threads */ for (i = 1; i <= 5; i++) { ret = pthread_create(&(c->threads[i]), &pa, lockthr, (void *)c); if (ret != 0) { UNRESOLVED(ret, "Unable to create a locker thread"); } } /* Create 2 "timedlock" threads */ for (i = 6; i <= 7; i++) { ret = pthread_create(&(c->threads[i]), &pa, timedlockthr, (void *)c); if (ret != 0) { UNRESOLVED(ret, "Unable to create a (timed) locker thread"); } } /* Create 2 "trylock" threads */ for (i = 8; i <= 9; i++) { ret = pthread_create(&(c->threads[i]), &pa, trylockthr, (void *)c); if (ret != 0) { UNRESOLVED(ret, "Unable to create a (try) locker thread"); } } #if VERBOSE > 1 output("All threads initialized in cell %i\n", id); #endif /* Destroy the thread attribute object */ ret = pthread_attr_destroy(&pa); if (ret != 0) { UNRESOLVED(ret, "Unable to destroy thread attribute object"); } /* Tell the signal thread to start working */ ret = sem_post(&(c->semsig)); if (ret != 0) { UNRESOLVED(ret, "Unable to post signal semaphore"); } } /***** The next function destroys a cell_t object * This includes stopping the threads */ void cell_fini(int id, cell_t * c, unsigned long long *globalopcount, unsigned long long *globalsigcount) { int ret, i; /* Just a basic check */ if (id != c->id) { output("Something is wrong: Cell %i has id %i\n", id, c->id); FAILED("Some memory has been corrupted"); } /* Start with joining the threads */ for (i = 0; i < 10; i++) { ret = pthread_join(c->threads[i], NULL); if (ret != 0) { UNRESOLVED(ret, "Unable to join a thread"); } } /* Destroy the semaphore and the mutex */ ret = sem_destroy(&(c->semsig)); if (ret != 0) { UNRESOLVED(errno, "Unable to destroy the semaphore"); } ret = pthread_mutex_destroy(&(c->mtx)); if (ret != 0) { output("Unable to destroy the mutex in cell %i (ret = %i)\n", id, ret); FAILED("Mutex destruction failed"); } /* Report the cell counters */ *globalopcount += c->opcnt; 
*globalsigcount += c->sigcnt; #if VERBOSE > 1 output ("Counters for cell %i:\n\t%llu locks and unlocks\n\t%llu signals\n", id, c->opcnt, c->sigcnt); #endif /* We are done with this cell. */ } /**** Next function is called when the process is killed with SIGUSR1 * It tells every threads in every cells to stop their work. */ void globalsig(int sig) { output("Signal received, processing. Please wait...\n"); do { do_it = 0; } while (do_it); } /****** * Last but not least, the main function */ int main(int argc, char *argv[]) { /* Main is responsible for : * the mutex attributes initializing * the creation of the cells * the destruction of everything on SIGUSR1 reception */ int ret; int i; struct sigaction sa; unsigned long long globopcnt = 0, globsigcnt = 0; #ifndef WITHOUT_XOPEN int sz = 2 + (sizeof(types) / sizeof(int)); #else int sz = 2; #endif pthread_mutexattr_t ma[sz - 1]; pthread_mutexattr_t *pma[sz]; cell_t data[sz * N * SCALABILITY_FACTOR]; pma[sz - 1] = NULL; #if VERBOSE > 0 output("Mutex lock / unlock stress sample is starting\n"); output("Kill with SIGUSR1 to stop the process\n"); output("\t kill -USR1 <pid>\n\n"); #endif /* Initialize the mutex attributes */ for (i = 0; i < sz - 1; i++) { pma[i] = &ma[i]; ret = pthread_mutexattr_init(pma[i]); if (ret != 0) { UNRESOLVED(ret, "Unable to init a mutex attribute object"); } #ifndef WITHOUT_XOPEN /* we have the mutex attribute types */ if (i != 0) { ret = pthread_mutexattr_settype(pma[i], types[i - 1]); if (ret != 0) { UNRESOLVED(ret, "Unable to set type of a mutex attribute object"); } } #endif } #if VERBOSE > 1 output("%i mutex attribute objects were initialized\n", sz - 1); #endif /* Initialize the thread-local-data key */ ret = pthread_key_create(&_c, NULL); if (ret != 0) { UNRESOLVED(ret, "Unable to initialize TLD key"); } #if VERBOSE > 1 output("TLD key initialized\n"); #endif /* Register the signal handler for SIGUSR1 */ sigemptyset(&sa.sa_mask); sa.sa_flags = 0; sa.sa_handler = globalsig; if ((ret = sigaction(SIGUSR1, &sa, NULL))) { UNRESOLVED(ret, "Unable to register signal handler"); } /* Register the signal handler for SIGUSR2 */ sa.sa_handler = sighdl; if ((ret = sigaction(SIGUSR2, &sa, NULL))) { UNRESOLVED(ret, "Unable to register signal handler"); } /* Start every threads */ #if VERBOSE > 0 output("%i cells of 10 threads are being created...\n", sz * N * SCALABILITY_FACTOR); #endif for (i = 0; i < sz * N * SCALABILITY_FACTOR; i++) cell_init(i, &data[i], pma[i % sz]); #if VERBOSE > 0 output("All threads created and running.\n"); #endif /* We stay here while not interrupted */ do { sched_yield(); } while (do_it); #if VERBOSE > 0 output("Starting to join the threads...\n"); #endif /* Everybody is stopping, we must join them, and destroy the cell data */ for (i = 0; i < sz * N * SCALABILITY_FACTOR; i++) cell_fini(i, &data[i], &globopcnt, &globsigcnt); /* Destroy the mutex attributes objects */ for (i = 0; i < sz - 1; i++) { ret = pthread_mutexattr_destroy(pma[i]); if (ret != 0) { UNRESOLVED(ret, "Unable to destroy a mutex attribute object"); } } /* Destroy the thread-local-data key */ ret = pthread_key_delete(_c); if (ret != 0) { UNRESOLVED(ret, "Unable to destroy TLD key"); } #if VERBOSE > 1 output("TLD key destroyed\n"); #endif /* output the total counters */ #if VERBOSE > 1 output("===============================================\n"); #endif #if VERBOSE > 0 output("Total counters:\n\t%llu locks and unlocks\n\t%llu signals\n", globopcnt, globsigcnt); output("pthread_mutex_lock stress test passed.\n"); #endif PASSED; }
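/*
 * Minimal standalone sketch of the timedlock pattern used by threads 7 & 8
 * above: re-arm a one-second absolute deadline and retry
 * pthread_mutex_timedlock() while it keeps returning ETIMEDOUT. Assumes the
 * same POSIX TIMEOUTS/TIMERS support the test checks for; link with
 * -lpthread (and -lrt where clock_gettime needs it).
 */
#include <pthread.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	struct timespec ts;
	int ret;

	do {
		if (clock_gettime(CLOCK_REALTIME, &ts) != 0)
			return 1;
		ts.tv_sec++;	/* wait at most 1 second per attempt */
		ret = pthread_mutex_timedlock(&demo_mtx, &ts);
	} while (ret == ETIMEDOUT);

	if (ret != 0) {
		fprintf(stderr, "pthread_mutex_timedlock: %d\n", ret);
		return 1;
	}
	/* protected work would go here */
	pthread_mutex_unlock(&demo_mtx);
	return 0;
}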
505992.c
#include <assert.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *x = malloc(sizeof(char) * 10);

	if (x == NULL)
		return 1;
	/* malloc() returns uninitialized memory: fill the first 8 bytes with
	 * non-zero characters so the string passed to strlen() is well defined. */
	memset(x, 'a', 8);
	x[8] = '\0';
	assert(strlen(x) == 8);
	free(x);
	return 0;
}
244015.c
/*
 * COPYRIGHT (C) STMicroelectronics 2015. All rights reserved.
 *
 * This software is the confidential and proprietary information of
 * STMicroelectronics ("Confidential Information"). You shall not
 * disclose such Confidential Information and shall use it only in
 * accordance with the terms of the license agreement you entered into
 * with STMicroelectronics
 *
 * Programming Golden Rule: Keep it Simple!
 *
 */

/*!
 * \file   VL53L0X_platform_log.c
 * \brief  Code function definitions for Ewok Platform Layer
 *
 */

//#include <stdio.h>    // sprintf(), vsnprintf(), printf()
#include <stdarg.h>	/* va_list, va_start(), va_end() used below */

#include "hal.h"
#include "chprintf.h"

#include "vl53l0x_i2c_platform.h"
#include "vl53l0x_def.h"
#include "vl53l0x_platform_log.h"

#define trace_print(level, ...) \
	trace_print_module_function(TRACE_MODULE_PLATFORM, level, \
				    TRACE_FUNCTION_NONE, ##__VA_ARGS__)
#define trace_i2c(...) \
	trace_print_module_function(TRACE_MODULE_NONE, TRACE_LEVEL_NONE, \
				    TRACE_FUNCTION_I2C, ##__VA_ARGS__)

char debug_string[VL53L0X_MAX_STRING_LENGTH_PLT];

uint32_t _trace_level = TRACE_LEVEL_WARNING;
uint32_t _trace_modules = TRACE_MODULE_NONE;
uint32_t _trace_functions = TRACE_FUNCTION_NONE;

int32_t VL53L0X_trace_config(char *filename, uint32_t modules, uint32_t level,
			     uint32_t functions)
{
	int STATUS = 0;

	_trace_functions = functions;
	_trace_level = level;
	_trace_modules = modules;

	return STATUS;
}

void trace_print_module_function(uint32_t module, uint32_t level,
				 uint32_t function, const char *format, ...)
{
	if (((level <= _trace_level) && ((module & _trace_modules) > 0)) ||
	    ((function & _trace_functions) > 0)) {
		va_list arg_list;
		char message[VL53L0X_MAX_STRING_LENGTH_PLT];

		va_start(arg_list, format);
		vsnprintf(message, VL53L0X_MAX_STRING_LENGTH_PLT, format, arg_list);
		va_end(arg_list);

		chprintf((BaseSequentialStream*)&SD2, message);
	}
}
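/*
 * Illustrative usage sketch, assuming the TRACE_* constants and the
 * prototypes used above come from vl53l0x_platform_log.h. It enables
 * warning-level tracing for the platform module and then emits one message
 * through the trace_print() macro defined in this file; the output goes to
 * SD2 via chprintf().
 */
static void vl53l0x_trace_example(void)
{
	/* The filename argument is not used by this implementation. */
	VL53L0X_trace_config(NULL, TRACE_MODULE_PLATFORM, TRACE_LEVEL_WARNING,
			     TRACE_FUNCTION_NONE);

	trace_print(TRACE_LEVEL_WARNING, "range status: %d\r\n", 0);
}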
263753.c
/* * UPnP WPS Device * Copyright (c) 2000-2003 Intel Corporation * Copyright (c) 2006-2007 Sony Corporation * Copyright (c) 2008-2009 Atheros Communications * Copyright (c) 2009-2010, Jouni Malinen <[email protected]> * * See below for more details on licensing and code history. */ /* * This has been greatly stripped down from the original file * (upnp_wps_device.c) by Ted Merrill, Atheros Communications * in order to eliminate use of the bulky libupnp library etc. * * History: * upnp_wps_device.c is/was a shim layer between wps_opt_upnp.c and * the libupnp library. * The layering (by Sony) was well done; only a very minor modification * to API of upnp_wps_device.c was required. * libupnp was found to be undesirable because: * -- It consumed too much code and data space * -- It uses multiple threads, making debugging more difficult * and possibly reducing reliability. * -- It uses static variables and only supports one instance. * The shim and libupnp are here replaced by special code written * specifically for the needs of hostapd. * Various shortcuts can and are taken to keep the code size small. * Generally, execution time is not as crucial. * * BUGS: * -- UPnP requires that we be able to resolve domain names. * While uncommon, if we have to do it then it will stall the entire * hostapd program, which is bad. * This is because we use the standard linux getaddrinfo() function * which is syncronous. * An asyncronous solution would be to use the free "ares" library. * -- Does not have a robust output buffering scheme. Uses a single * fixed size output buffer per TCP/HTTP connection, with possible (although * unlikely) possibility of overflow and likely excessive use of RAM. * A better solution would be to write the HTTP output as a buffered stream, * using chunking: (handle header specially, then) generate data with * a printf-like function into a buffer, catching buffer full condition, * then send it out surrounded by http chunking. * -- There is some code that could be separated out into the common * library to be shared with wpa_supplicant. * -- Needs renaming with module prefix to avoid polluting the debugger * namespace and causing possible collisions with other static fncs * and structure declarations when using the debugger. * -- The http error code generation is pretty bogus, hopefully no one cares. * * Author: Ted Merrill, Atheros Communications, based upon earlier work * as explained above and below. * * Copyright: * Copyright 2008 Atheros Communications. * * The original header (of upnp_wps_device.c) reads: * * Copyright (c) 2006-2007 Sony Corporation. All Rights Reserved. * * File Name: upnp_wps_device.c * Description: EAP-WPS UPnP device source * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Sony Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Portions from Intel libupnp files, e.g. genlib/net/http/httpreadwrite.c * typical header: * * Copyright (c) 2000-2003 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither name of Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Overview of WPS over UPnP: * * UPnP is a protocol that allows devices to discover each other and control * each other. In UPnP terminology, a device is either a "device" (a server * that provides information about itself and allows itself to be controlled) * or a "control point" (a client that controls "devices") or possibly both. * This file implements a UPnP "device". * * For us, we use mostly basic UPnP discovery, but the control part of interest * is WPS carried via UPnP messages. There is quite a bit of basic UPnP * discovery to do before we can get to WPS, however. * * UPnP discovery begins with "devices" send out multicast UDP packets to a * certain fixed multicast IP address and port, and "control points" sending * out other such UDP packets. * * The packets sent by devices are NOTIFY packets (not to be confused with TCP * NOTIFY packets that are used later) and those sent by control points are * M-SEARCH packets. These packets contain a simple HTTP style header. The * packets are sent redundantly to get around packet loss. 
Devices respond to * M-SEARCH packets with HTTP-like UDP packets containing HTTP/1.1 200 OK * messages, which give similar information as the UDP NOTIFY packets. * * The above UDP packets advertise the (arbitrary) TCP ports that the * respective parties will listen to. The control point can then do a HTTP * SUBSCRIBE (something like an HTTP PUT) after which the device can do a * separate HTTP NOTIFY (also like an HTTP PUT) to do event messaging. * * The control point will also do HTTP GET of the "device file" listed in the * original UDP information from the device (see UPNP_WPS_DEVICE_XML_FILE * data), and based on this will do additional GETs... HTTP POSTs are done to * cause an action. * * Beyond some basic information in HTTP headers, additional information is in * the HTTP bodies, in a format set by the SOAP and XML standards, a markup * language related to HTML used for web pages. This language is intended to * provide the ultimate in self-documentation by providing a universal * namespace based on pseudo-URLs called URIs. Note that although a URI looks * like a URL (a web address), they are never accessed as such but are used * only as identifiers. * * The POST of a GetDeviceInfo gets information similar to what might be * obtained from a probe request or response on Wi-Fi. WPS messages M1-M8 * are passed via a POST of a PutMessage; the M1-M8 WPS messages are converted * to a bin64 ascii representation for encapsulation. When proxying messages, * WLANEvent and PutWLANResponse are used. * * This of course glosses over a lot of details. */ #include "includes.h" #include <time.h> #include <net/if.h> #include <netdb.h> #include <sys/ioctl.h> #include "common.h" #include "uuid.h" #include "base64.h" #include "wps.h" #include "wps_i.h" #include "wps_upnp.h" #include "wps_upnp_i.h" /* * UPnP allows a client ("control point") to send a server like us ("device") * a domain name for registration, and we are supposed to resolve it. This is * bad because, using the standard Linux library, we will stall the entire * hostapd waiting for resolution. * * The "correct" solution would be to use an event driven library for domain * name resolution such as "ares". However, this would increase code size * further. Since it is unlikely that we'll actually see such domain names, we * can just refuse to accept them. */ #define NO_DOMAIN_NAME_RESOLUTION 1 /* 1 to allow only dotted ip addresses */ /* * UPnP does not scale well. If we were in a room with thousands of control * points then potentially we could be expected to handle subscriptions for * each of them, which would exhaust our memory. So we must set a limit. In * practice we are unlikely to see more than one or two. 
*/ #define MAX_SUBSCRIPTIONS 4 /* how many subscribing clients we handle */ #define MAX_ADDR_PER_SUBSCRIPTION 8 /* Maximum number of Probe Request events per second */ #define MAX_EVENTS_PER_SEC 5 static struct upnp_wps_device_sm *shared_upnp_device = NULL; /* Write the current date/time per RFC */ void format_date(struct wpabuf *buf) { const char *weekday_str = "Sun\0Mon\0Tue\0Wed\0Thu\0Fri\0Sat"; const char *month_str = "Jan\0Feb\0Mar\0Apr\0May\0Jun\0" "Jul\0Aug\0Sep\0Oct\0Nov\0Dec"; struct tm *date; time_t t; t = time(NULL); date = gmtime(&t); if (date == NULL) return; wpabuf_printf(buf, "%s, %02d %s %d %02d:%02d:%02d GMT", &weekday_str[date->tm_wday * 4], date->tm_mday, &month_str[date->tm_mon * 4], date->tm_year + 1900, date->tm_hour, date->tm_min, date->tm_sec); } /*************************************************************************** * UUIDs (unique identifiers) * * These are supposed to be unique in all the world. * Sometimes permanent ones are used, sometimes temporary ones * based on random numbers... there are different rules for valid content * of different types. * Each uuid is 16 bytes long. **************************************************************************/ /* uuid_make -- construct a random UUID * The UPnP documents don't seem to offer any guidelines as to which method to * use for constructing UUIDs for subscriptions. Presumably any method from * rfc4122 is good enough; I've chosen random number method. */ static int uuid_make(u8 uuid[UUID_LEN]) { if (os_get_random(uuid, UUID_LEN) < 0) return -1; /* Replace certain bits as specified in rfc4122 or X.667 */ uuid[6] &= 0x0f; uuid[6] |= (4 << 4); /* version 4 == random gen */ uuid[8] &= 0x3f; uuid[8] |= 0x80; return 0; } /* * Subscriber address handling. * Since a subscriber may have an arbitrary number of addresses, we have to * add a bunch of code to handle them. * * Addresses are passed in text, and MAY be domain names instead of the (usual * and expected) dotted IP addresses. Resolving domain names consumes a lot of * resources. Worse, we are currently using the standard Linux getaddrinfo() * which will block the entire program until complete or timeout! The proper * solution would be to use the "ares" library or similar with more state * machine steps etc. or just disable domain name resolution by setting * NO_DOMAIN_NAME_RESOLUTION to 1 at top of this file. */ /* subscr_addr_delete -- delete single unlinked subscriber address * (be sure to unlink first if need be) */ void subscr_addr_delete(struct subscr_addr *a) { /* * Note: do NOT free domain_and_port or path because they point to * memory within the allocation of "a". */ os_free(a); } /* subscr_addr_free_all -- unlink and delete list of subscriber addresses. 
*/ static void subscr_addr_free_all(struct subscription *s) { struct subscr_addr *a, *tmp; dl_list_for_each_safe(a, tmp, &s->addr_list, struct subscr_addr, list) { dl_list_del(&a->list); subscr_addr_delete(a); } } /* subscr_addr_add_url -- add address(es) for one url to subscription */ static void subscr_addr_add_url(struct subscription *s, const char *url, size_t url_len) { int alloc_len; char *scratch_mem = NULL; char *mem; char *host; char *delim; char *path; int port = 80; /* port to send to (default is port 80) */ struct addrinfo hints; struct addrinfo *result = NULL; struct addrinfo *rp; int rerr; size_t host_len, path_len; /* url MUST begin with http: */ if (url_len < 7 || os_strncasecmp(url, "http://", 7)) goto fail; url += 7; url_len -= 7; /* Make a copy of the string to allow modification during parsing */ scratch_mem = dup_binstr(url, url_len); if (scratch_mem == NULL) goto fail; wpa_printf(MSG_DEBUG, "WPS UPnP: Adding URL '%s'", scratch_mem); host = scratch_mem; path = os_strchr(host, '/'); if (path) *path++ = '\0'; /* null terminate host */ /* Process and remove optional port component */ delim = os_strchr(host, ':'); if (delim) { *delim = '\0'; /* null terminate host name for now */ if (isdigit(delim[1])) port = atol(delim + 1); } /* * getaddrinfo does the right thing with dotted decimal notations, or * will resolve domain names. Resolving domain names will unfortunately * hang the entire program until it is resolved or it times out * internal to getaddrinfo; fortunately we think that the use of actual * domain names (vs. dotted decimal notations) should be uncommon. */ os_memset(&hints, 0, sizeof(struct addrinfo)); hints.ai_family = AF_INET; /* IPv4 */ hints.ai_socktype = SOCK_STREAM; #if NO_DOMAIN_NAME_RESOLUTION /* Suppress domain name resolutions that would halt * the program for periods of time */ hints.ai_flags = AI_NUMERICHOST; #else /* Allow domain name resolution. */ hints.ai_flags = 0; #endif hints.ai_protocol = 0; /* Any protocol? */ rerr = getaddrinfo(host, NULL /* fill in port ourselves */, &hints, &result); if (rerr) { wpa_printf(MSG_INFO, "WPS UPnP: Resolve error %d (%s) on: %s", rerr, gai_strerror(rerr), host); goto fail; } if (delim) *delim = ':'; /* Restore port */ host_len = os_strlen(host); path_len = path ? os_strlen(path) : 0; alloc_len = host_len + 1 + 1 + path_len + 1; for (rp = result; rp; rp = rp->ai_next) { struct subscr_addr *a; /* Limit no. of address to avoid denial of service attack */ if (dl_list_len(&s->addr_list) >= MAX_ADDR_PER_SUBSCRIPTION) { wpa_printf(MSG_INFO, "WPS UPnP: subscr_addr_add_url: " "Ignoring excessive addresses"); break; } a = os_zalloc(sizeof(*a) + alloc_len); if (a == NULL) break; mem = (char *) (a + 1); a->domain_and_port = mem; os_memcpy(mem, host, host_len); mem += host_len + 1; a->path = mem; if (path == NULL || path[0] != '/') *mem++ = '/'; if (path) os_memcpy(mem, path, path_len); os_memcpy(&a->saddr, rp->ai_addr, sizeof(a->saddr)); a->saddr.sin_port = htons(port); dl_list_add(&s->addr_list, &a->list); } fail: if (result) freeaddrinfo(result); os_free(scratch_mem); } /* subscr_addr_list_create -- create list from urls in string. * Each url is enclosed by angle brackets. 
*/ static void subscr_addr_list_create(struct subscription *s, const char *url_list) { const char *end; wpa_printf(MSG_DEBUG, "WPS UPnP: Parsing URL list '%s'", url_list); for (;;) { while (*url_list == ' ' || *url_list == '\t') url_list++; if (*url_list != '<') break; url_list++; end = os_strchr(url_list, '>'); if (end == NULL) break; subscr_addr_add_url(s, url_list, end - url_list); url_list = end + 1; } } static void wpabuf_put_property(struct wpabuf *buf, const char *name, const char *value) { wpabuf_put_str(buf, "<e:property>"); wpabuf_printf(buf, "<%s>", name); if (value) wpabuf_put_str(buf, value); wpabuf_printf(buf, "</%s>", name); wpabuf_put_str(buf, "</e:property>\n"); } /** * upnp_wps_device_send_event - Queue event messages for subscribers * @sm: WPS UPnP state machine from upnp_wps_device_init() * * This function queues the last WLANEvent to be sent for all currently * subscribed UPnP control points. sm->wlanevent must have been set with the * encoded data before calling this function. */ static void upnp_wps_device_send_event(struct upnp_wps_device_sm *sm) { /* Enqueue event message for all subscribers */ struct wpabuf *buf; /* holds event message */ int buf_size = 0; struct subscription *s, *tmp; /* Actually, utf-8 is the default, but it doesn't hurt to specify it */ const char *format_head = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" "<e:propertyset xmlns:e=\"urn:schemas-upnp-org:event-1-0\">\n"; const char *format_tail = "</e:propertyset>\n"; struct os_reltime now; if (dl_list_empty(&sm->subscriptions)) { /* optimize */ return; } if (os_get_reltime(&now) == 0) { if (now.sec != sm->last_event_sec) { sm->last_event_sec = now.sec; sm->num_events_in_sec = 1; } else { sm->num_events_in_sec++; /* * In theory, this should apply to all WLANEvent * notifications, but EAP messages are of much higher * priority and Probe Request notifications should not * be allowed to drop EAP messages, so only throttle * Probe Request notifications. */ if (sm->num_events_in_sec > MAX_EVENTS_PER_SEC && sm->wlanevent_type == UPNP_WPS_WLANEVENT_TYPE_PROBE) { wpa_printf(MSG_DEBUG, "WPS UPnP: Throttle " "event notifications (%u seen " "during one second)", sm->num_events_in_sec); return; } } } /* Determine buffer size needed first */ buf_size += os_strlen(format_head); buf_size += 50 + 2 * os_strlen("WLANEvent"); if (sm->wlanevent) buf_size += os_strlen(sm->wlanevent); buf_size += os_strlen(format_tail); buf = wpabuf_alloc(buf_size); if (buf == NULL) return; wpabuf_put_str(buf, format_head); wpabuf_put_property(buf, "WLANEvent", sm->wlanevent); wpabuf_put_str(buf, format_tail); wpa_printf(MSG_MSGDUMP, "WPS UPnP: WLANEvent message:\n%s", (char *) wpabuf_head(buf)); dl_list_for_each_safe(s, tmp, &sm->subscriptions, struct subscription, list) { wps_upnp_event_add( s, buf, sm->wlanevent_type == UPNP_WPS_WLANEVENT_TYPE_PROBE); } wpabuf_free(buf); } /* * Event subscription (subscriber machines register with us to receive event * messages). * This is the result of an incoming HTTP over TCP SUBSCRIBE request. */ /* subscription_destroy -- destroy an unlinked subscription * Be sure to unlink first if necessary. 
*/ void subscription_destroy(struct subscription *s) { struct upnp_wps_device_interface *iface; wpa_printf(MSG_DEBUG, "WPS UPnP: Destroy subscription %p", s); subscr_addr_free_all(s); wps_upnp_event_delete_all(s); dl_list_for_each(iface, &s->sm->interfaces, struct upnp_wps_device_interface, list) upnp_er_remove_notification(iface->wps->registrar, s); os_free(s); } /* subscription_list_age -- remove expired subscriptions */ static void subscription_list_age(struct upnp_wps_device_sm *sm, time_t now) { struct subscription *s, *tmp; dl_list_for_each_safe(s, tmp, &sm->subscriptions, struct subscription, list) { if (s->timeout_time > now) break; wpa_printf(MSG_DEBUG, "WPS UPnP: Removing aged subscription"); dl_list_del(&s->list); subscription_destroy(s); } } /* subscription_find -- return existing subscription matching uuid, if any * returns NULL if not found */ struct subscription * subscription_find(struct upnp_wps_device_sm *sm, const u8 uuid[UUID_LEN]) { struct subscription *s; dl_list_for_each(s, &sm->subscriptions, struct subscription, list) { if (os_memcmp(s->uuid, uuid, UUID_LEN) == 0) return s; /* Found match */ } return NULL; } static struct wpabuf * build_fake_wsc_ack(void) { struct wpabuf *msg = wpabuf_alloc(100); if (msg == NULL) return NULL; wpabuf_put_u8(msg, UPNP_WPS_WLANEVENT_TYPE_EAP); wpabuf_put_str(msg, "00:00:00:00:00:00"); if (wps_build_version(msg) || wps_build_msg_type(msg, WPS_WSC_ACK)) { wpabuf_free(msg); return NULL; } /* Enrollee Nonce */ wpabuf_put_be16(msg, ATTR_ENROLLEE_NONCE); wpabuf_put_be16(msg, WPS_NONCE_LEN); wpabuf_put(msg, WPS_NONCE_LEN); /* Registrar Nonce */ wpabuf_put_be16(msg, ATTR_REGISTRAR_NONCE); wpabuf_put_be16(msg, WPS_NONCE_LEN); wpabuf_put(msg, WPS_NONCE_LEN); if (wps_build_wfa_ext(msg, 0, NULL, 0, 0)) { wpabuf_free(msg); return NULL; } return msg; } /* subscription_first_event -- send format/queue event that is automatically * sent on a new subscription. */ static int subscription_first_event(struct subscription *s) { /* * Actually, utf-8 is the default, but it doesn't hurt to specify it. * * APStatus is apparently a bit set, * 0x1 = configuration change (but is always set?) * 0x10 = ap is locked * * Per UPnP spec, we send out the last value of each variable, even * for WLANEvent, whatever it was. */ char *wlan_event; struct wpabuf *buf; int ap_status = 1; /* TODO: add 0x10 if access point is locked */ const char *head = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" "<e:propertyset xmlns:e=\"urn:schemas-upnp-org:event-1-0\">\n"; const char *tail = "</e:propertyset>\n"; char txt[10]; int ret; if (s->sm->wlanevent == NULL) { /* * There has been no events before the subscription. However, * UPnP device architecture specification requires all the * evented variables to be included, so generate a dummy event * for this particular case using a WSC_ACK and all-zeros * nonces. The ER (UPnP control point) will ignore this, but at * least it will learn that WLANEvent variable will be used in * event notifications in the future. 
*/ struct wpabuf *msg; wpa_printf(MSG_DEBUG, "WPS UPnP: Use a fake WSC_ACK as the " "initial WLANEvent"); msg = build_fake_wsc_ack(); if (msg) { s->sm->wlanevent = base64_encode(wpabuf_head(msg), wpabuf_len(msg), NULL); wpabuf_free(msg); } } wlan_event = s->sm->wlanevent; if (wlan_event == NULL || *wlan_event == '\0') { wpa_printf(MSG_DEBUG, "WPS UPnP: WLANEvent not known for " "initial event message"); wlan_event = ""; } buf = wpabuf_alloc(500 + os_strlen(wlan_event)); if (buf == NULL) return -1; wpabuf_put_str(buf, head); wpabuf_put_property(buf, "STAStatus", "1"); os_snprintf(txt, sizeof(txt), "%d", ap_status); wpabuf_put_property(buf, "APStatus", txt); if (*wlan_event) wpabuf_put_property(buf, "WLANEvent", wlan_event); wpabuf_put_str(buf, tail); ret = wps_upnp_event_add(s, buf, 0); if (ret) { wpabuf_free(buf); return ret; } wpabuf_free(buf); return 0; } /** * subscription_start - Remember a UPnP control point to send events to. * @sm: WPS UPnP state machine from upnp_wps_device_init() * @callback_urls: Callback URLs * Returns: %NULL on error, or pointer to new subscription structure. */ struct subscription * subscription_start(struct upnp_wps_device_sm *sm, const char *callback_urls) { struct subscription *s; time_t now = time(NULL); time_t expire = now + UPNP_SUBSCRIBE_SEC; char str[80]; /* Get rid of expired subscriptions so we have room */ subscription_list_age(sm, now); /* If too many subscriptions, remove oldest */ if (dl_list_len(&sm->subscriptions) >= MAX_SUBSCRIPTIONS) { s = dl_list_first(&sm->subscriptions, struct subscription, list); if (s) { wpa_printf(MSG_INFO, "WPS UPnP: Too many subscriptions, trashing oldest"); dl_list_del(&s->list); subscription_destroy(s); } } s = os_zalloc(sizeof(*s)); if (s == NULL) return NULL; dl_list_init(&s->addr_list); dl_list_init(&s->event_queue); s->sm = sm; s->timeout_time = expire; if (uuid_make(s->uuid) < 0) { subscription_destroy(s); return NULL; } subscr_addr_list_create(s, callback_urls); if (dl_list_empty(&s->addr_list)) { wpa_printf(MSG_DEBUG, "WPS UPnP: No valid callback URLs in " "'%s' - drop subscription", callback_urls); subscription_destroy(s); return NULL; } /* Add to end of list, since it has the highest expiration time */ dl_list_add_tail(&sm->subscriptions, &s->list); /* Queue up immediate event message (our last event) * as required by UPnP spec. 
*/ if (subscription_first_event(s)) { wpa_printf(MSG_INFO, "WPS UPnP: Dropping subscriber due to " "event backlog"); dl_list_del(&s->list); subscription_destroy(s); return NULL; } uuid_bin2str(s->uuid, str, sizeof(str)); wpa_printf(MSG_DEBUG, "WPS UPnP: Subscription %p (SID %s) started with %s", s, str, callback_urls); /* Schedule sending this */ wps_upnp_event_send_all_later(sm); return s; } /* subscription_renew -- find subscription and reset timeout */ struct subscription * subscription_renew(struct upnp_wps_device_sm *sm, const u8 uuid[UUID_LEN]) { time_t now = time(NULL); time_t expire = now + UPNP_SUBSCRIBE_SEC; struct subscription *s = subscription_find(sm, uuid); if (s == NULL) return NULL; wpa_printf(MSG_DEBUG, "WPS UPnP: Subscription renewed"); dl_list_del(&s->list); s->timeout_time = expire; /* add back to end of list, since it now has highest expiry */ dl_list_add_tail(&sm->subscriptions, &s->list); return s; } /** * upnp_wps_device_send_wlan_event - Event notification * @sm: WPS UPnP state machine from upnp_wps_device_init() * @from_mac_addr: Source (Enrollee) MAC address for the event * @ev_type: Event type * @msg: Event data * Returns: 0 on success, -1 on failure * * Tell external Registrars (UPnP control points) that something happened. In * particular, events include WPS messages from clients that are proxied to * external Registrars. */ int upnp_wps_device_send_wlan_event(struct upnp_wps_device_sm *sm, const u8 from_mac_addr[ETH_ALEN], enum upnp_wps_wlanevent_type ev_type, const struct wpabuf *msg) { int ret = -1; char type[2]; const u8 *mac = from_mac_addr; char mac_text[18]; u8 *raw = NULL; size_t raw_len; char *val; size_t val_len; int pos = 0; if (!sm) goto fail; os_snprintf(type, sizeof(type), "%1u", ev_type); raw_len = 1 + 17 + (msg ? 
wpabuf_len(msg) : 0); raw = os_zalloc(raw_len); if (!raw) goto fail; *(raw + pos) = (u8) ev_type; pos += 1; os_snprintf(mac_text, sizeof(mac_text), MACSTR, MAC2STR(mac)); wpa_printf(MSG_DEBUG, "WPS UPnP: Proxying WLANEvent from %s", mac_text); os_memcpy(raw + pos, mac_text, 17); pos += 17; if (msg) { os_memcpy(raw + pos, wpabuf_head(msg), wpabuf_len(msg)); pos += wpabuf_len(msg); } raw_len = pos; val = base64_encode(raw, raw_len, &val_len); if (val == NULL) goto fail; os_free(sm->wlanevent); sm->wlanevent = val; sm->wlanevent_type = ev_type; upnp_wps_device_send_event(sm); ret = 0; fail: os_free(raw); return ret; } #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) #include <sys/sysctl.h> #include <net/route.h> #include <net/if_dl.h> static int eth_get(const char *device, u8 ea[ETH_ALEN]) { struct if_msghdr *ifm; struct sockaddr_dl *sdl; u_char *p, *buf; size_t len; int mib[] = { CTL_NET, AF_ROUTE, 0, AF_LINK, NET_RT_IFLIST, 0 }; if (sysctl(mib, 6, NULL, &len, NULL, 0) < 0) return -1; if ((buf = os_malloc(len)) == NULL) return -1; if (sysctl(mib, 6, buf, &len, NULL, 0) < 0) { os_free(buf); return -1; } for (p = buf; p < buf + len; p += ifm->ifm_msglen) { ifm = (struct if_msghdr *)p; sdl = (struct sockaddr_dl *)(ifm + 1); if (ifm->ifm_type != RTM_IFINFO || (ifm->ifm_addrs & RTA_IFP) == 0) continue; if (sdl->sdl_family != AF_LINK || sdl->sdl_nlen == 0 || os_memcmp(sdl->sdl_data, device, sdl->sdl_nlen) != 0) continue; os_memcpy(ea, LLADDR(sdl), sdl->sdl_alen); break; } os_free(buf); if (p >= buf + len) { errno = ESRCH; return -1; } return 0; } #endif /* __FreeBSD__ */ /** * get_netif_info - Get hw and IP addresses for network device * @net_if: Selected network interface name * @ip_addr: Buffer for returning IP address in network byte order * @ip_addr_text: Buffer for returning a pointer to allocated IP address text * @mac: Buffer for returning MAC address * Returns: 0 on success, -1 on failure */ int get_netif_info(const char *net_if, unsigned *ip_addr, char **ip_addr_text, u8 mac[ETH_ALEN]) { struct ifreq req; int sock = -1; struct sockaddr_in *addr; struct in_addr in_addr; *ip_addr_text = os_zalloc(16); if (*ip_addr_text == NULL) goto fail; sock = socket(AF_INET, SOCK_DGRAM, 0); if (sock < 0) goto fail; os_strlcpy(req.ifr_name, net_if, sizeof(req.ifr_name)); if (ioctl(sock, SIOCGIFADDR, &req) < 0) { wpa_printf(MSG_ERROR, "WPS UPnP: SIOCGIFADDR failed: %d (%s)", errno, strerror(errno)); goto fail; } addr = (void *) &req.ifr_addr; *ip_addr = addr->sin_addr.s_addr; in_addr.s_addr = *ip_addr; os_snprintf(*ip_addr_text, 16, "%s", inet_ntoa(in_addr)); #ifdef __linux__ os_strlcpy(req.ifr_name, net_if, sizeof(req.ifr_name)); if (ioctl(sock, SIOCGIFHWADDR, &req) < 0) { wpa_printf(MSG_ERROR, "WPS UPnP: SIOCGIFHWADDR failed: " "%d (%s)", errno, strerror(errno)); goto fail; } os_memcpy(mac, req.ifr_addr.sa_data, 6); #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) if (eth_get(net_if, mac) < 0) { wpa_printf(MSG_ERROR, "WPS UPnP: Failed to get MAC address"); goto fail; } #else #error MAC address fetch not implemented #endif close(sock); return 0; fail: if (sock >= 0) close(sock); os_free(*ip_addr_text); *ip_addr_text = NULL; return -1; } static void upnp_wps_free_msearchreply(struct dl_list *head) { struct advertisement_state_machine *a, *tmp; dl_list_for_each_safe(a, tmp, head, struct advertisement_state_machine, list) msearchreply_state_machine_stop(a); } static void upnp_wps_free_subscriptions(struct dl_list *head, struct wps_registrar *reg) { struct subscription *s, *tmp; 
dl_list_for_each_safe(s, tmp, head, struct subscription, list) { if (reg && s->reg != reg) continue; dl_list_del(&s->list); subscription_destroy(s); } } /** * upnp_wps_device_stop - Stop WPS UPnP operations on an interface * @sm: WPS UPnP state machine from upnp_wps_device_init() */ static void upnp_wps_device_stop(struct upnp_wps_device_sm *sm) { if (!sm || !sm->started) return; wpa_printf(MSG_DEBUG, "WPS UPnP: Stop device"); web_listener_stop(sm); ssdp_listener_stop(sm); upnp_wps_free_msearchreply(&sm->msearch_replies); upnp_wps_free_subscriptions(&sm->subscriptions, NULL); advertisement_state_machine_stop(sm, 1); wps_upnp_event_send_stop_all(sm); os_free(sm->wlanevent); sm->wlanevent = NULL; os_free(sm->ip_addr_text); sm->ip_addr_text = NULL; if (sm->multicast_sd >= 0) close(sm->multicast_sd); sm->multicast_sd = -1; sm->started = 0; } /** * upnp_wps_device_start - Start WPS UPnP operations on an interface * @sm: WPS UPnP state machine from upnp_wps_device_init() * @net_if: Selected network interface name * Returns: 0 on success, -1 on failure */ static int upnp_wps_device_start(struct upnp_wps_device_sm *sm, char *net_if) { if (!sm || !net_if) return -1; if (sm->started) upnp_wps_device_stop(sm); sm->multicast_sd = -1; sm->ssdp_sd = -1; sm->started = 1; sm->advertise_count = 0; /* Fix up linux multicast handling */ if (add_ssdp_network(net_if)) goto fail; /* Determine which IP and mac address we're using */ if (get_netif_info(net_if, &sm->ip_addr, &sm->ip_addr_text, sm->mac_addr)) { wpa_printf(MSG_INFO, "WPS UPnP: Could not get IP/MAC address " "for %s. Does it have IP address?", net_if); goto fail; } /* Listen for incoming TCP connections so that others * can fetch our "xml files" from us. */ if (web_listener_start(sm)) goto fail; /* Set up for receiving discovery (UDP) packets */ if (ssdp_listener_start(sm)) goto fail; /* Set up for sending multicast */ if (ssdp_open_multicast(sm) < 0) goto fail; /* * Broadcast NOTIFY messages to let the world know we exist. * This is done via a state machine since the messages should not be * all sent out at once. 
*/ if (advertisement_state_machine_start(sm)) goto fail; return 0; fail: upnp_wps_device_stop(sm); return -1; } static struct upnp_wps_device_interface * upnp_wps_get_iface(struct upnp_wps_device_sm *sm, void *priv) { struct upnp_wps_device_interface *iface; dl_list_for_each(iface, &sm->interfaces, struct upnp_wps_device_interface, list) { if (iface->priv == priv) return iface; } return NULL; } /** * upnp_wps_device_deinit - Deinitialize WPS UPnP * @sm: WPS UPnP state machine from upnp_wps_device_init() * @priv: External context data that was used in upnp_wps_device_init() call */ void upnp_wps_device_deinit(struct upnp_wps_device_sm *sm, void *priv) { struct upnp_wps_device_interface *iface; struct upnp_wps_peer *peer; if (!sm) return; iface = upnp_wps_get_iface(sm, priv); if (iface == NULL) { wpa_printf(MSG_ERROR, "WPS UPnP: Could not find the interface " "instance to deinit"); return; } wpa_printf(MSG_DEBUG, "WPS UPnP: Deinit interface instance %p", iface); if (dl_list_len(&sm->interfaces) == 1) { wpa_printf(MSG_DEBUG, "WPS UPnP: Deinitializing last instance " "- free global device instance"); upnp_wps_device_stop(sm); } else upnp_wps_free_subscriptions(&sm->subscriptions, iface->wps->registrar); dl_list_del(&iface->list); while ((peer = dl_list_first(&iface->peers, struct upnp_wps_peer, list))) { if (peer->wps) wps_deinit(peer->wps); dl_list_del(&peer->list); os_free(peer); } os_free(iface->ctx->ap_pin); os_free(iface->ctx); os_free(iface); if (dl_list_empty(&sm->interfaces)) { os_free(sm->root_dir); os_free(sm->desc_url); os_free(sm); shared_upnp_device = NULL; } } /** * upnp_wps_device_init - Initialize WPS UPnP * @ctx: callback table; we must eventually free it * @wps: Pointer to longterm WPS context * @priv: External context data that will be used in callbacks * @net_if: Selected network interface name * Returns: WPS UPnP state or %NULL on failure */ struct upnp_wps_device_sm * upnp_wps_device_init(struct upnp_wps_device_ctx *ctx, struct wps_context *wps, void *priv, char *net_if) { struct upnp_wps_device_sm *sm; struct upnp_wps_device_interface *iface; int start = 0; iface = os_zalloc(sizeof(*iface)); if (iface == NULL) { os_free(ctx->ap_pin); os_free(ctx); return NULL; } wpa_printf(MSG_DEBUG, "WPS UPnP: Init interface instance %p", iface); dl_list_init(&iface->peers); iface->ctx = ctx; iface->wps = wps; iface->priv = priv; if (shared_upnp_device) { wpa_printf(MSG_DEBUG, "WPS UPnP: Share existing device " "context"); sm = shared_upnp_device; } else { wpa_printf(MSG_DEBUG, "WPS UPnP: Initialize device context"); sm = os_zalloc(sizeof(*sm)); if (!sm) { wpa_printf(MSG_ERROR, "WPS UPnP: upnp_wps_device_init " "failed"); os_free(iface); os_free(ctx->ap_pin); os_free(ctx); return NULL; } shared_upnp_device = sm; dl_list_init(&sm->msearch_replies); dl_list_init(&sm->subscriptions); dl_list_init(&sm->interfaces); start = 1; } dl_list_add(&sm->interfaces, &iface->list); if (start && upnp_wps_device_start(sm, net_if)) { upnp_wps_device_deinit(sm, priv); return NULL; } return sm; } /** * upnp_wps_subscribers - Check whether there are any event subscribers * @sm: WPS UPnP state machine from upnp_wps_device_init() * Returns: 0 if no subscribers, 1 if subscribers */ int upnp_wps_subscribers(struct upnp_wps_device_sm *sm) { return !dl_list_empty(&sm->subscriptions); } int upnp_wps_set_ap_pin(struct upnp_wps_device_sm *sm, const char *ap_pin) { struct upnp_wps_device_interface *iface; if (sm == NULL) return 0; dl_list_for_each(iface, &sm->interfaces, struct upnp_wps_device_interface, list) { 
os_free(iface->ctx->ap_pin); if (ap_pin) { iface->ctx->ap_pin = os_strdup(ap_pin); if (iface->ctx->ap_pin == NULL) return -1; } else iface->ctx->ap_pin = NULL; } return 0; }
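/*
 * Hedged usage sketch (not part of the original file): how a caller might
 * bring this UPnP device up and down using only the functions defined above.
 * The zeroed ctx and the "12345670" PIN are illustrative assumptions; real
 * hostapd callers fill in the ctx callbacks elsewhere.
 */
#if 0 /* illustration only */
static struct upnp_wps_device_sm *
example_start_upnp(struct wps_context *wps, void *priv, char *net_if)
{
	struct upnp_wps_device_ctx *ctx;
	struct upnp_wps_device_sm *sm;

	ctx = os_zalloc(sizeof(*ctx));
	if (ctx == NULL)
		return NULL;
	/* upnp_wps_device_init() takes ownership of ctx (and frees it on error) */
	sm = upnp_wps_device_init(ctx, wps, priv, net_if);
	if (sm == NULL)
		return NULL;
	if (upnp_wps_set_ap_pin(sm, "12345670") < 0)
		wpa_printf(MSG_DEBUG, "WPS UPnP: could not set AP PIN");
	return sm;
}

static void example_stop_upnp(struct upnp_wps_device_sm *sm, void *priv)
{
	upnp_wps_device_deinit(sm, priv);
}
#endif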
602539.c
// Copyright 2015-2018 Espressif Systems (Shanghai) PTE LTD // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "esp_netif.h" #include "esp_netif_ppp.h" #include "esp_modem.h" #include "esp_log.h" static const char *TAG = "esp-modem-netif"; /** * @brief ESP32 Modem handle to be used as netif IO object */ typedef struct esp_modem_netif_driver_s { esp_netif_driver_base_t base; /*!< base structure reserved as esp-netif driver */ modem_dte_t *dte; /*!< ptr to the esp_modem objects (DTE) */ } esp_modem_netif_driver_t; static void on_ppp_changed(void *arg, esp_event_base_t event_base, int32_t event_id, void *event_data) { modem_dte_t *dte = arg; if (event_id < NETIF_PP_PHASE_OFFSET) { ESP_LOGI(TAG, "PPP state changed event %d", event_id); // only notify the modem on state/error events, ignoring phase transitions esp_modem_notify_ppp_netif_closed(dte); } } /** * @brief Transmit function called from esp_netif to output network stack data * * Note: This API has to conform to esp-netif transmit prototype * * @param h Opaque pointer representing esp-netif driver, esp_dte in this case of esp_modem * @param data data buffer * @param length length of data to send * * @return ESP_OK on success */ static esp_err_t esp_modem_dte_transmit(void *h, void *buffer, size_t len) { modem_dte_t *dte = h; if (dte->send_data(dte, (const char *)buffer, len) > 0) { return ESP_OK; } return ESP_FAIL; } /** * @brief Post attach adapter for esp-modem * * Used to exchange internal callbacks, context between esp-netif nad modem-netif * * @param esp_netif handle to esp-netif object * @param args pointer to modem-netif driver * * @return ESP_OK on success, modem-start error code if starting failed */ static esp_err_t esp_modem_post_attach_start(esp_netif_t * esp_netif, void * args) { esp_modem_netif_driver_t *driver = args; modem_dte_t *dte = driver->dte; const esp_netif_driver_ifconfig_t driver_ifconfig = { .driver_free_rx_buffer = NULL, .transmit = esp_modem_dte_transmit, .handle = dte }; driver->base.netif = esp_netif; ESP_ERROR_CHECK(esp_netif_set_driver_config(esp_netif, &driver_ifconfig)); // enable both events, so we could notify the modem layer if an error occurred/state changed esp_netif_ppp_config_t ppp_config = { .ppp_error_event_enabled = true, .ppp_phase_event_enabled = true }; esp_netif_ppp_set_params(esp_netif, &ppp_config); ESP_ERROR_CHECK(esp_event_handler_register(NETIF_PPP_STATUS, ESP_EVENT_ANY_ID, &on_ppp_changed, dte)); return esp_modem_start_ppp(dte); } /** * @brief Data path callback from esp-modem to pass data to esp-netif * * @param buffer data pointer * @param len data length * @param context context data used for esp-modem-netif handle * * @return ESP_OK on success */ static esp_err_t modem_netif_receive_cb(void *buffer, size_t len, void *context) { esp_modem_netif_driver_t *driver = context; esp_netif_receive(driver->base.netif, buffer, len, NULL); return ESP_OK; } void *esp_modem_netif_setup(modem_dte_t *dte) { esp_modem_netif_driver_t *driver = calloc(1, sizeof(esp_modem_netif_driver_t)); if 
(driver == NULL) { ESP_LOGE(TAG, "Cannot allocate esp_modem_netif_driver_t"); goto drv_create_failed; } esp_err_t err = esp_modem_set_rx_cb(dte, modem_netif_receive_cb, driver); if (err != ESP_OK) { ESP_LOGE(TAG, "esp_modem_set_rx_cb failed with: %d", err); goto drv_create_failed; } driver->base.post_attach = esp_modem_post_attach_start; driver->dte = dte; return driver; drv_create_failed: return NULL; } void esp_modem_netif_teardown(void *h) { esp_modem_netif_driver_t *driver = h; free(driver); } esp_err_t esp_modem_netif_clear_default_handlers(void *h) { esp_modem_netif_driver_t *driver = h; esp_err_t ret; ret = esp_modem_remove_event_handler(driver->dte, esp_netif_action_start); if (ret != ESP_OK) { goto clear_event_failed; } ret = esp_modem_remove_event_handler(driver->dte, esp_netif_action_stop); if (ret != ESP_OK) { goto clear_event_failed; } ret = esp_event_handler_unregister(IP_EVENT, IP_EVENT_PPP_GOT_IP, esp_netif_action_connected); if (ret != ESP_OK) { goto clear_event_failed; } ret = esp_event_handler_unregister(IP_EVENT, IP_EVENT_PPP_LOST_IP, esp_netif_action_disconnected); if (ret != ESP_OK) { goto clear_event_failed; } return ESP_OK; clear_event_failed: ESP_LOGE(TAG, "Failed to unregister event handlers"); return ESP_FAIL; } esp_err_t esp_modem_netif_set_default_handlers(void *h, esp_netif_t * esp_netif) { esp_modem_netif_driver_t *driver = h; esp_err_t ret; ret = esp_modem_set_event_handler(driver->dte, esp_netif_action_start, ESP_MODEM_EVENT_PPP_START, esp_netif); if (ret != ESP_OK) { goto set_event_failed; } ret = esp_modem_set_event_handler(driver->dte, esp_netif_action_stop, ESP_MODEM_EVENT_PPP_STOP, esp_netif); if (ret != ESP_OK) { goto set_event_failed; } ret = esp_event_handler_register(IP_EVENT, IP_EVENT_PPP_GOT_IP, esp_netif_action_connected, esp_netif); if (ret != ESP_OK) { goto set_event_failed; } ret = esp_event_handler_register(IP_EVENT, IP_EVENT_PPP_LOST_IP, esp_netif_action_disconnected, esp_netif); if (ret != ESP_OK) { goto set_event_failed; } return ESP_OK; set_event_failed: ESP_LOGE(TAG, "Failed to register event handlers"); esp_modem_netif_clear_default_handlers(driver); return ESP_FAIL; }
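/*
 * Hedged usage sketch (not part of the original file): the attach sequence
 * typically used with this glue in esp-idf PPP-over-modem examples.
 * ESP_NETIF_DEFAULT_PPP(), esp_netif_new() and esp_netif_attach() come from
 * esp_netif (availability of the defaults header here is an assumption), and
 * the dte handle is assumed to have been created elsewhere.
 */
#if 0 /* illustration only */
static esp_netif_t *example_attach_modem_netif(modem_dte_t *dte)
{
    esp_netif_config_t netif_cfg = ESP_NETIF_DEFAULT_PPP();
    esp_netif_t *ppp_netif = esp_netif_new(&netif_cfg);
    if (ppp_netif == NULL) {
        return NULL;
    }
    void *adapter = esp_modem_netif_setup(dte);                /* from this file */
    esp_modem_netif_set_default_handlers(adapter, ppp_netif);  /* from this file */
    ESP_ERROR_CHECK(esp_netif_attach(ppp_netif, adapter));     /* runs post_attach */
    return ppp_netif;
}
#endif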
744848.c
/* Copyright 2015 Bloomberg Finance L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* simple buffering for stream */ #include <arpa/inet.h> #include <errno.h> #include <netdb.h> #include <netinet/in.h> #include <poll.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <strings.h> #include <sys/uio.h> #include <unistd.h> #include <hostname_support.h> #include <sbuf2.h> #if SBUF2_SERVER # ifndef SBUF2_DFL_SIZE # define SBUF2_DFL_SIZE 1024ULL # endif # include "mem_util.h" # define calloc comdb2_calloc_util # define malloc(size) comdb2_malloc(sb->allocator, size) # define free comdb2_free #else /* SBUF2_SERVER */ # ifndef SBUF2_DFL_SIZE # define SBUF2_DFL_SIZE (1024ULL * 128ULL) # endif #endif /* !SBUF2_SERVER */ #if SBUF2_UNGETC # define SBUF2UNGETC_BUF_MAX 4 #endif #if WITH_SSL #ifdef my_ssl_println #undef my_ssl_println #endif #ifdef my_ssl_eprintln #undef my_ssl_eprintln #endif #define my_ssl_println(fmt, ...) ssl_println("SBUF2", fmt, ##__VA_ARGS__) #define my_ssl_eprintln(fmt, ...) \ ssl_eprintln("SBUF2", "%s: " fmt, __func__, ##__VA_ARGS__) #endif struct sbuf2 { int fd; int flags; int readtimeout; int writetimeout; int rhd, rtl; int whd, wtl; #if SBUF2_UNGETC /* Server always has these. */ int ungetc_buf[SBUF2UNGETC_BUF_MAX]; int ungetc_buf_len; #endif sbuf2writefn write; sbuf2readfn read; unsigned int lbuf; unsigned char *rbuf; unsigned char *wbuf; char *dbgout, *dbgin; void *userptr; #if SBUF2_SERVER comdb2ma allocator; struct sqlclntstate *clnt; #endif #if WITH_SSL /* Server always supports SSL. */ SSL *ssl; X509 *cert; int protocolerr; char sslerr[120]; #endif }; int SBUF2_FUNC(sbuf2fileno)(SBUF2 *sb) { if (sb == NULL) return -1; return sb->fd; } /*just free SBUF2. don't flush or close fd*/ int SBUF2_FUNC(sbuf2free)(SBUF2 *sb) { if (sb == 0) return -1; #if WITH_SSL /* Gracefully shutdown SSL to make the fd re-usable. */ sslio_close(sb, 1); #endif sb->fd = -1; if (sb->rbuf) { free(sb->rbuf); sb->rbuf = NULL; } if (sb->wbuf) { free(sb->wbuf); sb->wbuf = NULL; } if (sb->dbgin) { free(sb->dbgin); sb->dbgin = NULL; } if (sb->dbgout) { free(sb->dbgout); sb->dbgout = NULL; } #if SBUF2_SERVER comdb2ma alloc = sb->allocator; #endif free(sb); #if SBUF2_SERVER comdb2ma_destroy(alloc); #endif return 0; } /* flush output, close fd, and free SBUF2.*/ int SBUF2_FUNC(sbuf2close)(SBUF2 *sb) { if (sb == 0) return -1; if (sb->fd < 0) return -1; if (!(sb->flags & SBUF2_NO_FLUSH)) sbuf2flush(sb); #if WITH_SSL /* We need to send "close notify" alert before closing the underlying fd. 
*/ sslio_close(sb, (sb->flags & SBUF2_NO_CLOSE_FD)); #endif if (!(sb->flags & SBUF2_NO_CLOSE_FD)) close(sb->fd); return sbuf2free(sb); } /* flush output */ int SBUF2_FUNC(sbuf2flush)(SBUF2 *sb) { int cnt = 0, rc, len; if (sb == 0) return -1; while (sb->whd != sb->wtl) { if (sb->wtl > sb->whd) { len = sb->lbuf - sb->wtl; } else { len = sb->whd - sb->wtl; } #if SBUF2_SERVER && WITH_SSL void *ssl; ssl_downgrade: ssl = sb->ssl; rc = sb->write(sb, (char *)&sb->wbuf[sb->wtl], len); if (rc == 0 && sb->ssl != ssl) { /* Fall back to plaintext if client donates the socket to sockpool. */ goto ssl_downgrade; } #else rc = sb->write(sb, (char *)&sb->wbuf[sb->wtl], len); #endif if (rc <= 0) return -1 + rc; cnt += rc; sb->wtl += rc; if (sb->wtl >= sb->lbuf) sb->wtl = 0; } /* this reduces fragmentation for Nagle-disabled sockets*/ sb->whd = sb->wtl = 0; return cnt; } int SBUF2_FUNC(sbuf2putc)(SBUF2 *sb, char c) { int rc; if (sb == 0) return -1; if (sb->wbuf == NULL) { /* lazily establish write buffer */ sb->wbuf = malloc(sb->lbuf); if (sb->wbuf == NULL) return -1; } if ((sb->whd == sb->lbuf - 1 && sb->wtl == 0) || (sb->whd == sb->wtl - 1)) { rc = sbuf2flush(sb); if (rc < 0) return rc; } sb->wbuf[sb->whd] = c; sb->whd++; if (sb->whd >= sb->lbuf) sb->whd = 0; if ((sb->flags & SBUF2_WRITE_LINE) && c == '\n') { rc = sbuf2flush(sb); if (rc < 0) return rc; } return 1; } int SBUF2_FUNC(sbuf2puts)(SBUF2 *sb, char *string) { int rc, ii; if (sb == 0) return -1; for (ii = 0; string[ii]; ii++) { rc = sbuf2putc(sb, string[ii]); if (rc < 0) return rc; } if (sb->flags & SBUF2_DEBUG_LAST_LINE) { if (sb->dbgout) free(sb->dbgout); sb->dbgout = strdup(string); } return ii; } /* returns num items written || <0 for error*/ int SBUF2_FUNC(sbuf2write)(char *ptr, int nbytes, SBUF2 *sb) { int rc, off, left, written = 0; if (sb == 0) return -1; if (sb->wbuf == NULL) { /* lazily establish write buffer */ sb->wbuf = malloc(sb->lbuf); if (sb->wbuf == NULL) return -1; } off = 0; left = nbytes; while (left > 0) { int towrite = 0; if ((sb->whd == sb->lbuf - 1 && sb->wtl == 0) || (sb->whd == sb->wtl - 1)) { rc = sbuf2flush(sb); if (rc < 0) return written; } if (sb->whd < sb->wtl) { towrite = sb->wtl - sb->whd - 1; if (towrite > left) towrite = left; } else { towrite = sb->lbuf - sb->whd - 1; if (sb->wtl != 0) towrite++; if (towrite > left) towrite = left; } memcpy(&sb->wbuf[sb->whd], &ptr[off], towrite); sb->whd += towrite; off += towrite; left -= towrite; written += towrite; if (sb->wtl == 0 && sb->whd >= (sb->lbuf - 1)) { continue; } else if (sb->whd >= sb->lbuf) sb->whd = 0; } return nbytes; } /* returns num items written || <0 for error*/ int SBUF2_FUNC(sbuf2fwrite)(char *ptr, int size, int nitems, SBUF2 *sb) { int rc, ii, jj, off; if (sb == 0) return -1; off = 0; if (!(sb->flags & SBUF2_WRITE_LINE)) sbuf2write(ptr, size * nitems, sb); else { for (ii = 0; ii < nitems; ii++) { for (jj = 0; jj < size; jj++) { rc = sbuf2putc(sb, ptr[off++]); if (rc < 0) return ii; } } } return nitems; } int SBUF2_FUNC(sbuf2getc)(SBUF2 *sb) { int rc, cc; if (sb == 0) return -1; if (sb->rbuf == NULL) { /* lazily establish read buffer */ sb->rbuf = malloc(sb->lbuf); if (sb->rbuf == NULL) return -1; } #if SBUF2_UNGETC if (sb->ungetc_buf_len > 0) { sb->ungetc_buf_len--; return sb->ungetc_buf[sb->ungetc_buf_len]; } #endif if (sb->rtl == sb->rhd) { /*nothing buffered*/ sb->rtl = 0; sb->rhd = 0; #if SBUF2_SERVER && WITH_SSL void *ssl; ssl_downgrade: ssl = sb->ssl; rc = sb->read(sb, (char *)sb->rbuf, sb->lbuf - 1); if (rc == 0 && sb->ssl != ssl) { goto 
ssl_downgrade; } #else rc = sb->read(sb, (char *)sb->rbuf, sb->lbuf - 1); #endif if (rc <= 0) return -1 + rc; sb->rhd = rc; } cc = sb->rbuf[sb->rtl]; sb->rtl++; if (sb->rtl >= sb->lbuf) sb->rtl = 0; return cc; } #if SBUF2_UNGETC int SBUF2_FUNC(sbuf2ungetc)(char c, SBUF2 *sb) { int i; if (sb == NULL) return -1; i = c; if (i == EOF || (sb->ungetc_buf_len == SBUF2UNGETC_BUF_MAX)) return EOF; sb->ungetc_buf[sb->ungetc_buf_len] = c; sb->ungetc_buf_len++; return c; } #endif /*return null terminated string and len (or <0 if error)*/ int SBUF2_FUNC(sbuf2gets)(char *out, int lout, SBUF2 *sb) { int cc, ii; if (sb == 0) return -1; lout--; for (ii = 0; ii < lout;) { cc = sbuf2getc(sb); if (cc < 0) { if (ii == 0) return cc; /*return error if first char*/ break; } out[ii] = cc; ii++; if (cc == '\n') break; } out[ii] = 0; if (sb->flags & SBUF2_DEBUG_LAST_LINE) { if (sb->dbgin) free(sb->dbgin); sb->dbgin = strdup(out); } return ii; /*return string len*/ } /* returns num items read || <0 for error*/ static int sbuf2fread_int(char *ptr, int size, int nitems, SBUF2 *sb, int *was_timeout) { int need = size * nitems; int done = 0; if (sb->rbuf == NULL) { /* lazily establish read buffer */ sb->rbuf = malloc(sb->lbuf); if (sb->rbuf == NULL) return -1; } #if SBUF2_UNGETC if (sb->ungetc_buf_len > 0) { int from = sb->ungetc_buf_len; while (from && (done < need)) { --from; ptr[done] = sb->ungetc_buf[from]; ++done; --need; } sb->ungetc_buf_len = from; } #endif while (1) { /* if data available in buffer */ if (sb->rtl != sb->rhd) { int buffered = sb->rhd - sb->rtl; int amt = need < buffered ? need : buffered; void *to = ptr + done; void *from = sb->rbuf + sb->rtl; memcpy(to, from, amt); need -= amt; done += amt; sb->rtl += amt; } /* if still need more data */ if (need > 0) { int rc; sb->rtl = 0; sb->rhd = 0; #if SBUF2_SERVER && WITH_SSL void *ssl; ssl_downgrade: ssl = sb->ssl; rc = sb->read(sb, (char *)sb->rbuf, sb->lbuf - 1); if (rc == 0 && sb->ssl != ssl) goto ssl_downgrade; #else rc = sb->read(sb, (char *)sb->rbuf, sb->lbuf - 1); #endif if (rc <= 0) { if (rc == 0) { /* this is a timeout */ if (was_timeout) *was_timeout = 1; } return (done / size); } sb->rhd = rc; continue; } break; } return nitems; } /* returns num items read || <0 for error*/ int SBUF2_FUNC(sbuf2fread)(char *ptr, int size, int nitems, SBUF2 *sb) { return sbuf2fread_int(ptr, size, nitems, sb, NULL); } /* returns num items read || <0 for error*/ int SBUF2_FUNC(sbuf2fread_timeout)(char *ptr, int size, int nitems, SBUF2 *sb, int *was_timeout) { return sbuf2fread_int(ptr, size, nitems, sb, was_timeout); } int SBUF2_FUNC(sbuf2printf)(SBUF2 *sb, const char *fmt, ...) { /*just do sprintf to local buf (limited to 1k), and then emit through sbuf2*/ char lbuf[1024]; va_list ap; if (sb == 0) return -1; va_start(ap, fmt); vsnprintf(lbuf, sizeof(lbuf), fmt, ap); va_end(ap); return sbuf2puts(sb, lbuf); } int SBUF2_FUNC(sbuf2printfx)(SBUF2 *sb, char *buf, int lbuf, char *fmt, ...) { /*do sprintf to user supplied buffer*/ int rc; va_list ap; if (sb == 0) return -1; va_start(ap, fmt); rc = vsnprintf(buf, lbuf, fmt, ap); va_end(ap); if (rc < 0) return rc; return sbuf2puts(sb, buf); } /* default read/write functions for sbuf, which implement timeouts and * retry on EINTR. 
*/ static int swrite_unsecure(SBUF2 *sb, const char *cc, int len) { int rc; struct pollfd pol; if (sb == 0) return -1; if (sb->writetimeout > 0) { do { pol.fd = sb->fd; pol.events = POLLOUT; rc = poll(&pol, 1, sb->writetimeout); } while (rc == -1 && errno == EINTR); if (rc <= 0) return rc; /*timed out or error*/ if ((pol.revents & POLLOUT) == 0) return -100000 + pol.revents; /*can write*/ } return write(sb->fd, cc, len); } static int swrite(SBUF2 *sb, const char *cc, int len) { int rc; #if WITH_SSL if (sb->ssl == NULL) rc = swrite_unsecure(sb, cc, len); else rc = sslio_write(sb, cc, len); #else /* WITH_SSL */ rc = swrite_unsecure(sb, cc, len); #endif /* !WITH_SSL */ return rc; } int SBUF2_FUNC(sbuf2unbufferedwrite)(SBUF2 *sb, const char *cc, int len) { int n; #if !WITH_SSL n = write(sb->fd, cc, len); #else ssl_downgrade: if (sb->ssl == NULL) n = write(sb->fd, cc, len); else { ERR_clear_error(); n = SSL_write(sb->ssl, cc, len); if (n <= 0) { int ioerr = SSL_get_error(sb->ssl, n); switch (ioerr) { case SSL_ERROR_WANT_READ: sb->protocolerr = 0; errno = EAGAIN; break; case SSL_ERROR_WANT_WRITE: sb->protocolerr = 0; errno = EAGAIN; break; case SSL_ERROR_ZERO_RETURN: /* Peer has done a clean shutdown. */ SSL_shutdown(sb->ssl); SSL_free(sb->ssl); sb->ssl = NULL; if (sb->cert) { X509_free(sb->cert); sb->cert = NULL; } goto ssl_downgrade; case SSL_ERROR_SYSCALL: sb->protocolerr = 0; if (n == 0) { ssl_sfeprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "Unexpected EOF observed."); errno = ECONNRESET; } else { ssl_sfeprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "IO error. errno %d.", errno); } break; case SSL_ERROR_SSL: errno = EIO; sb->protocolerr = 1; ssl_sfliberrprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "A failure in SSL library occured"); break; default: errno = EIO; sb->protocolerr = 1; ssl_sfeprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "Failed to establish connection with peer. " "SSL error = %d.", ioerr); break; } } } #endif return n; } static int sread_unsecure(SBUF2 *sb, char *cc, int len) { int rc; struct pollfd pol; if (sb == 0) return -1; if (sb->readtimeout > 0) { do { pol.fd = sb->fd; pol.events = POLLIN; rc = poll(&pol, 1, sb->readtimeout); } while (rc == -1 && errno == EINTR); if (rc <= 0) return rc; /*timed out or error*/ if ((pol.revents & POLLIN) == 0) return -100000 + pol.revents; /*something to read*/ } return read(sb->fd, cc, len); } static int sread(SBUF2 *sb, char *cc, int len) { int rc; #if WITH_SSL if (sb->ssl == NULL) rc = sread_unsecure(sb, cc, len); else rc = sslio_read(sb, cc, len); #else /* WITH_SSL */ rc = sread_unsecure(sb, cc, len); #endif /* !WITH_SSL */ return rc; } int SBUF2_FUNC(sbuf2unbufferedread)(SBUF2 *sb, char *cc, int len) { int n; #if !WITH_SSL n = read(sb->fd, cc, len); #else ssl_downgrade: if (sb->ssl == NULL) n = read(sb->fd, cc, len); else { ERR_clear_error(); n = SSL_read(sb->ssl, cc, len); if (n <= 0) { int ioerr = SSL_get_error(sb->ssl, n); switch (ioerr) { case SSL_ERROR_WANT_READ: sb->protocolerr = 0; errno = EAGAIN; break; case SSL_ERROR_WANT_WRITE: sb->protocolerr = 0; errno = EAGAIN; break; case SSL_ERROR_ZERO_RETURN: /* Peer has done a clean shutdown. 
*/ SSL_shutdown(sb->ssl); SSL_free(sb->ssl); sb->ssl = NULL; if (sb->cert) { X509_free(sb->cert); sb->cert = NULL; } goto ssl_downgrade; case SSL_ERROR_SYSCALL: sb->protocolerr = 0; if (n == 0) { ssl_sfeprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "Unexpected EOF observed."); errno = ECONNRESET; } else { ssl_sfeprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "IO error. errno %d.", errno); } break; case SSL_ERROR_SSL: errno = EIO; sb->protocolerr = 1; ssl_sfliberrprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "A failure in SSL library occured"); break; default: errno = EIO; sb->protocolerr = 1; ssl_sfeprint(sb->sslerr, sizeof(sb->sslerr), my_ssl_eprintln, "Failed to establish connection with peer. " "SSL error = %d.", ioerr); break; } } } #endif return n; } void SBUF2_FUNC(sbuf2settimeout)(SBUF2 *sb, int readtimeout, int writetimeout) { sb->readtimeout = readtimeout; sb->writetimeout = writetimeout; } void SBUF2_FUNC(sbuf2gettimeout)(SBUF2 *sb, int *readtimeout, int *writetimeout) { *readtimeout = sb->readtimeout; *writetimeout = sb->writetimeout; } void SBUF2_FUNC(sbuf2setrw)(SBUF2 *sb, sbuf2readfn read, sbuf2writefn write) { sb->read = read; sb->write = write; } void SBUF2_FUNC(sbuf2setr)(SBUF2 *sb, sbuf2readfn read) { sb->read = read; } void SBUF2_FUNC(sbuf2setw)(SBUF2 *sb, sbuf2writefn write) { sb->write = write; } sbuf2readfn SBUF2_FUNC(sbuf2getr)(SBUF2 *sb) { return sb->read; } sbuf2writefn SBUF2_FUNC(sbuf2getw)(SBUF2 *sb) { return sb->write; } int SBUF2_FUNC(sbuf2setbufsize)(SBUF2 *sb, unsigned int size) { if (size < 1024) size = 1024; free(sb->rbuf); free(sb->wbuf); sb->rbuf = sb->wbuf = 0; sb->rhd = sb->rtl = 0; sb->whd = sb->wtl = 0; sb->lbuf = size; return 0; } void SBUF2_FUNC(sbuf2setflags)(SBUF2 *sb, int flags) { sb->flags |= flags; } SBUF2 *SBUF2_FUNC(sbuf2open)(int fd, int flags) { if (fd < 0) { return NULL; } SBUF2 *sb = NULL; #if SBUF2_SERVER comdb2ma alloc = comdb2ma_create(0, 0, "sbuf2", 0); if (alloc == NULL) { goto error; } /* get malloc to work in server-mode */ SBUF2 dummy = {.allocator = alloc}; sb = &dummy; #endif sb = calloc(1, sizeof(SBUF2)); if (sb == NULL) { goto error; } sb->fd = fd; sb->flags = flags; #if SBUF2_SERVER sb->allocator = alloc; sb->clnt = NULL; #endif #if SBUF2_UNGETC sb->ungetc_buf_len = 0; memset(sb->ungetc_buf, EOF, sizeof(sb->ungetc_buf)); #endif /* default writer/reader */ sb->write = swrite; sb->read = sread; if (sbuf2setbufsize(sb, SBUF2_DFL_SIZE) == 0) { return sb; } error: if (sb) { free(sb); } #if SBUF2_SERVER if (alloc) { comdb2ma_destroy(alloc); } #endif return NULL; } char *SBUF2_FUNC(sbuf2dbgin)(SBUF2 *sb) { if (sb->dbgin != 0) return sb->dbgin; return ""; } char *SBUF2_FUNC(sbuf2dbgout)(SBUF2 *sb) { if (sb->dbgout != 0) return sb->dbgout; return ""; } #if SBUF2_UNGETC int SBUF2_FUNC(sbuf2eof)(SBUF2 *sb) { int i; if (sb == NULL) return -2; errno = 0; i = sbuf2getc(sb); if (i >= 0) { sbuf2ungetc(i, sb); return 0; } else { if (errno == 0) return 1; else return -1; } } #endif #if SBUF2_SERVER void SBUF2_FUNC(sbuf2setclnt)(SBUF2 *sb, struct sqlclntstate *clnt) { sb->clnt = clnt; } struct sqlclntstate *SBUF2_FUNC(sbuf2getclnt)(SBUF2 *sb) { return sb->clnt; } #endif void SBUF2_FUNC(sbuf2setuserptr)(SBUF2 *sb, void *userptr) { sb->userptr = userptr; } void *SBUF2_FUNC(sbuf2getuserptr)(SBUF2 *sb) { return sb->userptr; } void SBUF2_FUNC(sbuf2nextline)(SBUF2 *sb) { char c; while ((c = sbuf2getc(sb)) >= 0 && c != '\n') ; } char *SBUF2_FUNC(get_origin_mach_by_buf)(SBUF2 *sb) { if (sb == NULL || sb->fd == -1) { return 
NULL; } return get_hostname_by_fileno(sb->fd); } int SBUF2_FUNC(sbuf2lasterror)(SBUF2 *sb, char *err, size_t n) { #if WITH_SSL if (err != NULL) strncpy(err, sb->sslerr, n > sizeof(sb->sslerr) ? sizeof(sb->sslerr) : n); return sb->protocolerr; #else return 0; #endif } #if WITH_SSL # include "ssl_io.c" #endif
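/*
 * Hedged usage sketch (not part of the original file): minimal use of the
 * sbuf2 API defined above on an already connected socket fd. The exact
 * external names and allocator behavior depend on the SBUF2_SERVER /
 * SBUF2_FUNC build configuration.
 */
#if 0 /* illustration only */
static int example_sbuf2_roundtrip(int fd)
{
    char line[128];
    SBUF2 *sb = sbuf2open(fd, 0);
    if (sb == NULL)
        return -1;
    sbuf2settimeout(sb, 5000, 5000);   /* 5s poll timeouts for read/write */
    sbuf2printf(sb, "hello\n");
    sbuf2flush(sb);
    if (sbuf2gets(line, sizeof(line), sb) < 0) {
        sbuf2close(sb);
        return -1;
    }
    return sbuf2close(sb);             /* flushes, closes fd, frees sb */
}
#endif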
819166.c
#include <stdio.h>

/* Note: "i = i++" modifies i twice without an intervening sequence point, so
 * the value printed is not defined by the C standard. The expression is kept
 * as written; only the missing include and return are added. */
int main(void)
{
	int i = 2;
	i = i++;
	printf("%d\n", i);
	return 0;
}
283215.c
#include "cc.h" Node* dodecl(void (*f)(int,Type*,Sym*), int c, Type *t, Node *n, int gen) { Sym *s; Node *n1; long v; nearln = lineno; lastfield = 0; loop: if(n != Z) switch(n->op) { default: diag(n, "unknown declarator: %O", n->op); break; case OARRAY: t = typ(TARRAY, t); t->width = 0; n1 = n->right; n = n->left; if(n1 != Z) { complex(n1); v = -1; if(n1->op == OCONST) v = n1->vconst; if(v <= 0) { diag(n, "array size must be a positive constant"); v = 1; } t->width = v * t->link->width; t->nwidth = n1->left; } goto loop; case OIND: t = typ(TIND, t); t->garb = n->garb; n = n->left; goto loop; case OFUNC: t = typ(TFUNC, t); t->down = fnproto(n); n = n->left; goto loop; case OBIT: n1 = n->right; complex(n1); lastfield = -1; if(n1->op == OCONST) lastfield = n1->vconst; if(lastfield < 0) { diag(n, "field width must be non-negative constant"); lastfield = 1; } if(lastfield == 0) { lastbit = 0; firstbit = 1; if(n->left != Z) { diag(n, "zero width named field"); lastfield = 1; } } if(!typei[t->etype]) { diag(n, "field type must be int-like"); t = types[TINT]; lastfield = 1; } if(lastfield > tfield->width*8) { diag(n, "field width larger than field unit"); lastfield = 1; } lastbit += lastfield; if(lastbit > tfield->width*8) { lastbit = lastfield; firstbit = 1; } n = n->left; goto loop; case ONAME: if(f == NODECL) break; s = n->sym; (*f)(c, t, s); if(s->class == CLOCAL) s = mkstatic(s); firstbit = 0; n->sym = s; n->type = s->type; acidvar(s); if(gen) vtgen(n); break; } lastdcl = t; return n; } Sym* mkstatic(Sym *s) { Sym *s1; if(s->class != CLOCAL) return s; snprint(symb, NSYMB, "%s$%d", s->name, s->block); s1 = lookup(); if(s1->class != CSTATIC) { s1->type = s->type; s1->offset = s->offset; s1->block = s->block; s1->class = CSTATIC; } return s1; } /* * make a copy of a typedef * the problem is to split out incomplete * arrays so that it is in the variable * rather than the typedef. */ Type* tcopy(Type *t) { Type *tl, *tx; int et; if(t == T) return t; et = t->etype; if(typesu[et]) return t; tl = tcopy(t->link); if(tl != t->link || (et == TARRAY && t->width == 0)) { tx = typ(TXXX, 0); *tx = *t; tx->link = tl; return tx; } return t; } Node* doinit(Sym *s, Type *t, long o, Node *a) { Node *n, *reta; if(t == T) return Z; if(s->class == CEXTERN) s->class = CGLOBL; if(0) { print("t = %T; o = %ld; n = %s\n", t, o, s->name); prtree(a, "doinit value"); } n = initlist; if(a->op == OINIT) a = a->left; initlist = a; reta = a; init1(s, t, o, 0); if(initlist != Z) diag(initlist, "more initializers than structure: %s", s->name); initlist = n; return reta; } /* * get next major operator, * dont advance initlist. */ Node* peekinit(void) { Node *a; a = initlist; loop: if(a == Z) return a; if(a->op == OLIST) { a = a->left; goto loop; } return a; } /* * consume and return next element on * initlist. expand strings. 
*/ Node* nextinit(void) { Node *a, *n; a = initlist; n = Z; if(a == Z) return a; if(a->op == OLIST) { n = a->right; a = a->left; } initlist = n; return a; } int isstruct(Node *a, Type *t) { Node *n; switch(a->op) { case ODOTDOT: n = a->left; if(n && n->type && sametype(n->type, t)) return 1; case OSTRING: case OLSTRING: case OCONST: case OINIT: case OELEM: return 0; } n = new(ODOTDOT, Z, Z); *n = *a; /* * ODOTDOT is a flag for tcom * a second tcom will not be performed */ a->op = ODOTDOT; a->left = n; a->right = Z; if(tcom(n)) return 0; if(sametype(n->type, t)) return 1; return 0; } void init1(Sym *s, Type *t, long o, int exflag) { Node *a, *r, nod; Type *t1; long e, w, so, mw; a = peekinit(); if(a == Z) return; if(0) { print("t = %T; o = %ld; n = %s\n", t, o, s->name); prtree(a, "init1 value"); } if(exflag && a->op == OINIT){ doinit(s, t, o, nextinit()); return; } switch(t->etype) { default: diag(Z, "unknown type in initialization: %T to: %s", t, s->name); return; case TCHAR: case TUCHAR: case TINT: case TUINT: case TSHORT: case TUSHORT: case TLONG: case TULONG: case TVLONG: case TUVLONG: case TFLOAT: case TDOUBLE: case TIND: single: if(a->op == OARRAY || a->op == OELEM) return; a = nextinit(); if(a == Z) return; if(t->nbits) diag(Z, "cannot initialize bitfields"); if(0 && s->class == CAUTO) return; complex(a); if(a->type == T) return; if(a->op == OCONST) { if(!sametype(a->type, t)) { /* hoop jumping to save malloc */ if(nodcast == Z) nodcast = new(OCAST, Z, Z); nod = *nodcast; nod.left = a; nod.type = t; nod.lineno = a->lineno; complex(&nod); if(nod.type) *a = nod; } if(a->op != OCONST) { /* diag(a, "initializer is not a constant: %s", s->name); */ return; } if(vconst(a) == 0) return; return; } if(t->etype == TIND) { while(a->op == OCAST) { warn(a, "CAST in initialization ignored"); a = a->left; } if(0 && !sametype(t, a->type)) { diag(a, "initialization of incompatible pointers: %s", s->name); print("%T and %T\n", t, a->type); } /* if(a->op == OADDR) a = a->left; */ return; } while(a->op == OCAST) a = a->left; if(a->op == OADDR) { warn(a, "initialize pointer to an integer: %s", s->name); /* a = a->left; */ return; } /* diag(a, "initializer is not a constant: %s", s->name); */ return; case TARRAY: w = t->link->width; if(a->op == OSTRING || a->op == OLSTRING) if(typei[t->link->etype]) { /* * get rid of null if sizes match exactly */ a = nextinit(); /* mw = t->width/w; */ so = a->type->width/a->type->link->width; if(t->width <= 0) t->width = w*(so-1); USED(a); return; } mw = -w; for(e=0;;) { /* * peek ahead for element initializer */ a = peekinit(); if(a == Z) break; if(a->op == OELEM && t->link->etype != TSTRUCT) break; if(a->op == OARRAY) { if(e && exflag) break; a = nextinit(); r = a->left; complex(r); if(r->op != OCONST) { diag(r, "initializer subscript must be constant"); return; } e = r->vconst; if(t->width != 0) if(e < 0 || e*w >= t->width) { diag(a, "initialization index out of range: %ld", e); continue; } } so = e*w; if(so > mw) mw = so; if(t->width != 0) if(mw >= t->width) break; init1(s, t->link, o+so, 1); e++; } if(t->width == 0) t->width = mw+w; return; case TUNION: case TSTRUCT: /* * peek ahead to find type of rhs. * if its a structure, then treat * this element as a variable * rather than an aggregate. 
*/ if(isstruct(a, t)) goto single; if(t->width <= 0) { diag(Z, "incomplete structure: %s", s->name); return; } again: for(t1 = t->link; t1 != T; t1 = t1->down) { if(a->op == OARRAY && t1->etype != TARRAY) break; if(a->op == OELEM) { if(t1->sym != a->sym) continue; nextinit(); } init1(s, t1, o+t1->offset, 1); a = peekinit(); if(a == Z) break; if(a->op == OELEM) goto again; } if(a && a->op == OELEM) diag(a, "structure element not found %F", a); return; } } /* Node* newlist(Node *l, Node *r) { if(r == Z) return l; if(l == Z) return r; return new(OLIST, l, r); } */ void suallign(Type *t) { Type *l; long o, w; o = 0; switch(t->etype) { case TSTRUCT: t->offset = 0; w = 0; for(l = t->link; l != T; l = l->down) { if(l->nbits) { if(l->shift <= 0) { l->shift = -l->shift; w = round(w, tfield->width); o = w; w += tfield->width; } l->offset = o; } else { if(l->width <= 0) if(l->sym) diag(Z, "incomplete structure element: %s", l->sym->name); else diag(Z, "incomplete structure element"); w = align(w, l, Ael1); l->offset = w; w = align(w, l, Ael2); } } w = align(w, t, Asu2); t->width = w; acidtype(t); ttgen(t); return; case TUNION: t->offset = 0; w = 0; for(l = t->link; l != T; l = l->down) { if(l->width <= 0) if(l->sym) diag(Z, "incomplete union element: %s", l->sym->name); else diag(Z, "incomplete union element"); l->offset = 0; l->shift = 0; o = align(align(0, l, Ael1), l, Ael2); if(o > w) w = o; } w = align(w, t, Asu2); t->width = w; acidtype(t); ttgen(t); return; default: diag(Z, "unknown type in suallign: %T", t); break; } } long round(long v, int w) { int r; if(w <= 0 || w > 8) { diag(Z, "rounding by %d", w); w = 1; } r = v%w; if(r) v += w-r; return v; } Type* ofnproto(Node *n) { Type *tl, *tr, *t; if(n == Z) return T; switch(n->op) { case OLIST: tl = ofnproto(n->left); tr = ofnproto(n->right); if(tl == T) return tr; tl->down = tr; return tl; case ONAME: if(n->type == T) n->type = n->sym->type; t = typ(TXXX, T); *t = *n->sym->type; t->down = T; return t; } return T; } #define ANSIPROTO 1 #define OLDPROTO 2 void argmark(Node *n, int pass) { Type *t; autoffset = align(0, thisfn->link, Aarg0); for(; n->left != Z; n = n->left) { if(n->op != OFUNC || n->left->op != ONAME) continue; walkparam(n->right, pass); if(pass != 0 && anyproto(n->right) == OLDPROTO) { t = typ(TFUNC, n->left->sym->type->link); t->down = typ(TOLD, T); t->down->down = ofnproto(n->right); tmerge(t, n->left->sym); n->left->sym->type = t; } break; } autoffset = 0; } void walkparam(Node *n, int pass) { Sym *s; Node *n1; if(n != Z && n->op == OPROTO && n->left == Z && n->type == types[TVOID]) return; loop: if(n == Z) return; switch(n->op) { default: diag(n, "argument not a name/prototype: %O", n->op); break; case OLIST: walkparam(n->left, pass); n = n->right; goto loop; case OPROTO: for(n1 = n; n1 != Z; n1=n1->left) if(n1->op == ONAME) { if(pass == 0) { s = n1->sym; push1(s); s->offset = -1; break; } dodecl(pdecl, CPARAM, n->type, n->left, 1); break; } if(n1) break; if(pass == 0) { /* * extension: * allow no name in argument declaration diag(Z, "no name in argument declaration"); */ break; } dodecl(NODECL, CPARAM, n->type, n->left, 1); pdecl(CPARAM, lastdcl, S); break; case ODOTDOT: break; case ONAME: s = n->sym; if(pass == 0) { push1(s); s->offset = -1; break; } if(s->offset != -1) { autoffset = align(autoffset, s->type, Aarg1); s->offset = autoffset; autoffset = align(autoffset, s->type, Aarg2); } else dodecl(pdecl, CXXX, types[TINT], n, 1); break; } } void markdcl(void) { Decl *d; blockno++; d = push(); d->val = DMARK; d->offset = 
autoffset; d->block = autobn; autobn = blockno; } Node* revertdcl(void) { Decl *d; Sym *s; for(;;) { d = dclstack; if(d == D) { diag(Z, "pop off dcl stack"); break; } dclstack = d->link; s = d->sym; switch(d->val) { case DMARK: autoffset = d->offset; autobn = d->block; free(d); return Z; case DAUTO: if(0) { if(s->class == CAUTO) warn(Z, "auto declared and not used: %s", s->name); if(s->class == CPARAM) warn(Z, "param declared and not used: %s", s->name); } s->type = d->type; s->class = d->class; s->offset = d->offset; s->block = d->block; s->lineno = d->lineno; break; case DSUE: s->suetag = d->type; s->sueblock = d->block; s->lineno = d->lineno; break; case DLABEL: if(0 && s->label) warn(s->label, "label declared and not used \"%s\"", s->name); s->label = Z; s->lineno = d->lineno; break; } free(d); } return Z; } Type* fnproto(Node *n) { int r; r = anyproto(n->right); if(r == 0 || (r & OLDPROTO)) { if(r & ANSIPROTO) diag(n, "mixed ansi/old function declaration: %F", n->left); return T; } return fnproto1(n->right); } int anyproto(Node *n) { int r; r = 0; loop: if(n == Z) return r; switch(n->op) { case OLIST: r |= anyproto(n->left); n = n->right; goto loop; case ODOTDOT: case OPROTO: return r | ANSIPROTO; } return r | OLDPROTO; } Type* fnproto1(Node *n) { Type *t; if(n == Z) return T; switch(n->op) { case OLIST: t = fnproto1(n->left); if(t != T) t->down = fnproto1(n->right); return t; case OPROTO: lastdcl = T; n = dodecl(NODECL, CXXX, n->type, n->left, 1); t = typ(TXXX, T); if(lastdcl != T) *t = *paramconv(lastdcl, 1); if(n != Z && n->op == ONAME) t->sym = n->sym; return t; case ONAME: diag(n, "incomplete argument prototype"); return typ(TINT, T); case ODOTDOT: return typ(TDOT, T); } diag(n, "unknown op in fnproto"); return T; } void dbgdecl(Sym *s) { print("decl \"%s\": C=%s [B=%d:O=%ld] T=%T\n", s->name, cnames[s->class], s->block, s->offset, s->type); } Decl* push(void) { static Decl zdecl; Decl *d; d = alloc(sizeof(*d)); *d = zdecl; d->link = dclstack; dclstack = d; return d; } Decl* push1(Sym *s) { Decl *d; d = push(); d->sym = s; d->val = DAUTO; d->type = s->type; d->class = s->class; d->offset = s->offset; d->block = s->block; d->lineno = s->lineno; return d; } int sametype(Type *t1, Type *t2) { if(t1 == t2) return 1; return rsametype(t1, t2, 5, 1); } int rsametype(Type *t1, Type *t2, int n, int f) { int et; n--; for(;;) { if(t1 == t2) return 1; if(t1 == T || t2 == T) return 0; if(n <= 0) return 1; et = t1->etype; if(et != t2->etype) return 0; if(et == TFUNC) { if(!rsametype(t1->link, t2->link, n, 0)) return 0; t1 = t1->down; t2 = t2->down; while(t1 != T && t2 != T) { if(t1->etype == TOLD) { t1 = t1->down; continue; } if(t2->etype == TOLD) { t2 = t2->down; continue; } while(t1 != T || t2 != T) { if(!rsametype(t1, t2, n, 0)) return 0; t1 = t1->down; t2 = t2->down; } break; } return 1; } if(et == TARRAY) if(t1->width != t2->width && t1->width != 0 && t2->width != 0) return 0; if(typesu[et] || et == TTUPLE) { if(t1->link == T) snap(t1); if(t2->link == T) snap(t2); t1 = t1->link; t2 = t2->link; for(;;) { if(t1 == t2) return 1; if(!rsametype(t1, t2, n, 0)) return 0; t1 = t1->down; t2 = t2->down; } } t1 = t1->link; t2 = t2->link; if((f || 1) && et == TIND) { if(t1 != T && t1->etype == TVOID) return 1; if(t2 != T && t2->etype == TVOID) return 1; } } /* not reached */ } ulong signature(Type *t, int n) { Type *t1; long s; s = 0; if(n > 0) for(; t; t=t->link) { s = s*thash1 + thash[t->etype]; switch(t->etype) { default: return s; case TARRAY: s = s*thash2 + t->width; break; case TFUNC: case 
TSTRUCT: case TUNION: for(t1=t; t1; t1=t1->down) s = s*thash3 + signature(t1, n-1); case TIND: break; } } return s; } void snap(Type *t) { if(typesu[t->etype]) if(t->link == T && t->tag && t->tag->suetag) { t->link = t->tag->suetag->link; t->width = t->tag->suetag->width; } } Type* dotag(Sym *s, int et, int bn) { Decl *d; if(bn != 0 && bn != s->sueblock) { d = push(); d->sym = s; d->val = DSUE; d->type = s->suetag; d->block = s->sueblock; d->lineno = s->lineno; s->suetag = T; } if(s->suetag == T) { s->suetag = typ(et, T); s->sueblock = autobn; } if(s->suetag->etype != et) diag(Z, "tag used for more than one type: %s", s->name); if(s->suetag->tag == S) s->suetag->tag = s; return s->suetag; } Node* dcllabel(Sym *s, int f) { Decl *d, d1; Node *n; n = s->label; if(n != Z) { if(f) { if(0) diag(Z, "label reused: %s", s->name); } return n; } d = push(); d->sym = s; d->val = DLABEL; d->lineno = s->lineno; dclstack = d->link; d1 = *firstdcl; *firstdcl = *d; *d = d1; firstdcl->link = d; firstdcl = d; n = new(OXXX, Z, Z); n->sym = s; s->label = n; return n; } Type* paramconv(Type *t, int f) { f = 1; switch(t->etype) { case TUNION: case TSTRUCT: if(t->width <= 0) diag(Z, "incomplete structure: %s", t->tag->name); break; case TARRAY: t = typ(TIND, t->link); t->width = types[TIND]->width; break; case TFUNC: t = typ(TIND, t); t->width = types[TIND]->width; break; case TFLOAT: if(!f) t = types[TDOUBLE]; break; case TCHAR: case TSHORT: if(!f) t = types[TINT]; break; case TUCHAR: case TUSHORT: if(!f) t = types[TUINT]; break; } return t; } void adecl(int c, Type *t, Sym *s) { if(c == CSTATIC) c = CLOCAL; if(t->etype == TFUNC) { if(c == CXXX) c = CEXTERN; if(c == CLOCAL) c = CSTATIC; if(c == CAUTO || c == CEXREG) diag(Z, "function cannot be %s %s", cnames[c], s->name); } if(c == CXXX) c = CAUTO; if(s) { if(s->class == CSTATIC) if(c == CEXTERN || c == CGLOBL) { warn(Z, "just say static: %s", s->name); c = CSTATIC; } if(s->class == CAUTO || s->class == CPARAM || s->class == CLOCAL) if(s->block == autobn) diag(Z, "auto redeclaration of: %s", s->name); if(c != CPARAM) push1(s); s->block = autobn; s->offset = 0; s->type = t; s->class = c; } switch(c) { case CAUTO: autoffset = align(autoffset, t, Aaut3); s->offset = -autoffset; break; case CPARAM: autoffset = align(autoffset, t, Aarg1); if(s) s->offset = autoffset; autoffset = align(autoffset, t, Aarg2); break; } if(s) s->lineno = lineno; } void pdecl(int c, Type *t, Sym *s) { if(s && s->offset != -1) { diag(Z, "not a parameter: %s", s->name); return; } t = paramconv(t, c==CPARAM); if(c == CXXX) c = CPARAM; if(c != CPARAM) { diag(Z, "parameter cannot have class: %s", s->name); c = CPARAM; } adecl(c, t, s); if(s) s->lineno = lineno; } void xdecl(int c, Type *t, Sym *s) { long o; o = 0; if(c == CEXREG) c = CEXTERN; if(c == CXXX) { c = CGLOBL; if(s->class == CEXTERN) s->class = c; } if(c == CEXTERN) if(s->class == CGLOBL) c = CGLOBL; if(c == CAUTO) { diag(Z, "overspecified class: %s %s %s", s->name, cnames[c], cnames[s->class]); c = CEXTERN; } if(s->class == CSTATIC) if(c == CEXTERN || c == CGLOBL) { warn(Z, "overspecified class: %s %s %s", s->name, cnames[c], cnames[s->class]); c = CSTATIC; } if(s->type != T) if(s->class != c || !sametype(t, s->type) || t->etype == TENUM) { diag(Z, "external redeclaration of: %s", s->name); print(" %s %T; %s %T\n", cnames[c], t, cnames[s->class], s->type); } tmerge(t, s); s->type = t; s->class = c; s->block = 0; s->offset = o; } void tmerge(Type *t1, Sym *s) { Type *ta, *tb, *t2; t2 = s->type; /*print("merge %T; %T\n", t1, t2);/**/ 
for(;;) { if(t1 == T || t2 == T || t1 == t2) break; if(t1->etype != t2->etype) break; switch(t1->etype) { case TFUNC: ta = t1->down; tb = t2->down; if(ta == T) { t1->down = tb; break; } if(tb == T) break; while(ta != T && tb != T) { if(ta == tb) break; /* ignore old-style flag */ if(ta->etype == TOLD) { ta = ta->down; continue; } if(tb->etype == TOLD) { tb = tb->down; continue; } /* checking terminated by ... */ if(ta->etype == TDOT && tb->etype == TDOT) { ta = T; tb = T; break; } if(!sametype(ta, tb)) break; ta = ta->down; tb = tb->down; } if(ta != tb) diag(Z, "function inconsistently declared: %s", s->name); /* take new-style over old-style */ ta = t1->down; tb = t2->down; if(ta != T && ta->etype == TOLD) if(tb != T && tb->etype != TOLD) t1->down = tb; break; case TARRAY: /* should we check array size change? */ if(t2->width > t1->width) t1->width = t2->width; break; case TUNION: case TSTRUCT: return; } t1 = t1->link; t2 = t2->link; } } void edecl(int c, Type *t, Sym *s) { long l; Type *t1; if(s == S) { if(!typesu[t->etype]) diag(Z, "unnamed structure element must be struct/union"); if(c != CXXX) diag(Z, "unnamed structure element cannot have class"); } else if(c != CXXX) diag(Z, "structure element cannot have class: %s", s->name); t1 = t; t = typ(TXXX, T); l = t->lineno; *t = *t1; t->lineno = l; t->sym = s; t->down = T; if(lastfield) { t->shift = lastbit - lastfield; t->nbits = lastfield; if(firstbit) t->shift = -t->shift; if(typeu[t->etype]) t->etype = tufield->etype; else t->etype = tfield->etype; } if(strf == T) strf = t; else strl->down = t; strl = t; } /* * this routine is very suspect. * ansi requires the enum type to * be represented as an 'int' * this means that 0x81234567 * would be illegal. this routine * makes signed and unsigned go * to unsigned. */ Type* maxtype(Type *t1, Type *t2) { if(t1 == T) return t2; if(t2 == T) return t1; if(t1->etype > t2->etype) return t1; return t2; } void doenum(Sym *s, Node *n) { int k = KDEC; Node *nc; nc = Z; if(n) { k = n->kind; complex(n); if(n->op != OCONST && n->op != OSTRING && n->op != OLSTRING) { diag(n, "enum not a constant: %s", s->name); return; } nc = n->left; en.cenum = n->type; en.tenum = maxtype(en.cenum, en.tenum); if(!typefd[en.cenum->etype]) en.lastenum = n->vconst; else en.floatenum = n->fconst; } if(dclstack) push1(s); xdecl(CXXX, types[TENUM], s); if(en.cenum == T) { en.tenum = types[TINT]; en.cenum = types[TINT]; en.lastenum = 0; } s->tenum = en.cenum; if(s->tenum->etype == TIND){ /* string */ nc = n; s->tenum = n->type; } else if(!typefd[s->tenum->etype]) { s->vconst = convvtox(en.lastenum, s->tenum->etype); en.lastenum++; s->tenum = types[TINT]; } else { s->fconst = en.floatenum; if(n) s->cstring = n->cstring; else s->cstring = nil; en.floatenum++; s->tenum = types[TDOUBLE]; } s->nconst = nc; acidvar(s); s->kind = k; etgen(s); } void symadjust(Sym *s, Node *n, long del) { switch(n->op) { default: if(n->left) symadjust(s, n->left, del); if(n->right) symadjust(s, n->right, del); return; case ONAME: return; case OCONST: case OSTRING: case OLSTRING: case OINDREG: case OREGISTER: return; } }
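/*
 * Hedged worked example (not part of the original file): round() above rounds
 * v up to the next multiple of w, which suallign() relies on when packing
 * bitfields into tfield-sized units.
 */
#if 0 /* illustration only */
static void
example_round(void)
{
	long a = round(13, 8);	/* 13 % 8 == 5, so 13 + (8 - 5) == 16 */
	long b = round(16, 8);	/* already a multiple of 8: stays 16 */
	USED(a); USED(b);
}
#endif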
937655.c
#include <stdio.h>
#include <stdlib.h>
#include "ppembed.h"

int main(void)
{
    /* with ppembed high-level api */
    int failflag;
    PyObject *pinst = NULL;
    char *arg1 = "sir", *arg2 = "robin", *cstr = NULL;

    failflag = PP_Run_Function("module", "klass", "O", &pinst, "()") ||
               PP_Run_Method(pinst, "method", "s", &cstr, "(ss)", arg1, arg2);
    printf("%s\n", (!failflag) ? cstr : "Can't call objects");
    Py_XDECREF(pinst);
    free(cstr);            /* safe: cstr stays NULL unless both calls succeeded */
    return 0;
}
910113.c
/* * BRIEF MODULE DESCRIPTION * Simple Au1xx0 clocks routines. * * Copyright 2001, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/spinlock.h> #include <asm/time.h> #include <asm/mach-au1x00/au1000.h> /* * I haven't found anyone that doesn't use a 12 MHz source clock, * but just in case..... */ #define AU1000_SRC_CLK 12000000 static unsigned int au1x00_clock; /* Hz */ static unsigned long uart_baud_base; /* * Set the au1000_clock */ void set_au1x00_speed(unsigned int new_freq) { au1x00_clock = new_freq; } unsigned int get_au1x00_speed(void) { return au1x00_clock; } EXPORT_SYMBOL(get_au1x00_speed); /* * The UART baud base is not known at compile time ... if * we want to be able to use the same code on different * speed CPUs. */ unsigned long get_au1x00_uart_baud_base(void) { return uart_baud_base; } void set_au1x00_uart_baud_base(unsigned long new_baud_base) { uart_baud_base = new_baud_base; } /* * We read the real processor speed from the PLL. This is important * because it is more accurate than computing it from the 32 KHz * counter, if it exists. If we don't have an accurate processor * speed, all of the peripherals that derive their clocks based on * this advertised speed will introduce error and sometimes not work * properly. This function is futher convoluted to still allow configurations * to do that in case they have really, really old silicon with a * write-only PLL register. -- Dan */ unsigned long au1xxx_calc_clock(void) { unsigned long cpu_speed; /* * On early Au1000, sys_cpupll was write-only. Since these * silicon versions of Au1000 are not sold by AMD, we don't bend * over backwards trying to determine the frequency. */ if (au1xxx_cpu_has_pll_wo()) #ifdef CONFIG_SOC_AU1000_FREQUENCY cpu_speed = CONFIG_SOC_AU1000_FREQUENCY; #else cpu_speed = 396000000; #endif else cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK; /* On Alchemy CPU:counter ratio is 1:1 */ mips_hpt_frequency = cpu_speed; /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */ set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2) * 16)); set_au1x00_speed(cpu_speed); return cpu_speed; }
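/*
 * Hedged worked example (not part of the original file): assuming the default
 * 396 MHz core clock and SYS_POWERCTRL[1:0] == 0 (so SD == 2), the expression
 * above gives uart_baud_base = 396000000 / (2 * 2 * 16) = 6187500.
 */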
165870.c
#include <stdlib.h> #include <ctype.h> #include <string.h> #include <kogata/debug.h> static unsigned long x=123456789,y=362436069,z=521288629,w=88675123,v=886756453; /* replace defaults with five random seed values in calling program */ unsigned long xorshift(void) { unsigned long t; t=(x^(x>>7)); x=y; y=z; z=w; w=v; v=(v^(v<<6))^(t^(t<<13)); return (y+y+1)*v; } int rand(void) { int i = xorshift(); if (i < 0) i = -i; return i; } void srand(unsigned int seed) { x = seed; } void abort() { PANIC("Aborted."); } float strtof(const char *nptr, char **endptr) { return (float)strtod(nptr, endptr); } double strtod(const char *nptr, char **endptr) { // TODO: better (inf, nan, ...) const char* p = nptr; while (isspace(*p)) p++; double val = 0; double sign = 1; if (*p == '-') sign = -1; if (*p == '-' || *p == '+') p++; while (isdigit(*p)) { val = val*10. + (double)((int)*p - '0'); p++; } if (*p == '.') { p++; double fac = 0.1; while (isdigit(*p)) { val += fac * (double)((int)*p - '0'); fac /= 10.; p++; } } if (*p == 'e' || *p == 'E') { p++; int exp = 0; int sexp = 1; if (*p == '-') sexp = -1; if (*p == '-' || *p =='+') p++; while (isdigit(*p)) { exp = exp * 10 + (*p - '0'); p++; } if (sexp == 1) { for (int i = 0; i < exp; i++) val *= 10; } else { for (int i = 0; i < exp; i++) val /= 10; } } if (endptr != NULL) *endptr = (char*)p; return val * sign; } char *getenv(const char *name) { // TODO return 0; } int system(const char *command) { // TODO return -1; } int abs(int j) { if (j < 0) return -j; return j; } /* vim: set sts=0 ts=4 sw=4 tw=0 noet :*/
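/*
 * Hedged usage sketch (not part of the original file): expected results of
 * the strtod() implemented above, including the exponent path. The values
 * follow directly from the parsing loops in this file.
 */
#if 0 /* illustration only */
static void example_strtod(void)
{
	char *end;
	double a = strtod("  -3.5e2xyz", &end);	/* a == -350.0, end -> "xyz" */
	double b = strtod("0.25", NULL);	/* b == 0.25 */
	(void)a; (void)b;
}
#endif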
722194.c
/**************************************************************************** * drivers/power/pm_initialize.c * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. The * ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <nuttx/power/pm.h> #include "pm.h" #if defined(CONFIG_PM_GOVERNOR_ACTIVITY) # include "activity_governor.h" #elif defined(CONFIG_PM_GOVERNOR_GREEDY) # include "greedy_governor.h" #endif #ifdef CONFIG_PM /**************************************************************************** * Public Data ****************************************************************************/ /* All PM global data: */ /* Initialize the registry and the PM global data structures. The PM * global data structure resides in .data which is zeroed at boot time. So * it is only required to initialize non-zero elements of the PM global * data structure here. */ struct pm_global_s g_pmglobals = { .regsem = SEM_INITIALIZER(1) }; /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: pm_initialize * * Description: * This function is called by MCU-specific one-time at power on reset in * order to initialize the power management capabilities. This function * must be called *very* early in the initialization sequence *before* any * other device drivers are initialize (since they may attempt to register * with the power management subsystem). * * Input Parameters: * None. * * Returned Value: * None. * ****************************************************************************/ void pm_initialize(void) { FAR const struct pm_governor_s *gov; int i; /* Select governor */ for (i = 0; i < CONFIG_PM_NDOMAINS; i++) { #if defined(CONFIG_PM_GOVERNOR_GREEDY) gov = pm_greedy_governor_initialize(); #elif defined(CONFIG_PM_GOVERNOR_ACTIVITY) gov = pm_activity_governor_initialize(); #else static struct pm_governor_s null; gov = &null; #endif pm_set_governor(i, gov); } } #endif /* CONFIG_PM */
583508.c
/**************************************************************************** * * Copyright 2016 Samsung Electronics All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the License. * ****************************************************************************/ /**************************************************************************** * external/ftpc/ftpc_chmod.c * * Copyright (C) 2011 Gregory Nutt. All rights reserved. * Author: Gregory Nutt <[email protected]> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name NuttX nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
 * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include "ftpc_config.h" #include <debug.h> #include <protocols/ftpc.h> #include "ftpc_internal.h" /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /**************************************************************************** * Private Types ****************************************************************************/ /**************************************************************************** * Private Data ****************************************************************************/ /**************************************************************************** * Public Data ****************************************************************************/ /**************************************************************************** * Private Functions ****************************************************************************/ /**************************************************************************** * Public Functions ****************************************************************************/ /**************************************************************************** * Name: ftpc_chmod * * Description: * Change the protections on the remote file. * ****************************************************************************/ int ftpc_chmod(SESSION handle, FAR const char *path, FAR const char *mode) { FAR struct ftpc_session_s *session = (FAR struct ftpc_session_s *)handle; /* Does the server support the SITE CHMOD command? */ if (FTPC_HAS_CHMOD(session)) { (void)ftpc_cmd(session, "SITE CHMOD %s %s", path, mode); /* Check for "502 Command not implemented" */ if (session->code == 502) { /* No.. the server does not support the SITE CHMOD command */ FTPC_CLR_CHMOD(session); } return OK; } else { ndbg("Server does not support SITE CHMOD\n"); } return ERROR; }
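/* Usage sketch (not part of the original file): change the permissions of a
 * remote file over an already established FTP session.  Obtaining and logging
 * in the SESSION handle is assumed to have happened elsewhere; only
 * ftpc_chmod() and the OK/ERROR return conventions come from the code above.
 * Note that ftpc_chmod() returns OK as long as the server advertises SITE
 * CHMOD support, even if this particular command is rejected with a 502.
 */

#include <stdio.h>
#include <protocols/ftpc.h>

int example_make_world_readable(SESSION handle, FAR const char *remotepath)
{
  /* The mode string is passed verbatim in "SITE CHMOD <path> <mode>" */

  if (ftpc_chmod(handle, remotepath, "644") != OK)
    {
      printf("SITE CHMOD is not supported by this server\n");
      return ERROR;
    }

  return OK;
}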
816170.c
/* * Copyright 2020 The OpenSSL Project Authors. All Rights Reserved. * * Licensed under the Apache License 2.0 (the "License"). You may not use * this file except in compliance with the License. You can obtain a copy * in the file LICENSE in the source distribution or at * https://www.openssl.org/source/license.html */ #include <stdlib.h> #include <openssl/objects.h> #include "crypto/ec.h" #define ASN1_SEQUENCE 0x30 #define ASN1_OID 0x06 #define OID_FIRST(a, b) a * 40 + b #define DER_840() 0x86, 0x48 /* DER encoding of number 840 is 2 bytes */ #define DER_10045() 0xCE, 0x3D /* DER encoding of number 10045 is 2 bytes */ #define SHA1_SZ 7 #define SHA2_SZ 8 #define SHA3_SZ 9 /* * -- RFC 3279 * ansi-X9-62 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840) 10045 } * id-ecSigType OBJECT IDENTIFIER ::= { ansi-X9-62 signatures(4) } * * ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { id-ecSigType 1 } */ #define ENCODE_ALGORITHMIDENTIFIER_SHA1(name) \ static const unsigned char algorithmidentifier_##name##_der[] = { \ ASN1_SEQUENCE, 2 + SHA1_SZ, \ ASN1_OID, SHA1_SZ, OID_FIRST(1, 2), DER_840(), DER_10045(), 4, 1 \ } /* * -- RFC 5758 * * ecdsa-with-SHA224 OBJECT IDENTIFIER ::= { iso(1) member-body(2) * us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 1 } * * ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) * us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } * * ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) * us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } * * ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) * us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } */ #define ENCODE_ALGORITHMIDENTIFIER_SHA2(name, n) \ static const unsigned char algorithmidentifier_##name##_der[] = { \ ASN1_SEQUENCE, 2 + SHA2_SZ, \ ASN1_OID, SHA2_SZ, OID_FIRST(1, 2), DER_840(), DER_10045(), 4, 3, n \ } /* * https://csrc.nist.gov/projects/computer-security-objects-register/algorithm-registration * * sigAlgs OBJECT IDENTIFIER ::= { 2 16 840 1 101 3 4 3 } * * id-ecdsa-with-sha3-224 ::= { sigAlgs 9 } * id-ecdsa-with-sha3-256 ::= { sigAlgs 10 } * id-ecdsa-with-sha3-384 ::= { sigAlgs 11 } * id-ecdsa-with-sha3-512 ::= { sigAlgs 12 } */ #define ENCODE_ALGORITHMIDENTIFIER_SHA3(name, n) \ static const unsigned char algorithmidentifier_##name##_der[] = { \ ASN1_SEQUENCE, 2 + SHA3_SZ, \ ASN1_OID, SHA3_SZ, OID_FIRST(2, 16), DER_840(), 1, 101, 3, 4, 3, n \ } ENCODE_ALGORITHMIDENTIFIER_SHA1(sha1); ENCODE_ALGORITHMIDENTIFIER_SHA2(sha224, 1); ENCODE_ALGORITHMIDENTIFIER_SHA2(sha256, 2); ENCODE_ALGORITHMIDENTIFIER_SHA2(sha384, 3); ENCODE_ALGORITHMIDENTIFIER_SHA2(sha512, 4); ENCODE_ALGORITHMIDENTIFIER_SHA3(sha3_224, 9); ENCODE_ALGORITHMIDENTIFIER_SHA3(sha3_256, 10); ENCODE_ALGORITHMIDENTIFIER_SHA3(sha3_384, 11); ENCODE_ALGORITHMIDENTIFIER_SHA3(sha3_512, 12); /* TODO - Add SHAKE OIDS when they are standardized */ #define MD_CASE(name) \ case NID_##name: \ *len = sizeof(algorithmidentifier_##name##_der); \ return algorithmidentifier_##name##_der const unsigned char *ecdsa_algorithmidentifier_encoding(int md_nid, size_t *len) { switch (md_nid) { MD_CASE(sha1); MD_CASE(sha224); MD_CASE(sha256); MD_CASE(sha384); MD_CASE(sha512); MD_CASE(sha3_224); MD_CASE(sha3_256); MD_CASE(sha3_384); MD_CASE(sha3_512); default: return NULL; } }
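/* Usage sketch (not part of the original file): fetch the cached DER
 * AlgorithmIdentifier for a given digest NID.  Only the lookup function
 * defined above (its prototype is repeated here so the example stands alone)
 * and the public NID_sha256 constant from <openssl/objects.h> are used.  For
 * NID_sha256 the table above yields the 12 bytes
 * 30 0a 06 08 2a 86 48 ce 3d 04 03 02, i.e. a SEQUENCE wrapping OID
 * 1.2.840.10045.4.3.2 (ecdsa-with-SHA256) with no parameters.
 */

#include <stdio.h>
#include <openssl/objects.h>

const unsigned char *ecdsa_algorithmidentifier_encoding(int md_nid, size_t *len);

static void example_dump_algid(int md_nid)
{
    size_t len = 0;
    const unsigned char *der = ecdsa_algorithmidentifier_encoding(md_nid, &len);

    if (der == NULL) {
        printf("no ECDSA AlgorithmIdentifier for NID %d\n", md_nid);
        return;
    }

    for (size_t i = 0; i < len; i++)
        printf("%02x ", der[i]);
    printf("\n");
}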
831636.c
/* * Copyright (c) 1980, 1983 Regents of the University of California. * All rights reserved. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by the University of California, Berkeley. The name of the * University may not be used to endorse or promote products derived * from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <sys/param.h> #include <sys/file.h> #include <ndbm.h> #include <pwd.h> #include <stdio.h> #include <string.h> #include <strings.h> #include <stdlib.h> static FILE *_pw_fp; static struct passwd _pw_passwd; static off_t offset; #define MAXLINELENGTH 256 static char line[MAXLINELENGTH]; /* from libc/gen/getpwent.c */ static scanpw() { register char *cp; char *bp; for (;;) { offset = ftell(_pw_fp); if (!(fgets(line, sizeof(line), _pw_fp))) return(0); bp = line; /* skip lines that are too big */ if (!(cp = index(line, '\n'))) { int ch; while ((ch = getc(_pw_fp)) != '\n' && ch != EOF) ; continue; } *cp = '\0'; _pw_passwd.pw_name = strsep(&bp, ":"); _pw_passwd.pw_passwd = strsep(&bp, ":"); offset += _pw_passwd.pw_passwd - line; if (!(cp = strsep(&bp, ":"))) continue; _pw_passwd.pw_uid = atoi(cp); if (!(cp = strsep(&bp, ":"))) continue; _pw_passwd.pw_gid = atoi(cp); _pw_passwd.pw_gecos = strsep(&bp, ":"); _pw_passwd.pw_dir = strsep(&bp, ":"); _pw_passwd.pw_shell = strsep(&bp, ":"); return(1); } /* NOTREACHED */ } /* * Mkpasswd does two things -- use the ``arg'' file to create ``arg''.{pag,dir} * for ndbm, and, if the -p flag is on, create a password file in the original * format. It doesn't use the getpwent(3) routines because it has to figure * out offsets for the encrypted passwords to put in the dbm files. One other * problem is that, since the addition of shadow passwords, getpwent(3) has to * use the dbm databases rather than simply scanning the actual file. This * required the addition of a flag field to the dbm database to distinguish * between a record keyed by name, and one keyed by uid. 
*/ main(argc, argv) int argc; char **argv; { extern int errno, optind; register char *flag, *p, *t; register int makeold; FILE *oldfp; DBM *dp; datum key, content; int ch; char buf[256], nbuf[50]; makeold = 0; while ((ch = getopt(argc, argv, "pv")) != EOF) switch(ch) { case 'p': /* create ``password.orig'' */ makeold = 1; /* FALLTHROUGH */ case 'v': /* backward compatible */ break; case '?': default: usage(); } argc -= optind; argv += optind; if (argc != 1) usage(); if (!(_pw_fp = fopen(*argv, "r"))) { (void)fprintf(stderr, "mkpasswd: %s: can't open for reading.\n", *argv); exit(1); } rmall(*argv); (void)umask(0); /* open old password format file, dbm files */ if (makeold) { int oldfd; (void)sprintf(buf, "%s.orig", *argv); if ((oldfd = open(buf, O_WRONLY|O_CREAT|O_EXCL, 0644)) < 0) { (void)fprintf(stderr, "mkpasswd: %s: %s\n", buf, strerror(errno)); exit(1); } if (!(oldfp = fdopen(oldfd, "w"))) { (void)fprintf(stderr, "mkpasswd: %s: fdopen failed.\n", buf); exit(1); } } if (!(dp = dbm_open(*argv, O_WRONLY|O_CREAT|O_EXCL, 0644))) { (void)fprintf(stderr, "mkpasswd: %s: %s\n", *argv, strerror(errno)); exit(1); } content.dptr = buf; while (scanpw()) { /* create dbm entry */ p = buf; #define COMPACT(e) t = e; while (*p++ = *t++); COMPACT(_pw_passwd.pw_name); (void)sprintf(nbuf, "%ld", offset); COMPACT(nbuf); bcopy((char *)&_pw_passwd.pw_uid, p, sizeof(int)); p += sizeof(int); bcopy((char *)&_pw_passwd.pw_gid, p, sizeof(int)); p += sizeof(int); COMPACT(_pw_passwd.pw_gecos); COMPACT(_pw_passwd.pw_dir); COMPACT(_pw_passwd.pw_shell); flag = p; *p++ = _PW_KEYBYNAME; content.dsize = p - buf; #ifdef debug (void)printf("store %s, uid %d\n", _pw_passwd.pw_name, _pw_passwd.pw_uid); #endif key.dptr = _pw_passwd.pw_name; key.dsize = strlen(_pw_passwd.pw_name); if (dbm_store(dp, key, content, DBM_INSERT) < 0) goto bad; key.dptr = (char *)&_pw_passwd.pw_uid; key.dsize = sizeof(int); *flag = _PW_KEYBYUID; if (dbm_store(dp, key, content, DBM_INSERT) < 0) goto bad; /* create original format password file entry */ if (!makeold) continue; fprintf(oldfp, "%s:%d:%d:%d:%s:%s:%s\n", _pw_passwd.pw_name, offset, _pw_passwd.pw_uid, _pw_passwd.pw_gid, _pw_passwd.pw_gecos, _pw_passwd.pw_dir, _pw_passwd.pw_shell); } dbm_close(dp); exit(0); bad: (void)fprintf(stderr, "mkpasswd: dbm_store failed.\n"); rmall(*argv); exit(1); } rmall(fname) char *fname; { register char *p; char buf[MAXPATHLEN]; for (p = strcpy(buf, fname); *p; ++p); bcopy(".pag", p, 5); (void)unlink(buf); bcopy(".dir", p, 5); (void)unlink(buf); bcopy(".orig", p, 6); (void)unlink(buf); } usage() { (void)fprintf(stderr, "usage: mkpasswd [-p] passwd_file\n"); exit(1); }
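/* Reader-side sketch (not part of the original program): how a lookup routine
 * would unpack the dbm record built by the COMPACT() sequence above.  The
 * packed layout, in writer order, is: name\0, the file offset of the
 * encrypted password as a decimal string\0, uid and gid as raw ints, then
 * gecos\0, dir\0, shell\0 and the one-byte key-type flag (_PW_KEYBYNAME or
 * _PW_KEYBYUID).  Everything apart from that layout is illustrative.
 */

#include <ndbm.h>
#include <pwd.h>
#include <stdlib.h>
#include <string.h>

static void example_unpack(datum content, struct passwd *pw, long *pw_offset,
                           char *keytype)
{
    char *p = content.dptr;
    int uid, gid;

    pw->pw_name = p;              p += strlen(p) + 1;
    *pw_offset = atol(p);         p += strlen(p) + 1;  /* password offset */
    memcpy(&uid, p, sizeof(int)); p += sizeof(int);
    memcpy(&gid, p, sizeof(int)); p += sizeof(int);
    pw->pw_uid = uid;
    pw->pw_gid = gid;
    pw->pw_gecos = p;             p += strlen(p) + 1;
    pw->pw_dir = p;               p += strlen(p) + 1;
    pw->pw_shell = p;             p += strlen(p) + 1;
    *keytype = *p;                /* flag byte written last by the writer */
}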
545480.c
/*- * Copyright (c) 2014 Leon Dang <[email protected]> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * XHCI options: * -s <n>,xhci,{devices} * * devices: * tablet USB tablet mouse */ /* * xHCI DRD control flow digram. * +---------------------------+ * | ACRN DM | * | +---------------------+ | * | | xhci emulator | | * | | | | * | | +---------------+ | | * | | | drd emulator |<----------+ +----------------------+ * | | +---------------+ | | | | app | * | +---------|-----------+ | | +----------------------+ * +------------|--------------+ | echo H or D | * | SOS USER SPACE | | UOS USER SPACE * -------------|--------------------|-------------|----------------- * v SOS KERNEL SPACE | v UOS KERNEL SPACE * +------------------------------+ | +--------------------------+ * | native drd sysfs interface | | |native drd sysfs interface| * +------------------------------+ | +--------------------------+ * | | | * v | v * +------------------------+ | +----------------------+ * | natvie drd driver | +----| native drd driver | * +------------------------+ +----------------------+ * | * -------------|--------------------------------------------------- * HARDWARE | * +------------|----------+ * |xHCI v | +-----------+ * | +----------------+ | | xDCI | * | | switch control | | +-----------+ * | +-------+--------+ | | * +-----------+-----------+ | * | | | * | +----+---------+ * | | * | +------+------+ * +-----| PHY MUX | * +---+-----+---+ * | | * +---+ +---+ * +---+----+ +----+---+ * |USB2 PHY| |USB3 PHY| * +--------+ +--------+ */ #include <sys/types.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <unistd.h> #include <fcntl.h> #include <ctype.h> #include "usb.h" #include "usbdi.h" #include "xhcireg.h" #include "dm.h" #include "pci_core.h" #include "xhci.h" #include "usb_pmapper.h" #include "vmmapi.h" #undef LOG_TAG #define LOG_TAG "xHCI: " #define XHCI_MAX_DEVS 20 /* 10 root hub + 10 external hub */ #define XHCI_MAX_SLOTS 64 /* min allowed by Windows drivers */ /* * XHCI data structures can be up to 64k, but limit paddr_guest2host mapping * to 4k to avoid going over the guest physical memory barrier. 
*/ #define XHCI_PADDR_SZ 4096 /* paddr_guest2host max size */ #define XHCI_ERST_MAX 0 /* max 2^entries event ring seg tbl */ #define XHCI_CAPLEN (4*8) /* offset of op register space */ #define XHCI_HCCPRAMS2 0x1C /* offset of HCCPARAMS2 register */ #define XHCI_PORTREGS_START 0x400 #define XHCI_DOORBELL_MAX 256 #define XHCI_STREAMS_MAX 1 /* 4-15 in XHCI spec */ /* caplength and hci-version registers */ #define XHCI_SET_CAPLEN(x) ((x) & 0xFF) #define XHCI_SET_HCIVERSION(x) (((x) & 0xFFFF) << 16) #define XHCI_GET_HCIVERSION(x) (((x) >> 16) & 0xFFFF) /* hcsparams1 register */ #define XHCI_SET_HCSP1_MAXSLOTS(x) ((x) & 0xFF) #define XHCI_SET_HCSP1_MAXINTR(x) (((x) & 0x7FF) << 8) #define XHCI_SET_HCSP1_MAXPORTS(x) (((x) & 0xFF) << 24) /* hcsparams2 register */ #define XHCI_SET_HCSP2_IST(x) ((x) & 0x0F) #define XHCI_SET_HCSP2_ERSTMAX(x) (((x) & 0x0F) << 4) #define XHCI_SET_HCSP2_MAXSCRATCH_HI(x) (((x) & 0x1F) << 21) #define XHCI_SET_HCSP2_MAXSCRATCH_LO(x) (((x) & 0x1F) << 27) /* hcsparams3 register */ #define XHCI_SET_HCSP3_U1EXITLATENCY(x) ((x) & 0xFF) #define XHCI_SET_HCSP3_U2EXITLATENCY(x) (((x) & 0xFFFF) << 16) /* hccparams1 register */ #define XHCI_SET_HCCP1_AC64(x) ((x) & 0x01) #define XHCI_SET_HCCP1_BNC(x) (((x) & 0x01) << 1) #define XHCI_SET_HCCP1_CSZ(x) (((x) & 0x01) << 2) #define XHCI_SET_HCCP1_PPC(x) (((x) & 0x01) << 3) #define XHCI_SET_HCCP1_PIND(x) (((x) & 0x01) << 4) #define XHCI_SET_HCCP1_LHRC(x) (((x) & 0x01) << 5) #define XHCI_SET_HCCP1_LTC(x) (((x) & 0x01) << 6) #define XHCI_SET_HCCP1_NSS(x) (((x) & 0x01) << 7) #define XHCI_SET_HCCP1_PAE(x) (((x) & 0x01) << 8) #define XHCI_SET_HCCP1_SPC(x) (((x) & 0x01) << 9) #define XHCI_SET_HCCP1_SEC(x) (((x) & 0x01) << 10) #define XHCI_SET_HCCP1_CFC(x) (((x) & 0x01) << 11) #define XHCI_SET_HCCP1_MAXPSA(x) (((x) & 0x0F) << 12) #define XHCI_SET_HCCP1_XECP(x) (((x) & 0xFFFF) << 16) /* hccparams2 register */ #define XHCI_SET_HCCP2_U3C(x) ((x) & 0x01) #define XHCI_SET_HCCP2_CMC(x) (((x) & 0x01) << 1) #define XHCI_SET_HCCP2_FSC(x) (((x) & 0x01) << 2) #define XHCI_SET_HCCP2_CTC(x) (((x) & 0x01) << 3) #define XHCI_SET_HCCP2_LEC(x) (((x) & 0x01) << 4) #define XHCI_SET_HCCP2_CIC(x) (((x) & 0x01) << 5) /* other registers */ #define XHCI_SET_DOORBELL(x) ((x) & ~0x03) #define XHCI_SET_RTSOFFSET(x) ((x) & ~0x0F) /* register masks */ #define XHCI_PS_PLS_MASK (0xF << 5) /* port link state */ #define XHCI_PS_SPEED_MASK (0xF << 10) /* port speed */ #define XHCI_PS_PIC_MASK (0x3 << 14) /* port indicator */ /* port register set */ #define XHCI_PORTREGS_BASE 0x400 /* base offset */ #define XHCI_PORTREGS_PORT0 0x3F0 #define XHCI_PORTREGS_SETSZ 0x10 /* size of a set */ #define MASK_64_HI(x) ((x) & ~0xFFFFFFFFULL) #define MASK_64_LO(x) ((x) & 0xFFFFFFFFULL) #define FIELD_REPLACE(a, b, m, s) (((a) & ~((m) << (s))) | \ (((b) & (m)) << (s))) #define FIELD_COPY(a, b, m, s) (((a) & ~((m) << (s))) | \ (((b) & ((m) << (s))))) struct pci_xhci_trb_ring { uint64_t ringaddr; /* current dequeue guest address */ uint32_t ccs; /* consumer cycle state */ }; /* device endpoint transfer/stream rings */ struct pci_xhci_dev_ep { union { struct xhci_trb *_epu_tr; struct xhci_stream_ctx *_epu_sctx; } _ep_trbsctx; #define ep_tr _ep_trbsctx._epu_tr #define ep_sctx _ep_trbsctx._epu_sctx union { struct pci_xhci_trb_ring _epu_trb; struct pci_xhci_trb_ring *_epu_sctx_trbs; } _ep_trb_rings; #define ep_ringaddr _ep_trb_rings._epu_trb.ringaddr #define ep_ccs _ep_trb_rings._epu_trb.ccs #define ep_sctx_trbs _ep_trb_rings._epu_sctx_trbs struct usb_data_xfer *ep_xfer; /* transfer chain */ }; /* 
device context base address array: maps slot->device context */ struct xhci_dcbaa { uint64_t dcba[USB_MAX_DEVICES+1]; /* xhci_dev_ctx ptrs */ }; /* port status registers */ struct pci_xhci_portregs { uint32_t portsc; /* port status and control */ uint32_t portpmsc; /* port pwr mgmt status & control */ uint32_t portli; /* port link info */ uint32_t porthlpmc; /* port hardware LPM control */ } __attribute__((packed)); #define XHCI_PS_SPEED_SET(x) (((x) & 0xF) << 10) /* xHC operational registers */ struct pci_xhci_opregs { uint32_t usbcmd; /* usb command */ uint32_t usbsts; /* usb status */ uint32_t pgsz; /* page size */ uint32_t dnctrl; /* device notification control */ uint64_t crcr; /* command ring control */ uint64_t dcbaap; /* device ctx base addr array ptr */ uint32_t config; /* configure */ /* guest mapped addresses: */ struct xhci_trb *cr_p; /* crcr dequeue */ struct xhci_dcbaa *dcbaa_p; /* dev ctx array ptr */ }; /* xHC runtime registers */ struct pci_xhci_rtsregs { uint32_t mfindex; /* microframe index */ struct { /* interrupter register set */ uint32_t iman; /* interrupter management */ uint32_t imod; /* interrupter moderation */ uint32_t erstsz; /* event ring segment table size */ uint32_t rsvd; uint64_t erstba; /* event ring seg-tbl base addr */ uint64_t erdp; /* event ring dequeue ptr */ } intrreg __attribute__((packed)); /* guest mapped addresses */ struct xhci_event_ring_seg *erstba_p; struct xhci_trb *erst_p; /* event ring segment tbl */ int er_deq_seg; /* event ring dequeue segment */ int er_enq_idx; /* event ring enqueue index - xHCI */ int er_enq_seg; /* event ring enqueue segment */ uint32_t er_events_cnt; /* number of events in ER */ uint32_t event_pcs; /* producer cycle state flag */ }; struct pci_xhci_excap_ptr { uint8_t cap_id; uint8_t cap_ptr; } __attribute__((packed)); struct pci_xhci_excap_drd_apl { struct pci_xhci_excap_ptr excap_ptr; uint8_t padding[102]; /* Followed native xHCI MMIO layout */ uint32_t drdcfg0; uint32_t drdcfg1; } __attribute__((packed)); struct pci_xhci_excap_prot { struct pci_xhci_excap_ptr excap_ptr; uint8_t rev_min; uint8_t rev_maj; char string[4]; uint8_t port_off; uint8_t port_cnt; uint16_t psic_prot_def; uint32_t reserve; } __attribute__((packed)); struct pci_xhci_excap { uint32_t start; uint32_t end; void *data; }; static DEFINE_EXCP_PROT(u2_prot, 0x08, 2, XHCI_MAX_DEVS/2 + 1, XHCI_MAX_DEVS/2); static DEFINE_EXCP_PROT(u3_prot, 0x14, 3, 1, XHCI_MAX_DEVS/2); static DEFINE_EXCP_VENDOR_DRD(XHCI_ID_DRD_INTEL, 0x00, 0x00, 0x00); /* * Extended capabilities layout of APL platform. * excap start excap end register value * 0x8000 0x8010 0x02000802 * 0x8020 0x8030 0x03001402 * 0x8070 0x80E0 0x000000C0 */ struct pci_xhci_excap excap_group_apl[] = { {0x8000, 0x8010, &excap_u2_prot}, {0x8020, 0x8030, &excap_u3_prot}, {0x8070, 0x80E0, &excap_drd_apl}, {EXCAP_GROUP_END, EXCAP_GROUP_END, EXCAP_GROUP_NULL} }; /* * default xhci extended capabilities * excap start excap end register value * 0x8000 0x8010 0x02000802 * 0x8020 0x8030 0x03001402 */ struct pci_xhci_excap excap_group_dft[] = { {0x8000, 0x8010, &excap_u2_prot}, {0x8020, 0x8030, &excap_u3_prot}, {EXCAP_GROUP_END, EXCAP_GROUP_END, EXCAP_GROUP_NULL} }; struct pci_xhci_vdev; /* * USB device emulation container. * This is referenced from usb_hci->dev; 1 pci_xhci_dev_emu for each * emulated device instance. 
*/ struct pci_xhci_dev_emu { struct pci_xhci_vdev *xdev; /* XHCI contexts */ struct xhci_dev_ctx *dev_ctx; struct pci_xhci_dev_ep eps[XHCI_MAX_ENDPOINTS]; int dev_slotstate; struct usb_devemu *dev_ue; /* USB emulated dev */ void *dev_instance; /* device's instance */ struct usb_hci hci; }; struct pci_xhci_vdev { struct pci_vdev *dev; pthread_mutex_t mtx; uint32_t caplength; /* caplen & hciversion */ uint32_t hcsparams1; /* structural parameters 1 */ uint32_t hcsparams2; /* structural parameters 2 */ uint32_t hcsparams3; /* structural parameters 3 */ uint32_t hccparams1; /* capability parameters 1 */ uint32_t dboff; /* doorbell offset */ uint32_t rtsoff; /* runtime register space offset */ uint32_t hccparams2; /* capability parameters 2 */ uint32_t excapoff; /* ext-capability registers offset */ uint32_t regsend; /* end of configuration registers */ struct pci_xhci_opregs opregs; struct pci_xhci_rtsregs rtsregs; struct pci_xhci_portregs *portregs; struct pci_xhci_dev_emu **devices; /* XHCI[port] = device */ struct pci_xhci_dev_emu **slots; /* slots assigned from 1 */ bool slot_allocated[XHCI_MAX_SLOTS]; int ndevices; uint16_t pid; uint16_t vid; void *excap_ptr; int (*excap_write)(struct pci_xhci_vdev *, uint64_t, uint64_t); int usb2_port_start; int usb3_port_start; uint16_t port_map_tbl[USB_NATIVE_NUM_BUS][USB_NATIVE_NUM_PORT]; struct usb_native_devinfo native_dev_info[USB_NATIVE_NUM_BUS][USB_NATIVE_NUM_PORT]; struct timespec mf_prev_time; /* previous time of accessing MFINDEX */ }; /* portregs and devices arrays are set up to start from idx=1 */ #define XHCI_PORTREG_PTR(x, n) (&(x)->portregs[(n)]) #define XHCI_DEVINST_PTR(x, n) ((x)->devices[(n)]) #define XHCI_SLOTDEV_PTR(x, n) ((x)->slots[(n)]) #define XHCI_HALTED(xdev) ((xdev)->opregs.usbsts & XHCI_STS_HCH) #define XHCI_GADDR(xdev, a) paddr_guest2host((xdev)->dev->vmctx, (a), \ XHCI_PADDR_SZ - ((a) & (XHCI_PADDR_SZ-1))) /* port mapping status */ #define VPORT_FREE (0) #define VPORT_ASSIGNED (1) #define VPORT_CONNECTED (2) #define VPORT_EMULATED (3) #define VPORT_HUB_CONNECTED (4) /* helpers for get port mapping information */ #define VPORT_NUM(state) (state & 0xFF) #define VPORT_STATE(state) ((state >> 8) & 0xFF) #define VPORT_NUM_STATE(status, num) (((status & 0xFF) << 8) | (num & 0xFF)) struct pci_xhci_option_elem { char *parse_opt; int (*parse_fn)(struct pci_xhci_vdev *, char *); }; static int xhci_in_use; /* map USB errors to XHCI */ static const int xhci_usb_errors[USB_ERR_MAX] = { [USB_ERR_NORMAL_COMPLETION] = XHCI_TRB_ERROR_SUCCESS, [USB_ERR_PENDING_REQUESTS] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_NOT_STARTED] = XHCI_TRB_ERROR_ENDP_NOT_ON, [USB_ERR_INVAL] = XHCI_TRB_ERROR_INVALID, [USB_ERR_NOMEM] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_CANCELLED] = XHCI_TRB_ERROR_STOPPED, [USB_ERR_BAD_ADDRESS] = XHCI_TRB_ERROR_PARAMETER, [USB_ERR_BAD_BUFSIZE] = XHCI_TRB_ERROR_PARAMETER, [USB_ERR_BAD_FLAG] = XHCI_TRB_ERROR_PARAMETER, [USB_ERR_NO_CALLBACK] = XHCI_TRB_ERROR_STALL, [USB_ERR_IN_USE] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_NO_ADDR] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_NO_PIPE] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_ZERO_NFRAMES] = XHCI_TRB_ERROR_UNDEFINED, [USB_ERR_ZERO_MAXP] = XHCI_TRB_ERROR_UNDEFINED, [USB_ERR_SET_ADDR_FAILED] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_NO_POWER] = XHCI_TRB_ERROR_ENDP_NOT_ON, [USB_ERR_TOO_DEEP] = XHCI_TRB_ERROR_RESOURCE, [USB_ERR_IOERROR] = XHCI_TRB_ERROR_TRB, [USB_ERR_NOT_CONFIGURED] = XHCI_TRB_ERROR_ENDP_NOT_ON, [USB_ERR_TIMEOUT] = XHCI_TRB_ERROR_CMD_ABORTED, [USB_ERR_SHORT_XFER] = XHCI_TRB_ERROR_SHORT_PKT, 
[USB_ERR_STALLED] = XHCI_TRB_ERROR_STALL, [USB_ERR_INTERRUPTED] = XHCI_TRB_ERROR_CMD_ABORTED, [USB_ERR_DMA_LOAD_FAILED] = XHCI_TRB_ERROR_DATA_BUF, [USB_ERR_BAD_CONTEXT] = XHCI_TRB_ERROR_TRB, [USB_ERR_NO_ROOT_HUB] = XHCI_TRB_ERROR_UNDEFINED, [USB_ERR_NO_INTR_THREAD] = XHCI_TRB_ERROR_UNDEFINED, [USB_ERR_NOT_LOCKED] = XHCI_TRB_ERROR_UNDEFINED, }; #define USB_TO_XHCI_ERR(e) ((e) < USB_ERR_MAX ? xhci_usb_errors[(e)] : \ XHCI_TRB_ERROR_INVALID) static int pci_xhci_insert_event(struct pci_xhci_vdev *xdev, struct xhci_trb *evtrb, int do_intr); static void pci_xhci_dump_trb(struct xhci_trb *trb); static void pci_xhci_assert_interrupt(struct pci_xhci_vdev *xdev); static void pci_xhci_reset_slot(struct pci_xhci_vdev *xdev, int slot); static void pci_xhci_reset_port(struct pci_xhci_vdev *xdev, int portn, int warm); static void pci_xhci_update_ep_ring(struct pci_xhci_vdev *xdev, struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs); static void pci_xhci_init_port(struct pci_xhci_vdev *xdev, int portn); static int pci_xhci_connect_port(struct pci_xhci_vdev *xdev, int port, int usb_speed, int need_intr); static int pci_xhci_disconnect_port(struct pci_xhci_vdev *xdev, int port, int need_intr); static struct pci_xhci_dev_emu *pci_xhci_dev_create(struct pci_xhci_vdev * xdev, void *dev_data); static void pci_xhci_dev_destroy(struct pci_xhci_dev_emu *de); static void pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, uint32_t evtype); static int pci_xhci_xfer_complete(struct pci_xhci_vdev *xdev, struct usb_data_xfer *xfer, uint32_t slot, uint32_t epid, int *do_intr); static inline int pci_xhci_is_valid_portnum(int n); static int pci_xhci_parse_tablet(struct pci_xhci_vdev *xdev, char *opts); static int pci_xhci_parse_log_level(struct pci_xhci_vdev *xdev, char *opts); static int pci_xhci_parse_extcap(struct pci_xhci_vdev *xdev, char *opts); static struct pci_xhci_option_elem xhci_option_table[] = { {"tablet", pci_xhci_parse_tablet}, {"log", pci_xhci_parse_log_level}, {"cap", pci_xhci_parse_extcap} }; static enum usb_native_dev_type pci_xhci_get_dev_type(struct pci_xhci_vdev *xdev, void *dev_data) { uint16_t port, bus; struct usb_native_devinfo *di; assert(dev_data); di = dev_data; if (usb_get_parent_dev_type(di->priv_data, &bus, &port) == USB_HUB) { if (VPORT_STATE(xdev->port_map_tbl[bus][port]) == VPORT_HUB_CONNECTED) { di->port += PORT_HUB_BASE; return USB_VALID_SUB_DEV; } else return USB_INVALID_SUB_DEV; } return USB_DEV; } static int pci_xhci_get_free_rh_port(struct pci_xhci_vdev *xdev, struct usb_native_devinfo *di) { volatile int bus; volatile int ports, porte; volatile int i, j; int used; assert(xdev); assert(di); if (di->bcd < 0x300) { bus = 1; ports = xdev->usb2_port_start; } else { bus = 2; ports = xdev->usb3_port_start; } porte = ports + (XHCI_MAX_DEVS / 2); for (i = ports; i < porte; i++) { used = 0; for (j = 1; j < USB_NATIVE_NUM_PORT; ++j) { if (VPORT_NUM(xdev->port_map_tbl[bus][j]) == i) { used = 1; break; } } if (!used) return i; } return -1; } static int pci_xhci_native_usb_dev_conn_cb(void *hci_data, void *dev_data) { struct pci_xhci_vdev *xdev; struct usb_native_devinfo *di; int port; int need_intr = 1; enum usb_native_dev_type type; int state; int rc; xdev = hci_data; assert(xdev); assert(dev_data); assert(xdev->devices); assert(xdev->slots); di = dev_data; /* print physical information about new device */ UPRINTF(LDBG, "%04x:%04x %d-%d connecting.\r\n", di->vid, di->pid, di->bus, 
di->port); type = pci_xhci_get_dev_type(xdev, di); if (type == USB_DEV) { if (VPORT_STATE(xdev->port_map_tbl[di->bus][di->port]) == VPORT_FREE) { UPRINTF(LDBG, "%04x:%04x %d-%d doesn't belong to this" " vm, bye.\r\n", di->vid, di->pid, di->bus, di->port); goto errout; } } else if (type == USB_INVALID_SUB_DEV) return 0; state = VPORT_STATE(xdev->port_map_tbl[di->bus][di->port]); if (state == VPORT_CONNECTED || state == VPORT_EMULATED || state == VPORT_HUB_CONNECTED) { UPRINTF(LFTL, "do not support multiple hubs currently, reject " "device %d-%d\r\n", di->bus, di->port); goto errout; } rc = usb_dev_is_hub(di->priv_data); if (rc == USB_HUB) { xdev->port_map_tbl[di->bus][di->port] = VPORT_NUM_STATE(VPORT_HUB_CONNECTED, 0); return 0; } else if (rc == USB_TYPE_INVALID) { UPRINTF(LWRN, "usb_dev_is_hub failed\r\n"); goto errout; } UPRINTF(LDBG, "%04x:%04x %d-%d belong to this vm.\r\n", di->vid, di->pid, di->bus, di->port); port = pci_xhci_get_free_rh_port(xdev, di); if (port < 0) { UPRINTF(LFTL, "no free virtual port for native device %d-%d" "\r\n", di->bus, di->port); goto errout; } UPRINTF(LDBG, "%04X:%04X %d-%d is attached to virtual port %d.\r\n", di->vid, di->pid, di->bus, di->port, port); xdev->native_dev_info[di->bus][di->port] = *di; xdev->port_map_tbl[di->bus][di->port] = VPORT_NUM_STATE(VPORT_CONNECTED, port); /* TODO: should revisit in deeper level */ if (vm_get_suspend_mode() != VM_SUSPEND_NONE || xhci_in_use == 0) need_intr = 0; /* Trigger port change event for the arriving device */ if (pci_xhci_connect_port(xdev, port, di->speed, need_intr)) UPRINTF(LFTL, "fail to report port event\n"); return 0; errout: return -1; } static int pci_xhci_native_usb_dev_disconn_cb(void *hci_data, void *dev_data) { struct pci_xhci_vdev *xdev; struct pci_xhci_dev_emu *edev; struct usb_native_devinfo di; struct usb_dev *udev; uint8_t port, slot; uint16_t status; int need_intr = 1; assert(hci_data); assert(dev_data); xdev = hci_data; assert(xdev->devices); di = *((struct usb_native_devinfo *)dev_data); if (!pci_xhci_is_valid_portnum(di.port)) { UPRINTF(LFTL, "invalid physical port %d\r\n", di.port); return -1; } status = xdev->port_map_tbl[di.bus][di.port]; if (VPORT_STATE(status) == VPORT_HUB_CONNECTED) { xdev->port_map_tbl[di.bus][di.port] = VPORT_NUM_STATE(VPORT_ASSIGNED, 0); return 0; } for (port = 1; port <= XHCI_MAX_DEVS; ++port) { edev = xdev->devices[port]; if (!edev) continue; udev = edev->dev_instance; if (udev->info.port == di.port) { int old_t, new_t; uint8_t old_ports[7]; /* max USB hub tiers are 7 */ uint8_t new_ports[7]; /* get tiers and port info */ old_t = libusb_get_port_numbers(udev->info.priv_data, old_ports, sizeof(old_ports)); new_t = libusb_get_port_numbers(di.priv_data, new_ports, sizeof(new_ports)); if (old_t == new_t && !memcmp(old_ports, new_ports, old_t)) { di = udev->info; break; } UPRINTF(LFTL, "multi-hub is not supported yet\r\n"); } } if (port > XHCI_MAX_DEVS) { if (VPORT_STATE(status) == VPORT_CONNECTED && VPORT_NUM(status) > 0) { /* * When this place is reached, it means the physical * USB device is disconnected before the emulation * procedure is started. The related states should be * cleared for future connecting. 
*/ UPRINTF(LFTL, "disconnect VPORT_CONNECTED device: " "%d-%d vport %d\r\n", di.bus, di.port, VPORT_NUM(status)); pci_xhci_disconnect_port(xdev, VPORT_NUM(status), 0); xdev->port_map_tbl[di.bus][di.port] = VPORT_NUM_STATE( VPORT_ASSIGNED, 0); } UPRINTF(LFTL, "fail to find physical port %d\r\n", di.port); return -1; } for (slot = 1; slot < XHCI_MAX_SLOTS; ++slot) if (xdev->slots[slot] == edev) break; assert(VPORT_STATE(status) == VPORT_EMULATED || VPORT_STATE(status) == VPORT_CONNECTED); xdev->port_map_tbl[di.bus][di.port] = VPORT_NUM_STATE(VPORT_ASSIGNED, 0); /* TODO: should revisit this in deeper level */ if (vm_get_suspend_mode() != VM_SUSPEND_NONE) { XHCI_PORTREG_PTR(xdev, port)->portsc &= ~(XHCI_PS_CSC | XHCI_PS_CCS | XHCI_PS_PED | XHCI_PS_PP); edev->dev_slotstate = XHCI_ST_DISABLED; xdev->devices[port] = NULL; xdev->slots[slot] = NULL; pci_xhci_dev_destroy(edev); need_intr = 0; } UPRINTF(LDBG, "report virtual port %d status\r\n", port); if (pci_xhci_disconnect_port(xdev, port, need_intr)) { UPRINTF(LFTL, "fail to report event\r\n"); return -1; } /* * At this point, the resources allocated for virtual device * should not be released, it should be released in the * pci_xhci_cmd_disable_slot function. */ return 0; } /* * return value: * = 0: succeed without interrupt * > 0: succeed with interrupt * < 0: failure */ static int pci_xhci_usb_dev_notify_cb(void *hci_data, void *udev_data) { int slot, epid, intr, rc; struct usb_data_xfer *xfer; struct pci_xhci_dev_emu *edev; struct pci_xhci_vdev *xdev; xfer = udev_data; if (!xfer) return -1; epid = xfer->epid; edev = xfer->dev; if (!edev) return -1; xdev = edev->xdev; if (!xdev) return -1; slot = edev->hci.hci_address; rc = pci_xhci_xfer_complete(xdev, xfer, slot, epid, &intr); if (rc) return -1; else if (intr) return 1; else return 0; } static int pci_xhci_usb_dev_intr_cb(void *hci_data, void *udev_data) { struct pci_xhci_dev_emu *edev; edev = hci_data; if (edev && edev->xdev) pci_xhci_assert_interrupt(edev->xdev); return 0; } static struct pci_xhci_dev_emu* pci_xhci_dev_create(struct pci_xhci_vdev *xdev, void *dev_data) { struct usb_devemu *ue = NULL; struct pci_xhci_dev_emu *de = NULL; void *ud = NULL; int rc; assert(xdev); assert(dev_data); ue = calloc(1, sizeof(struct usb_devemu)); if (!ue) return NULL; /* * TODO: at present, the following functions are * enough. But for the purpose to be compatible with * usb_mouse.c, the high level design including the * function interface should be changed and refined * in future. 
*/ ue->ue_init = usb_dev_init; ue->ue_request = usb_dev_request; ue->ue_data = usb_dev_data; ue->ue_info = usb_dev_info; ue->ue_reset = usb_dev_reset; ue->ue_remove = NULL; ue->ue_stop = NULL; ue->ue_deinit = usb_dev_deinit; ue->ue_devtype = USB_DEV_PORT_MAPPER; ud = ue->ue_init(dev_data, NULL); if (!ud) goto errout; rc = ue->ue_info(ud, USB_INFO_VERSION, &ue->ue_usbver, sizeof(ue->ue_usbver)); if (rc < 0) goto errout; rc = ue->ue_info(ud, USB_INFO_SPEED, &ue->ue_usbspeed, sizeof(ue->ue_usbspeed)); if (rc < 0) goto errout; de = calloc(1, sizeof(struct pci_xhci_dev_emu)); if (!de) goto errout; de->xdev = xdev; de->dev_ue = ue; de->dev_instance = ud; de->hci.dev = NULL; de->hci.hci_intr = NULL; de->hci.hci_event = NULL; de->hci.hci_address = 0; return de; errout: if (ud) ue->ue_deinit(ud); free(ue); free(de); return NULL; } static void pci_xhci_dev_destroy(struct pci_xhci_dev_emu *de) { struct usb_devemu *ue; struct usb_dev *ud; if (de) { ue = de->dev_ue; ud = de->dev_instance; if (ue) { if (ue->ue_devtype == USB_DEV_PORT_MAPPER) { assert(ue->ue_deinit); if (ue->ue_deinit) ue->ue_deinit(ud); } } else return; if (ue->ue_devtype == USB_DEV_PORT_MAPPER) free(ue); free(de); } } static inline int pci_xhci_is_valid_portnum(int n) { return n > 0 && n <= XHCI_MAX_DEVS; } static int pci_xhci_convert_speed(int lspeed) { /* according to xhci spec, zero means undefined speed */ int speed = 0; switch (lspeed) { case USB_SPEED_LOW: speed = 0x2; break; case USB_SPEED_FULL: speed = 0x1; break; case USB_SPEED_HIGH: speed = 0x3; break; case USB_SPEED_SUPER: speed = 0x4; break; default: UPRINTF(LFTL, "unkown speed %08x\r\n", lspeed); } return speed; } static int pci_xhci_change_port(struct pci_xhci_vdev *xdev, int port, int usb_speed, int conn, int need_intr) { int speed, error; struct xhci_trb evtrb; struct pci_xhci_portregs *reg; assert(xdev != NULL); reg = XHCI_PORTREG_PTR(xdev, port); if (conn == 0) { reg->portsc &= ~XHCI_PS_CCS; reg->portsc |= (XHCI_PS_CSC | XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET)); } else { speed = pci_xhci_convert_speed(usb_speed); reg->portsc = XHCI_PS_CCS | XHCI_PS_PP | XHCI_PS_CSC; reg->portsc |= XHCI_PS_SPEED_SET(speed); } if (!need_intr) return 0; /* make an event for the guest OS */ pci_xhci_set_evtrb(&evtrb, port, XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); /* put it in the event ring */ error = pci_xhci_insert_event(xdev, &evtrb, 1); if (error != XHCI_TRB_ERROR_SUCCESS) UPRINTF(LWRN, "fail to report port change\r\n"); UPRINTF(LDBG, "%s: port %d:%08X\r\n", __func__, port, reg->portsc); return (error == XHCI_TRB_ERROR_SUCCESS) ? 
0 : -1; } static int pci_xhci_connect_port(struct pci_xhci_vdev *xdev, int port, int usb_speed, int intr) { return pci_xhci_change_port(xdev, port, usb_speed, 1, intr); } static int pci_xhci_disconnect_port(struct pci_xhci_vdev *xdev, int port, int intr) { /* for disconnect, the speed is useless */ return pci_xhci_change_port(xdev, port, 0, 0, intr); } static void pci_xhci_set_evtrb(struct xhci_trb *evtrb, uint64_t port, uint32_t errcode, uint32_t evtype) { evtrb->qwTrb0 = port << 24; evtrb->dwTrb2 = XHCI_TRB_2_ERROR_SET(errcode); evtrb->dwTrb3 = XHCI_TRB_3_TYPE_SET(evtype); } /* controller reset */ static void pci_xhci_reset(struct pci_xhci_vdev *xdev) { int i; xdev->rtsregs.er_enq_idx = 0; xdev->rtsregs.er_events_cnt = 0; xdev->rtsregs.event_pcs = 1; for (i = 1; i <= XHCI_MAX_SLOTS; i++) pci_xhci_reset_slot(xdev, i); } static uint32_t pci_xhci_usbcmd_write(struct pci_xhci_vdev *xdev, uint32_t cmd) { int do_intr = 0; int i; if (cmd & XHCI_CMD_RS) { do_intr = (xdev->opregs.usbcmd & XHCI_CMD_RS) == 0; xdev->opregs.usbcmd |= XHCI_CMD_RS; xdev->opregs.usbsts &= ~XHCI_STS_HCH; xdev->opregs.usbsts |= XHCI_STS_PCD; /* Queue port change event on controller run from stop */ if (do_intr) for (i = 1; i <= XHCI_MAX_DEVS; i++) { struct pci_xhci_dev_emu *dev; struct pci_xhci_portregs *port; struct xhci_trb evtrb; dev = XHCI_DEVINST_PTR(xdev, i); if (dev == NULL) continue; port = XHCI_PORTREG_PTR(xdev, i); port->portsc |= XHCI_PS_CSC | XHCI_PS_CCS; port->portsc &= ~XHCI_PS_PLS_MASK; /* * XHCI 4.19.3 USB2 RxDetect->Polling, * USB3 Polling->U0 */ if (dev->dev_ue->ue_usbver == 2) port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_POLL); else port->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_U0); pci_xhci_set_evtrb(&evtrb, i, XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); if (pci_xhci_insert_event(xdev, &evtrb, 0) != XHCI_TRB_ERROR_SUCCESS) break; } } else { xdev->opregs.usbcmd &= ~XHCI_CMD_RS; xdev->opregs.usbsts |= XHCI_STS_HCH; xdev->opregs.usbsts &= ~XHCI_STS_PCD; } /* start execution of schedule; stop when set to 0 */ cmd |= xdev->opregs.usbcmd & XHCI_CMD_RS; if (cmd & XHCI_CMD_HCRST) { /* reset controller */ pci_xhci_reset(xdev); cmd &= ~XHCI_CMD_HCRST; } cmd &= ~(XHCI_CMD_CSS | XHCI_CMD_CRS); if (do_intr) pci_xhci_assert_interrupt(xdev); return cmd; } static void pci_xhci_portregs_write(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value) { struct xhci_trb evtrb; struct pci_xhci_portregs *p; int port; uint32_t oldpls, newpls; if (xdev->portregs == NULL) return; port = (offset - XHCI_PORTREGS_PORT0) / XHCI_PORTREGS_SETSZ; offset = (offset - XHCI_PORTREGS_PORT0) % XHCI_PORTREGS_SETSZ; UPRINTF(LDBG, "portregs wr offset 0x%lx, port %u: 0x%lx\r\n", offset, port, value); assert(port >= 0); if (port > XHCI_MAX_DEVS) { UPRINTF(LWRN, "portregs_write port %d > ndevices\r\n", port); return; } if (XHCI_DEVINST_PTR(xdev, port) == NULL) { UPRINTF(LDBG, "portregs_write to unattached port %d\r\n", port); } p = XHCI_PORTREG_PTR(xdev, port); switch (offset) { case 0: /* port reset or warm reset */ if (value & (XHCI_PS_PR | XHCI_PS_WPR)) { pci_xhci_reset_port(xdev, port, value & XHCI_PS_WPR); break; } if ((p->portsc & XHCI_PS_PP) == 0) { UPRINTF(LWRN, "portregs_write to unpowered " "port %d\r\n", port); break; } /* Port status and control register */ oldpls = XHCI_PS_PLS_GET(p->portsc); newpls = XHCI_PS_PLS_GET(value); p->portsc &= XHCI_PS_PED | XHCI_PS_PLS_MASK | XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK; if (XHCI_DEVINST_PTR(xdev, port)) p->portsc |= XHCI_PS_CCS; p->portsc |= (value & ~(XHCI_PS_OCA | XHCI_PS_PR | 
XHCI_PS_PED | XHCI_PS_PLS_MASK | /* link state */ XHCI_PS_SPEED_MASK | XHCI_PS_PIC_MASK | /* port indicator */ XHCI_PS_LWS | XHCI_PS_DR | XHCI_PS_WPR)); /* clear control bits */ p->portsc &= ~(value & (XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC | XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC | XHCI_PS_CAS)); /* port disable request; for USB3, don't care */ if (value & XHCI_PS_PED) UPRINTF(LDBG, "Disable port %d request\r\n", port); if (!(value & XHCI_PS_LWS)) break; UPRINTF(LDBG, "Port new PLS: %d\r\n", newpls); switch (newpls) { case 0: /* U0 */ case 3: /* U3 */ if (oldpls != newpls) { p->portsc &= ~XHCI_PS_PLS_MASK; p->portsc |= XHCI_PS_PLS_SET(newpls) | XHCI_PS_PLC; if (oldpls != 0 && newpls == 0) { pci_xhci_set_evtrb(&evtrb, port, XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); pci_xhci_insert_event(xdev, &evtrb, 1); } } break; default: UPRINTF(LWRN, "Unhandled change port %d PLS %u\r\n", port, newpls); break; } break; case 4: /* Port power management status and control register */ p->portpmsc = value; break; case 8: /* Port link information register */ UPRINTF(LDBG, "attempted write to PORTLI, port %d\r\n", port); break; case 12: /* * Port hardware LPM control register. * For USB3, this register is reserved. */ p->porthlpmc = value; break; } } static int pci_xhci_apl_drdregs_write(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value) { int rc = 0, fd; char *mstr; int msz = 0; uint32_t drdcfg1 = 0; struct pci_xhci_excap *excap; struct pci_xhci_excap_drd_apl *excap_drd; assert(xdev); excap = xdev->excap_ptr; while (excap && excap->start != XHCI_APL_DRDCAP_BASE) excap++; if (!excap || !excap->data || excap->start != XHCI_APL_DRDCAP_BASE) { UPRINTF(LWRN, "drd extended capability can't be found\r\n"); return -1; } excap_drd = excap->data; offset -= XHCI_APL_DRDREGS_BASE; if (offset != XHCI_DRD_MUX_CFG0) { UPRINTF(LWRN, "drd configuration register access failed.\r\n"); return -1; } if (excap_drd->drdcfg0 == value) { UPRINTF(LDBG, "No mode switch action. Current drd: %s mode\r\n", excap_drd->drdcfg1 & XHCI_DRD_CFG1_HOST_MODE ? "host" : "device"); return 0; } excap_drd->drdcfg0 = value; if (value & XHCI_DRD_CFG0_IDPIN_EN) { if ((value & XHCI_DRD_CFG0_IDPIN) == 0) { mstr = XHCI_NATIVE_DRD_HOST_MODE; msz = strlen(XHCI_NATIVE_DRD_HOST_MODE); drdcfg1 |= XHCI_DRD_CFG1_HOST_MODE; } else { mstr = XHCI_NATIVE_DRD_DEV_MODE; msz = strlen(XHCI_NATIVE_DRD_DEV_MODE); drdcfg1 &= ~XHCI_DRD_CFG1_HOST_MODE; } } else return 0; fd = open(XHCI_NATIVE_DRD_SWITCH_PATH, O_WRONLY); if (fd < 0) { UPRINTF(LWRN, "drd native interface open failed\r\n"); return -1; } rc = write(fd, mstr, msz); close(fd); if (rc == msz) excap_drd->drdcfg1 = drdcfg1; else { UPRINTF(LWRN, "drd native interface write " "%s mode failed, drdcfg0: 0x%x, " "drdcfg1: 0x%x.\r\n", value & XHCI_DRD_CFG0_IDPIN ? 
"device" : "host", excap_drd->drdcfg0, excap_drd->drdcfg1); return -1; } return 0; } static void pci_xhci_excap_write(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value) { int rc = 0; assert(xdev); if (xdev->excap_ptr && xdev->excap_write) rc = xdev->excap_write(xdev, offset, value); else UPRINTF(LWRN, "write invalid offset 0x%lx\r\n", offset); if (rc) UPRINTF(LWRN, "something wrong for xhci excap offset " "0x%lx write \r\n", offset); } struct xhci_dev_ctx * pci_xhci_get_dev_ctx(struct pci_xhci_vdev *xdev, uint32_t slot) { uint64_t devctx_addr; struct xhci_dev_ctx *devctx; assert(slot > 0 && slot <= xdev->ndevices); assert(xdev->opregs.dcbaa_p != NULL); devctx_addr = xdev->opregs.dcbaa_p->dcba[slot]; if (devctx_addr == 0) { UPRINTF(LDBG, "get_dev_ctx devctx_addr == 0\r\n"); return NULL; } UPRINTF(LDBG, "get dev ctx, slot %u devctx addr %016lx\r\n", slot, devctx_addr); devctx = XHCI_GADDR(xdev, devctx_addr & ~0x3FUL); return devctx; } struct xhci_trb * pci_xhci_trb_next(struct pci_xhci_vdev *xdev, struct xhci_trb *curtrb, uint64_t *guestaddr) { struct xhci_trb *next; assert(curtrb != NULL); if (XHCI_TRB_3_TYPE_GET(curtrb->dwTrb3) == XHCI_TRB_TYPE_LINK) { if (guestaddr) *guestaddr = curtrb->qwTrb0 & ~0xFUL; next = XHCI_GADDR(xdev, curtrb->qwTrb0 & ~0xFUL); } else { if (guestaddr) *guestaddr += sizeof(struct xhci_trb) & ~0xFUL; next = curtrb + 1; } return next; } static void pci_xhci_assert_interrupt(struct pci_xhci_vdev *xdev) { xdev->rtsregs.intrreg.erdp |= XHCI_ERDP_LO_BUSY; xdev->rtsregs.intrreg.iman |= XHCI_IMAN_INTR_PEND; xdev->opregs.usbsts |= XHCI_STS_EINT; /* only trigger interrupt if permitted */ if ((xdev->opregs.usbcmd & XHCI_CMD_INTE) && (xdev->rtsregs.intrreg.iman & XHCI_IMAN_INTR_ENA)) { if (pci_msi_enabled(xdev->dev)) pci_generate_msi(xdev->dev, 0); else pci_lintr_assert(xdev->dev); } } static void pci_xhci_deassert_interrupt(struct pci_xhci_vdev *xdev) { if (!pci_msi_enabled(xdev->dev)) pci_lintr_assert(xdev->dev); } static int pci_xhci_init_ep(struct pci_xhci_dev_emu *dev, int epid) { struct xhci_dev_ctx *dev_ctx; struct pci_xhci_dev_ep *devep; struct xhci_endp_ctx *ep_ctx; uint32_t pstreams; int i; dev_ctx = dev->dev_ctx; ep_ctx = &dev_ctx->ctx_ep[epid]; devep = &dev->eps[epid]; pstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0); if (pstreams > 0) { UPRINTF(LDBG, "init_ep %d with pstreams %d\r\n", epid, pstreams); assert(devep->ep_sctx_trbs == NULL); devep->ep_sctx = XHCI_GADDR(dev->xdev, ep_ctx->qwEpCtx2 & XHCI_EPCTX_2_TR_DQ_PTR_MASK); devep->ep_sctx_trbs = calloc(pstreams, sizeof(struct pci_xhci_trb_ring)); for (i = 0; i < pstreams; i++) { devep->ep_sctx_trbs[i].ringaddr = devep->ep_sctx[i].qwSctx0 & XHCI_SCTX_0_TR_DQ_PTR_MASK; devep->ep_sctx_trbs[i].ccs = XHCI_SCTX_0_DCS_GET(devep->ep_sctx[i].qwSctx0); } } else { UPRINTF(LDBG, "init_ep %d with no pstreams\r\n", epid); devep->ep_ringaddr = ep_ctx->qwEpCtx2 & XHCI_EPCTX_2_TR_DQ_PTR_MASK; devep->ep_ccs = XHCI_EPCTX_2_DCS_GET(ep_ctx->qwEpCtx2); devep->ep_tr = XHCI_GADDR(dev->xdev, devep->ep_ringaddr); UPRINTF(LDBG, "init_ep tr DCS %x\r\n", devep->ep_ccs); } if (devep->ep_xfer == NULL) { devep->ep_xfer = malloc(sizeof(struct usb_data_xfer)); if (devep->ep_xfer) { USB_DATA_XFER_INIT(devep->ep_xfer); devep->ep_xfer->dev = (void *)dev; devep->ep_xfer->epid = epid; } else return -1; } return 0; } static void pci_xhci_disable_ep(struct pci_xhci_dev_emu *dev, int epid) { struct xhci_dev_ctx *dev_ctx; struct pci_xhci_dev_ep *devep; struct xhci_endp_ctx *ep_ctx; UPRINTF(LDBG, "pci_xhci disable_ep %d\r\n", epid); 
dev_ctx = dev->dev_ctx; ep_ctx = &dev_ctx->ctx_ep[epid]; ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_DISABLED; devep = &dev->eps[epid]; if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0 && devep->ep_sctx_trbs != NULL) free(devep->ep_sctx_trbs); if (devep->ep_xfer != NULL) { free(devep->ep_xfer); devep->ep_xfer = NULL; } memset(devep, 0, sizeof(struct pci_xhci_dev_ep)); } /* reset device at slot and data structures related to it */ static void pci_xhci_reset_slot(struct pci_xhci_vdev *xdev, int slot) { struct pci_xhci_dev_emu *dev; dev = XHCI_SLOTDEV_PTR(xdev, slot); if (!dev) UPRINTF(LDBG, "reset unassigned slot (%d)?\r\n", slot); else dev->dev_slotstate = XHCI_ST_DISABLED; /* TODO: reset ring buffer pointers */ } static int pci_xhci_insert_event(struct pci_xhci_vdev *xdev, struct xhci_trb *evtrb, int do_intr) { struct pci_xhci_rtsregs *rts; uint64_t erdp; int erdp_idx; int err; struct xhci_trb *evtrbptr; err = XHCI_TRB_ERROR_SUCCESS; rts = &xdev->rtsregs; erdp = rts->intrreg.erdp & ~0xF; erdp_idx = (erdp - rts->erstba_p[rts->er_deq_seg].qwEvrsTablePtr) / sizeof(struct xhci_trb); UPRINTF(LDBG, "insert event 0[%lx] 2[%x] 3[%x]\r\n" "\terdp idx %d/seg %d, enq idx %d/seg %d, pcs %u\r\n" "\t(erdp=0x%lx, erst=0x%lx, tblsz=%u, do_intr %d)\r\n", evtrb->qwTrb0, evtrb->dwTrb2, evtrb->dwTrb3, erdp_idx, rts->er_deq_seg, rts->er_enq_idx, rts->er_enq_seg, rts->event_pcs, erdp, rts->erstba_p->qwEvrsTablePtr, rts->erstba_p->dwEvrsTableSize, do_intr); evtrbptr = &rts->erst_p[rts->er_enq_idx]; /* TODO: multi-segment table */ if (rts->er_events_cnt >= rts->erstba_p->dwEvrsTableSize) { UPRINTF(LWRN, "[%d] cannot insert event; ring full\r\n", __LINE__); err = XHCI_TRB_ERROR_EV_RING_FULL; goto done; } if (rts->er_events_cnt == rts->erstba_p->dwEvrsTableSize - 1) { struct xhci_trb errev; if ((evtrbptr->dwTrb3 & 0x1) == (rts->event_pcs & 0x1)) { UPRINTF(LWRN, "[%d] insert evt err: ring full\r\n", __LINE__); errev.qwTrb0 = 0; errev.dwTrb2 = XHCI_TRB_2_ERROR_SET( XHCI_TRB_ERROR_EV_RING_FULL); errev.dwTrb3 = XHCI_TRB_3_TYPE_SET( XHCI_TRB_EVENT_HOST_CTRL) | rts->event_pcs; rts->er_events_cnt++; memcpy(&rts->erst_p[rts->er_enq_idx], &errev, sizeof(struct xhci_trb)); rts->er_enq_idx = (rts->er_enq_idx + 1) % rts->erstba_p->dwEvrsTableSize; err = XHCI_TRB_ERROR_EV_RING_FULL; do_intr = 1; goto done; } } else { rts->er_events_cnt++; } evtrb->dwTrb3 &= ~XHCI_TRB_3_CYCLE_BIT; evtrb->dwTrb3 |= rts->event_pcs; memcpy(&rts->erst_p[rts->er_enq_idx], evtrb, sizeof(struct xhci_trb)); rts->er_enq_idx = (rts->er_enq_idx + 1) % rts->erstba_p->dwEvrsTableSize; if (rts->er_enq_idx == 0) rts->event_pcs ^= 1; done: if (do_intr) pci_xhci_assert_interrupt(xdev); return err; } static struct usb_native_devinfo * pci_xhci_find_native_devinfo_by_vport(struct pci_xhci_vdev *xdev, uint8_t vport) { int i, j; assert(xdev); for (i = 0; i < USB_NATIVE_NUM_BUS; ++i) for (j = 0; j < USB_NATIVE_NUM_PORT; ++j) if (VPORT_NUM(xdev->port_map_tbl[i][j]) == vport) return &xdev->native_dev_info[i][j]; return NULL; } static uint32_t pci_xhci_cmd_enable_slot(struct pci_xhci_vdev *xdev, uint32_t *slot) { uint32_t cmderr; int i; cmderr = XHCI_TRB_ERROR_SUCCESS; for (i = 1; i <= XHCI_MAX_SLOTS; i++) if (xdev->slot_allocated[i] == false) break; if (i > XHCI_MAX_SLOTS) cmderr = XHCI_TRB_ERROR_NO_SLOTS; else { xdev->slot_allocated[i] = true; *slot = i; } UPRINTF(LDBG, "enable slot (error=%d) return slot %u\r\n", cmderr != XHCI_TRB_ERROR_SUCCESS, *slot); return cmderr; } static uint32_t pci_xhci_cmd_disable_slot(struct pci_xhci_vdev *xdev, 
uint32_t slot) { struct pci_xhci_dev_emu *dev; struct usb_dev *udev; struct usb_native_devinfo *di; uint32_t cmderr; int i; UPRINTF(LDBG, "pci_xhci disable slot %u\r\n", slot); cmderr = XHCI_TRB_ERROR_NO_SLOTS; if (xdev->portregs == NULL) goto done; if (slot > xdev->ndevices) { cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; goto done; } dev = XHCI_SLOTDEV_PTR(xdev, slot); if (dev) { if (dev->dev_slotstate == XHCI_ST_DISABLED) { cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; } else { dev->dev_slotstate = XHCI_ST_DISABLED; cmderr = XHCI_TRB_ERROR_SUCCESS; /* TODO: reset events and endpoints */ } } else { UPRINTF(LDBG, "disable NULL device, slot %d\r\n", slot); goto done; } for (i = 1; i <= XHCI_MAX_DEVS; ++i) if (dev == xdev->devices[i]) break; if (i <= XHCI_MAX_DEVS && XHCI_PORTREG_PTR(xdev, i)) { XHCI_PORTREG_PTR(xdev, i)->portsc &= ~(XHCI_PS_CSC | XHCI_PS_CCS | XHCI_PS_PED | XHCI_PS_PP); udev = dev->dev_instance; assert(udev); xdev->devices[i] = NULL; xdev->slots[slot] = NULL; xdev->slot_allocated[slot] = false; di = &udev->info; xdev->port_map_tbl[di->bus][di->port] = VPORT_NUM_STATE(VPORT_ASSIGNED, 0); UPRINTF(LINF, "disable slot %d for native device %d-%d" "\r\n", slot, di->bus, di->port); pci_xhci_dev_destroy(dev); } else UPRINTF(LWRN, "invalid slot %d\r\n", slot); done: return cmderr; } static uint32_t pci_xhci_cmd_reset_device(struct pci_xhci_vdev *xdev, uint32_t slot) { struct pci_xhci_dev_emu *dev; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep_ctx; uint32_t cmderr; int i; cmderr = XHCI_TRB_ERROR_NO_SLOTS; if (xdev->portregs == NULL) goto done; UPRINTF(LDBG, "pci_xhci reset device slot %u\r\n", slot); dev = XHCI_SLOTDEV_PTR(xdev, slot); if (!dev || dev->dev_slotstate == XHCI_ST_DISABLED) cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; else { dev->dev_slotstate = XHCI_ST_DEFAULT; dev->hci.hci_address = 0; dev_ctx = pci_xhci_get_dev_ctx(xdev, slot); if (!dev_ctx) { cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; goto done; } /* slot state */ dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_DEFAULT, 0x1F, 27); /* number of contexts */ dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); /* reset all eps other than ep-0 */ for (i = 2; i <= 31; i++) { ep_ctx = &dev_ctx->ctx_ep[i]; ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_DISABLED, 0x7, 0); } cmderr = XHCI_TRB_ERROR_SUCCESS; } pci_xhci_reset_slot(xdev, slot); done: return cmderr; } static uint32_t pci_xhci_cmd_address_device(struct pci_xhci_vdev *xdev, uint32_t slot, struct xhci_trb *trb) { struct pci_xhci_dev_emu *dev; struct xhci_input_dev_ctx *input_ctx; struct xhci_slot_ctx *islot_ctx; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep0_ctx; struct usb_native_devinfo *di; uint32_t cmderr; uint8_t rh_port; input_ctx = XHCI_GADDR(xdev, trb->qwTrb0 & ~0xFUL); islot_ctx = &input_ctx->ctx_slot; ep0_ctx = &input_ctx->ctx_ep[1]; cmderr = XHCI_TRB_ERROR_SUCCESS; UPRINTF(LDBG, "address device, input ctl: D 0x%08x A 0x%08x,\r\n" " slot %08x %08x %08x %08x\r\n" " ep0 %08x %08x %016lx %08x\r\n", input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, islot_ctx->dwSctx0, islot_ctx->dwSctx1, islot_ctx->dwSctx2, islot_ctx->dwSctx3, ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, ep0_ctx->dwEpCtx4); /* when setting address: drop-ctx=0, add-ctx=slot+ep0 */ if ((input_ctx->ctx_input.dwInCtx0 != 0) || (input_ctx->ctx_input.dwInCtx1 & 0x03) != 0x03) { UPRINTF(LDBG, "address device, input ctl invalid\r\n"); cmderr = XHCI_TRB_ERROR_TRB; goto done; } if (slot <= 0 || slot > 
XHCI_MAX_SLOTS || xdev->slot_allocated[slot] == false) { UPRINTF(LDBG, "address device, invalid slot %d\r\n", slot); cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; goto done; } dev = xdev->slots[slot]; if (!dev) { rh_port = XHCI_SCTX_1_RH_PORT_GET(islot_ctx->dwSctx1); di = pci_xhci_find_native_devinfo_by_vport(xdev, rh_port); if (di == NULL) { cmderr = XHCI_TRB_ERROR_TRB; UPRINTF(LFTL, "invalid root hub port %d\r\n", rh_port); goto done; } UPRINTF(LDBG, "create virtual device for %d-%d on virtual " "port %d\r\n", di->bus, di->port, rh_port); dev = pci_xhci_dev_create(xdev, di); if (!dev) { UPRINTF(LFTL, "fail to create device for %d-%d\r\n", di->bus, di->port); goto done; } xdev->devices[rh_port] = dev; xdev->ndevices++; xdev->slots[slot] = dev; dev->hci.hci_address = slot; } /* assign address to slot */ dev_ctx = pci_xhci_get_dev_ctx(xdev, slot); if (!dev_ctx) { cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; goto done; } UPRINTF(LDBG, "address device, dev ctx\r\n" " slot %08x %08x %08x %08x\r\n", dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3); dev = XHCI_SLOTDEV_PTR(xdev, slot); assert(dev != NULL); dev->hci.hci_address = slot; dev->dev_ctx = dev_ctx; if (dev->dev_ue->ue_reset == NULL || dev->dev_ue->ue_reset(dev->dev_instance) < 0) { cmderr = XHCI_TRB_ERROR_ENDP_NOT_ON; goto done; } memcpy(&dev_ctx->ctx_slot, islot_ctx, sizeof(struct xhci_slot_ctx)); dev_ctx->ctx_slot.dwSctx3 = XHCI_SCTX_3_SLOT_STATE_SET(XHCI_ST_SLCTX_ADDRESSED) | XHCI_SCTX_3_DEV_ADDR_SET(slot); memcpy(&dev_ctx->ctx_ep[1], ep0_ctx, sizeof(struct xhci_endp_ctx)); ep0_ctx = &dev_ctx->ctx_ep[1]; ep0_ctx->dwEpCtx0 = (ep0_ctx->dwEpCtx0 & ~0x7) | XHCI_EPCTX_0_EPSTATE_SET(XHCI_ST_EPCTX_RUNNING); if (pci_xhci_init_ep(dev, 1)) { cmderr = XHCI_TRB_ERROR_INCOMPAT_DEV; goto done; } dev->dev_slotstate = XHCI_ST_ADDRESSED; UPRINTF(LDBG, "address device, output ctx\r\n" " slot %08x %08x %08x %08x\r\n" " ep0 %08x %08x %016lx %08x\r\n", dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3, ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, ep0_ctx->dwEpCtx4); done: return cmderr; } static uint32_t pci_xhci_cmd_config_ep(struct pci_xhci_vdev *xdev, uint32_t slot, struct xhci_trb *trb) { struct xhci_input_dev_ctx *input_ctx; struct pci_xhci_dev_emu *dev; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep_ctx, *iep_ctx; uint32_t cmderr; int i; cmderr = XHCI_TRB_ERROR_SUCCESS; UPRINTF(LDBG, "config_ep slot %u\r\n", slot); dev = XHCI_SLOTDEV_PTR(xdev, slot); if (dev == NULL) { cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; goto done; } if ((trb->dwTrb3 & XHCI_TRB_3_DCEP_BIT) != 0) { UPRINTF(LDBG, "config_ep - deconfigure ep slot %u\r\n", slot); if (dev->dev_ue->ue_stop != NULL) dev->dev_ue->ue_stop(dev->dev_instance); dev->dev_slotstate = XHCI_ST_ADDRESSED; dev->hci.hci_address = 0; dev_ctx = pci_xhci_get_dev_ctx(xdev, slot); if (!dev_ctx) { cmderr = XHCI_TRB_ERROR_TRB; goto done; } /* number of contexts */ dev_ctx->ctx_slot.dwSctx0 = FIELD_REPLACE(dev_ctx->ctx_slot.dwSctx0, 1, 0x1F, 27); /* slot state */ dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_ADDRESSED, 0x1F, 27); /* disable endpoints */ for (i = 2; i < 32; i++) pci_xhci_disable_ep(dev, i); cmderr = XHCI_TRB_ERROR_SUCCESS; goto done; } if (dev->dev_slotstate < XHCI_ST_ADDRESSED) { UPRINTF(LWRN, "config_ep slotstate x%x != addressed\r\n", dev->dev_slotstate); cmderr = XHCI_TRB_ERROR_SLOT_NOT_ON; goto done; } /* In addressed/configured state; * for each 
drop endpoint ctx flag: * ep->state = DISABLED * for each add endpoint ctx flag: * cp(ep-in, ep-out) * ep->state = RUNNING * for each drop+add endpoint flag: * reset ep resources * cp(ep-in, ep-out) * ep->state = RUNNING * if input->DisabledCtx[2-31] < 30: (at least 1 ep not disabled) * slot->state = configured */ input_ctx = XHCI_GADDR(xdev, trb->qwTrb0 & ~0xFUL); dev_ctx = dev->dev_ctx; UPRINTF(LDBG, "config_ep inputctx: D:x%08x A:x%08x 7:x%08x\r\n", input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, input_ctx->ctx_input.dwInCtx7); for (i = 2; i <= 31; i++) { ep_ctx = &dev_ctx->ctx_ep[i]; if (input_ctx->ctx_input.dwInCtx0 & XHCI_INCTX_0_DROP_MASK(i)) { UPRINTF(LDBG, " config ep - dropping ep %d\r\n", i); pci_xhci_disable_ep(dev, i); } if (input_ctx->ctx_input.dwInCtx1 & XHCI_INCTX_1_ADD_MASK(i)) { iep_ctx = &input_ctx->ctx_ep[i]; UPRINTF(LDBG, " enable ep%d %08x %08x %016lx %08x\r\n", i, iep_ctx->dwEpCtx0, iep_ctx->dwEpCtx1, iep_ctx->qwEpCtx2, iep_ctx->dwEpCtx4); memcpy(ep_ctx, iep_ctx, sizeof(struct xhci_endp_ctx)); if (pci_xhci_init_ep(dev, i)) { cmderr = XHCI_TRB_ERROR_RESOURCE; goto error; } /* ep state */ ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); } } /* slot state to configured */ dev_ctx->ctx_slot.dwSctx3 = FIELD_REPLACE(dev_ctx->ctx_slot.dwSctx3, XHCI_ST_SLCTX_CONFIGURED, 0x1F, 27); dev_ctx->ctx_slot.dwSctx0 = FIELD_COPY(dev_ctx->ctx_slot.dwSctx0, input_ctx->ctx_slot.dwSctx0, 0x1F, 27); dev->dev_slotstate = XHCI_ST_CONFIGURED; UPRINTF(LDBG, "EP configured; slot %u [0]=0x%08x [1]=0x%08x" " [2]=0x%08x [3]=0x%08x\r\n", slot, dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3); done: return cmderr; error: for (; i >= 2; --i) pci_xhci_disable_ep(dev, i); return cmderr; } static uint32_t pci_xhci_cmd_reset_ep(struct pci_xhci_vdev *xdev, uint32_t slot, struct xhci_trb *trb) { struct pci_xhci_dev_emu *dev; struct pci_xhci_dev_ep *devep; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep_ctx; uint32_t cmderr, epid; uint32_t type; epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); UPRINTF(LDBG, "reset ep %u: slot %u\r\n", epid, slot); cmderr = XHCI_TRB_ERROR_SUCCESS; type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); dev = XHCI_SLOTDEV_PTR(xdev, slot); assert(dev != NULL); if (type == XHCI_TRB_TYPE_STOP_EP && (trb->dwTrb3 & XHCI_TRB_3_SUSP_EP_BIT) != 0) { /* XXX suspend endpoint for 10ms */ } if (epid < 1 || epid > 31) { UPRINTF(LDBG, "reset ep: invalid epid %u\r\n", epid); cmderr = XHCI_TRB_ERROR_TRB; goto done; } dev_ctx = dev->dev_ctx; assert(dev_ctx != NULL); ep_ctx = &dev_ctx->ctx_ep[epid]; if (type == XHCI_TRB_TYPE_RESET_EP && (ep_ctx->dwEpCtx0 & 0x7) != XHCI_ST_EPCTX_HALTED) { cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; goto done; } devep = &dev->eps[epid]; if (devep->ep_xfer != NULL) USB_DATA_XFER_RESET(devep->ep_xfer); ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) == 0) ep_ctx->qwEpCtx2 = devep->ep_ringaddr | devep->ep_ccs; UPRINTF(LDBG, "reset ep[%u] %08x %08x %016lx %08x\r\n", epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, ep_ctx->dwEpCtx4); done: return cmderr; } static uint32_t pci_xhci_find_stream(struct pci_xhci_vdev *xdev, struct xhci_endp_ctx *ep, uint32_t streamid, struct xhci_stream_ctx **osctx) { struct xhci_stream_ctx *sctx; uint32_t maxpstreams; maxpstreams = XHCI_EPCTX_0_MAXP_STREAMS_GET(ep->dwEpCtx0); if (maxpstreams == 0) return XHCI_TRB_ERROR_TRB; if (maxpstreams > XHCI_STREAMS_MAX) return 
XHCI_TRB_ERROR_INVALID_SID; if (XHCI_EPCTX_0_LSA_GET(ep->dwEpCtx0) == 0) { UPRINTF(LWRN, "find_stream; LSA bit not set\r\n"); return XHCI_TRB_ERROR_INVALID_SID; } /* only support primary stream */ if (streamid > maxpstreams) return XHCI_TRB_ERROR_STREAM_TYPE; sctx = XHCI_GADDR(xdev, ep->qwEpCtx2 & ~0xFUL) + streamid; if (!XHCI_SCTX_0_SCT_GET(sctx->qwSctx0)) return XHCI_TRB_ERROR_STREAM_TYPE; *osctx = sctx; return XHCI_TRB_ERROR_SUCCESS; } static uint32_t pci_xhci_cmd_set_tr(struct pci_xhci_vdev *xdev, uint32_t slot, struct xhci_trb *trb) { struct pci_xhci_dev_emu *dev; struct pci_xhci_dev_ep *devep; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep_ctx; uint32_t cmderr, epid; uint32_t streamid; cmderr = XHCI_TRB_ERROR_SUCCESS; dev = XHCI_SLOTDEV_PTR(xdev, slot); assert(dev != NULL); UPRINTF(LDBG, "set_tr: new-tr x%016lx, SCT %u DCS %u\r\n" " stream-id %u, slot %u, epid %u, C %u\r\n", (trb->qwTrb0 & ~0xF), (uint32_t)((trb->qwTrb0 >> 1) & 0x7), (uint32_t)(trb->qwTrb0 & 0x1), (trb->dwTrb2 >> 16) & 0xFFFF, XHCI_TRB_3_SLOT_GET(trb->dwTrb3), XHCI_TRB_3_EP_GET(trb->dwTrb3), trb->dwTrb3 & 0x1); epid = XHCI_TRB_3_EP_GET(trb->dwTrb3); if (epid < 1 || epid > 31) { UPRINTF(LDBG, "set_tr_deq: invalid epid %u\r\n", epid); cmderr = XHCI_TRB_ERROR_TRB; goto done; } dev_ctx = dev->dev_ctx; assert(dev_ctx != NULL); ep_ctx = &dev_ctx->ctx_ep[epid]; devep = &dev->eps[epid]; switch (XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)) { case XHCI_ST_EPCTX_STOPPED: case XHCI_ST_EPCTX_ERROR: break; default: UPRINTF(LDBG, "cmd set_tr invalid state %x\r\n", XHCI_EPCTX_0_EPSTATE_GET(ep_ctx->dwEpCtx0)); cmderr = XHCI_TRB_ERROR_CONTEXT_STATE; goto done; } streamid = XHCI_TRB_2_STREAM_GET(trb->dwTrb2); if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) > 0) { struct xhci_stream_ctx *sctx; sctx = NULL; cmderr = pci_xhci_find_stream(xdev, ep_ctx, streamid, &sctx); if (sctx != NULL) { assert(devep->ep_sctx != NULL); devep->ep_sctx[streamid].qwSctx0 = trb->qwTrb0; devep->ep_sctx_trbs[streamid].ringaddr = trb->qwTrb0 & ~0xF; devep->ep_sctx_trbs[streamid].ccs = XHCI_EPCTX_2_DCS_GET(trb->qwTrb0); } } else { if (streamid != 0) { UPRINTF(LDBG, "cmd set_tr streamid %x != 0\r\n", streamid); } ep_ctx->qwEpCtx2 = trb->qwTrb0 & ~0xFUL; devep->ep_ringaddr = ep_ctx->qwEpCtx2 & ~0xFUL; devep->ep_ccs = trb->qwTrb0 & 0x1; devep->ep_tr = XHCI_GADDR(xdev, devep->ep_ringaddr); UPRINTF(LDBG, "set_tr first TRB:\r\n"); pci_xhci_dump_trb(devep->ep_tr); } ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_STOPPED; done: return cmderr; } static uint32_t pci_xhci_cmd_eval_ctx(struct pci_xhci_vdev *xdev, uint32_t slot, struct xhci_trb *trb) { struct xhci_input_dev_ctx *input_ctx; struct xhci_slot_ctx *islot_ctx; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep0_ctx; uint32_t cmderr; input_ctx = XHCI_GADDR(xdev, trb->qwTrb0 & ~0xFUL); islot_ctx = &input_ctx->ctx_slot; ep0_ctx = &input_ctx->ctx_ep[1]; cmderr = XHCI_TRB_ERROR_SUCCESS; UPRINTF(LDBG, "eval ctx, input ctl: D 0x%08x A 0x%08x,\r\n" " slot %08x %08x %08x %08x\r\n" " ep0 %08x %08x %016lx %08x\r\n", input_ctx->ctx_input.dwInCtx0, input_ctx->ctx_input.dwInCtx1, islot_ctx->dwSctx0, islot_ctx->dwSctx1, islot_ctx->dwSctx2, islot_ctx->dwSctx3, ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, ep0_ctx->dwEpCtx4); /* this command expects drop-ctx=0 & add-ctx=slot+ep0 */ if ((input_ctx->ctx_input.dwInCtx0 != 0) || (input_ctx->ctx_input.dwInCtx1 & 0x03) == 0) { UPRINTF(LWRN, "eval ctx, input ctl invalid\r\n"); cmderr = XHCI_TRB_ERROR_TRB; goto done; } /* assign address to slot; in 
this emulation, slot_id = address */ dev_ctx = pci_xhci_get_dev_ctx(xdev, slot); if (dev_ctx == NULL) { cmderr = XHCI_TRB_ERROR_CMD_ABORTED; goto done; } UPRINTF(LDBG, "eval ctx, dev ctx\r\n" " slot %08x %08x %08x %08x\r\n", dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3); if (input_ctx->ctx_input.dwInCtx1 & 0x01) { /* slot ctx */ /* set max exit latency */ dev_ctx->ctx_slot.dwSctx1 = FIELD_COPY(dev_ctx->ctx_slot.dwSctx1, input_ctx->ctx_slot.dwSctx1, 0xFFFF, 0); /* set interrupter target */ dev_ctx->ctx_slot.dwSctx2 = FIELD_COPY(dev_ctx->ctx_slot.dwSctx2, input_ctx->ctx_slot.dwSctx2, 0x3FF, 22); } if (input_ctx->ctx_input.dwInCtx1 & 0x02) { /* control ctx */ /* set max packet size */ dev_ctx->ctx_ep[1].dwEpCtx1 = FIELD_COPY(dev_ctx->ctx_ep[1].dwEpCtx1, ep0_ctx->dwEpCtx1, 0xFFFF, 16); ep0_ctx = &dev_ctx->ctx_ep[1]; } UPRINTF(LDBG, "eval ctx, output ctx\r\n" " slot %08x %08x %08x %08x\r\n" " ep0 %08x %08x %016lx %08x\r\n", dev_ctx->ctx_slot.dwSctx0, dev_ctx->ctx_slot.dwSctx1, dev_ctx->ctx_slot.dwSctx2, dev_ctx->ctx_slot.dwSctx3, ep0_ctx->dwEpCtx0, ep0_ctx->dwEpCtx1, ep0_ctx->qwEpCtx2, ep0_ctx->dwEpCtx4); done: return cmderr; } static int pci_xhci_complete_commands(struct pci_xhci_vdev *xdev) { struct xhci_trb evtrb; struct xhci_trb *trb; uint64_t crcr; uint32_t ccs; /* cycle state (XHCI 4.9.2) */ uint32_t type; uint32_t slot; uint32_t cmderr; int error; error = 0; xdev->opregs.crcr |= XHCI_CRCR_LO_CRR; trb = xdev->opregs.cr_p; ccs = xdev->opregs.crcr & XHCI_CRCR_LO_RCS; crcr = xdev->opregs.crcr & ~0xF; while (1) { xdev->opregs.cr_p = trb; type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); if ((trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT) != (ccs & XHCI_TRB_3_CYCLE_BIT)) break; UPRINTF(LDBG, "cmd type 0x%x, Trb0 x%016lx dwTrb2 x%08x" " dwTrb3 x%08x, TRB_CYCLE %u/ccs %u\r\n", type, trb->qwTrb0, trb->dwTrb2, trb->dwTrb3, trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT, ccs); cmderr = XHCI_TRB_ERROR_SUCCESS; evtrb.dwTrb2 = 0; evtrb.dwTrb3 = (ccs & XHCI_TRB_3_CYCLE_BIT) | XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_CMD_COMPLETE); slot = 0; switch (type) { case XHCI_TRB_TYPE_LINK: /* 0x06 */ if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) ccs ^= XHCI_CRCR_LO_RCS; break; case XHCI_TRB_TYPE_ENABLE_SLOT: /* 0x09 */ cmderr = pci_xhci_cmd_enable_slot(xdev, &slot); break; case XHCI_TRB_TYPE_DISABLE_SLOT: /* 0x0A */ slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_disable_slot(xdev, slot); break; case XHCI_TRB_TYPE_ADDRESS_DEVICE: /* 0x0B */ slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_address_device(xdev, slot, trb); break; case XHCI_TRB_TYPE_CONFIGURE_EP: /* 0x0C */ slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_config_ep(xdev, slot, trb); break; case XHCI_TRB_TYPE_EVALUATE_CTX: /* 0x0D */ slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_eval_ctx(xdev, slot, trb); break; case XHCI_TRB_TYPE_RESET_EP: /* 0x0E */ UPRINTF(LDBG, "Reset Endpoint on slot %d\r\n", slot); slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_reset_ep(xdev, slot, trb); break; case XHCI_TRB_TYPE_STOP_EP: /* 0x0F */ UPRINTF(LDBG, "Stop Endpoint on slot %d\r\n", slot); slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_reset_ep(xdev, slot, trb); break; case XHCI_TRB_TYPE_SET_TR_DEQUEUE: /* 0x10 */ slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_set_tr(xdev, slot, trb); break; case XHCI_TRB_TYPE_RESET_DEVICE: /* 0x11 */ slot = XHCI_TRB_3_SLOT_GET(trb->dwTrb3); cmderr = pci_xhci_cmd_reset_device(xdev, slot); break; case 
XHCI_TRB_TYPE_FORCE_EVENT: /* 0x12 */ /* TODO: */ break; case XHCI_TRB_TYPE_NEGOTIATE_BW: /* 0x13 */ break; case XHCI_TRB_TYPE_SET_LATENCY_TOL: /* 0x14 */ break; case XHCI_TRB_TYPE_GET_PORT_BW: /* 0x15 */ break; case XHCI_TRB_TYPE_FORCE_HEADER: /* 0x16 */ break; case XHCI_TRB_TYPE_NOOP_CMD: /* 0x17 */ break; default: UPRINTF(LDBG, "unsupported cmd %x\r\n", type); break; } if (type != XHCI_TRB_TYPE_LINK) { /* * insert command completion event and assert intr */ evtrb.qwTrb0 = crcr; evtrb.dwTrb2 |= XHCI_TRB_2_ERROR_SET(cmderr); evtrb.dwTrb3 |= XHCI_TRB_3_SLOT_SET(slot); UPRINTF(LDBG, "command 0x%x result: 0x%x\r\n", type, cmderr); pci_xhci_insert_event(xdev, &evtrb, 1); } trb = pci_xhci_trb_next(xdev, trb, &crcr); } xdev->opregs.crcr = crcr | (xdev->opregs.crcr & XHCI_CRCR_LO_CA) | ccs; xdev->opregs.crcr &= ~XHCI_CRCR_LO_CRR; return error; } static void pci_xhci_dump_trb(struct xhci_trb *trb) { static const char *const trbtypes[] = { "RESERVED", "NORMAL", "SETUP_STAGE", "DATA_STAGE", "STATUS_STAGE", "ISOCH", "LINK", "EVENT_DATA", "NOOP", "ENABLE_SLOT", "DISABLE_SLOT", "ADDRESS_DEVICE", "CONFIGURE_EP", "EVALUATE_CTX", "RESET_EP", "STOP_EP", "SET_TR_DEQUEUE", "RESET_DEVICE", "FORCE_EVENT", "NEGOTIATE_BW", "SET_LATENCY_TOL", "GET_PORT_BW", "FORCE_HEADER", "NOOP_CMD" }; uint32_t type; type = XHCI_TRB_3_TYPE_GET(trb->dwTrb3); UPRINTF(LDBG, "trb[@%p] type x%02x %s 0:x%016lx 2:x%08x " "3:x%08x\r\n", trb, type, type <= XHCI_TRB_TYPE_NOOP_CMD ? trbtypes[type] : "INVALID", trb->qwTrb0, trb->dwTrb2, trb->dwTrb3); } static int pci_xhci_xfer_complete(struct pci_xhci_vdev *xdev, struct usb_data_xfer *xfer, uint32_t slot, uint32_t epid, int *do_intr) { struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep_ctx; struct xhci_trb *trb; struct xhci_trb evtrb; uint32_t trbflags; uint32_t edtla; uint32_t i; int err = XHCI_TRB_ERROR_SUCCESS; dev_ctx = pci_xhci_get_dev_ctx(xdev, slot); assert(dev_ctx != NULL); ep_ctx = &dev_ctx->ctx_ep[epid]; /* err is used as completion code and sent to guest driver */ switch (xfer->status) { case USB_ERR_STALLED: ep_ctx->dwEpCtx0 = (ep_ctx->dwEpCtx0 & ~0x7) | XHCI_ST_EPCTX_HALTED; err = XHCI_TRB_ERROR_STALL; break; case USB_ERR_SHORT_XFER: err = XHCI_TRB_ERROR_SHORT_PKT; break; case USB_ERR_TIMEOUT: case USB_ERR_IOERROR: err = XHCI_TRB_ERROR_XACT; break; case USB_ERR_BAD_BUFSIZE: err = XHCI_TRB_ERROR_BABBLE; break; case USB_ERR_NORMAL_COMPLETION: break; default: UPRINTF(LFTL, "unknown error %d\r\n", xfer->status); } *do_intr = 0; edtla = 0; /* go through list of TRBs and insert event(s) */ for (i = (uint32_t)xfer->head; xfer->ndata > 0; ) { evtrb.qwTrb0 = (uint64_t)xfer->data[i].hci_data; trb = XHCI_GADDR(xdev, evtrb.qwTrb0); trbflags = trb->dwTrb3; UPRINTF(LDBG, "xfer[%d] done?%u:%d trb %x %016lx %x " "(err %d) IOC?%d\r\n", i, xfer->data[i].processed, xfer->data[i].blen, XHCI_TRB_3_TYPE_GET(trbflags), evtrb.qwTrb0, trbflags, err, trb->dwTrb3 & XHCI_TRB_3_IOC_BIT ? 
1 : 0); if (xfer->data[i].processed < USB_XFER_BLK_HANDLED) { xfer->head = (int)i; break; } xfer->data[i].processed = USB_XFER_BLK_FREE; xfer->ndata--; xfer->head = (xfer->head + 1) % USB_MAX_XFER_BLOCKS; edtla += xfer->data[i].bdone; trb->dwTrb3 = (trb->dwTrb3 & ~0x1) | (xfer->data[i].ccs); /* Only interrupt if IOC or short packet */ if (!(trb->dwTrb3 & XHCI_TRB_3_IOC_BIT) && !((err == XHCI_TRB_ERROR_SHORT_PKT) && (trb->dwTrb3 & XHCI_TRB_3_ISP_BIT))) { i = (i + 1) % USB_MAX_XFER_BLOCKS; continue; } evtrb.dwTrb2 = XHCI_TRB_2_ERROR_SET(err) | XHCI_TRB_2_REM_SET(xfer->data[i].blen); evtrb.dwTrb3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_EVENT_TRANSFER) | XHCI_TRB_3_SLOT_SET(slot) | XHCI_TRB_3_EP_SET(epid); if (XHCI_TRB_3_TYPE_GET(trbflags) == XHCI_TRB_TYPE_EVENT_DATA) { UPRINTF(LDBG, "EVENT_DATA edtla %u\r\n", edtla); evtrb.qwTrb0 = trb->qwTrb0; evtrb.dwTrb2 = (edtla & 0xFFFFF) | XHCI_TRB_2_ERROR_SET(err); evtrb.dwTrb3 |= XHCI_TRB_3_ED_BIT; edtla = 0; } *do_intr = 1; err = pci_xhci_insert_event(xdev, &evtrb, 0); if (err != XHCI_TRB_ERROR_SUCCESS) break; i = (i + 1) % USB_MAX_XFER_BLOCKS; } return err; } static void pci_xhci_update_ep_ring(struct pci_xhci_vdev *xdev, struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, uint32_t streamid, uint64_t ringaddr, int ccs) { if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { devep->ep_sctx[streamid].qwSctx0 = (ringaddr & ~0xFUL) | (ccs & 0x1); devep->ep_sctx_trbs[streamid].ringaddr = ringaddr & ~0xFUL; devep->ep_sctx_trbs[streamid].ccs = ccs & 0x1; ep_ctx->qwEpCtx2 = (ep_ctx->qwEpCtx2 & ~0x1) | (ccs & 0x1); UPRINTF(LDBG, "update ep-ring stream %d, addr %lx\r\n", streamid, devep->ep_sctx[streamid].qwSctx0); } else { devep->ep_ringaddr = ringaddr & ~0xFUL; devep->ep_ccs = ccs & 0x1; devep->ep_tr = XHCI_GADDR(xdev, ringaddr & ~0xFUL); ep_ctx->qwEpCtx2 = (ringaddr & ~0xFUL) | (ccs & 0x1); UPRINTF(LDBG, "update ep-ring, addr %lx\r\n", (devep->ep_ringaddr | devep->ep_ccs)); } } /* * Outstanding transfer still in progress (device NAK'd earlier) so retry * the transfer again to see if it succeeds. */ static int pci_xhci_try_usb_xfer(struct pci_xhci_vdev *xdev, struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, uint32_t slot, uint32_t epid) { struct usb_data_xfer *xfer; int err; int do_intr; ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); err = 0; do_intr = 0; xfer = devep->ep_xfer; USB_DATA_XFER_LOCK(xfer); /* outstanding requests queued up */ if (dev->dev_ue->ue_data != NULL) { err = dev->dev_ue->ue_data(dev->dev_instance, xfer, epid & 0x1 ? USB_XFER_IN : USB_XFER_OUT, epid/2); if (err == USB_ERR_CANCELLED) { if (USB_DATA_GET_ERRCODE(&xfer->data[xfer->head]) == USB_NAK) err = XHCI_TRB_ERROR_SUCCESS; } /* * Only for usb_mouse.c, emulation with port mapping will do it * by the libusb callback function. */ else if (dev->dev_ue->ue_devtype == USB_DEV_STATIC) { err = pci_xhci_xfer_complete(xdev, xfer, slot, epid, &do_intr); if (err == XHCI_TRB_ERROR_SUCCESS && do_intr) pci_xhci_assert_interrupt(xdev); /* XXX should not do it if error? 
*/ USB_DATA_XFER_RESET(xfer); } } USB_DATA_XFER_UNLOCK(xfer); return err; } static int pci_xhci_handle_transfer(struct pci_xhci_vdev *xdev, struct pci_xhci_dev_emu *dev, struct pci_xhci_dev_ep *devep, struct xhci_endp_ctx *ep_ctx, struct xhci_trb *trb, uint32_t slot, uint32_t epid, uint64_t addr, uint32_t ccs, uint32_t streamid) { struct xhci_trb *setup_trb; struct usb_data_xfer *xfer; struct usb_data_xfer_block *xfer_block; uint64_t val; uint32_t trbflags; int do_intr, err; int do_retry; ep_ctx->dwEpCtx0 = FIELD_REPLACE(ep_ctx->dwEpCtx0, XHCI_ST_EPCTX_RUNNING, 0x7, 0); xfer = devep->ep_xfer; USB_DATA_XFER_LOCK(xfer); UPRINTF(LDBG, "handle_transfer slot %u\r\n", slot); retry: err = 0; do_retry = 0; do_intr = 0; setup_trb = NULL; while (1) { pci_xhci_dump_trb(trb); trbflags = trb->dwTrb3; if (XHCI_TRB_3_TYPE_GET(trbflags) != XHCI_TRB_TYPE_LINK && (trbflags & XHCI_TRB_3_CYCLE_BIT) != (ccs & XHCI_TRB_3_CYCLE_BIT)) { UPRINTF(LDBG, "Cycle-bit changed trbflags %x," " ccs %x\r\n", trbflags & XHCI_TRB_3_CYCLE_BIT, ccs); break; } xfer_block = NULL; switch (XHCI_TRB_3_TYPE_GET(trbflags)) { case XHCI_TRB_TYPE_LINK: if (trb->dwTrb3 & XHCI_TRB_3_TC_BIT) ccs ^= 0x1; xfer_block = usb_data_xfer_append(xfer, NULL, 0, (void *)addr, ccs); if (!xfer_block) { err = XHCI_TRB_ERROR_STALL; goto errout; } xfer_block->processed = USB_XFER_BLK_FREE; break; case XHCI_TRB_TYPE_SETUP_STAGE: if ((trbflags & XHCI_TRB_3_IDT_BIT) == 0 || XHCI_TRB_2_BYTES_GET(trb->dwTrb2) != 8) { UPRINTF(LDBG, "invalid setup trb\r\n"); err = XHCI_TRB_ERROR_TRB; goto errout; } setup_trb = trb; val = trb->qwTrb0; if (!xfer->ureq) xfer->ureq = malloc( sizeof(struct usb_device_request)); if (!xfer->ureq) { err = XHCI_TRB_ERROR_STALL; goto errout; } memcpy(xfer->ureq, &val, sizeof(struct usb_device_request)); xfer_block = usb_data_xfer_append(xfer, NULL, 0, (void *)addr, ccs); if (!xfer_block) { free(xfer->ureq); xfer->ureq = NULL; err = XHCI_TRB_ERROR_STALL; goto errout; } xfer_block->processed = USB_XFER_BLK_HANDLED; break; case XHCI_TRB_TYPE_NORMAL: case XHCI_TRB_TYPE_ISOCH: if (setup_trb != NULL) { UPRINTF(LWRN, "trb not supposed to be in " "ctl scope\r\n"); err = XHCI_TRB_ERROR_TRB; goto errout; } /* fall through */ case XHCI_TRB_TYPE_DATA_STAGE: xfer_block = usb_data_xfer_append(xfer, (void *)(trbflags & XHCI_TRB_3_IDT_BIT ? &trb->qwTrb0 : XHCI_GADDR(xdev, trb->qwTrb0)), trb->dwTrb2 & 0x1FFFF, (void *)addr, ccs); break; case XHCI_TRB_TYPE_STATUS_STAGE: xfer_block = usb_data_xfer_append(xfer, NULL, 0, (void *)addr, ccs); break; case XHCI_TRB_TYPE_NOOP: xfer_block = usb_data_xfer_append(xfer, NULL, 0, (void *)addr, ccs); if (!xfer_block) { err = XHCI_TRB_ERROR_STALL; goto errout; } xfer_block->processed = USB_XFER_BLK_HANDLED; break; case XHCI_TRB_TYPE_EVENT_DATA: xfer_block = usb_data_xfer_append(xfer, NULL, 0, (void *)addr, ccs); if (!xfer_block) { err = XHCI_TRB_ERROR_TRB; goto errout; } if ((epid > 1) && (trbflags & XHCI_TRB_3_IOC_BIT)) xfer_block->processed = USB_XFER_BLK_HANDLED; break; default: UPRINTF(LWRN, "handle xfer unexpected trb type " "0x%x\r\n", XHCI_TRB_3_TYPE_GET(trbflags)); err = XHCI_TRB_ERROR_TRB; goto errout; } trb = pci_xhci_trb_next(xdev, trb, &addr); UPRINTF(LDBG, "next trb: 0x%lx\r\n", (uint64_t)trb); if (xfer_block) { xfer_block->trbnext = addr; xfer_block->streamid = streamid; /* FIXME: * should add some code to process the scenario in * which endpoint stop command is comming in the * middle of many data transfers. 
*/ pci_xhci_update_ep_ring(xdev, dev, devep, ep_ctx, xfer_block->streamid, xfer_block->trbnext, xfer_block->ccs); } /* handle current batch that requires interrupt on complete */ if (trbflags & XHCI_TRB_3_IOC_BIT) { UPRINTF(LDBG, "trb IOC bit set\r\n"); do_retry = 1; break; } } UPRINTF(LDBG, "[%d]: xfer->ndata %u\r\n", __LINE__, xfer->ndata); if (xfer->ndata <= 0) goto errout; if (epid == 1) { err = USB_ERR_NOT_STARTED; if (dev->dev_ue->ue_request != NULL) err = dev->dev_ue->ue_request(dev->dev_instance, xfer); setup_trb = NULL; } else { /* handle data transfer */ pci_xhci_try_usb_xfer(xdev, dev, devep, ep_ctx, slot, epid); err = XHCI_TRB_ERROR_SUCCESS; goto errout; } err = USB_TO_XHCI_ERR(err); if ((err == XHCI_TRB_ERROR_SUCCESS) || (err == XHCI_TRB_ERROR_SHORT_PKT)) { err = pci_xhci_xfer_complete(xdev, xfer, slot, epid, &do_intr); if (err != XHCI_TRB_ERROR_SUCCESS) do_retry = 0; } errout: if (err == XHCI_TRB_ERROR_EV_RING_FULL) UPRINTF(LDBG, "[%d]: event ring full\r\n", __LINE__); if (!do_retry) USB_DATA_XFER_UNLOCK(xfer); if (do_intr) pci_xhci_assert_interrupt(xdev); if (do_retry) { if (epid == 1) USB_DATA_XFER_RESET(xfer); UPRINTF(LDBG, "[%d]: retry:continuing with next TRBs\r\n", __LINE__); goto retry; } if (epid == 1) USB_DATA_XFER_RESET(xfer); return err; } static void pci_xhci_device_doorbell(struct pci_xhci_vdev *xdev, uint32_t slot, uint32_t epid, uint32_t streamid) { struct pci_xhci_dev_emu *dev; struct pci_xhci_dev_ep *devep; struct xhci_dev_ctx *dev_ctx; struct xhci_endp_ctx *ep_ctx; struct pci_xhci_trb_ring *sctx_tr; struct xhci_trb *trb; uint64_t ringaddr; uint32_t ccs; UPRINTF(LDBG, "doorbell slot %u epid %u stream %u\r\n", slot, epid, streamid); if (slot == 0 || slot > xdev->ndevices) { UPRINTF(LWRN, "invalid doorbell slot %u\r\n", slot); return; } dev = XHCI_SLOTDEV_PTR(xdev, slot); devep = &dev->eps[epid]; dev_ctx = pci_xhci_get_dev_ctx(xdev, slot); if (!dev_ctx) return; ep_ctx = &dev_ctx->ctx_ep[epid]; sctx_tr = NULL; UPRINTF(LDBG, "device doorbell ep[%u] %08x %08x %016lx %08x\r\n", epid, ep_ctx->dwEpCtx0, ep_ctx->dwEpCtx1, ep_ctx->qwEpCtx2, ep_ctx->dwEpCtx4); if (ep_ctx->qwEpCtx2 == 0) return; /* * In USB emulation with port mapping, the following transfer should * NOT be called, or else the interrupt transfer will result * of invalid and infinite loop. It is used by usb_mouse.c only. 
*/ /* handle pending transfers */ if (dev->dev_ue && dev->dev_ue->ue_devtype == USB_DEV_STATIC && devep->ep_xfer->ndata > 0) { pci_xhci_try_usb_xfer(xdev, dev, devep, ep_ctx, slot, epid); return; } /* get next trb work item */ if (XHCI_EPCTX_0_MAXP_STREAMS_GET(ep_ctx->dwEpCtx0) != 0) { sctx_tr = &devep->ep_sctx_trbs[streamid]; ringaddr = sctx_tr->ringaddr; ccs = sctx_tr->ccs; trb = XHCI_GADDR(xdev, sctx_tr->ringaddr & ~0xFUL); UPRINTF(LDBG, "doorbell, stream %u, ccs %lx, trb ccs %x\r\n", streamid, ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT); } else { ringaddr = devep->ep_ringaddr; ccs = devep->ep_ccs; trb = devep->ep_tr; UPRINTF(LDBG, "doorbell, ccs %lx, trb ccs %x\r\n", ep_ctx->qwEpCtx2 & XHCI_TRB_3_CYCLE_BIT, trb->dwTrb3 & XHCI_TRB_3_CYCLE_BIT); } if (XHCI_TRB_3_TYPE_GET(trb->dwTrb3) == 0) { UPRINTF(LDBG, "ring %lx trb[%lx] EP %u is RESERVED?\r\n", ep_ctx->qwEpCtx2, devep->ep_ringaddr, epid); return; } pci_xhci_handle_transfer(xdev, dev, devep, ep_ctx, trb, slot, epid, ringaddr, ccs, streamid); } static void pci_xhci_dbregs_write(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value) { offset = (offset - xdev->dboff) / sizeof(uint32_t); UPRINTF(LDBG, "doorbell write offset 0x%lx: 0x%lx\r\n", offset, value); if (XHCI_HALTED(xdev)) { UPRINTF(LWRN, "pci_xhci: controller halted\r\n"); return; } if (offset == 0) pci_xhci_complete_commands(xdev); else if (xdev->portregs != NULL) pci_xhci_device_doorbell(xdev, offset, XHCI_DB_TARGET_GET(value), XHCI_DB_SID_GET(value)); } static void pci_xhci_rtsregs_write(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value) { struct pci_xhci_rtsregs *rts; offset -= xdev->rtsoff; if (offset == 0) { UPRINTF(LWRN, "attempted write to MFINDEX\r\n"); return; } UPRINTF(LDBG, "runtime regs write offset 0x%lx: 0x%lx\r\n", offset, value); offset -= 0x20; /* start of intrreg */ rts = &xdev->rtsregs; switch (offset) { case 0x00: if (value & XHCI_IMAN_INTR_PEND) rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; rts->intrreg.iman = (value & XHCI_IMAN_INTR_ENA) | (rts->intrreg.iman & XHCI_IMAN_INTR_PEND); if (!(value & XHCI_IMAN_INTR_ENA)) pci_xhci_deassert_interrupt(xdev); break; case 0x04: rts->intrreg.imod = value; break; case 0x08: rts->intrreg.erstsz = value & 0xFFFF; break; case 0x10: /* ERSTBA low bits */ rts->intrreg.erstba = MASK_64_HI(xdev->rtsregs.intrreg.erstba) | (value & ~0x3F); break; case 0x14: /* ERSTBA high bits */ rts->intrreg.erstba = (value << 32) | MASK_64_LO(xdev->rtsregs.intrreg.erstba); rts->erstba_p = XHCI_GADDR(xdev, xdev->rtsregs.intrreg.erstba & ~0x3FUL); rts->erst_p = XHCI_GADDR(xdev, xdev->rtsregs.erstba_p->qwEvrsTablePtr & ~0x3FUL); UPRINTF(LDBG, "wr erstba erst (%p) ptr 0x%lx, sz %u\r\n", rts->erstba_p, rts->erstba_p->qwEvrsTablePtr, rts->erstba_p->dwEvrsTableSize); break; case 0x18: /* ERDP low bits */ rts->intrreg.erdp = MASK_64_HI(xdev->rtsregs.intrreg.erdp) | (rts->intrreg.erdp & XHCI_ERDP_LO_BUSY) | (value & ~0xF); if (value & XHCI_ERDP_LO_BUSY) { rts->intrreg.erdp &= ~XHCI_ERDP_LO_BUSY; rts->intrreg.iman &= ~XHCI_IMAN_INTR_PEND; } rts->er_deq_seg = XHCI_ERDP_LO_SINDEX(value); break; case 0x1C: /* ERDP high bits */ rts->intrreg.erdp = (value << 32) | MASK_64_LO(xdev->rtsregs.intrreg.erdp); if (rts->er_events_cnt > 0) { uint64_t erdp; uint32_t erdp_i; erdp = rts->intrreg.erdp & ~0xF; erdp_i = (erdp - rts->erstba_p->qwEvrsTablePtr) / sizeof(struct xhci_trb); if (erdp_i <= rts->er_enq_idx) rts->er_events_cnt = rts->er_enq_idx - erdp_i; else rts->er_events_cnt = rts->erstba_p->dwEvrsTableSize - (erdp_i 
- rts->er_enq_idx); UPRINTF(LDBG, "erdp 0x%lx, events cnt %u\r\n", erdp, rts->er_events_cnt); } break; default: UPRINTF(LWRN, "attempted write to RTS offset 0x%lx\r\n", offset); break; } } static uint64_t pci_xhci_portregs_read(struct pci_xhci_vdev *xdev, uint64_t offset) { int port; uint32_t *p; if (xdev->portregs == NULL) return 0; port = (offset - 0x3F0) / 0x10; if (port > XHCI_MAX_DEVS) { UPRINTF(LWRN, "portregs_read port %d >= XHCI_MAX_DEVS\r\n", port); /* return default value for unused port */ return XHCI_PS_SPEED_SET(3); } offset = (offset - 0x3F0) % 0x10; p = &xdev->portregs[port].portsc; p += offset / sizeof(uint32_t); UPRINTF(LDBG, "portregs read offset 0x%lx port %u -> 0x%x\r\n", offset, port, *p); return *p; } static void pci_xhci_hostop_write(struct pci_xhci_vdev *xdev, uint64_t offset, uint64_t value) { offset -= XHCI_CAPLEN; if (offset < 0x400) UPRINTF(LDBG, "hostop write offset 0x%lx: 0x%lx\r\n", offset, value); switch (offset) { case XHCI_USBCMD: xdev->opregs.usbcmd = pci_xhci_usbcmd_write(xdev, value & 0x3F0F); break; case XHCI_USBSTS: /* clear bits on write */ xdev->opregs.usbsts &= ~(value & (XHCI_STS_HSE|XHCI_STS_EINT|XHCI_STS_PCD|XHCI_STS_SSS| XHCI_STS_RSS|XHCI_STS_SRE|XHCI_STS_CNR)); break; case XHCI_PAGESIZE: /* read only */ break; case XHCI_DNCTRL: xdev->opregs.dnctrl = value & 0xFFFF; break; case XHCI_CRCR_LO: if (xdev->opregs.crcr & XHCI_CRCR_LO_CRR) { xdev->opregs.crcr &= ~(XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); xdev->opregs.crcr |= value & (XHCI_CRCR_LO_CS|XHCI_CRCR_LO_CA); } else { xdev->opregs.crcr = MASK_64_HI(xdev->opregs.crcr) | (value & (0xFFFFFFC0 | XHCI_CRCR_LO_RCS)); } break; case XHCI_CRCR_HI: if (!(xdev->opregs.crcr & XHCI_CRCR_LO_CRR)) { xdev->opregs.crcr = MASK_64_LO(xdev->opregs.crcr) | (value << 32); xdev->opregs.cr_p = XHCI_GADDR(xdev, xdev->opregs.crcr & ~0xF); } /* if (xdev->opregs.crcr & XHCI_CRCR_LO_CS) */ /* TODO: Stop operation of Command Ring */ /* if (xdev->opregs.crcr & XHCI_CRCR_LO_CA) */ /* TODO: Abort command */ break; case XHCI_DCBAAP_LO: xdev->opregs.dcbaap = MASK_64_HI(xdev->opregs.dcbaap) | (value & 0xFFFFFFC0); break; case XHCI_DCBAAP_HI: xdev->opregs.dcbaap = MASK_64_LO(xdev->opregs.dcbaap) | (value << 32); xdev->opregs.dcbaa_p = XHCI_GADDR(xdev, xdev->opregs.dcbaap & ~0x3FUL); UPRINTF(LDBG, "opregs dcbaap = 0x%lx (vaddr 0x%lx)\r\n", xdev->opregs.dcbaap, (uint64_t)xdev->opregs.dcbaa_p); break; case XHCI_CONFIG: xdev->opregs.config = value & 0x03FF; break; default: if (offset >= 0x400) pci_xhci_portregs_write(xdev, offset, value); break; } } static void pci_xhci_write(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx, uint64_t offset, int size, uint64_t value) { struct pci_xhci_vdev *xdev; xdev = dev->arg; assert(baridx == 0); pthread_mutex_lock(&xdev->mtx); if (offset < XHCI_CAPLEN) /* read only registers */ UPRINTF(LWRN, "write RO-CAPs offset %ld\r\n", offset); else if (offset < xdev->dboff) pci_xhci_hostop_write(xdev, offset, value); else if (offset < xdev->rtsoff) pci_xhci_dbregs_write(xdev, offset, value); else if (offset < xdev->excapoff) pci_xhci_rtsregs_write(xdev, offset, value); else if (offset < xdev->regsend) pci_xhci_excap_write(xdev, offset, value); else UPRINTF(LWRN, "write invalid offset %ld\r\n", offset); pthread_mutex_unlock(&xdev->mtx); } static uint64_t pci_xhci_hostcap_read(struct pci_xhci_vdev *xdev, uint64_t offset) { uint64_t value; switch (offset) { case XHCI_CAPLENGTH: /* 0x00 */ value = xdev->caplength; break; case XHCI_HCSPARAMS1: /* 0x04 */ value = xdev->hcsparams1; break; case 
XHCI_HCSPARAMS2: /* 0x08 */ value = xdev->hcsparams2; break; case XHCI_HCSPARAMS3: /* 0x0C */ value = xdev->hcsparams3; break; case XHCI_HCSPARAMS0: /* 0x10 */ value = xdev->hccparams1; break; case XHCI_DBOFF: /* 0x14 */ value = xdev->dboff; break; case XHCI_RTSOFF: /* 0x18 */ value = xdev->rtsoff; break; case XHCI_HCCPRAMS2: /* 0x1C */ value = xdev->hccparams2; break; default: value = 0; break; } UPRINTF(LDBG, "hostcap read offset 0x%lx -> 0x%lx\r\n", offset, value); return value; } static uint64_t pci_xhci_hostop_read(struct pci_xhci_vdev *xdev, uint64_t offset) { uint64_t value; offset = (offset - XHCI_CAPLEN); switch (offset) { case XHCI_USBCMD: /* 0x00 */ value = xdev->opregs.usbcmd; break; case XHCI_USBSTS: /* 0x04 */ value = xdev->opregs.usbsts; break; case XHCI_PAGESIZE: /* 0x08 */ value = xdev->opregs.pgsz; break; case XHCI_DNCTRL: /* 0x14 */ value = xdev->opregs.dnctrl; break; case XHCI_CRCR_LO: /* 0x18 */ value = xdev->opregs.crcr & XHCI_CRCR_LO_CRR; break; case XHCI_CRCR_HI: /* 0x1C */ value = 0; break; case XHCI_DCBAAP_LO: /* 0x30 */ value = xdev->opregs.dcbaap & 0xFFFFFFFF; break; case XHCI_DCBAAP_HI: /* 0x34 */ value = (xdev->opregs.dcbaap >> 32) & 0xFFFFFFFF; break; case XHCI_CONFIG: /* 0x38 */ value = xdev->opregs.config; break; default: if (offset >= 0x400) value = pci_xhci_portregs_read(xdev, offset); else value = 0; break; } if (offset < 0x400) UPRINTF(LDBG, "hostop read offset 0x%lx -> 0x%lx\r\n", offset, value); return value; } static uint64_t pci_xhci_dbregs_read(struct pci_xhci_vdev *xdev, uint64_t offset) { /* read doorbell always returns 0 */ return 0; } static uint64_t pci_xhci_rtsregs_read(struct pci_xhci_vdev *xdev, uint64_t offset) { uint32_t value; struct timespec t; uint64_t time_diff; offset -= xdev->rtsoff; value = 0; if (offset == XHCI_MFINDEX) { clock_gettime(CLOCK_MONOTONIC, &t); time_diff = (t.tv_sec - xdev->mf_prev_time.tv_sec) * 1000000 + (t.tv_nsec - xdev->mf_prev_time.tv_nsec) / 1000; xdev->mf_prev_time = t; value = time_diff / 125; if (value >= 1) xdev->rtsregs.mfindex += value; } else if (offset >= 0x20) { int item; uint32_t *p; offset -= 0x20; item = offset % 32; assert(offset < sizeof(xdev->rtsregs.intrreg)); p = &xdev->rtsregs.intrreg.iman; p += item / sizeof(uint32_t); value = *p; } UPRINTF(LDBG, "rtsregs read offset 0x%lx -> 0x%x\r\n", offset, value); return value; } static uint64_t pci_xhci_excap_read(struct pci_xhci_vdev *xdev, uint64_t offset) { uint32_t value = 0; uint32_t off = offset; struct pci_xhci_excap *excap; assert(xdev); excap = xdev->excap_ptr; while (excap && excap->start != EXCAP_GROUP_END) { if (off >= excap->start && off < excap->end) break; excap++; } if (!excap || excap->start == EXCAP_GROUP_END) { UPRINTF(LWRN, "extended capability 0x%lx can't be found\r\n", offset); return value; } if (excap->start != EXCAP_GROUP_END) { off -= excap->start; memcpy(&value, (uint32_t *)excap->data + off / 4, sizeof(uint32_t)); } return value; } static uint64_t pci_xhci_read(struct vmctx *ctx, int vcpu, struct pci_vdev *dev, int baridx, uint64_t offset, int size) { struct pci_xhci_vdev *xdev; uint32_t value; xdev = dev->arg; assert(baridx == 0); pthread_mutex_lock(&xdev->mtx); if (offset < XHCI_CAPLEN) value = pci_xhci_hostcap_read(xdev, offset); else if (offset < xdev->dboff) value = pci_xhci_hostop_read(xdev, offset); else if (offset < xdev->rtsoff) value = pci_xhci_dbregs_read(xdev, offset); else if (offset < xdev->excapoff) value = pci_xhci_rtsregs_read(xdev, offset); else if (offset < xdev->regsend) value = pci_xhci_excap_read(xdev, 
offset); else { value = 0; UPRINTF(LDBG, "read invalid offset %ld\r\n", offset); } pthread_mutex_unlock(&xdev->mtx); switch (size) { case 1: value &= 0xFF; break; case 2: value &= 0xFFFF; break; case 4: value &= 0xFFFFFFFF; break; } return value; } static void pci_xhci_reset_port(struct pci_xhci_vdev *xdev, int portn, int warm) { struct pci_xhci_portregs *port; struct xhci_trb evtrb; struct usb_native_devinfo *di; int speed, error; assert(portn <= XHCI_MAX_DEVS); UPRINTF(LDBG, "reset port %d\r\n", portn); port = XHCI_PORTREG_PTR(xdev, portn); di = pci_xhci_find_native_devinfo_by_vport(xdev, portn); if (!di) { UPRINTF(LWRN, "fail to reset port %d\r\n", portn); return; } speed = pci_xhci_convert_speed(di->speed); port->portsc &= ~(XHCI_PS_PLS_MASK | XHCI_PS_PR | XHCI_PS_PRC); port->portsc |= XHCI_PS_PED | XHCI_PS_SPEED_SET(speed); if (warm && di->bcd >= 0x300) port->portsc |= XHCI_PS_WRC; if ((port->portsc & XHCI_PS_PRC) == 0) { port->portsc |= XHCI_PS_PRC; pci_xhci_set_evtrb(&evtrb, portn, XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); error = pci_xhci_insert_event(xdev, &evtrb, 1); if (error != XHCI_TRB_ERROR_SUCCESS) UPRINTF(LWRN, "reset port insert event " "failed\n"); } } static void pci_xhci_init_port(struct pci_xhci_vdev *xdev, int portn) { XHCI_PORTREG_PTR(xdev, portn)->portsc = XHCI_PS_PLS_SET(UPS_PORT_LS_RX_DET) | XHCI_PS_PP; } static int pci_xhci_dev_intr(struct usb_hci *hci, int epctx) { struct pci_xhci_dev_emu *dev; struct xhci_dev_ctx *dev_ctx; struct xhci_trb evtrb; struct pci_xhci_vdev *xdev; struct pci_xhci_portregs *p; struct xhci_endp_ctx *ep_ctx; int error = 0; int dir_in; int epid; dir_in = epctx & 0x80; epid = epctx & ~0x80; /* HW endpoint contexts are 0-15; convert to epid based on dir */ epid = (epid * 2) + (dir_in ? 
1 : 0); assert(epid >= 1 && epid <= 31); dev = hci->dev; xdev = dev->xdev; /* check if device is ready; OS has to initialise it */ if (xdev->rtsregs.erstba_p == NULL || (xdev->opregs.usbcmd & XHCI_CMD_RS) == 0 || dev->dev_ctx == NULL) return 0; p = XHCI_PORTREG_PTR(xdev, hci->hci_port); /* raise event if link U3 (suspended) state */ if (XHCI_PS_PLS_GET(p->portsc) == 3) { p->portsc &= ~XHCI_PS_PLS_MASK; p->portsc |= XHCI_PS_PLS_SET(UPS_PORT_LS_RESUME); if ((p->portsc & XHCI_PS_PLC) != 0) return 0; p->portsc |= XHCI_PS_PLC; pci_xhci_set_evtrb(&evtrb, hci->hci_port, XHCI_TRB_ERROR_SUCCESS, XHCI_TRB_EVENT_PORT_STS_CHANGE); error = pci_xhci_insert_event(xdev, &evtrb, 0); if (error != XHCI_TRB_ERROR_SUCCESS) goto done; } dev_ctx = dev->dev_ctx; ep_ctx = &dev_ctx->ctx_ep[epid]; if ((ep_ctx->dwEpCtx0 & 0x7) == XHCI_ST_EPCTX_DISABLED) { UPRINTF(LWRN, "device interrupt on disabled endpoint %d\r\n", epid); return 0; } UPRINTF(LDBG, "device interrupt on endpoint %d\r\n", epid); pci_xhci_device_doorbell(xdev, hci->hci_port, epid, 0); done: return error; } static int pci_xhci_dev_event(struct usb_hci *hci, enum hci_usbev evid, void *param) { UPRINTF(LDBG, "xhci device event port %d\r\n", hci->hci_port); return 0; } static void pci_xhci_device_usage(char *opt) { static const char *usage_str = "usage:\r\n" " -s <n>,xhci,[bus1-port1,bus2-port2]:[tablet]:[log=x]:[cap=x]\r\n" " eg: -s 8,xhci,1-2,2-2\r\n" " eg: -s 7,xhci,tablet:log=D\r\n" " eg: -s 7,xhci,1-2,2-2:tablet\r\n" " eg: -s 7,xhci,1-2,2-2:tablet:log=D:cap=apl\r\n" " Note: please follow the board hardware design, assign the " " ports according to the receptacle connection\r\n"; UPRINTF(LFTL, "error: invalid options: \"%s\"\r\n", opt); UPRINTF(LFTL, "%s", usage_str); } static int pci_xhci_parse_log_level(struct pci_xhci_vdev *xdev, char *opts) { char level; char *s, *o; int rc = 0; assert(opts); o = s = strdup(opts); if (!(s && s[0] == 'l' && s[1] == 'o' && s[2] == 'g')) { rc = -1; goto errout; } s = strchr(opts, '='); if (!s) { rc = -2; goto errout; } level = *(s+1); usb_parse_log_level(level); errout: if (rc) printf("USB: fail to set log level, rc=%d\r\n", rc); free(o); return rc; } static int pci_xhci_parse_bus_port(struct pci_xhci_vdev *xdev, char *opts) { int rc = 0, cnt; uint32_t port, bus; assert(xdev); assert(opts); /* 'bus-port' format */ cnt = sscanf(opts, "%u-%u", &bus, &port); if (cnt == EOF || cnt < 2 || bus >= USB_NATIVE_NUM_BUS || port >= USB_NATIVE_NUM_PORT) { rc = -1; goto errout; } if (!usb_native_is_bus_existed(bus) || !usb_native_is_port_existed(bus, port)) { rc = -2; goto errout; } xdev->port_map_tbl[bus][port] = VPORT_NUM_STATE(VPORT_ASSIGNED, 0); return 0; errout: if (rc) UPRINTF(LWRN, "%s fails, rc=%d\r\n", __func__, rc); return rc; } static int pci_xhci_parse_tablet(struct pci_xhci_vdev *xdev, char *opts) { char *cfg, *str; void *devins; struct usb_devemu *ue; struct pci_xhci_dev_emu *dev = NULL; uint8_t port_u2, port_u3; int rc = 0; assert(xdev); assert(opts); if (strncmp(opts, "tablet", sizeof("tablet") - 1)) { rc = -1; goto errout; } str = opts; cfg = strchr(str, '='); cfg = cfg ? 
cfg + 1 : ""; ue = usb_emu_finddev(opts); if (ue == NULL) { rc = -2; goto errout; } dev = calloc(1, sizeof(struct pci_xhci_dev_emu)); if (!dev) { rc = -3; goto errout; } dev->xdev = xdev; dev->hci.dev = dev; dev->hci.hci_intr = pci_xhci_dev_intr; dev->hci.hci_event = pci_xhci_dev_event; /* * This is a safe operation because there is no other * device created and port_u2/port_u3 definitely points * to an empty position in xdev->devices */ port_u2 = xdev->usb3_port_start - 1; port_u3 = xdev->usb2_port_start - 1; if (ue->ue_usbver == 2) { dev->hci.hci_port = port_u2 + 1; xdev->devices[port_u2] = dev; } else { dev->hci.hci_port = port_u3 + 1; xdev->devices[port_u3] = dev; } dev->hci.hci_address = 0; devins = ue->ue_init(&dev->hci, cfg); if (devins == NULL) { rc = -4; goto errout; } dev->dev_ue = ue; dev->dev_instance = devins; /* assign slot number to device */ xdev->ndevices++; xdev->slots[xdev->ndevices] = dev; return 0; errout: if (dev) { if (ue) { if (dev == xdev->devices[port_u2]) xdev->devices[port_u2] = NULL; if (dev == xdev->devices[port_u3]) xdev->devices[port_u3] = NULL; } free(dev); } UPRINTF(LFTL, "fail to parse tablet, rc=%d\r\n", rc); return rc; } static int pci_xhci_parse_extcap(struct pci_xhci_vdev *xdev, char *opts) { char *cap; char *s, *o; int rc = 0; assert(opts); cap = o = s = strdup(opts); s = strchr(opts, '='); if (!s) { rc = -1; goto errout; } cap = s + 1; if (!strncmp(cap, "apl", 3)) { xdev->excap_write = pci_xhci_apl_drdregs_write; xdev->excap_ptr = excap_group_apl; xdev->vid = XHCI_PCI_VENDOR_ID_INTEL; xdev->pid = XHCI_PCI_DEVICE_ID_INTEL_APL; } else rc = -2; if (((struct pci_xhci_excap *)(xdev->excap_ptr))->start == EXCAP_GROUP_END) { xdev->excap_write = NULL; xdev->excap_ptr = excap_group_dft; xdev->vid = XHCI_PCI_VENDOR_ID_DFLT; xdev->pid = XHCI_PCI_DEVICE_ID_DFLT; UPRINTF(LWRN, "Invalid xhci excap, force set " "default excap\r\n"); } errout: if (rc) printf("USB: fail to set vendor capability, rc=%d\r\n", rc); free(o); return rc; } static int pci_xhci_parse_opts(struct pci_xhci_vdev *xdev, char *opts) { char *s, *t, *n; int i, rc = 0; struct pci_xhci_option_elem *elem; int (*f)(struct pci_xhci_vdev *, char *); int elem_cnt; assert(xdev); if (!opts) { rc = -1; goto errout; } /* allocate neccessary resources during parsing*/ xdev->devices = calloc(XHCI_MAX_DEVS + 1, sizeof(*xdev->devices)); xdev->slots = calloc(XHCI_MAX_SLOTS, sizeof(*xdev->slots)); xdev->portregs = calloc(XHCI_MAX_DEVS + 1, sizeof(*xdev->portregs)); if (!xdev->devices || !xdev->slots || !xdev->portregs) { rc = -2; goto errout; } s = strdup(opts); UPRINTF(LDBG, "options: %s\r\n", s); elem = xhci_option_table; elem_cnt = sizeof(xhci_option_table) / sizeof(*elem); for (t = strtok(s, ",:"); t; t = strtok(NULL, ",:")) { if (isdigit(t[0])) { /* bus-port */ if (pci_xhci_parse_bus_port(xdev, t)) { rc = -3; goto errout; } } else { for (i = 0; i < elem_cnt; i++) { n = elem[i].parse_opt; f = elem[i].parse_fn; if (!n || !f) continue; if (!strncmp(t, n, strlen(n))) { f(xdev, t); break; } } if (i >= elem_cnt) { rc = -4; goto errout; } } } /* do not use the zero index element */ for (i = 1; i <= XHCI_MAX_DEVS; i++) pci_xhci_init_port(xdev, i); errout: if (rc) { if (xdev->devices) { for (i = 1; i <= XHCI_MAX_DEVS && xdev->devices[i]; i++) free(xdev->devices[i]); xdev->ndevices = 0; xdev->devices = NULL; free(xdev->devices); } if (xdev->slots) { free(xdev->slots); xdev->slots = NULL; } if (xdev->portregs) { free(xdev->portregs); xdev->portregs = NULL; } UPRINTF(LFTL, "fail to parse xHCI options, rc=%d\r\n", rc); if 
(opts) pci_xhci_device_usage(opts); return rc; } free(s); return xdev->ndevices; } static int pci_xhci_init(struct vmctx *ctx, struct pci_vdev *dev, char *opts) { struct pci_xhci_vdev *xdev; struct pci_xhci_excap *excap; int error; if (xhci_in_use) { UPRINTF(LWRN, "controller already defined\r\n"); return -1; } xdev = calloc(1, sizeof(struct pci_xhci_vdev)); if (!xdev) { UPRINTF(LWRN, "%s:%d fail to allocate memory\n", __func__, __LINE__); return -1; } dev->arg = xdev; xdev->dev = dev; xdev->usb2_port_start = (XHCI_MAX_DEVS/2) + 1; xdev->usb3_port_start = 1; xdev->excap_ptr = excap_group_dft; xdev->vid = XHCI_PCI_DEVICE_ID_DFLT; xdev->pid = XHCI_PCI_VENDOR_ID_DFLT; xdev->rtsregs.mfindex = 0; clock_gettime(CLOCK_MONOTONIC, &xdev->mf_prev_time); /* discover devices */ error = pci_xhci_parse_opts(xdev, opts); if (error < 0) goto done; else error = 0; if (usb_dev_sys_init(pci_xhci_native_usb_dev_conn_cb, pci_xhci_native_usb_dev_disconn_cb, pci_xhci_usb_dev_notify_cb, pci_xhci_usb_dev_intr_cb, xdev, usb_get_log_level()) < 0) { error = -3; goto done; } xdev->caplength = XHCI_SET_CAPLEN(XHCI_CAPLEN) | XHCI_SET_HCIVERSION(0x0100); xdev->hcsparams1 = XHCI_SET_HCSP1_MAXPORTS(XHCI_MAX_DEVS) | XHCI_SET_HCSP1_MAXINTR(1) | /* interrupters */ XHCI_SET_HCSP1_MAXSLOTS(XHCI_MAX_SLOTS); xdev->hcsparams2 = XHCI_SET_HCSP2_ERSTMAX(XHCI_ERST_MAX) | XHCI_SET_HCSP2_IST(0x04); xdev->hcsparams3 = 0; /* no latency */ xdev->hccparams1 = XHCI_SET_HCCP1_NSS(1) | /* no 2nd-streams */ XHCI_SET_HCCP1_SPC(1) | /* short packet */ XHCI_SET_HCCP1_MAXPSA(XHCI_STREAMS_MAX); xdev->hccparams2 = XHCI_SET_HCCP2_LEC(1) | XHCI_SET_HCCP2_U3C(1); xdev->dboff = XHCI_SET_DOORBELL(XHCI_CAPLEN + XHCI_PORTREGS_START + XHCI_MAX_DEVS * sizeof(struct pci_xhci_portregs)); /* dboff must be 32-bit aligned */ if (xdev->dboff & 0x3) xdev->dboff = (xdev->dboff + 0x3) & ~0x3; /* rtsoff must be 32-bytes aligned */ xdev->rtsoff = XHCI_SET_RTSOFFSET(xdev->dboff + (XHCI_MAX_SLOTS+1) * 32); if (xdev->rtsoff & 0x1F) xdev->rtsoff = (xdev->rtsoff + 0x1F) & ~0x1F; UPRINTF(LDBG, "dboff: 0x%x, rtsoff: 0x%x\r\n", xdev->dboff, xdev->rtsoff); xdev->opregs.usbsts = XHCI_STS_HCH; xdev->opregs.pgsz = XHCI_PAGESIZE_4K; pci_xhci_reset(xdev); /* xdev->excap_ptr should be assigned to global array in which * it need include two items at least and field start must be * ended by EXCAP_GROUP_END at last item. */ excap = xdev->excap_ptr; xdev->excapoff = excap->start; if (!excap) { UPRINTF(LWRN, "Failed to set xHCI extended capability\r\n"); return -1; } do { xdev->regsend = excap->end; excap++; } while (excap && excap->start != EXCAP_GROUP_END); /* * Set extended capabilities pointer to be after regsend; * value of excap field is 32-bit offset. 
*/ xdev->hccparams1 |= XHCI_SET_HCCP1_XECP(XHCI_EXCAP_PTR); pci_set_cfgdata16(dev, PCIR_DEVICE, xdev->pid); pci_set_cfgdata16(dev, PCIR_VENDOR, xdev->vid); pci_set_cfgdata8(dev, PCIR_CLASS, PCIC_SERIALBUS); pci_set_cfgdata8(dev, PCIR_SUBCLASS, PCIS_SERIALBUS_USB); pci_set_cfgdata8(dev, PCIR_PROGIF, PCIP_SERIALBUS_USB_XHCI); pci_set_cfgdata8(dev, PCI_USBREV, PCI_USB_REV_3_0); pci_emul_add_msicap(dev, 1); /* regsend registers */ pci_emul_alloc_bar(dev, 0, PCIBAR_MEM32, xdev->regsend); UPRINTF(LDBG, "pci_emu_alloc: %d\r\n", xdev->regsend); pci_lintr_request(dev); pthread_mutex_init(&xdev->mtx, NULL); xhci_in_use = 1; done: if (error) { UPRINTF(LFTL, "%s fail, error=%d\n", __func__, error); free(xdev); } return error; } static void pci_xhci_deinit(struct vmctx *ctx, struct pci_vdev *dev, char *opts) { int i; struct pci_xhci_vdev *xdev; struct pci_xhci_dev_emu *de; assert(dev); xdev = dev->arg; UPRINTF(LINF, "de-initialization\r\n"); assert(xdev); assert(xdev->devices); for (i = 1; i <= XHCI_MAX_DEVS; ++i) { de = xdev->devices[i]; if (de) { xdev->devices[i] = NULL; pci_xhci_dev_destroy(de); xdev->ndevices--; } } free(xdev->devices); free(xdev->slots); free(xdev->portregs); usb_dev_sys_deinit(); pthread_mutex_destroy(&xdev->mtx); free(xdev); xhci_in_use = 0; } struct pci_vdev_ops pci_ops_xhci = { .class_name = "xhci", .vdev_init = pci_xhci_init, .vdev_deinit = pci_xhci_deinit, .vdev_barwrite = pci_xhci_write, .vdev_barread = pci_xhci_read }; DEFINE_PCI_DEVTYPE(pci_ops_xhci);
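/*
 * Illustrative sketch added by the editor -- not part of the original
 * xhci.c above.  Both pci_xhci_complete_commands() and
 * pci_xhci_handle_transfer() consume TRBs only while the TRB cycle bit
 * matches the consumer cycle state (ccs), and flip ccs whenever a LINK
 * TRB with the toggle-cycle flag wraps the ring.  The self-contained
 * program below demonstrates that ownership test in isolation; the
 * struct layout and the TRB_* constants here are simplified stand-ins,
 * not the real xHCI definitions used by the code above.
 */
#include <stdint.h>
#include <stdio.h>

#define TRB_CYCLE_BIT	(1u << 0)	/* producer/consumer ownership bit */
#define TRB_TC_BIT	(1u << 1)	/* toggle-cycle flag on a link TRB */
#define TRB_TYPE_LINK	6u

struct demo_trb {
	uint32_t type;	/* TRB type (simplified) */
	uint32_t flags;	/* cycle / toggle-cycle bits */
};

/* Consume TRBs starting at 'idx' while they are owned by the consumer. */
static int consume_ring(struct demo_trb *ring, int size, int idx, uint32_t ccs)
{
	int handled = 0;

	while ((ring[idx].flags & TRB_CYCLE_BIT) == (ccs & TRB_CYCLE_BIT)) {
		if (ring[idx].type == TRB_TYPE_LINK) {
			if (ring[idx].flags & TRB_TC_BIT)
				ccs ^= TRB_CYCLE_BIT;	/* wrap: flip cycle state */
			idx = 0;			/* link back to ring start */
			continue;
		}
		printf("consumed TRB %d (type %u)\n", idx, (unsigned)ring[idx].type);
		handled++;
		idx = (idx + 1) % size;
	}
	return handled;
}

int main(void)
{
	struct demo_trb ring[4] = {
		{ 1, TRB_CYCLE_BIT },				/* owned by consumer */
		{ 1, TRB_CYCLE_BIT },				/* owned by consumer */
		{ TRB_TYPE_LINK, TRB_CYCLE_BIT | TRB_TC_BIT },	/* wrap + toggle */
		{ 1, 0 },					/* never reached here */
	};

	/*
	 * The link TRB toggles ccs to 0, so ring[0] (cycle bit still 1) no
	 * longer matches and consumption stops after two normal TRBs.
	 */
	printf("handled %d TRBs\n", consume_ring(ring, 4, 0, 1));
	return 0;
}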
385276.c
file1.c
938643.c
/* MDI.C * * Copyright 1994, Bob Amstadt * 1995,1996 Alex Korobka * * This file contains routines to support MDI (Multiple Document * Interface) features . * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA * * Notes: Fairly complete implementation. * Also, Excel and WinWord do _not_ use MDI so if you're trying * to fix them look elsewhere. * * Notes on how the "More Windows..." is implemented: * * When we have more than 9 opened windows, a "More Windows..." * option appears in the "Windows" menu. Each child window has * a WND* associated with it, accessible via the children list of * the parent window. This WND* has a wIDmenu member, which reflects * the position of the child in the window list. For example, with * 9 child windows, we could have the following pattern: * * * * Name of the child window pWndChild->wIDmenu * Doc1 5000 * Doc2 5001 * Doc3 5002 * Doc4 5003 * Doc5 5004 * Doc6 5005 * Doc7 5006 * Doc8 5007 * Doc9 5008 * * * The "Windows" menu, as the "More windows..." dialog, are constructed * in this order. If we add a child, we would have the following list: * * * Name of the child window pWndChild->wIDmenu * Doc1 5000 * Doc2 5001 * Doc3 5002 * Doc4 5003 * Doc5 5004 * Doc6 5005 * Doc7 5006 * Doc8 5007 * Doc9 5008 * Doc10 5009 * * But only 5000 to 5008 would be displayed in the "Windows" menu. We want * the last created child to be in the menu, so we swap the last child with * the 9th... Doc9 will be accessible via the "More Windows..." option. * * Doc1 5000 * Doc2 5001 * Doc3 5002 * Doc4 5003 * Doc5 5004 * Doc6 5005 * Doc7 5006 * Doc8 5007 * Doc9 5009 * Doc10 5008 * */ #include <stdlib.h> #include <stdarg.h> #include <stdio.h> #include <string.h> #include <math.h> #define OEMRESOURCE #include "windef.h" #include "winbase.h" #include "wingdi.h" #include "winuser.h" #include "wownt32.h" #include "wine/winuser16.h" #include "wine/unicode.h" #include "win.h" #include "controls.h" #include "user_private.h" #include "wine/debug.h" WINE_DEFAULT_DEBUG_CHANNEL(mdi); #define MDI_MAXTITLELENGTH 0xa1 #define WM_MDICALCCHILDSCROLL 0x10ac /* this is exactly what Windows uses */ /* "More Windows..." definitions */ #define MDI_MOREWINDOWSLIMIT 9 /* after this number of windows, a "More Windows..." option will appear under the Windows menu */ #define MDI_IDC_LISTBOX 100 #define IDS_MDI_MOREWINDOWS 13 #define MDIF_NEEDUPDATE 0x0001 typedef struct { /* At some points, particularly when switching MDI children, active and * maximized MDI children may be not the same window, so we need to track * them separately. * The only place where we switch to/from maximized state is DefMDIChildProc * WM_SIZE/SIZE_MAXIMIZED handler. 
We get that notification only after the * ShowWindow(SW_SHOWMAXIMIZED) request, therefore window is guaranteed to * be visible at the time we get the notification, and it's safe to assume * that hwndChildMaximized is always visible. * If the app plays games with WS_VISIBLE, WS_MAXIMIZE or any other window * states it must keep coherency with USER32 on its own. This is true for * Windows as well. */ UINT nActiveChildren; HWND hwndChildMaximized; HWND hwndActiveChild; HWND *child; /* array of tracked children */ HMENU hFrameMenu; HMENU hWindowMenu; UINT idFirstChild; LPWSTR frameTitle; UINT nTotalCreated; UINT mdiFlags; UINT sbRecalc; /* SB_xxx flags for scrollbar fixup */ } MDICLIENTINFO; static HBITMAP hBmpClose = 0; /* ----------------- declarations ----------------- */ static void MDI_UpdateFrameText( HWND, HWND, BOOL, LPCWSTR); static BOOL MDI_AugmentFrameMenu( HWND, HWND ); static BOOL MDI_RestoreFrameMenu( HWND, HWND ); static LONG MDI_ChildActivate( HWND, HWND ); static LRESULT MDI_RefreshMenu(MDICLIENTINFO *); static HWND MDI_MoreWindowsDialog(HWND); static LRESULT WINAPI MDIClientWndProcA( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam ); static LRESULT WINAPI MDIClientWndProcW( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam ); /* -------- Miscellaneous service functions ---------- * * MDI_GetChildByID */ static HWND MDI_GetChildByID(HWND hwnd, UINT id, MDICLIENTINFO *ci) { int i; for (i = 0; ci->nActiveChildren; i++) { if (GetWindowLongPtrW( ci->child[i], GWLP_ID ) == id) return ci->child[i]; } return 0; } static void MDI_PostUpdate(HWND hwnd, MDICLIENTINFO* ci, WORD recalc) { if( !(ci->mdiFlags & MDIF_NEEDUPDATE) ) { ci->mdiFlags |= MDIF_NEEDUPDATE; PostMessageA( hwnd, WM_MDICALCCHILDSCROLL, 0, 0); } ci->sbRecalc = recalc; } /********************************************************************* * MDIClient class descriptor */ static const WCHAR mdiclientW[] = {'M','D','I','C','l','i','e','n','t',0}; const struct builtin_class_descr MDICLIENT_builtin_class = { mdiclientW, /* name */ 0, /* style */ MDIClientWndProcA, /* procA */ MDIClientWndProcW, /* procW */ sizeof(MDICLIENTINFO), /* extra */ IDC_ARROW, /* cursor */ (HBRUSH)(COLOR_APPWORKSPACE+1) /* brush */ }; static MDICLIENTINFO *get_client_info( HWND client ) { MDICLIENTINFO *ret = NULL; WND *win = WIN_GetPtr( client ); if (win) { if (win == WND_OTHER_PROCESS || win == WND_DESKTOP) { if (IsWindow(client)) WARN( "client %p belongs to other process\n", client ); return NULL; } if (win->flags & WIN_ISMDICLIENT) ret = (MDICLIENTINFO *)win->wExtra; else WARN( "%p is not an MDI client\n", client ); WIN_ReleasePtr( win ); } return ret; } static BOOL is_close_enabled(HWND hwnd, HMENU hSysMenu) { if (GetClassLongW(hwnd, GCL_STYLE) & CS_NOCLOSE) return FALSE; if (!hSysMenu) hSysMenu = GetSystemMenu(hwnd, FALSE); if (hSysMenu) { UINT state = GetMenuState(hSysMenu, SC_CLOSE, MF_BYCOMMAND); if (state == 0xFFFFFFFF || (state & (MF_DISABLED | MF_GRAYED))) return FALSE; } return TRUE; } /********************************************************************** * MDI_GetWindow * * returns "activatable" child different from the current or zero */ static HWND MDI_GetWindow(MDICLIENTINFO *clientInfo, HWND hWnd, BOOL bNext, DWORD dwStyleMask ) { int i; HWND *list; HWND last = 0; dwStyleMask |= WS_DISABLED | WS_VISIBLE; if( !hWnd ) hWnd = clientInfo->hwndActiveChild; if (!(list = WIN_ListChildren( GetParent(hWnd) ))) return 0; i = 0; /* start from next after hWnd */ while (list[i] && list[i] != hWnd) i++; if (list[i]) i++; for ( ; 
list[i]; i++) { if (GetWindow( list[i], GW_OWNER )) continue; if ((GetWindowLongW( list[i], GWL_STYLE ) & dwStyleMask) != WS_VISIBLE) continue; last = list[i]; if (bNext) goto found; } /* now restart from the beginning */ for (i = 0; list[i] && list[i] != hWnd; i++) { if (GetWindow( list[i], GW_OWNER )) continue; if ((GetWindowLongW( list[i], GWL_STYLE ) & dwStyleMask) != WS_VISIBLE) continue; last = list[i]; if (bNext) goto found; } found: HeapFree( GetProcessHeap(), 0, list ); return last; } /********************************************************************** * MDI_CalcDefaultChildPos * * It seems that the default height is about 2/3 of the client rect */ void MDI_CalcDefaultChildPos( HWND hwndClient, INT total, LPPOINT lpPos, INT delta, UINT *id ) { INT nstagger; RECT rect; INT spacing = GetSystemMetrics(SM_CYCAPTION) + GetSystemMetrics(SM_CYFRAME) - 1; if (total < 0) /* we are called from CreateWindow */ { MDICLIENTINFO *ci = get_client_info(hwndClient); total = ci ? ci->nTotalCreated : 0; *id = ci->idFirstChild + ci->nActiveChildren; TRACE("MDI child id %04x\n", *id); } GetClientRect( hwndClient, &rect ); if( rect.bottom - rect.top - delta >= spacing ) rect.bottom -= delta; nstagger = (rect.bottom - rect.top)/(3 * spacing); lpPos[1].x = (rect.right - rect.left - nstagger * spacing); lpPos[1].y = (rect.bottom - rect.top - nstagger * spacing); lpPos[0].x = lpPos[0].y = spacing * (total%(nstagger+1)); } /********************************************************************** * MDISetMenu */ static LRESULT MDISetMenu( HWND hwnd, HMENU hmenuFrame, HMENU hmenuWindow) { MDICLIENTINFO *ci; HWND hwndFrame = GetParent(hwnd); TRACE("%p, frame menu %p, window menu %p\n", hwnd, hmenuFrame, hmenuWindow); if (hmenuFrame && !IsMenu(hmenuFrame)) { WARN("hmenuFrame is not a menu handle\n"); return 0L; } if (hmenuWindow && !IsMenu(hmenuWindow)) { WARN("hmenuWindow is not a menu handle\n"); return 0L; } if (!(ci = get_client_info( hwnd ))) return 0; TRACE("old frame menu %p, old window menu %p\n", ci->hFrameMenu, ci->hWindowMenu); if (hmenuFrame) { if (hmenuFrame == ci->hFrameMenu) return (LRESULT)hmenuFrame; if (ci->hwndChildMaximized) MDI_RestoreFrameMenu( hwndFrame, ci->hwndChildMaximized ); } if( hmenuWindow && hmenuWindow != ci->hWindowMenu ) { /* delete menu items from ci->hWindowMenu * and add them to hmenuWindow */ /* Agent newsreader calls this function with ci->hWindowMenu == NULL */ if( ci->hWindowMenu && ci->nActiveChildren ) { UINT nActiveChildren_old = ci->nActiveChildren; /* Remove all items from old Window menu */ ci->nActiveChildren = 0; MDI_RefreshMenu(ci); ci->hWindowMenu = hmenuWindow; /* Add items to the new Window menu */ ci->nActiveChildren = nActiveChildren_old; MDI_RefreshMenu(ci); } else ci->hWindowMenu = hmenuWindow; } if (hmenuFrame) { SetMenu(hwndFrame, hmenuFrame); if( hmenuFrame != ci->hFrameMenu ) { HMENU oldFrameMenu = ci->hFrameMenu; ci->hFrameMenu = hmenuFrame; if (ci->hwndChildMaximized) MDI_AugmentFrameMenu( hwndFrame, ci->hwndChildMaximized ); return (LRESULT)oldFrameMenu; } } else { /* SetMenu() may already have been called, meaning that this window * already has its menu. But they may have done a SetMenu() on * an MDI window, and called MDISetMenu() after the fact, meaning * that the "if" to this "else" wouldn't catch the need to * augment the frame menu. 
*/ if( ci->hwndChildMaximized ) MDI_AugmentFrameMenu( hwndFrame, ci->hwndChildMaximized ); } return 0; } /********************************************************************** * MDIRefreshMenu */ static LRESULT MDI_RefreshMenu(MDICLIENTINFO *ci) { UINT i, count, visible, id; WCHAR buf[MDI_MAXTITLELENGTH]; TRACE("children %u, window menu %p\n", ci->nActiveChildren, ci->hWindowMenu); if (!ci->hWindowMenu) return 0; if (!IsMenu(ci->hWindowMenu)) { WARN("Window menu handle %p is no more valid\n", ci->hWindowMenu); return 0; } /* Windows finds the last separator in the menu, and if after it * there is a menu item with MDI magic ID removes all existing * menu items after it, and then adds visible MDI children. */ count = GetMenuItemCount(ci->hWindowMenu); for (i = 0; i < count; i++) { MENUITEMINFOW mii; memset(&mii, 0, sizeof(mii)); mii.cbSize = sizeof(mii); mii.fMask = MIIM_TYPE; if (GetMenuItemInfoW(ci->hWindowMenu, i, TRUE, &mii)) { if (mii.fType & MF_SEPARATOR) { /* Windows checks only ID of the menu item */ memset(&mii, 0, sizeof(mii)); mii.cbSize = sizeof(mii); mii.fMask = MIIM_ID; if (GetMenuItemInfoW(ci->hWindowMenu, i + 1, TRUE, &mii)) { if (mii.wID == ci->idFirstChild) { TRACE("removing %u items including separator\n", count - i); while (RemoveMenu(ci->hWindowMenu, i, MF_BYPOSITION)) /* nothing */; break; } } } } } visible = 0; for (i = 0; i < ci->nActiveChildren; i++) { if (GetWindowLongW(ci->child[i], GWL_STYLE) & WS_VISIBLE) { id = ci->idFirstChild + visible; if (visible == MDI_MOREWINDOWSLIMIT) { LoadStringW(user32_module, IDS_MDI_MOREWINDOWS, buf, sizeof(buf)/sizeof(WCHAR)); AppendMenuW(ci->hWindowMenu, MF_STRING, id, buf); break; } if (!visible) /* Visio expects that separator has id 0 */ AppendMenuW(ci->hWindowMenu, MF_SEPARATOR, 0, NULL); visible++; SetWindowLongPtrW(ci->child[i], GWLP_ID, id); buf[0] = '&'; buf[1] = '0' + visible; buf[2] = ' '; InternalGetWindowText(ci->child[i], buf + 3, sizeof(buf)/sizeof(WCHAR) - 3); TRACE("Adding %p, id %u %s\n", ci->child[i], id, debugstr_w(buf)); AppendMenuW(ci->hWindowMenu, MF_STRING, id, buf); if (ci->child[i] == ci->hwndActiveChild) CheckMenuItem(ci->hWindowMenu, id, MF_CHECKED); } else TRACE("MDI child %p is not visible, skipping\n", ci->child[i]); } return (LRESULT)ci->hFrameMenu; } /* ------------------ MDI child window functions ---------------------- */ /********************************************************************** * MDI_ChildGetMinMaxInfo * * Note: The rule here is that client rect of the maximized MDI child * is equal to the client rect of the MDI client window. 
*/ static void MDI_ChildGetMinMaxInfo( HWND client, HWND hwnd, MINMAXINFO* lpMinMax ) { RECT rect; GetClientRect( client, &rect ); AdjustWindowRectEx( &rect, GetWindowLongW( hwnd, GWL_STYLE ), 0, GetWindowLongW( hwnd, GWL_EXSTYLE )); lpMinMax->ptMaxSize.x = rect.right -= rect.left; lpMinMax->ptMaxSize.y = rect.bottom -= rect.top; lpMinMax->ptMaxPosition.x = rect.left; lpMinMax->ptMaxPosition.y = rect.top; TRACE("max rect (%d,%d - %d, %d)\n", rect.left,rect.top,rect.right,rect.bottom); } /********************************************************************** * MDI_SwitchActiveChild * * Note: SetWindowPos sends WM_CHILDACTIVATE to the child window that is * being activated */ static void MDI_SwitchActiveChild( MDICLIENTINFO *ci, HWND hwndTo, BOOL activate ) { HWND hwndPrev; hwndPrev = ci->hwndActiveChild; TRACE("from %p, to %p\n", hwndPrev, hwndTo); if ( hwndTo != hwndPrev ) { BOOL was_zoomed = IsZoomed(hwndPrev); if (was_zoomed) { /* restore old MDI child */ SendMessageW( hwndPrev, WM_SETREDRAW, FALSE, 0 ); ShowWindow( hwndPrev, SW_RESTORE ); SendMessageW( hwndPrev, WM_SETREDRAW, TRUE, 0 ); /* activate new MDI child */ SetWindowPos( hwndTo, HWND_TOP, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE ); /* maximize new MDI child */ ShowWindow( hwndTo, SW_MAXIMIZE ); } /* activate new MDI child */ SetWindowPos( hwndTo, HWND_TOP, 0, 0, 0, 0, SWP_NOMOVE | SWP_NOSIZE | (activate ? 0 : SWP_NOACTIVATE) ); } } /********************************************************************** * MDIDestroyChild */ static LRESULT MDIDestroyChild( HWND client, MDICLIENTINFO *ci, HWND child, BOOL flagDestroy ) { UINT i; TRACE("# of managed children %u\n", ci->nActiveChildren); if( child == ci->hwndActiveChild ) { HWND next = MDI_GetWindow(ci, child, TRUE, 0); /* flagDestroy == 0 means we were called from WM_PARENTNOTIFY handler */ if (flagDestroy && next) MDI_SwitchActiveChild(ci, next, TRUE); else { ShowWindow(child, SW_HIDE); if (child == ci->hwndChildMaximized) { HWND frame = GetParent(client); MDI_RestoreFrameMenu(frame, child); ci->hwndChildMaximized = 0; MDI_UpdateFrameText(frame, client, TRUE, NULL); } if (flagDestroy) MDI_ChildActivate(client, 0); } } for (i = 0; i < ci->nActiveChildren; i++) { if (ci->child[i] == child) { HWND *new_child = HeapAlloc(GetProcessHeap(), 0, (ci->nActiveChildren - 1) * sizeof(HWND)); memcpy(new_child, ci->child, i * sizeof(HWND)); if (i + 1 < ci->nActiveChildren) memcpy(new_child + i, ci->child + i + 1, (ci->nActiveChildren - i - 1) * sizeof(HWND)); HeapFree(GetProcessHeap(), 0, ci->child); ci->child = new_child; ci->nActiveChildren--; break; } } if (flagDestroy) { SendMessageW(client, WM_MDIREFRESHMENU, 0, 0); MDI_PostUpdate(GetParent(child), ci, SB_BOTH+1); DestroyWindow(child); } TRACE("child destroyed - %p\n", child); return 0; } /********************************************************************** * MDI_ChildActivate * * Called in response to WM_CHILDACTIVATE, or when last MDI child * is being deactivated. */ static LONG MDI_ChildActivate( HWND client, HWND child ) { MDICLIENTINFO *clientInfo; HWND prevActiveWnd, frame; BOOL isActiveFrameWnd; clientInfo = get_client_info( client ); if (clientInfo->hwndActiveChild == child) return 0; TRACE("%p\n", child); frame = GetParent(client); isActiveFrameWnd = (GetActiveWindow() == frame); prevActiveWnd = clientInfo->hwndActiveChild; /* deactivate prev. 
active child */ if(prevActiveWnd) { SendMessageW( prevActiveWnd, WM_NCACTIVATE, FALSE, 0L ); SendMessageW( prevActiveWnd, WM_MDIACTIVATE, (WPARAM)prevActiveWnd, (LPARAM)child); } MDI_SwitchActiveChild( clientInfo, child, FALSE ); clientInfo->hwndActiveChild = child; MDI_RefreshMenu(clientInfo); if( isActiveFrameWnd ) { SendMessageW( child, WM_NCACTIVATE, TRUE, 0L); /* Let the client window manage focus for children, but if the focus * is already on the client (for instance this is the 1st child) then * SetFocus won't work. It appears that Windows sends WM_SETFOCUS * manually in this case. */ if (SetFocus(client) == client) SendMessageW( client, WM_SETFOCUS, (WPARAM)client, 0 ); } SendMessageW( child, WM_MDIACTIVATE, (WPARAM)prevActiveWnd, (LPARAM)child ); return TRUE; } /* -------------------- MDI client window functions ------------------- */ /********************************************************************** * CreateMDIMenuBitmap */ static HBITMAP CreateMDIMenuBitmap(void) { HDC hDCSrc = CreateCompatibleDC(0); HDC hDCDest = CreateCompatibleDC(hDCSrc); HBITMAP hbClose = LoadBitmapW(0, MAKEINTRESOURCEW(OBM_OLD_CLOSE) ); HBITMAP hbCopy; HBITMAP hobjSrc, hobjDest; hobjSrc = SelectObject(hDCSrc, hbClose); hbCopy = CreateCompatibleBitmap(hDCSrc,GetSystemMetrics(SM_CXSIZE),GetSystemMetrics(SM_CYSIZE)); hobjDest = SelectObject(hDCDest, hbCopy); BitBlt(hDCDest, 0, 0, GetSystemMetrics(SM_CXSIZE), GetSystemMetrics(SM_CYSIZE), hDCSrc, GetSystemMetrics(SM_CXSIZE), 0, SRCCOPY); SelectObject(hDCSrc, hobjSrc); DeleteObject(hbClose); DeleteDC(hDCSrc); hobjSrc = SelectObject( hDCDest, GetStockObject(BLACK_PEN) ); MoveToEx( hDCDest, GetSystemMetrics(SM_CXSIZE) - 1, 0, NULL ); LineTo( hDCDest, GetSystemMetrics(SM_CXSIZE) - 1, GetSystemMetrics(SM_CYSIZE) - 1); SelectObject(hDCDest, hobjSrc ); SelectObject(hDCDest, hobjDest); DeleteDC(hDCDest); return hbCopy; } /********************************************************************** * MDICascade */ static LONG MDICascade( HWND client, MDICLIENTINFO *ci ) { HWND *win_array; BOOL has_icons = FALSE; int i, total; if (ci->hwndChildMaximized) SendMessageW(client, WM_MDIRESTORE, (WPARAM)ci->hwndChildMaximized, 0); if (ci->nActiveChildren == 0) return 0; if (!(win_array = WIN_ListChildren( client ))) return 0; /* remove all the windows we don't want */ for (i = total = 0; win_array[i]; i++) { if (!IsWindowVisible( win_array[i] )) continue; if (GetWindow( win_array[i], GW_OWNER )) continue; /* skip owned windows */ if (IsIconic( win_array[i] )) { has_icons = TRUE; continue; } win_array[total++] = win_array[i]; } win_array[total] = 0; if (total) { INT delta = 0, n = 0, i; POINT pos[2]; if (has_icons) delta = GetSystemMetrics(SM_CYICONSPACING) + GetSystemMetrics(SM_CYICON); /* walk the list (backwards) and move windows */ for (i = total - 1; i >= 0; i--) { LONG style; LONG posOptions = SWP_DRAWFRAME | SWP_NOACTIVATE | SWP_NOZORDER; MDI_CalcDefaultChildPos(client, n++, pos, delta, NULL); TRACE("move %p to (%d,%d) size [%d,%d]\n", win_array[i], pos[0].x, pos[0].y, pos[1].x, pos[1].y); style = GetWindowLongW(win_array[i], GWL_STYLE); if (!(style & WS_SIZEBOX)) posOptions |= SWP_NOSIZE; SetWindowPos( win_array[i], 0, pos[0].x, pos[0].y, pos[1].x, pos[1].y, posOptions); } } HeapFree( GetProcessHeap(), 0, win_array ); if (has_icons) ArrangeIconicWindows( client ); return 0; } /********************************************************************** * MDITile */ static void MDITile( HWND client, MDICLIENTINFO *ci, WPARAM wParam ) { HWND *win_array; int i, total; BOOL 
has_icons = FALSE; if (ci->hwndChildMaximized) SendMessageW(client, WM_MDIRESTORE, (WPARAM)ci->hwndChildMaximized, 0); if (ci->nActiveChildren == 0) return; if (!(win_array = WIN_ListChildren( client ))) return; /* remove all the windows we don't want */ for (i = total = 0; win_array[i]; i++) { if (!IsWindowVisible( win_array[i] )) continue; if (GetWindow( win_array[i], GW_OWNER )) continue; /* skip owned windows (icon titles) */ if (IsIconic( win_array[i] )) { has_icons = TRUE; continue; } if ((wParam & MDITILE_SKIPDISABLED) && !IsWindowEnabled( win_array[i] )) continue; win_array[total++] = win_array[i]; } win_array[total] = 0; TRACE("%u windows to tile\n", total); if (total) { HWND *pWnd = win_array; RECT rect; int x, y, xsize, ysize; int rows, columns, r, c, i; GetClientRect(client,&rect); rows = (int) sqrt((double)total); columns = total / rows; if( wParam & MDITILE_HORIZONTAL ) /* version >= 3.1 */ { i = rows; rows = columns; /* exchange r and c */ columns = i; } if (has_icons) { y = rect.bottom - 2 * GetSystemMetrics(SM_CYICONSPACING) - GetSystemMetrics(SM_CYICON); rect.bottom = ( y - GetSystemMetrics(SM_CYICON) < rect.top )? rect.bottom: y; } ysize = rect.bottom / rows; xsize = rect.right / columns; for (x = i = 0, c = 1; c <= columns && *pWnd; c++) { if (c == columns) { rows = total - i; ysize = rect.bottom / rows; } y = 0; for (r = 1; r <= rows && *pWnd; r++, i++) { LONG posOptions = SWP_DRAWFRAME | SWP_NOACTIVATE | SWP_NOZORDER; LONG style = GetWindowLongW(win_array[i], GWL_STYLE); if (!(style & WS_SIZEBOX)) posOptions |= SWP_NOSIZE; SetWindowPos(*pWnd, 0, x, y, xsize, ysize, posOptions); y += ysize; pWnd++; } x += xsize; } } HeapFree( GetProcessHeap(), 0, win_array ); if (has_icons) ArrangeIconicWindows( client ); } /* ----------------------- Frame window ---------------------------- */ /********************************************************************** * MDI_AugmentFrameMenu */ static BOOL MDI_AugmentFrameMenu( HWND frame, HWND hChild ) { HMENU menu = GetMenu( frame ); HMENU hSysPopup = 0; HBITMAP hSysMenuBitmap = 0; HICON hIcon; TRACE("frame %p,child %p\n",frame,hChild); if( !menu ) return 0; /* create a copy of sysmenu popup and insert it into frame menu bar */ if (!(hSysPopup = GetSystemMenu(hChild, FALSE))) { TRACE("child %p doesn't have a system menu\n", hChild); return 0; } AppendMenuW(menu, MF_HELP | MF_BITMAP, SC_CLOSE, is_close_enabled(hChild, hSysPopup) ? 
(LPCWSTR)HBMMENU_MBAR_CLOSE : (LPCWSTR)HBMMENU_MBAR_CLOSE_D ); AppendMenuW(menu, MF_HELP | MF_BITMAP, SC_RESTORE, (LPCWSTR)HBMMENU_MBAR_RESTORE ); AppendMenuW(menu, MF_HELP | MF_BITMAP, SC_MINIMIZE, (LPCWSTR)HBMMENU_MBAR_MINIMIZE ) ; /* The system menu is replaced by the child icon */ hIcon = (HICON)SendMessageW(hChild, WM_GETICON, ICON_SMALL, 0); if (!hIcon) hIcon = (HICON)SendMessageW(hChild, WM_GETICON, ICON_BIG, 0); if (!hIcon) hIcon = LoadImageW(0, MAKEINTRESOURCEW(IDI_WINLOGO), IMAGE_ICON, 0, 0, LR_DEFAULTCOLOR); if (hIcon) { HDC hMemDC; HBITMAP hBitmap, hOldBitmap; HBRUSH hBrush; HDC hdc = GetDC(hChild); if (hdc) { int cx, cy; cx = GetSystemMetrics(SM_CXSMICON); cy = GetSystemMetrics(SM_CYSMICON); hMemDC = CreateCompatibleDC(hdc); hBitmap = CreateCompatibleBitmap(hdc, cx, cy); hOldBitmap = SelectObject(hMemDC, hBitmap); SetMapMode(hMemDC, MM_TEXT); hBrush = CreateSolidBrush(GetSysColor(COLOR_MENU)); DrawIconEx(hMemDC, 0, 0, hIcon, cx, cy, 0, hBrush, DI_NORMAL); SelectObject (hMemDC, hOldBitmap); DeleteObject(hBrush); DeleteDC(hMemDC); ReleaseDC(hChild, hdc); hSysMenuBitmap = hBitmap; } } if( !InsertMenuA(menu,0,MF_BYPOSITION | MF_BITMAP | MF_POPUP, (UINT_PTR)hSysPopup, (LPSTR)hSysMenuBitmap)) { TRACE("not inserted\n"); DestroyMenu(hSysPopup); return 0; } EnableMenuItem(hSysPopup, SC_SIZE, MF_BYCOMMAND | MF_GRAYED); EnableMenuItem(hSysPopup, SC_MOVE, MF_BYCOMMAND | MF_GRAYED); EnableMenuItem(hSysPopup, SC_MAXIMIZE, MF_BYCOMMAND | MF_GRAYED); SetMenuDefaultItem(hSysPopup, SC_CLOSE, FALSE); /* redraw menu */ DrawMenuBar(frame); return 1; } /********************************************************************** * MDI_RestoreFrameMenu */ static BOOL MDI_RestoreFrameMenu( HWND frame, HWND hChild ) { MENUITEMINFOW menuInfo; HMENU menu = GetMenu( frame ); INT nItems; UINT iId; TRACE("frame %p, child %p\n", frame, hChild); if( !menu ) return 0; /* if there is no system buttons then nothing to do */ nItems = GetMenuItemCount(menu) - 1; iId = GetMenuItemID(menu, nItems); if ( !(iId == SC_RESTORE || iId == SC_CLOSE) ) return 0; /* * Remove the system menu, If that menu is the icon of the window * as it is in win95, we have to delete the bitmap. 
*/ memset(&menuInfo, 0, sizeof(menuInfo)); menuInfo.cbSize = sizeof(menuInfo); menuInfo.fMask = MIIM_DATA | MIIM_TYPE; GetMenuItemInfoW(menu, 0, TRUE, &menuInfo); RemoveMenu(menu,0,MF_BYPOSITION); if ( (menuInfo.fType & MFT_BITMAP) && (LOWORD(menuInfo.dwTypeData)!=0) && (LOWORD(menuInfo.dwTypeData)!=HBITMAP_16(hBmpClose)) ) { DeleteObject(HBITMAP_32(LOWORD(menuInfo.dwTypeData))); } /* close */ DeleteMenu(menu, SC_CLOSE, MF_BYCOMMAND); /* restore */ DeleteMenu(menu, SC_RESTORE, MF_BYCOMMAND); /* minimize */ DeleteMenu(menu, SC_MINIMIZE, MF_BYCOMMAND); DrawMenuBar(frame); return 1; } /********************************************************************** * MDI_UpdateFrameText * * used when child window is maximized/restored * * Note: lpTitle can be NULL */ static void MDI_UpdateFrameText( HWND frame, HWND hClient, BOOL repaint, LPCWSTR lpTitle ) { WCHAR lpBuffer[MDI_MAXTITLELENGTH+1]; MDICLIENTINFO *ci = get_client_info( hClient ); TRACE("frameText %s\n", debugstr_w(lpTitle)); if (!ci) return; if (!lpTitle && !ci->frameTitle) /* first time around, get title from the frame window */ { GetWindowTextW( frame, lpBuffer, sizeof(lpBuffer)/sizeof(WCHAR) ); lpTitle = lpBuffer; } /* store new "default" title if lpTitle is not NULL */ if (lpTitle) { HeapFree( GetProcessHeap(), 0, ci->frameTitle ); if ((ci->frameTitle = HeapAlloc( GetProcessHeap(), 0, (strlenW(lpTitle)+1)*sizeof(WCHAR)))) strcpyW( ci->frameTitle, lpTitle ); } if (ci->frameTitle) { if (ci->hwndChildMaximized) { /* combine frame title and child title if possible */ static const WCHAR lpBracket[] = {' ','-',' ','[',0}; static const WCHAR lpBracket2[] = {']',0}; int i_frame_text_length = strlenW(ci->frameTitle); lstrcpynW( lpBuffer, ci->frameTitle, MDI_MAXTITLELENGTH); if( i_frame_text_length + 6 < MDI_MAXTITLELENGTH ) { strcatW( lpBuffer, lpBracket ); if (GetWindowTextW( ci->hwndActiveChild, lpBuffer + i_frame_text_length + 4, MDI_MAXTITLELENGTH - i_frame_text_length - 5 )) strcatW( lpBuffer, lpBracket2 ); else lpBuffer[i_frame_text_length] = 0; /* remove bracket */ } } else { lstrcpynW(lpBuffer, ci->frameTitle, MDI_MAXTITLELENGTH+1 ); } } else lpBuffer[0] = '\0'; DefWindowProcW( frame, WM_SETTEXT, 0, (LPARAM)lpBuffer ); if (repaint) SetWindowPos( frame, 0,0,0,0,0, SWP_FRAMECHANGED | SWP_NOSIZE | SWP_NOMOVE | SWP_NOACTIVATE | SWP_NOZORDER ); } /* ----------------------------- Interface ---------------------------- */ /********************************************************************** * MDIClientWndProc_common */ static LRESULT MDIClientWndProc_common( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam, BOOL unicode ) { MDICLIENTINFO *ci; TRACE("%p %04x (%s) %08lx %08lx\n", hwnd, message, SPY_GetMsgName(message, hwnd), wParam, lParam); if (!(ci = get_client_info( hwnd ))) { if (message == WM_NCCREATE) { WND *wndPtr = WIN_GetPtr( hwnd ); wndPtr->flags |= WIN_ISMDICLIENT; WIN_ReleasePtr( wndPtr ); } return unicode ? DefWindowProcW( hwnd, message, wParam, lParam ) : DefWindowProcA( hwnd, message, wParam, lParam ); } switch (message) { case WM_CREATE: { /* Since we are using only cs->lpCreateParams, we can safely * cast to LPCREATESTRUCTA here */ LPCREATESTRUCTA cs = (LPCREATESTRUCTA)lParam; WND *wndPtr = WIN_GetPtr( hwnd ); /* Translation layer doesn't know what's in the cs->lpCreateParams * so we have to keep track of what environment we're in. 
*/ if( wndPtr->flags & WIN_ISWIN32 ) { LPCLIENTCREATESTRUCT ccs = cs->lpCreateParams; ci->hWindowMenu = ccs->hWindowMenu; ci->idFirstChild = ccs->idFirstChild; } else { LPCLIENTCREATESTRUCT16 ccs = MapSL(PtrToUlong(cs->lpCreateParams)); ci->hWindowMenu = HMENU_32(ccs->hWindowMenu); ci->idFirstChild = ccs->idFirstChild; } WIN_ReleasePtr( wndPtr ); ci->hwndChildMaximized = 0; ci->child = NULL; ci->nActiveChildren = 0; ci->nTotalCreated = 0; ci->frameTitle = NULL; ci->mdiFlags = 0; ci->hFrameMenu = GetMenu(cs->hwndParent); if (!hBmpClose) hBmpClose = CreateMDIMenuBitmap(); TRACE("Client created: hwnd %p, Window menu %p, idFirst = %04x\n", hwnd, ci->hWindowMenu, ci->idFirstChild ); return 0; } case WM_DESTROY: { if( ci->hwndChildMaximized ) MDI_RestoreFrameMenu(GetParent(hwnd), ci->hwndChildMaximized); ci->nActiveChildren = 0; MDI_RefreshMenu(ci); HeapFree( GetProcessHeap(), 0, ci->child ); HeapFree( GetProcessHeap(), 0, ci->frameTitle ); return 0; } case WM_MDIACTIVATE: { if( ci->hwndActiveChild != (HWND)wParam ) SetWindowPos((HWND)wParam, 0,0,0,0,0, SWP_NOSIZE | SWP_NOMOVE); return 0; } case WM_MDICASCADE: return MDICascade(hwnd, ci); case WM_MDICREATE: if (lParam) { HWND child; if (unicode) { MDICREATESTRUCTW *csW = (MDICREATESTRUCTW *)lParam; child = CreateWindowExW(WS_EX_MDICHILD, csW->szClass, csW->szTitle, csW->style, csW->x, csW->y, csW->cx, csW->cy, hwnd, 0, csW->hOwner, (LPVOID)csW->lParam); } else { MDICREATESTRUCTA *csA = (MDICREATESTRUCTA *)lParam; child = CreateWindowExA(WS_EX_MDICHILD, csA->szClass, csA->szTitle, csA->style, csA->x, csA->y, csA->cx, csA->cy, hwnd, 0, csA->hOwner, (LPVOID)csA->lParam); } return (LRESULT)child; } return 0; case WM_MDIDESTROY: return MDIDestroyChild( hwnd, ci, WIN_GetFullHandle( (HWND)wParam ), TRUE ); case WM_MDIGETACTIVE: if (lParam) *(BOOL *)lParam = IsZoomed(ci->hwndActiveChild); return (LRESULT)ci->hwndActiveChild; case WM_MDIICONARRANGE: ci->mdiFlags |= MDIF_NEEDUPDATE; ArrangeIconicWindows( hwnd ); ci->sbRecalc = SB_BOTH+1; SendMessageW( hwnd, WM_MDICALCCHILDSCROLL, 0, 0 ); return 0; case WM_MDIMAXIMIZE: ShowWindow( (HWND)wParam, SW_MAXIMIZE ); return 0; case WM_MDINEXT: /* lParam != 0 means previous window */ { HWND next = MDI_GetWindow( ci, WIN_GetFullHandle( (HWND)wParam ), !lParam, 0 ); MDI_SwitchActiveChild( ci, next, TRUE ); break; } case WM_MDIRESTORE: ShowWindow( (HWND)wParam, SW_SHOWNORMAL ); return 0; case WM_MDISETMENU: return MDISetMenu( hwnd, (HMENU)wParam, (HMENU)lParam ); case WM_MDIREFRESHMENU: return MDI_RefreshMenu( ci ); case WM_MDITILE: ci->mdiFlags |= MDIF_NEEDUPDATE; ShowScrollBar( hwnd, SB_BOTH, FALSE ); MDITile( hwnd, ci, wParam ); ci->mdiFlags &= ~MDIF_NEEDUPDATE; return 0; case WM_VSCROLL: case WM_HSCROLL: ci->mdiFlags |= MDIF_NEEDUPDATE; ScrollChildren( hwnd, message, wParam, lParam ); ci->mdiFlags &= ~MDIF_NEEDUPDATE; return 0; case WM_SETFOCUS: if (ci->hwndActiveChild && !IsIconic( ci->hwndActiveChild )) SetFocus( ci->hwndActiveChild ); return 0; case WM_NCACTIVATE: if( ci->hwndActiveChild ) SendMessageW(ci->hwndActiveChild, message, wParam, lParam); break; case WM_PARENTNOTIFY: switch (LOWORD(wParam)) { case WM_CREATE: if (GetWindowLongW((HWND)lParam, GWL_EXSTYLE) & WS_EX_MDICHILD) { ci->nTotalCreated++; ci->nActiveChildren++; if (!ci->child) ci->child = HeapAlloc(GetProcessHeap(), 0, sizeof(HWND)); else ci->child = HeapReAlloc(GetProcessHeap(), 0, ci->child, sizeof(HWND) * ci->nActiveChildren); TRACE("Adding MDI child %p, # of children %d\n", (HWND)lParam, ci->nActiveChildren); ci->child[ci->nActiveChildren - 
1] = (HWND)lParam; } break; case WM_LBUTTONDOWN: { HWND child; POINT pt; pt.x = (short)LOWORD(lParam); pt.y = (short)HIWORD(lParam); child = ChildWindowFromPoint(hwnd, pt); TRACE("notification from %p (%i,%i)\n",child,pt.x,pt.y); if( child && child != hwnd && child != ci->hwndActiveChild ) SetWindowPos(child, 0,0,0,0,0, SWP_NOSIZE | SWP_NOMOVE ); break; } case WM_DESTROY: return MDIDestroyChild( hwnd, ci, WIN_GetFullHandle( (HWND)lParam ), FALSE ); } return 0; case WM_SIZE: if( ci->hwndActiveChild && IsZoomed(ci->hwndActiveChild) ) { RECT rect; rect.left = 0; rect.top = 0; rect.right = LOWORD(lParam); rect.bottom = HIWORD(lParam); AdjustWindowRectEx(&rect, GetWindowLongA(ci->hwndActiveChild, GWL_STYLE), 0, GetWindowLongA(ci->hwndActiveChild, GWL_EXSTYLE) ); MoveWindow(ci->hwndActiveChild, rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top, 1); } else MDI_PostUpdate(hwnd, ci, SB_BOTH+1); break; case WM_MDICALCCHILDSCROLL: if( (ci->mdiFlags & MDIF_NEEDUPDATE) && ci->sbRecalc ) { CalcChildScroll(hwnd, ci->sbRecalc-1); ci->sbRecalc = 0; ci->mdiFlags &= ~MDIF_NEEDUPDATE; } return 0; } return unicode ? DefWindowProcW( hwnd, message, wParam, lParam ) : DefWindowProcA( hwnd, message, wParam, lParam ); } /*********************************************************************** * MDIClientWndProcA */ static LRESULT WINAPI MDIClientWndProcA( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam ) { if (!IsWindow(hwnd)) return 0; return MDIClientWndProc_common( hwnd, message, wParam, lParam, FALSE ); } /*********************************************************************** * MDIClientWndProcW */ static LRESULT WINAPI MDIClientWndProcW( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam ) { if (!IsWindow(hwnd)) return 0; return MDIClientWndProc_common( hwnd, message, wParam, lParam, TRUE ); } /*********************************************************************** * DefFrameProcA (USER32.@) */ LRESULT WINAPI DefFrameProcA( HWND hwnd, HWND hwndMDIClient, UINT message, WPARAM wParam, LPARAM lParam) { if (hwndMDIClient) { switch (message) { case WM_SETTEXT: { DWORD len = MultiByteToWideChar( CP_ACP, 0, (LPSTR)lParam, -1, NULL, 0 ); LPWSTR text = HeapAlloc( GetProcessHeap(), 0, len * sizeof(WCHAR) ); MultiByteToWideChar( CP_ACP, 0, (LPSTR)lParam, -1, text, len ); MDI_UpdateFrameText( hwnd, hwndMDIClient, FALSE, text ); HeapFree( GetProcessHeap(), 0, text ); } return 1; /* success. 
FIXME: check text length */ case WM_COMMAND: case WM_NCACTIVATE: case WM_NEXTMENU: case WM_SETFOCUS: case WM_SIZE: return DefFrameProcW( hwnd, hwndMDIClient, message, wParam, lParam ); } } return DefWindowProcA(hwnd, message, wParam, lParam); } /*********************************************************************** * DefFrameProcW (USER32.@) */ LRESULT WINAPI DefFrameProcW( HWND hwnd, HWND hwndMDIClient, UINT message, WPARAM wParam, LPARAM lParam) { MDICLIENTINFO *ci = get_client_info( hwndMDIClient ); TRACE("%p %p %04x (%s) %08lx %08lx\n", hwnd, hwndMDIClient, message, SPY_GetMsgName(message, hwnd), wParam, lParam); if (ci) { switch (message) { case WM_COMMAND: { WORD id = LOWORD(wParam); /* check for possible syscommands for maximized MDI child */ if (id < ci->idFirstChild || id >= ci->idFirstChild + ci->nActiveChildren) { if( (id - 0xf000) & 0xf00f ) break; if( !ci->hwndChildMaximized ) break; switch( id ) { case SC_CLOSE: if (!is_close_enabled(ci->hwndActiveChild, 0)) break; case SC_SIZE: case SC_MOVE: case SC_MINIMIZE: case SC_MAXIMIZE: case SC_NEXTWINDOW: case SC_PREVWINDOW: case SC_RESTORE: return SendMessageW( ci->hwndChildMaximized, WM_SYSCOMMAND, wParam, lParam); } } else { HWND childHwnd; if (id - ci->idFirstChild == MDI_MOREWINDOWSLIMIT) /* User chose "More Windows..." */ childHwnd = MDI_MoreWindowsDialog(hwndMDIClient); else /* User chose one of the windows listed in the "Windows" menu */ childHwnd = MDI_GetChildByID(hwndMDIClient, id, ci); if( childHwnd ) SendMessageW( hwndMDIClient, WM_MDIACTIVATE, (WPARAM)childHwnd, 0 ); } } break; case WM_NCACTIVATE: SendMessageW(hwndMDIClient, message, wParam, lParam); break; case WM_SETTEXT: MDI_UpdateFrameText( hwnd, hwndMDIClient, FALSE, (LPWSTR)lParam ); return 1; /* success. FIXME: check text length */ case WM_SETFOCUS: SetFocus(hwndMDIClient); break; case WM_SIZE: MoveWindow(hwndMDIClient, 0, 0, LOWORD(lParam), HIWORD(lParam), TRUE); break; case WM_NEXTMENU: { MDINEXTMENU *next_menu = (MDINEXTMENU *)lParam; if (!IsIconic(hwnd) && ci->hwndActiveChild && !IsZoomed(ci->hwndActiveChild)) { /* control menu is between the frame system menu and * the first entry of menu bar */ WND *wndPtr = WIN_GetPtr(hwnd); if( (wParam == VK_LEFT && GetMenu(hwnd) == next_menu->hmenuIn) || (wParam == VK_RIGHT && GetSubMenu(wndPtr->hSysMenu, 0) == next_menu->hmenuIn) ) { WIN_ReleasePtr(wndPtr); wndPtr = WIN_GetPtr(ci->hwndActiveChild); next_menu->hmenuNext = GetSubMenu(wndPtr->hSysMenu, 0); next_menu->hwndNext = ci->hwndActiveChild; } WIN_ReleasePtr(wndPtr); } return 0; } } } return DefWindowProcW( hwnd, message, wParam, lParam ); } /*********************************************************************** * DefMDIChildProcA (USER32.@) */ LRESULT WINAPI DefMDIChildProcA( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam ) { HWND client = GetParent(hwnd); MDICLIENTINFO *ci = get_client_info( client ); TRACE("%p %04x (%s) %08lx %08lx\n", hwnd, message, SPY_GetMsgName(message, hwnd), wParam, lParam); hwnd = WIN_GetFullHandle( hwnd ); if (!ci) return DefWindowProcA( hwnd, message, wParam, lParam ); switch (message) { case WM_SETTEXT: DefWindowProcA(hwnd, message, wParam, lParam); if( ci->hwndChildMaximized == hwnd ) MDI_UpdateFrameText( GetParent(client), client, TRUE, NULL ); return 1; /* success. 
FIXME: check text length */ case WM_GETMINMAXINFO: case WM_MENUCHAR: case WM_CLOSE: case WM_SETFOCUS: case WM_CHILDACTIVATE: case WM_SYSCOMMAND: case WM_SHOWWINDOW: case WM_SETVISIBLE: case WM_SIZE: case WM_NEXTMENU: case WM_SYSCHAR: case WM_DESTROY: return DefMDIChildProcW( hwnd, message, wParam, lParam ); } return DefWindowProcA(hwnd, message, wParam, lParam); } /*********************************************************************** * DefMDIChildProcW (USER32.@) */ LRESULT WINAPI DefMDIChildProcW( HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam ) { HWND client = GetParent(hwnd); MDICLIENTINFO *ci = get_client_info( client ); TRACE("%p %04x (%s) %08lx %08lx\n", hwnd, message, SPY_GetMsgName(message, hwnd), wParam, lParam); hwnd = WIN_GetFullHandle( hwnd ); if (!ci) return DefWindowProcW( hwnd, message, wParam, lParam ); switch (message) { case WM_SETTEXT: DefWindowProcW(hwnd, message, wParam, lParam); if( ci->hwndChildMaximized == hwnd ) MDI_UpdateFrameText( GetParent(client), client, TRUE, NULL ); return 1; /* success. FIXME: check text length */ case WM_GETMINMAXINFO: MDI_ChildGetMinMaxInfo( client, hwnd, (MINMAXINFO *)lParam ); return 0; case WM_MENUCHAR: return 0x00010000; /* MDI children don't have menu bars */ case WM_CLOSE: SendMessageW( client, WM_MDIDESTROY, (WPARAM)hwnd, 0 ); return 0; case WM_SETFOCUS: if (ci->hwndActiveChild != hwnd) MDI_ChildActivate( client, hwnd ); break; case WM_CHILDACTIVATE: MDI_ChildActivate( client, hwnd ); return 0; case WM_SYSCOMMAND: switch (wParam & 0xfff0) { case SC_MOVE: if( ci->hwndChildMaximized == hwnd ) return 0; break; case SC_RESTORE: case SC_MINIMIZE: break; case SC_MAXIMIZE: if (ci->hwndChildMaximized == hwnd) return SendMessageW( GetParent(client), message, wParam, lParam); break; case SC_NEXTWINDOW: SendMessageW( client, WM_MDINEXT, 0, 0); return 0; case SC_PREVWINDOW: SendMessageW( client, WM_MDINEXT, 0, 1); return 0; } break; case WM_SHOWWINDOW: case WM_SETVISIBLE: if (ci->hwndChildMaximized) ci->mdiFlags &= ~MDIF_NEEDUPDATE; else MDI_PostUpdate(client, ci, SB_BOTH+1); break; case WM_SIZE: /* This is the only place where we switch to/from maximized state */ /* do not change */ TRACE("current active %p, maximized %p\n", ci->hwndActiveChild, ci->hwndChildMaximized); if( ci->hwndChildMaximized == hwnd && wParam != SIZE_MAXIMIZED ) { HWND frame; ci->hwndChildMaximized = 0; frame = GetParent(client); MDI_RestoreFrameMenu( frame, hwnd ); MDI_UpdateFrameText( frame, client, TRUE, NULL ); } if( wParam == SIZE_MAXIMIZED ) { HWND frame, hMaxChild = ci->hwndChildMaximized; if( hMaxChild == hwnd ) break; if( hMaxChild) { SendMessageW( hMaxChild, WM_SETREDRAW, FALSE, 0 ); MDI_RestoreFrameMenu( GetParent(client), hMaxChild ); ShowWindow( hMaxChild, SW_SHOWNOACTIVATE ); SendMessageW( hMaxChild, WM_SETREDRAW, TRUE, 0 ); } TRACE("maximizing child %p\n", hwnd ); /* keep track of the maximized window. */ ci->hwndChildMaximized = hwnd; /* !!! 
*/ frame = GetParent(client); MDI_AugmentFrameMenu( frame, hwnd ); MDI_UpdateFrameText( frame, client, TRUE, NULL ); } if( wParam == SIZE_MINIMIZED ) { HWND switchTo = MDI_GetWindow( ci, hwnd, TRUE, WS_MINIMIZE ); if (!switchTo) switchTo = hwnd; SendMessageW( switchTo, WM_CHILDACTIVATE, 0, 0 ); } MDI_PostUpdate(client, ci, SB_BOTH+1); break; case WM_NEXTMENU: { MDINEXTMENU *next_menu = (MDINEXTMENU *)lParam; HWND parent = GetParent(client); if( wParam == VK_LEFT ) /* switch to frame system menu */ { WND *wndPtr = WIN_GetPtr( parent ); next_menu->hmenuNext = GetSubMenu( wndPtr->hSysMenu, 0 ); WIN_ReleasePtr( wndPtr ); } if( wParam == VK_RIGHT ) /* to frame menu bar */ { next_menu->hmenuNext = GetMenu(parent); } next_menu->hwndNext = parent; return 0; } case WM_SYSCHAR: if (wParam == '-') { SendMessageW( hwnd, WM_SYSCOMMAND, (WPARAM)SC_KEYMENU, (DWORD)VK_SPACE); return 0; } break; case WM_DESTROY: /* Remove itself from the Window menu */ MDI_RefreshMenu(ci); break; } return DefWindowProcW(hwnd, message, wParam, lParam); } /********************************************************************** * CreateMDIWindowA (USER32.@) Creates a MDI child * * RETURNS * Success: Handle to created window * Failure: NULL */ HWND WINAPI CreateMDIWindowA( LPCSTR lpClassName, /* [in] Pointer to registered child class name */ LPCSTR lpWindowName, /* [in] Pointer to window name */ DWORD dwStyle, /* [in] Window style */ INT X, /* [in] Horizontal position of window */ INT Y, /* [in] Vertical position of window */ INT nWidth, /* [in] Width of window */ INT nHeight, /* [in] Height of window */ HWND hWndParent, /* [in] Handle to parent window */ HINSTANCE hInstance, /* [in] Handle to application instance */ LPARAM lParam) /* [in] Application-defined value */ { TRACE("(%s,%s,%08x,%d,%d,%d,%d,%p,%p,%08lx)\n", debugstr_a(lpClassName),debugstr_a(lpWindowName),dwStyle,X,Y, nWidth,nHeight,hWndParent,hInstance,lParam); return CreateWindowExA(WS_EX_MDICHILD, lpClassName, lpWindowName, dwStyle, X, Y, nWidth, nHeight, hWndParent, 0, hInstance, (LPVOID)lParam); } /*********************************************************************** * CreateMDIWindowW (USER32.@) Creates a MDI child * * RETURNS * Success: Handle to created window * Failure: NULL */ HWND WINAPI CreateMDIWindowW( LPCWSTR lpClassName, /* [in] Pointer to registered child class name */ LPCWSTR lpWindowName, /* [in] Pointer to window name */ DWORD dwStyle, /* [in] Window style */ INT X, /* [in] Horizontal position of window */ INT Y, /* [in] Vertical position of window */ INT nWidth, /* [in] Width of window */ INT nHeight, /* [in] Height of window */ HWND hWndParent, /* [in] Handle to parent window */ HINSTANCE hInstance, /* [in] Handle to application instance */ LPARAM lParam) /* [in] Application-defined value */ { TRACE("(%s,%s,%08x,%d,%d,%d,%d,%p,%p,%08lx)\n", debugstr_w(lpClassName), debugstr_w(lpWindowName), dwStyle, X, Y, nWidth, nHeight, hWndParent, hInstance, lParam); return CreateWindowExW(WS_EX_MDICHILD, lpClassName, lpWindowName, dwStyle, X, Y, nWidth, nHeight, hWndParent, 0, hInstance, (LPVOID)lParam); } /********************************************************************** * TranslateMDISysAccel (USER32.@) */ BOOL WINAPI TranslateMDISysAccel( HWND hwndClient, LPMSG msg ) { if (msg->message == WM_KEYDOWN || msg->message == WM_SYSKEYDOWN) { MDICLIENTINFO *ci = get_client_info( hwndClient ); WPARAM wParam = 0; if (!ci || !IsWindowEnabled(ci->hwndActiveChild)) return 0; /* translate if the Ctrl key is down and Alt not. 
*/ if( (GetKeyState(VK_CONTROL) & 0x8000) && !(GetKeyState(VK_MENU) & 0x8000)) { switch( msg->wParam ) { case VK_F6: case VK_TAB: wParam = ( GetKeyState(VK_SHIFT) & 0x8000 ) ? SC_NEXTWINDOW : SC_PREVWINDOW; break; case VK_F4: case VK_RBUTTON: if (is_close_enabled(ci->hwndActiveChild, 0)) { wParam = SC_CLOSE; break; } /* fall through */ default: return 0; } TRACE("wParam = %04lx\n", wParam); SendMessageW(ci->hwndActiveChild, WM_SYSCOMMAND, wParam, (LPARAM)msg->wParam); return 1; } } return 0; /* failure */ } /*********************************************************************** * CalcChildScroll (USER32.@) */ void WINAPI CalcChildScroll( HWND hwnd, INT scroll ) { SCROLLINFO info; RECT childRect, clientRect; HWND *list; GetClientRect( hwnd, &clientRect ); SetRectEmpty( &childRect ); if ((list = WIN_ListChildren( hwnd ))) { int i; for (i = 0; list[i]; i++) { DWORD style = GetWindowLongW( list[i], GWL_STYLE ); if (style & WS_MAXIMIZE) { HeapFree( GetProcessHeap(), 0, list ); ShowScrollBar( hwnd, SB_BOTH, FALSE ); return; } if (style & WS_VISIBLE) { RECT rect; GetWindowRect( list[i], &rect ); UnionRect( &childRect, &rect, &childRect ); } } HeapFree( GetProcessHeap(), 0, list ); } MapWindowPoints( 0, hwnd, (POINT *)&childRect, 2 ); UnionRect( &childRect, &clientRect, &childRect ); /* set common info values */ info.cbSize = sizeof(info); info.fMask = SIF_POS | SIF_RANGE; /* set the specific */ switch( scroll ) { case SB_BOTH: case SB_HORZ: info.nMin = childRect.left; info.nMax = childRect.right - clientRect.right; info.nPos = clientRect.left - childRect.left; SetScrollInfo(hwnd, SB_HORZ, &info, TRUE); if (scroll == SB_HORZ) break; /* fall through */ case SB_VERT: info.nMin = childRect.top; info.nMax = childRect.bottom - clientRect.bottom; info.nPos = clientRect.top - childRect.top; SetScrollInfo(hwnd, SB_VERT, &info, TRUE); break; } } /*********************************************************************** * ScrollChildren (USER32.@) */ void WINAPI ScrollChildren(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) { INT newPos = -1; INT curPos, length, minPos, maxPos, shift; RECT rect; GetClientRect( hWnd, &rect ); switch(uMsg) { case WM_HSCROLL: GetScrollRange(hWnd,SB_HORZ,&minPos,&maxPos); curPos = GetScrollPos(hWnd,SB_HORZ); length = (rect.right - rect.left) / 2; shift = GetSystemMetrics(SM_CYHSCROLL); break; case WM_VSCROLL: GetScrollRange(hWnd,SB_VERT,&minPos,&maxPos); curPos = GetScrollPos(hWnd,SB_VERT); length = (rect.bottom - rect.top) / 2; shift = GetSystemMetrics(SM_CXVSCROLL); break; default: return; } switch( wParam ) { case SB_LINEUP: newPos = curPos - shift; break; case SB_LINEDOWN: newPos = curPos + shift; break; case SB_PAGEUP: newPos = curPos - length; break; case SB_PAGEDOWN: newPos = curPos + length; break; case SB_THUMBPOSITION: newPos = LOWORD(lParam); break; case SB_THUMBTRACK: return; case SB_TOP: newPos = minPos; break; case SB_BOTTOM: newPos = maxPos; break; case SB_ENDSCROLL: CalcChildScroll(hWnd,(uMsg == WM_VSCROLL)?SB_VERT:SB_HORZ); return; } if( newPos > maxPos ) newPos = maxPos; else if( newPos < minPos ) newPos = minPos; SetScrollPos(hWnd, (uMsg == WM_VSCROLL)?SB_VERT:SB_HORZ , newPos, TRUE); if( uMsg == WM_VSCROLL ) ScrollWindowEx(hWnd ,0 ,curPos - newPos, NULL, NULL, 0, NULL, SW_INVALIDATE | SW_ERASE | SW_SCROLLCHILDREN ); else ScrollWindowEx(hWnd ,curPos - newPos, 0, NULL, NULL, 0, NULL, SW_INVALIDATE | SW_ERASE | SW_SCROLLCHILDREN ); } /****************************************************************************** * CascadeWindows (USER32.@) Cascades MDI child 
windows * * RETURNS * Success: Number of cascaded windows. * Failure: 0 */ WORD WINAPI CascadeWindows (HWND hwndParent, UINT wFlags, const RECT *lpRect, UINT cKids, const HWND *lpKids) { FIXME("(%p,0x%08x,...,%u,...): stub\n", hwndParent, wFlags, cKids); return 0; } /*********************************************************************** * CascadeChildWindows (USER32.@) */ WORD WINAPI CascadeChildWindows( HWND parent, UINT flags ) { return CascadeWindows( parent, flags, NULL, 0, NULL ); } /****************************************************************************** * TileWindows (USER32.@) Tiles MDI child windows * * RETURNS * Success: Number of tiled windows. * Failure: 0 */ WORD WINAPI TileWindows (HWND hwndParent, UINT wFlags, const RECT *lpRect, UINT cKids, const HWND *lpKids) { FIXME("(%p,0x%08x,...,%u,...): stub\n", hwndParent, wFlags, cKids); return 0; } /*********************************************************************** * TileChildWindows (USER32.@) */ WORD WINAPI TileChildWindows( HWND parent, UINT flags ) { return TileWindows( parent, flags, NULL, 0, NULL ); } /************************************************************************ * "More Windows..." functionality */ /* MDI_MoreWindowsDlgProc * * This function will process the messages sent to the "More Windows..." * dialog. * Return values: 0 = cancel pressed * HWND = ok pressed or double-click in the list... * */ static INT_PTR WINAPI MDI_MoreWindowsDlgProc (HWND hDlg, UINT iMsg, WPARAM wParam, LPARAM lParam) { switch (iMsg) { case WM_INITDIALOG: { UINT widest = 0; UINT length; UINT i; MDICLIENTINFO *ci = get_client_info( (HWND)lParam ); HWND hListBox = GetDlgItem(hDlg, MDI_IDC_LISTBOX); for (i = 0; i < ci->nActiveChildren; i++) { WCHAR buffer[MDI_MAXTITLELENGTH]; if (!InternalGetWindowText( ci->child[i], buffer, sizeof(buffer)/sizeof(WCHAR) )) continue; SendMessageW(hListBox, LB_ADDSTRING, 0, (LPARAM)buffer ); SendMessageW(hListBox, LB_SETITEMDATA, i, (LPARAM)ci->child[i] ); length = strlenW(buffer); /* FIXME: should use GetTextExtentPoint */ if (length > widest) widest = length; } /* Make sure the horizontal scrollbar scrolls ok */ SendMessageW(hListBox, LB_SETHORIZONTALEXTENT, widest * 6, 0); /* Set the current selection */ SendMessageW(hListBox, LB_SETCURSEL, MDI_MOREWINDOWSLIMIT, 0); return TRUE; } case WM_COMMAND: switch (LOWORD(wParam)) { default: if (HIWORD(wParam) != LBN_DBLCLK) break; /* fall through */ case IDOK: { /* windows are sorted by menu ID, so we must return the * window associated to the given id */ HWND hListBox = GetDlgItem(hDlg, MDI_IDC_LISTBOX); UINT index = SendMessageW(hListBox, LB_GETCURSEL, 0, 0); LRESULT res = SendMessageW(hListBox, LB_GETITEMDATA, index, 0); EndDialog(hDlg, res); return TRUE; } case IDCANCEL: EndDialog(hDlg, 0); return TRUE; } break; } return FALSE; } /* * * MDI_MoreWindowsDialog * * Prompts the user with a listbox containing the opened * documents. The user can then choose a windows and click * on OK to set the current window to the one selected, or * CANCEL to cancel. The function returns a handle to the * selected window. */ static HWND MDI_MoreWindowsDialog(HWND hwnd) { LPCVOID template; HRSRC hRes; HANDLE hDlgTmpl; hRes = FindResourceA(user32_module, "MDI_MOREWINDOWS", (LPSTR)RT_DIALOG); if (hRes == 0) return 0; hDlgTmpl = LoadResource(user32_module, hRes ); if (hDlgTmpl == 0) return 0; template = LockResource( hDlgTmpl ); if (template == 0) return 0; return (HWND) DialogBoxIndirectParamA(user32_module, template, hwnd, MDI_MoreWindowsDlgProc, (LPARAM) hwnd); }
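/*
 * A minimal, standalone sketch of the grid arithmetic that MDITile() above uses
 * to place child windows: rows = floor(sqrt(total)), columns = total / rows, the
 * last column absorbs any leftover windows, and MDITILE_HORIZONTAL simply swaps
 * the two counts before the loop.  The names tile_cell and compute_tile_layout
 * are illustrative only and do not come from the Wine sources; this is just the
 * layout math, not Win32 code.
 */
#include <math.h>
#include <stdio.h>

struct tile_cell { int x, y, w, h; };

/* Fill cells[0..total-1] with a column-major tiling of a width x height area. */
static void compute_tile_layout(int total, int width, int height,
                                struct tile_cell *cells)
{
    if (total <= 0) return;

    int rows = (int)sqrt((double)total);
    int columns = total / rows;
    int xsize = width / columns;
    int ysize = height / rows;
    int placed = 0, x = 0;

    for (int c = 1; c <= columns && placed < total; c++)
    {
        if (c == columns)            /* last column takes the leftover windows */
        {
            rows = total - placed;
            ysize = height / rows;
        }
        int y = 0;
        for (int r = 1; r <= rows && placed < total; r++, placed++)
        {
            cells[placed].x = x;
            cells[placed].y = y;
            cells[placed].w = xsize;
            cells[placed].h = ysize;
            y += ysize;
        }
        x += xsize;
    }
}

int main(void)
{
    struct tile_cell cells[5];
    compute_tile_layout(5, 800, 600, cells);   /* e.g. 5 children in an 800x600 client area */
    for (int i = 0; i < 5; i++)
        printf("child %d: pos (%d,%d) size %dx%d\n",
               i, cells[i].x, cells[i].y, cells[i].w, cells[i].h);
    return 0;
}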
33778.c
//------------------------------------------------------------------------------ // GB_AsaxbitB: hard-coded saxpy-bitmap method for a semiring //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB_AxB_defs__times_rminus_int8.h" #ifndef GBCOMPACT //------------------------------------------------------------------------------ // C=A*B, C<M>=A*B, C<!M>=A*B: saxpy method, C is bitmap/full //------------------------------------------------------------------------------ #include "GB_AxB_saxpy3_template.h" GrB_Info GB (_AsaxbitB__times_rminus_int8) ( GrB_Matrix C, // bitmap or full const GrB_Matrix M, const bool Mask_comp, const bool Mask_struct, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix B, bool B_is_pattern, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_AxB_saxpy_template.c" return (GrB_SUCCESS) ; #endif } #endif
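/*
 * The generated kernel above is internal to SuiteSparse:GraphBLAS and operates
 * on the library's bitmap/full matrix representations.  The sketch below only
 * illustrates the "saxpy"-style accumulation pattern its comments refer to --
 * for each entry B(k,j), scale column k of A and accumulate into column j of C
 * under a generic (add, mult) semiring -- on plain dense row-major arrays.  The
 * names dense_semiring_saxpy, op_plus and op_times are illustrative and are not
 * GraphBLAS APIs; the conventional plus-times semiring is used as a sanity check.
 */
#include <stdint.h>
#include <stdio.h>

typedef int8_t (*binop_fn)(int8_t x, int8_t y);

static int8_t op_plus(int8_t x, int8_t y)  { return (int8_t)(x + y); }
static int8_t op_times(int8_t x, int8_t y) { return (int8_t)(x * y); }

/* C = A*B over the semiring (add_fn, mult_fn, identity); all matrices n x n. */
static void dense_semiring_saxpy(int n, const int8_t *A, const int8_t *B,
                                 int8_t *C, binop_fn add_fn, binop_fn mult_fn,
                                 int8_t identity)
{
    for (int p = 0; p < n * n; p++) C[p] = identity;

    /* saxpy form: for each B(k,j), accumulate A(:,k) * B(k,j) into C(:,j) */
    for (int j = 0; j < n; j++)
        for (int k = 0; k < n; k++)
        {
            int8_t bkj = B[k*n + j];
            for (int i = 0; i < n; i++)
                C[i*n + j] = add_fn(C[i*n + j], mult_fn(A[i*n + k], bkj));
        }
}

int main(void)
{
    const int8_t A[4] = { 1, 2, 3, 4 };
    const int8_t B[4] = { 5, 6, 7, 8 };
    int8_t C[4];

    dense_semiring_saxpy(2, A, B, C, op_plus, op_times, 0);
    printf("%d %d / %d %d\n", C[0], C[1], C[2], C[3]);   /* 19 22 / 43 50 */
    return 0;
}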
544708.c
/**************************************************************************** * * Copyright (c) 2015 Vijay Venkatraman. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name PX4 nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ #include <px4_defines.h> #include <string.h> #include <stdbool.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <errno.h> #include <sys/stat.h> #include <atomic_ops.h> #include "systemlib/param/param.h" #include <shmem.h> #include <drivers/drv_hrt.h> //#define SHMEM_DEBUG int mem_fd; unsigned char *map_base, *virt_addr; struct shmem_info *shmem_info_p; static void *map_memory(off_t target); int get_shmem_lock(const char *caller_file_name, int caller_line_number); void release_shmem_lock(void); void init_shared_memory(void); void copy_params_to_shmem(struct param_info_s *); void update_to_shmem(param_t param, union param_value_u value); int update_from_shmem(param_t param, union param_value_u *value); void update_index_from_shmem(void); uint64_t update_from_shmem_prev_time = 0, update_from_shmem_current_time = 0; static unsigned char krait_changed_index[MAX_SHMEM_PARAMS / 8 + 1]; // Small helper to get log2 for ints static unsigned log2_for_int(unsigned v) { unsigned r = 0; while (v >>= 1) { ++r; } return r; } struct param_wbuf_s { param_t param; union param_value_u val; bool unsaved; }; extern struct param_wbuf_s *param_find_changed(param_t param); static void *map_memory(off_t target) { return (void *)(target + LOCK_SIZE); } int get_shmem_lock(const char *caller_file_name, int caller_line_number) { // TODO: don't do this for now return 0; unsigned char *lock = (unsigned char *)(MAP_ADDRESS + LOCK_OFFSET); unsigned int i = 0; while (!atomic_compare_and_set(lock, 1, 0)) { PX4_INFO("Could not get lock, file name: %s, line number: %d.\n", caller_file_name, caller_line_number); i++; usleep(1000); if (i > 100) { break; } } if (i > 100) { return -1; } else { PX4_DEBUG("Lock acquired, file name: %s, line number: %d\n", caller_file_name, caller_line_number); } return 0; //got the lock } void release_shmem_lock(void) { // TODO: 
don't do this either return; unsigned char *lock = (unsigned char *)(MAP_ADDRESS + LOCK_OFFSET); *lock = 1; return; } void init_shared_memory(void) { //PX4_INFO("Value at lock address is %d\n", *(unsigned int*)0xfbfc000); virt_addr = map_memory(MAP_ADDRESS); shmem_info_p = (struct shmem_info *)virt_addr; //PX4_INFO("adsp memory mapped\n"); } void copy_params_to_shmem(struct param_info_s *param_info_base) { param_t param; unsigned int i; if (get_shmem_lock(__FILE__, __LINE__) != 0) { PX4_INFO("Could not get shmem lock\n"); return; } //else PX4_INFO("Got lock\n"); for (param = 0; param < param_count(); param++) { //{PX4_INFO("writing to offset %d\n", (unsigned char*)(shmem_info_p->adsp_params[param].name)-(unsigned char*)shmem_info_p);} struct param_wbuf_s *s = param_find_changed(param); if (s == NULL) { shmem_info_p->params_val[param] = param_info_base[param].val; } else { shmem_info_p->params_val[param] = s->val; } #ifdef SHMEM_DEBUG if (param_type(param) == PARAM_TYPE_INT32) { PX4_INFO("%d: written %d for param %s to shared mem", param, shmem_info_p->params_val[param].i, param_name(param)); } else if (param_type(param) == PARAM_TYPE_FLOAT) { PX4_INFO("%d: written %f for param %s to shared mem", param, shmem_info_p->params_val[param].f, param_name(param)); } #endif } for (i = 0; i < MAX_SHMEM_PARAMS / 8 + 1; i++) { shmem_info_p->adsp_changed_index[i] = 0; krait_changed_index[i] = 0; } release_shmem_lock(); //PX4_INFO("Released lock\n"); } /*update value and param's change bit in shared memory*/ void update_to_shmem(param_t param, union param_value_u value) { unsigned int byte_changed, bit_changed; if (!handle_in_range(param)) { return; } if (get_shmem_lock(__FILE__, __LINE__) != 0) { PX4_ERR("Could not get shmem lock\n"); return; } shmem_info_p->params_val[param] = value; byte_changed = param / 8; bit_changed = 1 << param % 8; shmem_info_p->adsp_changed_index[byte_changed] |= bit_changed; //PX4_INFO("set %d bit on adsp index[%d] to %d\n", bit_changed, byte_changed, shmem_info_p->adsp_changed_index[byte_changed]); #ifdef SHMEM_DEBUG if (param_type(param) == PARAM_TYPE_INT32) { PX4_INFO("Set value %d for param %s to shmem, set adsp index %d:%d\n", value.i, param_name(param), byte_changed, bit_changed); } else if (param_type(param) == PARAM_TYPE_FLOAT) { PX4_INFO("Set value %f for param %s to shmem, set adsp index %d:%d\n", value.f, param_name(param), byte_changed, bit_changed); } #endif release_shmem_lock(); } void update_index_from_shmem(void) { unsigned int i; if (get_shmem_lock(__FILE__, __LINE__) != 0) { PX4_ERR("Could not get shmem lock\n"); return; } PX4_DEBUG("Updating index from shmem\n"); for (i = 0; i < MAX_SHMEM_PARAMS / 8 + 1; i++) { // Check if any param has been changed. if (krait_changed_index[i] != shmem_info_p->krait_changed_index[i]) { // If a param has changed, we need to find out which one. // From the byte and bit that is different, we can resolve the param number. unsigned bit = log2_for_int(krait_changed_index[i] ^ shmem_info_p->krait_changed_index[i]); param_t param_to_get = i * 8 + bit; // Update our krait_changed_index as well. krait_changed_index[i] = shmem_info_p->krait_changed_index[i]; // FIXME: this is a hack but it gets the param so that it gets added // to the local list param_values in param_shmem.c. 
int32_t dummy; param_get(param_to_get, &dummy); } } release_shmem_lock(); } static void update_value_from_shmem(param_t param, union param_value_u *value) { unsigned int byte_changed, bit_changed; if (get_shmem_lock(__FILE__, __LINE__) != 0) { PX4_ERR("Could not get shmem lock\n"); return; } *value = shmem_info_p->params_val[param]; /*also clear the index since we are holding the lock*/ byte_changed = param / 8; bit_changed = 1 << param % 8; shmem_info_p->krait_changed_index[byte_changed] &= ~bit_changed; release_shmem_lock(); #ifdef SHMEM_DEBUG if (param_type(param) == PARAM_TYPE_INT32) { PX4_INFO("Got value %d for param %s from shmem, cleared krait index %d:%d\n", value->i, param_name(param), byte_changed, bit_changed); } else if (param_type(param) == PARAM_TYPE_FLOAT) { PX4_INFO("Got value %f for param %s from shmem, cleared krait index %d:%d\n", value->f, param_name(param), byte_changed, bit_changed); } #endif } int update_from_shmem(param_t param, union param_value_u *value) { unsigned int byte_changed, bit_changed; unsigned int retval = 0; if (!handle_in_range(param) || value == NULL) { return retval; } update_from_shmem_current_time = hrt_absolute_time(); if ((update_from_shmem_current_time - update_from_shmem_prev_time) > 1000000) { //update every 1 second update_from_shmem_prev_time = update_from_shmem_current_time; update_index_from_shmem(); } byte_changed = param / 8; bit_changed = 1 << param % 8; if (krait_changed_index[byte_changed] & bit_changed) { update_value_from_shmem(param, value); krait_changed_index[byte_changed] &= ~bit_changed; retval = 1; } //else {PX4_INFO("no change to param %s\n", param_name(param));} PX4_DEBUG("%s %d bit on krait changed index[%d]\n", (retval) ? "cleared" : "unchanged", bit_changed, byte_changed); return retval; }
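/*
 * A small standalone sketch of the changed-index bookkeeping used above: each
 * parameter owns one bit (byte = param / 8, bit = param % 8), and the changed
 * parameter number is recovered from the XOR of the old and new byte with the
 * same shift-count scan as log2_for_int().  As in the code above, only one
 * differing bit per byte is recovered per scan.  Names such as mark_changed()
 * and param_from_diff() are illustrative only; this is not part of the PX4 sources.
 */
#include <stdio.h>

#define N_PARAMS 64

static unsigned char changed_index[N_PARAMS / 8 + 1];

static void mark_changed(unsigned param)
{
    changed_index[param / 8] |= (unsigned char)(1u << (param % 8));
}

static int is_changed(unsigned param)
{
    return (changed_index[param / 8] >> (param % 8)) & 1u;
}

/* Recover a parameter number from a byte index and the XOR of old/new bytes. */
static unsigned param_from_diff(unsigned byte_index, unsigned char diff)
{
    unsigned bit = 0;
    while (diff >>= 1)          /* same idea as log2_for_int() above */
        ++bit;
    return byte_index * 8 + bit;
}

int main(void)
{
    mark_changed(42);                                   /* byte 5, bit 2 */
    printf("param 42 changed? %d\n", is_changed(42));   /* prints 1 */
    printf("recovered: %u\n", param_from_diff(5, changed_index[5] ^ 0x00));  /* prints 42 */
    return 0;
}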
582680.c
// // AOJ0150.c // // // Created by n_knuu on 2014/03/31. // // #include <stdio.h> #include <math.h> #define MAX 10000 int main(void) { int sieve[MAX+1]={0},i,j,num; for (i=2; i<=sqrt(MAX); i++) { if (sieve[i]==0) { for (j=i*2; j<=MAX; j+=i) sieve[j]=1; } } while (scanf("%d",&num)!=EOF&&num!=0) { if (num%2==0) num--; while (sieve[num]!=0||sieve[num-2]!=0) { num-=2; } printf("%d %d\n",num-2,num); } return 0; }
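/*
 * A tiny worked check of the twin-prime search above (illustrative only, not
 * part of the submitted solution): for n = 20 the largest pair p, p+2 with both
 * numbers prime and p+2 <= 20 is 17 19, which is exactly what the sieve-based
 * loop above prints.  Here the same answer is found by brute-force trial division.
 */
#include <stdio.h>

static int is_prime(int n)
{
    if (n < 2) return 0;
    for (int d = 2; d * d <= n; d++)
        if (n % d == 0) return 0;
    return 1;
}

int main(void)
{
    int n = 20;
    for (int p = n - 2; p >= 3; p--)       /* largest pair first */
        if (is_prime(p) && is_prime(p + 2))
        {
            printf("%d %d\n", p, p + 2);   /* prints: 17 19 */
            break;
        }
    return 0;
}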
102224.c
/* * This file is part of Freecell Solver. It is subject to the license terms in * the COPYING.txt file found in the top-level directory of this distribution * and at http://fc-solve.shlomifish.org/docs/distro/COPYING.html . No part of * Freecell Solver, including this file, may be copied, modified, propagated, * or distributed except according to the terms contained in the COPYING file. * * Copyright (c) 2012 Shlomi Fish */ // fcc_brfs_test.c - provide utility routines to test fcc_brfs.h #include "fcs_conf.h" #undef FCS_RCS_STATES #include "dbm_common.h" #include "delta_states_any.h" #include "fcc_brfs.h" #include "fcc_brfs_test.h" #include "render_state.h" static void fc_solve_state_string_to_enc( const fcs_dbm_variant_type local_variant, fcs_delta_stater *const delta, const char *const state_s_proto, fcs_encoded_state_buffer *const enc_state) { fcs_state_keyval_pair state; DECLARE_IND_BUF_T(state_indirect_stacks_buffer) fc_solve_initial_user_state_to_c(state_s_proto, &state, FREECELLS_NUM, STACKS_NUM, DECKS_NUM, state_indirect_stacks_buffer); fcs_init_and_encode_state(delta, local_variant, &(state), enc_state); } /* * The char * returned is malloc()ed and should be free()ed. */ DLLEXPORT int fc_solve_user_INTERNAL_find_fcc_start_points( const fcs_dbm_variant_type local_variant, const char *init_state_str_proto, const int start_state_moves_count, const fcs_fcc_move *const start_state_moves, fcs_FCC_start_point_result **const out_fcc_start_points, long *const out_num_new_positions) { fcs_state_keyval_pair init_state; fcs_encoded_state_buffer enc_state; fcs_state_locs_struct locs; fc_solve_init_locs(&locs); fcs_encoded_state_buffer min_by_sorting; void *tree_recycle_bin = NULL; DECLARE_IND_BUF_T(indirect_stacks_buffer) fc_solve_initial_user_state_to_c(init_state_str_proto, &init_state, FREECELLS_NUM, STACKS_NUM, DECKS_NUM, indirect_stacks_buffer); fcs_delta_stater delta; fc_solve_delta_stater_init(&delta, FCS_DBM_VARIANT_2FC_FREECELL, &(init_state.s), STACKS_NUM, FREECELLS_NUM PASS_ON_NOT_FC_ONLY(FCS_SEQ_BUILT_BY_ALTERNATE_COLOR)); fcs_init_and_encode_state(&delta, local_variant, &(init_state), &enc_state); fcs_FCC_start_points_list start_points_list = { .list = NULL, .recycle_bin = NULL}; meta_allocator meta_alloc; fc_solve_meta_compact_allocator_init(&meta_alloc); fc_solve_compact_allocator_init( &(start_points_list.allocator), &meta_alloc); var_AUTO(do_next_fcc_start_points_exist, fcc_brfs_kaz_tree_create(&meta_alloc, &tree_recycle_bin)); dict_t *does_min_by_sorting_exist = fcc_brfs_kaz_tree_create(&meta_alloc, &tree_recycle_bin); fcs_lru_cache does_state_exist_in_any_FCC_cache; cache_init(&does_state_exist_in_any_FCC_cache, 1000, &meta_alloc); compact_allocator moves_list_compact_alloc; fc_solve_compact_allocator_init(&(moves_list_compact_alloc), &(meta_alloc)); fcs_fcc_moves_seq_allocator moves_list_allocator = { .recycle_bin = NULL, .allocator = &(moves_list_compact_alloc)}; fcs_fcc_moves_seq start_state_moves_seq = { .count = start_state_moves_count, .moves_list = NULL}; { fcs_fcc_moves_list_item **moves_iter = &(start_state_moves_seq.moves_list); for (int i = 0; i < start_state_moves_count;) { if (i % FCS_FCC_NUM_MOVES_IN_ITEM == 0) { *(moves_iter) = fc_solve_fcc_alloc_moves_list_item(&moves_list_allocator); } (*moves_iter)->data.s[i % FCS_FCC_NUM_MOVES_IN_ITEM] = start_state_moves[i]; if ((++i) % FCS_FCC_NUM_MOVES_IN_ITEM == 0) { moves_iter = &((*moves_iter)->next); } } } add_start_point_context ctx = { .do_next_fcc_start_points_exist = do_next_fcc_start_points_exist, 
.next_start_points_list = &start_points_list, .moves_list_allocator = &moves_list_allocator, }; bool is_min_by_sorting_new; perform_FCC_brfs(local_variant, &(init_state), enc_state, &start_state_moves_seq, fc_solve_add_start_point_in_mem, &ctx, &is_min_by_sorting_new, &min_by_sorting, does_min_by_sorting_exist, &does_state_exist_in_any_FCC_cache, out_num_new_positions, &moves_list_allocator, &meta_alloc); const fcs_FCC_start_point *iter = start_points_list.list; size_t states_count = 0; while (iter) { states_count++; iter = iter->next; } fcs_FCC_start_point_result *const ret = *out_fcc_start_points = SMALLOC(ret, states_count + 1); ret[states_count].count = 0; iter = start_points_list.list; for (size_t i = 0; i < states_count; i++) { fcs_state_keyval_pair state; DECLARE_IND_BUF_T(state_indirect_stacks_buffer) ret[i].count = iter->moves_seq.count; ret[i].moves = SMALLOC(ret[i].moves, (size_t)ret[i].count); { fcs_fcc_moves_list_item *moves_iter = iter->moves_seq.moves_list; for (int moves_idx = 0; moves_idx < ret[i].count;) { ret[i].moves[moves_idx] = moves_iter->data.s[moves_idx % FCS_FCC_NUM_MOVES_IN_ITEM]; if ((++moves_idx) % FCS_FCC_NUM_MOVES_IN_ITEM == 0) { moves_iter = moves_iter->next; } } } fc_solve_delta_stater_decode_into_state( &delta, iter->enc_state.s, &(state), state_indirect_stacks_buffer); ret[i].state_as_string = SMALLOC(ret[i].state_as_string, 1000); FCS__RENDER_STATE(ret[i].state_as_string, &(state.s), &locs); iter = iter->next; } fc_solve_compact_allocator_finish(&(start_points_list.allocator)); fc_solve_compact_allocator_finish(&(moves_list_compact_alloc)); fc_solve_delta_stater_release(&delta); fc_solve_kaz_tree_destroy(do_next_fcc_start_points_exist); fc_solve_kaz_tree_destroy(does_min_by_sorting_exist); cache_destroy(&does_state_exist_in_any_FCC_cache); fc_solve_meta_compact_allocator_finish(&meta_alloc); return 0; } DLLEXPORT void fc_solve_user_INTERNAL_free_fcc_start_points( fcs_FCC_start_point_result *const fcc_start_points) { for (fcs_FCC_start_point_result *iter = fcc_start_points; iter->count; iter++) { free(iter->state_as_string); free(iter->moves); } free(fcc_start_points); } DLLEXPORT int fc_solve_user_INTERNAL_is_fcc_new( const fcs_dbm_variant_type local_variant, const char *init_state_str_proto, const char *start_state_str_proto, /* NULL-terminated */ const char **min_states, /* NULL-terminated */ const char **states_in_cache, bool *const out_is_fcc_new) { fcs_state_keyval_pair init_state; fcs_encoded_state_buffer enc_state; fcs_encoded_state_buffer start_enc_state; fcs_encoded_state_buffer min_by_sorting; void *tree_recycle_bin = NULL; DECLARE_IND_BUF_T(indirect_stacks_buffer) fc_solve_initial_user_state_to_c(init_state_str_proto, &init_state, FREECELLS_NUM, STACKS_NUM, DECKS_NUM, indirect_stacks_buffer); fcs_delta_stater delta; fc_solve_delta_stater_init(&delta, FCS_DBM_VARIANT_2FC_FREECELL, &(init_state.s), STACKS_NUM, FREECELLS_NUM PASS_ON_NOT_FC_ONLY(FCS_SEQ_BUILT_BY_ALTERNATE_COLOR)); fcs_init_and_encode_state(&delta, local_variant, &(init_state), &enc_state); fc_solve_state_string_to_enc( local_variant, &delta, start_state_str_proto, &(start_enc_state)); meta_allocator meta_alloc; fc_solve_meta_compact_allocator_init(&meta_alloc); fcs_FCC_start_points_list start_points_list = { .list = NULL, .recycle_bin = NULL}; fc_solve_compact_allocator_init( &(start_points_list.allocator), &meta_alloc); var_AUTO(do_next_fcc_start_points_exist, fcc_brfs_kaz_tree_create(&meta_alloc, &tree_recycle_bin)); dict_t *does_min_by_sorting_exist = 
fcc_brfs_kaz_tree_create(&meta_alloc, &tree_recycle_bin); compact_allocator temp_allocator; fc_solve_compact_allocator_init(&(temp_allocator), &meta_alloc); fcs_fcc_moves_seq_allocator moves_list_allocator = { .recycle_bin = NULL, .allocator = &(temp_allocator)}; /* Populate does_min_by_sorting_exist from min_states */ { const char **min_states_iter = min_states; for (; *(min_states_iter); min_states_iter++) { fcs_encoded_state_buffer *const min_enc_state = (fcs_encoded_state_buffer *)fcs_compact_alloc_ptr( &(temp_allocator), sizeof(*min_enc_state)); fc_solve_state_string_to_enc( local_variant, &delta, *(min_states_iter), min_enc_state); fc_solve_kaz_tree_alloc_insert( does_min_by_sorting_exist, min_enc_state); } } fcs_lru_cache does_state_exist_in_any_FCC_cache; cache_init(&does_state_exist_in_any_FCC_cache, 1000000, &meta_alloc); /* Populate does_state_exist_in_any_FCC_cache from states_in_cache */ { const char **min_states_iter = states_in_cache; for (; *(min_states_iter); min_states_iter++) { fcs_encoded_state_buffer *const min_enc_state = (fcs_encoded_state_buffer *)fcs_compact_alloc_ptr( &(temp_allocator), sizeof(*min_enc_state)); fc_solve_state_string_to_enc( local_variant, &delta, *(min_states_iter), min_enc_state); cache_insert( &does_state_exist_in_any_FCC_cache, min_enc_state, NULL, '\0'); } } fcs_fcc_moves_seq init_moves_seq = {.moves_list = NULL, .count = 0}; add_start_point_context ctx = { .do_next_fcc_start_points_exist = do_next_fcc_start_points_exist, .next_start_points_list = &start_points_list, .moves_list_allocator = &moves_list_allocator, }; long num_new_positions_temp; perform_FCC_brfs(local_variant, &(init_state), start_enc_state, &init_moves_seq, fc_solve_add_start_point_in_mem, &ctx, out_is_fcc_new, &min_by_sorting, does_min_by_sorting_exist, &does_state_exist_in_any_FCC_cache, &num_new_positions_temp, &moves_list_allocator, &meta_alloc); fc_solve_compact_allocator_finish(&(start_points_list.allocator)); fc_solve_compact_allocator_finish(&(temp_allocator)); fc_solve_delta_stater_release(&delta); fc_solve_kaz_tree_destroy(do_next_fcc_start_points_exist); fc_solve_kaz_tree_destroy(does_min_by_sorting_exist); cache_destroy(&does_state_exist_in_any_FCC_cache); fc_solve_meta_compact_allocator_finish(&meta_alloc); return 0; }
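/*
 * The loops above that build and read moves_seq pack moves into a chunked
 * ("unrolled") linked list: each node holds FCS_FCC_NUM_MOVES_IN_ITEM moves,
 * the slot inside the current node is i % FCS_FCC_NUM_MOVES_IN_ITEM, and the
 * iterator advances to ->next whenever a chunk boundary is crossed.  This is a
 * standalone sketch of that pattern with illustrative names (chunk_node, CHUNK,
 * pack, unpack); it is not Freecell Solver code, and nodes are leaked for brevity.
 */
#include <stdio.h>
#include <stdlib.h>

#define CHUNK 8

struct chunk_node
{
    unsigned char data[CHUNK];
    struct chunk_node *next;
};

/* Append count bytes, allocating a new node whenever a chunk boundary is hit. */
static struct chunk_node *pack(const unsigned char *src, int count)
{
    struct chunk_node *head = NULL, **iter = &head;

    for (int i = 0; i < count;)
    {
        if (i % CHUNK == 0)
            *iter = calloc(1, sizeof(**iter));
        (*iter)->data[i % CHUNK] = src[i];
        if ((++i) % CHUNK == 0)
            iter = &((*iter)->next);
    }
    return head;
}

static void unpack(const struct chunk_node *node, unsigned char *dst, int count)
{
    for (int i = 0; i < count;)
    {
        dst[i] = node->data[i % CHUNK];
        if ((++i) % CHUNK == 0)
            node = node->next;
    }
}

int main(void)
{
    unsigned char in[20], out[20];
    for (int i = 0; i < 20; i++) in[i] = (unsigned char)i;

    struct chunk_node *list = pack(in, 20);
    unpack(list, out, 20);
    printf("roundtrip ok: %d\n", out[19] == 19);   /* prints: roundtrip ok: 1 */
    return 0;
}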
125562.c
#line 3 "coll_ml_lex.c" #define YY_INT_ALIGNED short int /* A lexical scanner generated by flex */ #define yy_create_buffer coll_ml_config_yy_create_buffer #define yy_delete_buffer coll_ml_config_yy_delete_buffer #define yy_flex_debug coll_ml_config_yy_flex_debug #define yy_init_buffer coll_ml_config_yy_init_buffer #define yy_flush_buffer coll_ml_config_yy_flush_buffer #define yy_load_buffer_state coll_ml_config_yy_load_buffer_state #define yy_switch_to_buffer coll_ml_config_yy_switch_to_buffer #define yyin coll_ml_config_yyin #define yyleng coll_ml_config_yyleng #define yylex coll_ml_config_yylex #define yylineno coll_ml_config_yylineno #define yyout coll_ml_config_yyout #define yyrestart coll_ml_config_yyrestart #define yytext coll_ml_config_yytext #define yywrap coll_ml_config_yywrap #define yyalloc coll_ml_config_yyalloc #define yyrealloc coll_ml_config_yyrealloc #define yyfree coll_ml_config_yyfree #define FLEX_SCANNER #define YY_FLEX_MAJOR_VERSION 2 #define YY_FLEX_MINOR_VERSION 5 #define YY_FLEX_SUBMINOR_VERSION 35 #if YY_FLEX_SUBMINOR_VERSION > 0 #define FLEX_BETA #endif /* First, we deal with platform-specific or compiler-specific issues. */ /* begin standard C headers. */ #include <stdio.h> #include <string.h> #include <errno.h> #include <stdlib.h> /* end standard C headers. */ /* flex integer type definitions */ #ifndef FLEXINT_H #define FLEXINT_H /* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, * if you want the limit (max/min) macros for int types. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS 1 #endif #include <inttypes.h> typedef int8_t flex_int8_t; typedef uint8_t flex_uint8_t; typedef int16_t flex_int16_t; typedef uint16_t flex_uint16_t; typedef int32_t flex_int32_t; typedef uint32_t flex_uint32_t; #else typedef signed char flex_int8_t; typedef short int flex_int16_t; typedef int flex_int32_t; typedef unsigned char flex_uint8_t; typedef unsigned short int flex_uint16_t; typedef unsigned int flex_uint32_t; #endif /* ! C99 */ /* Limits of integral types. */ #ifndef INT8_MIN #define INT8_MIN (-128) #endif #ifndef INT16_MIN #define INT16_MIN (-32767-1) #endif #ifndef INT32_MIN #define INT32_MIN (-2147483647-1) #endif #ifndef INT8_MAX #define INT8_MAX (127) #endif #ifndef INT16_MAX #define INT16_MAX (32767) #endif #ifndef INT32_MAX #define INT32_MAX (2147483647) #endif #ifndef UINT8_MAX #define UINT8_MAX (255U) #endif #ifndef UINT16_MAX #define UINT16_MAX (65535U) #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #endif /* ! FLEXINT_H */ #ifdef __cplusplus /* The "const" storage-class-modifier is valid. */ #define YY_USE_CONST #else /* ! __cplusplus */ /* C99 requires __STDC__ to be defined as 1. */ #if defined (__STDC__) #define YY_USE_CONST #endif /* defined (__STDC__) */ #endif /* ! __cplusplus */ #ifdef YY_USE_CONST #define yyconst const #else #define yyconst #endif /* Returned upon end-of-file. */ #define YY_NULL 0 /* Promotes a possibly negative, possibly signed char to an unsigned * integer for use as an array index. If the signed char is negative, * we want to instead treat it as an 8-bit unsigned char, hence the * double cast. */ #define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c) /* Enter a start condition. This macro really ought to take a parameter, * but we do it the disgusting crufty way forced on us by the ()-less * definition of BEGIN. 
*/ #define BEGIN (yy_start) = 1 + 2 * /* Translate the current start state into a value that can be later handed * to BEGIN to return to the state. The YYSTATE alias is for lex * compatibility. */ #define YY_START (((yy_start) - 1) / 2) #define YYSTATE YY_START /* Action number for EOF rule of a given start state. */ #define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) /* Special action meaning "start processing a new file". */ #define YY_NEW_FILE coll_ml_config_yyrestart(coll_ml_config_yyin ) #define YY_END_OF_BUFFER_CHAR 0 /* Size of default input buffer. */ #ifndef YY_BUF_SIZE #define YY_BUF_SIZE 16384 #endif /* The state buf must be large enough to hold one state per character in the main buffer. */ #define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) #ifndef YY_TYPEDEF_YY_BUFFER_STATE #define YY_TYPEDEF_YY_BUFFER_STATE typedef struct yy_buffer_state *YY_BUFFER_STATE; #endif extern int coll_ml_config_yyleng; extern FILE *coll_ml_config_yyin, *coll_ml_config_yyout; #define EOB_ACT_CONTINUE_SCAN 0 #define EOB_ACT_END_OF_FILE 1 #define EOB_ACT_LAST_MATCH 2 #define YY_LESS_LINENO(n) /* Return all but the first "n" matched characters back to the input stream. */ #define yyless(n) \ do \ { \ /* Undo effects of setting up coll_ml_config_yytext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ *yy_cp = (yy_hold_char); \ YY_RESTORE_YY_MORE_OFFSET \ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ YY_DO_BEFORE_ACTION; /* set up coll_ml_config_yytext again */ \ } \ while ( 0 ) #define unput(c) yyunput( c, (yytext_ptr) ) #ifndef YY_TYPEDEF_YY_SIZE_T #define YY_TYPEDEF_YY_SIZE_T typedef size_t yy_size_t; #endif #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE struct yy_buffer_state { FILE *yy_input_file; char *yy_ch_buf; /* input buffer */ char *yy_buf_pos; /* current position in input buffer */ /* Size of input buffer in bytes, not including room for EOB * characters. */ yy_size_t yy_buf_size; /* Number of characters read into yy_ch_buf, not including EOB * characters. */ int yy_n_chars; /* Whether we "own" the buffer - i.e., we know we created it, * and can realloc() it to grow it, and should free() it to * delete it. */ int yy_is_our_buffer; /* Whether this is an "interactive" input source; if so, and * if we're using stdio for input, then we want to use getc() * instead of fread(), to make sure we stop fetching input after * each newline. */ int yy_is_interactive; /* Whether we're considered to be at the beginning of a line. * If so, '^' rules will be active on the next match, otherwise * not. */ int yy_at_bol; int yy_bs_lineno; /**< The line count. */ int yy_bs_column; /**< The column count. */ /* Whether to try to fill the input buffer when we reach the * end of it. */ int yy_fill_buffer; int yy_buffer_status; #define YY_BUFFER_NEW 0 #define YY_BUFFER_NORMAL 1 /* When an EOF's been seen but there's still some text to process * then we mark the buffer as YY_EOF_PENDING, to indicate that we * shouldn't try reading from the input source any more. We might * still have a bunch of tokens to match, though, because of * possible backing-up. * * When we actually see the EOF, we change the status to "new" * (via coll_ml_config_yyrestart()), so that the user can continue scanning by * just pointing coll_ml_config_yyin at a new input file. */ #define YY_BUFFER_EOF_PENDING 2 }; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ /* Stack of input buffers. */ static size_t yy_buffer_stack_top = 0; /**< index of top of stack. 
*/ static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */ static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */ /* We provide macros for accessing buffer states in case in the * future we want to put the buffer states in a more general * "scanner state". * * Returns the top of the stack, or NULL. */ #define YY_CURRENT_BUFFER ( (yy_buffer_stack) \ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \ : NULL) /* Same as previous macro, but useful when we know that the buffer stack is not * NULL or when we need an lvalue. For internal use only. */ #define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)] /* yy_hold_char holds the character lost when coll_ml_config_yytext is formed. */ static char yy_hold_char; static int yy_n_chars; /* number of characters read into yy_ch_buf */ int coll_ml_config_yyleng; /* Points to current character in buffer. */ static char *yy_c_buf_p = (char *) 0; static int yy_init = 0; /* whether we need to initialize */ static int yy_start = 0; /* start state number */ /* Flag which is used to allow coll_ml_config_yywrap()'s to do buffer switches * instead of setting up a fresh coll_ml_config_yyin. A bit of a hack ... */ static int yy_did_buffer_switch_on_eof; void coll_ml_config_yyrestart (FILE *input_file ); void coll_ml_config_yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ); YY_BUFFER_STATE coll_ml_config_yy_create_buffer (FILE *file,int size ); void coll_ml_config_yy_delete_buffer (YY_BUFFER_STATE b ); void coll_ml_config_yy_flush_buffer (YY_BUFFER_STATE b ); void coll_ml_config_yypush_buffer_state (YY_BUFFER_STATE new_buffer ); void coll_ml_config_yypop_buffer_state (void ); static void coll_ml_config_yyensure_buffer_stack (void ); static void coll_ml_config_yy_load_buffer_state (void ); static void coll_ml_config_yy_init_buffer (YY_BUFFER_STATE b,FILE *file ); #define YY_FLUSH_BUFFER coll_ml_config_yy_flush_buffer(YY_CURRENT_BUFFER ) YY_BUFFER_STATE coll_ml_config_yy_scan_buffer (char *base,yy_size_t size ); YY_BUFFER_STATE coll_ml_config_yy_scan_string (yyconst char *yy_str ); YY_BUFFER_STATE coll_ml_config_yy_scan_bytes (yyconst char *bytes,int len ); void *coll_ml_config_yyalloc (yy_size_t ); void *coll_ml_config_yyrealloc (void *,yy_size_t ); void coll_ml_config_yyfree (void * ); #define yy_new_buffer coll_ml_config_yy_create_buffer #define yy_set_interactive(is_interactive) \ { \ if ( ! YY_CURRENT_BUFFER ){ \ coll_ml_config_yyensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ coll_ml_config_yy_create_buffer(coll_ml_config_yyin,YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ } #define yy_set_bol(at_bol) \ { \ if ( ! 
YY_CURRENT_BUFFER ){\ coll_ml_config_yyensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ coll_ml_config_yy_create_buffer(coll_ml_config_yyin,YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ } #define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) /* Begin user sect3 */ typedef unsigned char YY_CHAR; FILE *coll_ml_config_yyin = (FILE *) 0, *coll_ml_config_yyout = (FILE *) 0; typedef int yy_state_type; extern int coll_ml_config_yylineno; int coll_ml_config_yylineno = 1; extern char *coll_ml_config_yytext; #define yytext_ptr coll_ml_config_yytext static yy_state_type yy_get_previous_state (void ); static yy_state_type yy_try_NUL_trans (yy_state_type current_state ); static int yy_get_next_buffer (void ); static void yy_fatal_error (yyconst char msg[] ); /* Done after the current pattern has been matched and before the * corresponding action - sets up coll_ml_config_yytext. */ #define YY_DO_BEFORE_ACTION \ (yytext_ptr) = yy_bp; \ coll_ml_config_yyleng = (size_t) (yy_cp - yy_bp); \ (yy_hold_char) = *yy_cp; \ *yy_cp = '\0'; \ (yy_c_buf_p) = yy_cp; #define YY_NUM_RULES 25 #define YY_END_OF_BUFFER 26 /* This struct is not used in this scanner, but its presence is necessary. */ struct yy_trans_info { flex_int32_t yy_verify; flex_int32_t yy_nxt; }; static yyconst flex_int16_t yy_acclist[94] = { 0, 5, 5, 26, 24, 25, 20, 24, 25, 1, 25, 24, 25, 21, 24, 25, 24, 25, 14, 24, 25, 19, 24, 25, 9, 24, 25, 5, 25, 7, 25, 6, 25, 17, 25, 17, 25, 16, 25, 17, 25, 16399, 12, 25, 12, 25, 11, 25, 12, 25,16394, 25, 25, 25, 25, 25, 8215, 25,16407, 25, 22, 25, 8215, 25,16407, 20, 1, 14, 19, 9, 2, 21, 4, 14, 19, 9, 5, 6, 6, 8,16399, 8207,16394, 8202, 18, 13, 8215,16407, 8215, 22, 8215, 16407, 8215, 3 } ; static yyconst flex_int16_t yy_accept[88] = { 0, 1, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 6, 9, 11, 13, 16, 18, 21, 24, 27, 29, 31, 33, 35, 37, 39, 42, 44, 46, 48, 51, 52, 53, 54, 55, 56, 59, 60, 62, 65, 66, 67, 68, 69, 70, 70, 71, 72, 73, 73, 74, 75, 76, 77, 78, 79, 80, 80, 81, 81, 82, 82, 83, 83, 84, 84, 84, 84, 85, 85, 85, 85, 86, 88, 89, 89, 90, 92, 93, 93, 94, 94 } ; static yyconst flex_int32_t yy_ec[256] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 5, 1, 1, 1, 1, 1, 1, 6, 1, 1, 7, 7, 8, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 9, 10, 11, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 13, 14, 1, 7, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 } ; static yyconst flex_int32_t yy_meta[15] = { 0, 1, 2, 3, 2, 1, 4, 5, 5, 1, 1, 6, 1, 5, 7 } ; static yyconst flex_int16_t yy_base[101] = { 0, 0, 0, 12, 13, 19, 0, 33, 0, 46, 56, 66, 0, 79, 82, 175, 203, 85, 203, 169, 164, 43, 50, 51, 57, 0, 203, 56, 203, 61, 203, 96, 203, 67, 203, 106, 203, 157, 164, 147, 152, 152, 119, 203, 117, 0, 203, 70, 71, 74, 115, 203, 110, 203, 113, 75, 88, 89, 0, 93, 96, 203, 101, 0, 0, 203, 107, 0, 0, 203, 104, 111, 0, 203, 99, 109, 0, 203, 104, 94, 0, 203, 64, 54, 14, 203, 203, 123, 130, 137, 144, 151, 158, 164, 168, 172, 177, 179, 185, 188, 195 } ; static yyconst flex_int16_t yy_def[101] = 
{ 0, 86, 1, 87, 87, 86, 5, 86, 7, 88, 88, 88, 11, 89, 89, 86, 86, 86, 86, 90, 86, 86, 86, 86, 86, 91, 86, 92, 86, 93, 86, 93, 86, 94, 86, 94, 86, 95, 96, 97, 98, 99, 99, 86, 42, 17, 86, 86, 86, 86, 90, 86, 86, 86, 100, 86, 86, 86, 91, 92, 92, 86, 93, 31, 31, 86, 94, 35, 35, 86, 95, 96, 71, 86, 97, 98, 75, 86, 99, 99, 42, 86, 42, 42, 100, 86, 0, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86 } ; static yyconst flex_int16_t yy_nxt[218] = { 0, 16, 17, 18, 17, 19, 16, 20, 21, 22, 23, 16, 24, 16, 16, 26, 26, 85, 27, 27, 28, 29, 30, 29, 28, 28, 31, 31, 28, 28, 28, 28, 31, 28, 32, 33, 34, 33, 32, 32, 35, 35, 32, 32, 32, 32, 35, 32, 37, 53, 37, 54, 55, 56, 55, 56, 83, 38, 37, 57, 37, 57, 60, 62, 61, 62, 83, 38, 39, 66, 39, 66, 55, 56, 55, 56, 57, 55, 57, 55, 40, 42, 43, 44, 42, 43, 44, 45, 46, 45, 56, 57, 56, 57, 47, 48, 79, 49, 64, 86, 64, 86, 60, 62, 61, 62, 79, 65, 68, 66, 68, 66, 77, 75, 73, 71, 85, 52, 51, 83, 69, 80, 81, 82, 25, 25, 25, 25, 25, 25, 25, 36, 36, 36, 36, 36, 36, 36, 41, 41, 41, 41, 41, 41, 41, 50, 50, 50, 50, 50, 50, 50, 58, 58, 79, 77, 58, 58, 58, 59, 59, 75, 59, 59, 59, 59, 63, 73, 71, 63, 67, 52, 51, 67, 70, 86, 86, 86, 70, 72, 72, 74, 86, 86, 86, 86, 74, 76, 76, 78, 78, 86, 78, 78, 78, 78, 84, 84, 84, 84, 84, 84, 84, 15, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86 } ; static yyconst flex_int16_t yy_chk[218] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 4, 84, 3, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9, 21, 9, 21, 22, 23, 22, 23, 83, 9, 10, 24, 10, 24, 27, 29, 27, 29, 82, 10, 11, 33, 11, 33, 47, 48, 47, 48, 49, 55, 49, 55, 11, 13, 13, 13, 14, 14, 14, 17, 17, 17, 56, 57, 56, 57, 17, 17, 79, 17, 31, 59, 31, 59, 60, 62, 60, 62, 78, 31, 35, 66, 35, 66, 75, 74, 71, 70, 54, 52, 50, 44, 35, 42, 42, 42, 87, 87, 87, 87, 87, 87, 87, 88, 88, 88, 88, 88, 88, 88, 89, 89, 89, 89, 89, 89, 89, 90, 90, 90, 90, 90, 90, 90, 91, 91, 41, 40, 91, 91, 91, 92, 92, 39, 92, 92, 92, 92, 93, 38, 37, 93, 94, 20, 19, 94, 95, 15, 0, 0, 95, 96, 96, 97, 0, 0, 0, 0, 97, 98, 98, 99, 99, 0, 99, 99, 99, 99, 100, 100, 100, 100, 100, 100, 100, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86 } ; extern int coll_ml_config_yy_flex_debug; int coll_ml_config_yy_flex_debug = 0; static yy_state_type *yy_state_buf=0, *yy_state_ptr=0; static char *yy_full_match; static int yy_lp; static int yy_looking_for_trail_begin = 0; static int yy_full_lp; static int *yy_full_state; #define YY_TRAILING_MASK 0x2000 #define YY_TRAILING_HEAD_MASK 0x4000 #define REJECT \ { \ *yy_cp = (yy_hold_char); /* undo effects of setting up coll_ml_config_yytext */ \ yy_cp = (yy_full_match); /* restore poss. backed-over text */ \ (yy_lp) = (yy_full_lp); /* restore orig. accepting pos. */ \ (yy_state_ptr) = (yy_full_state); /* restore orig. state */ \ yy_current_state = *(yy_state_ptr); /* restore curr. 
state */ \ ++(yy_lp); \ goto find_rule; \ } #define yymore() yymore_used_but_not_detected #define YY_MORE_ADJ 0 #define YY_RESTORE_YY_MORE_OFFSET char *coll_ml_config_yytext; #line 1 "coll_ml_lex.l" #line 4 "coll_ml_lex.l" #include "opal_config.h" #include <stdio.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "coll_ml_lex.h" BEGIN_C_DECLS /* * local functions */ static int finish_parsing(void) ; static int coll_ml_config_yywrap(void); END_C_DECLS /* * global variables */ int coll_ml_config_yynewlines = 1; bool coll_ml_config_parse_done = false; char *coll_ml_config_string = NULL; #define yyterminate() \ return finish_parsing() #line 606 "coll_ml_lex.c" #define INITIAL 0 #define comment 1 #define section_name 2 #define collective_name 3 #define section_end 4 #define collective_end 5 #define value 6 #ifndef YY_NO_UNISTD_H /* Special case for "unistd.h", since it is non-ANSI. We include it way * down here because we want the user's section 1 to have been scanned first. * The user has a chance to override it with an option. */ #include <unistd.h> #endif #ifndef YY_EXTRA_TYPE #define YY_EXTRA_TYPE void * #endif static int yy_init_globals (void ); /* Accessor methods to globals. These are made visible to non-reentrant scanners for convenience. */ int coll_ml_config_yylex_destroy (void ); int coll_ml_config_yyget_debug (void ); void coll_ml_config_yyset_debug (int debug_flag ); YY_EXTRA_TYPE coll_ml_config_yyget_extra (void ); void coll_ml_config_yyset_extra (YY_EXTRA_TYPE user_defined ); FILE *coll_ml_config_yyget_in (void ); void coll_ml_config_yyset_in (FILE * in_str ); FILE *coll_ml_config_yyget_out (void ); void coll_ml_config_yyset_out (FILE * out_str ); int coll_ml_config_yyget_leng (void ); char *coll_ml_config_yyget_text (void ); int coll_ml_config_yyget_lineno (void ); void coll_ml_config_yyset_lineno (int line_number ); /* Macros after this point can all be overridden by user definitions in * section 1. */ #ifndef YY_SKIP_YYWRAP #ifdef __cplusplus extern "C" int coll_ml_config_yywrap (void ); #else extern int coll_ml_config_yywrap (void ); #endif #endif #ifndef yytext_ptr static void yy_flex_strncpy (char *,yyconst char *,int ); #endif #ifdef YY_NEED_STRLEN static int yy_flex_strlen (yyconst char * ); #endif #ifndef YY_NO_INPUT #ifdef __cplusplus static int yyinput (void ); #else static int input (void ); #endif #endif /* Amount of stuff to slurp up with each read. */ #ifndef YY_READ_BUF_SIZE #define YY_READ_BUF_SIZE 8192 #endif /* Copy whatever the last rule matched to the standard output. */ #ifndef ECHO /* This used to be an fputs(), but since the string might contain NUL's, * we now use fwrite(). */ #define ECHO fwrite( coll_ml_config_yytext, coll_ml_config_yyleng, 1, coll_ml_config_yyout ) #endif /* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, * is returned in "result". 
*/ #ifndef YY_INPUT #define YY_INPUT(buf,result,max_size) \ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \ { \ int c = '*'; \ int n; \ for ( n = 0; n < max_size && \ (c = getc( coll_ml_config_yyin )) != EOF && c != '\n'; ++n ) \ buf[n] = (char) c; \ if ( c == '\n' ) \ buf[n++] = (char) c; \ if ( c == EOF && ferror( coll_ml_config_yyin ) ) \ YY_FATAL_ERROR( "input in flex scanner failed" ); \ result = n; \ } \ else \ { \ errno=0; \ while ( (result = fread(buf, 1, max_size, coll_ml_config_yyin))==0 && ferror(coll_ml_config_yyin)) \ { \ if( errno != EINTR) \ { \ YY_FATAL_ERROR( "input in flex scanner failed" ); \ break; \ } \ errno=0; \ clearerr(coll_ml_config_yyin); \ } \ }\ \ #endif /* No semi-colon after return; correct usage is to write "yyterminate();" - * we don't want an extra ';' after the "return" because that will cause * some compilers to complain about unreachable statements. */ #ifndef yyterminate #define yyterminate() return YY_NULL #endif /* Number of entries by which start-condition stack grows. */ #ifndef YY_START_STACK_INCR #define YY_START_STACK_INCR 25 #endif /* Report a fatal error. */ #ifndef YY_FATAL_ERROR #define YY_FATAL_ERROR(msg) yy_fatal_error( msg ) #endif /* end tables serialization structures and prototypes */ /* Default declaration of generated scanner - a define so the user can * easily add parameters. */ #ifndef YY_DECL #define YY_DECL_IS_OURS 1 extern int coll_ml_config_yylex (void); #define YY_DECL int coll_ml_config_yylex (void) #endif /* !YY_DECL */ /* Code executed at the beginning of each rule, after coll_ml_config_yytext and coll_ml_config_yyleng * have been set up. */ #ifndef YY_USER_ACTION #define YY_USER_ACTION #endif /* Code executed at the end of each rule. */ #ifndef YY_BREAK #define YY_BREAK break; #endif #define YY_RULE_SETUP \ YY_USER_ACTION /** The main scanner function which does all the work. */ YY_DECL { register yy_state_type yy_current_state; register char *yy_cp, *yy_bp; register int yy_act; #line 46 "coll_ml_lex.l" #line 795 "coll_ml_lex.c" if ( !(yy_init) ) { (yy_init) = 1; #ifdef YY_USER_INIT YY_USER_INIT; #endif /* Create the reject buffer large enough to save one state per allowed character. */ if ( ! (yy_state_buf) ) (yy_state_buf) = (yy_state_type *)coll_ml_config_yyalloc(YY_STATE_BUF_SIZE ); if ( ! (yy_state_buf) ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yylex()" ); if ( ! (yy_start) ) (yy_start) = 1; /* first start state */ if ( ! coll_ml_config_yyin ) coll_ml_config_yyin = stdin; if ( ! coll_ml_config_yyout ) coll_ml_config_yyout = stdout; if ( ! YY_CURRENT_BUFFER ) { coll_ml_config_yyensure_buffer_stack (); YY_CURRENT_BUFFER_LVALUE = coll_ml_config_yy_create_buffer(coll_ml_config_yyin,YY_BUF_SIZE ); } coll_ml_config_yy_load_buffer_state( ); } while ( 1 ) /* loops until end-of-file is reached */ { yy_cp = (yy_c_buf_p); /* Support of coll_ml_config_yytext. */ *yy_cp = (yy_hold_char); /* yy_bp points to the position in yy_ch_buf of the start of * the current run. 
*/ yy_bp = yy_cp; yy_current_state = (yy_start); (yy_state_ptr) = (yy_state_buf); *(yy_state_ptr)++ = yy_current_state; yy_match: do { register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; if ( yy_current_state >= 87 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; *(yy_state_ptr)++ = yy_current_state; ++yy_cp; } while ( yy_base[yy_current_state] != 203 ); yy_find_action: yy_current_state = *--(yy_state_ptr); (yy_lp) = yy_accept[yy_current_state]; find_rule: /* we branch to this label when backing up */ for ( ; ; ) /* until we find what rule we matched */ { if ( (yy_lp) && (yy_lp) < yy_accept[yy_current_state + 1] ) { yy_act = yy_acclist[(yy_lp)]; if ( yy_act & YY_TRAILING_HEAD_MASK || (yy_looking_for_trail_begin) ) { if ( yy_act == (yy_looking_for_trail_begin) ) { (yy_looking_for_trail_begin) = 0; yy_act &= ~YY_TRAILING_HEAD_MASK; break; } } else if ( yy_act & YY_TRAILING_MASK ) { (yy_looking_for_trail_begin) = yy_act & ~YY_TRAILING_MASK; (yy_looking_for_trail_begin) |= YY_TRAILING_HEAD_MASK; } else { (yy_full_match) = yy_cp; (yy_full_state) = (yy_state_ptr); (yy_full_lp) = (yy_lp); break; } ++(yy_lp); goto find_rule; } --yy_cp; yy_current_state = *--(yy_state_ptr); (yy_lp) = yy_accept[yy_current_state]; } YY_DO_BEFORE_ACTION; do_action: /* This label is used only to access EOF actions. */ switch ( yy_act ) { /* beginning of action switch */ case 1: /* rule 1 can match eol */ YY_RULE_SETUP #line 48 "coll_ml_lex.l" { ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 2: /* rule 2 can match eol */ YY_RULE_SETUP #line 50 "coll_ml_lex.l" { ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 3: /* rule 3 can match eol */ YY_RULE_SETUP #line 52 "coll_ml_lex.l" { ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 4: YY_RULE_SETUP #line 55 "coll_ml_lex.l" { BEGIN(comment); return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 5: YY_RULE_SETUP #line 57 "coll_ml_lex.l" ; /* Eat up non '*'s */ YY_BREAK case 6: YY_RULE_SETUP #line 58 "coll_ml_lex.l" ; /* Eat '*'s not followed by a '/' */ YY_BREAK case 7: /* rule 7 can match eol */ YY_RULE_SETUP #line 59 "coll_ml_lex.l" { ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 8: YY_RULE_SETUP #line 61 "coll_ml_lex.l" { BEGIN(INITIAL); /* Done with block comment */ return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 9: YY_RULE_SETUP #line 64 "coll_ml_lex.l" { BEGIN(collective_name); } YY_BREAK case 10: YY_RULE_SETUP #line 65 "coll_ml_lex.l" { BEGIN(collective_end); return COLL_ML_CONFIG_PARSE_COLLECTIVE; } YY_BREAK case 11: /* rule 11 can match eol */ YY_RULE_SETUP #line 68 "coll_ml_lex.l" { ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_ERROR; } YY_BREAK case 12: YY_RULE_SETUP #line 70 "coll_ml_lex.l" { return COLL_ML_CONFIG_PARSE_ERROR; } YY_BREAK case 13: /* rule 13 can match eol */ YY_RULE_SETUP #line 71 "coll_ml_lex.l" { BEGIN(INITIAL); ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 14: YY_RULE_SETUP #line 76 "coll_ml_lex.l" { BEGIN(section_name); } YY_BREAK case 15: YY_RULE_SETUP #line 77 "coll_ml_lex.l" { BEGIN(section_end); return COLL_ML_CONFIG_PARSE_SECTION; } YY_BREAK case 16: /* rule 16 can match eol */ YY_RULE_SETUP #line 80 "coll_ml_lex.l" { ++coll_ml_config_yynewlines; return 
COLL_ML_CONFIG_PARSE_ERROR; } YY_BREAK case 17: YY_RULE_SETUP #line 82 "coll_ml_lex.l" { return COLL_ML_CONFIG_PARSE_ERROR; } YY_BREAK case 18: /* rule 18 can match eol */ YY_RULE_SETUP #line 83 "coll_ml_lex.l" { BEGIN(INITIAL); ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 19: YY_RULE_SETUP #line 88 "coll_ml_lex.l" { BEGIN(value); return COLL_ML_CONFIG_PARSE_EQUAL; } YY_BREAK case 20: YY_RULE_SETUP #line 90 "coll_ml_lex.l" ; /* whitespace */ YY_BREAK case 21: YY_RULE_SETUP #line 91 "coll_ml_lex.l" { return COLL_ML_CONFIG_PARSE_SINGLE_WORD; } YY_BREAK case 22: /* rule 22 can match eol */ YY_RULE_SETUP #line 93 "coll_ml_lex.l" { BEGIN(INITIAL); ++coll_ml_config_yynewlines; return COLL_ML_CONFIG_PARSE_NEWLINE; } YY_BREAK case 23: YY_RULE_SETUP #line 96 "coll_ml_lex.l" { return COLL_ML_CONFIG_PARSE_VALUE; } YY_BREAK case 24: YY_RULE_SETUP #line 99 "coll_ml_lex.l" { return COLL_ML_CONFIG_PARSE_ERROR; } YY_BREAK case 25: YY_RULE_SETUP #line 100 "coll_ml_lex.l" ECHO; YY_BREAK #line 1063 "coll_ml_lex.c" case YY_STATE_EOF(INITIAL): case YY_STATE_EOF(comment): case YY_STATE_EOF(section_name): case YY_STATE_EOF(collective_name): case YY_STATE_EOF(section_end): case YY_STATE_EOF(collective_end): case YY_STATE_EOF(value): yyterminate(); case YY_END_OF_BUFFER: { /* Amount of text matched not including the EOB char. */ int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1; /* Undo the effects of YY_DO_BEFORE_ACTION. */ *yy_cp = (yy_hold_char); YY_RESTORE_YY_MORE_OFFSET if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW ) { /* We're scanning a new file or input source. It's * possible that this happened because the user * just pointed coll_ml_config_yyin at a new source and called * coll_ml_config_yylex(). If so, then we have to assure * consistency between YY_CURRENT_BUFFER and our * globals. Here is the right place to do so, because * this is the first action (other than possibly a * back-up) that will match for the new input source. */ (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; YY_CURRENT_BUFFER_LVALUE->yy_input_file = coll_ml_config_yyin; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; } /* Note that here we test for yy_c_buf_p "<=" to the position * of the first EOB in the buffer, since yy_c_buf_p will * already have been incremented past the NUL character * (since all states make transitions on EOB to the * end-of-buffer state). Contrast this with the test * in input(). */ if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) { /* This was really a NUL. */ yy_state_type yy_next_state; (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; yy_current_state = yy_get_previous_state( ); /* Okay, we're now positioned to make the NUL * transition. We couldn't have * yy_get_previous_state() go ahead and do it * for us because it doesn't know how to deal * with the possibility of jamming (and we don't * want to build jamming into it because then it * will run more slowly). */ yy_next_state = yy_try_NUL_trans( yy_current_state ); yy_bp = (yytext_ptr) + YY_MORE_ADJ; if ( yy_next_state ) { /* Consume the NUL. 
*/ yy_cp = ++(yy_c_buf_p); yy_current_state = yy_next_state; goto yy_match; } else { yy_cp = (yy_c_buf_p); goto yy_find_action; } } else switch ( yy_get_next_buffer( ) ) { case EOB_ACT_END_OF_FILE: { (yy_did_buffer_switch_on_eof) = 0; if ( coll_ml_config_yywrap( ) ) { /* Note: because we've taken care in * yy_get_next_buffer() to have set up * coll_ml_config_yytext, we can now set up * yy_c_buf_p so that if some total * hoser (like flex itself) wants to * call the scanner after we return the * YY_NULL, it'll still work - another * YY_NULL will get returned. */ (yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ; yy_act = YY_STATE_EOF(YY_START); goto do_action; } else { if ( ! (yy_did_buffer_switch_on_eof) ) YY_NEW_FILE; } break; } case EOB_ACT_CONTINUE_SCAN: (yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text; yy_current_state = yy_get_previous_state( ); yy_cp = (yy_c_buf_p); yy_bp = (yytext_ptr) + YY_MORE_ADJ; goto yy_match; case EOB_ACT_LAST_MATCH: (yy_c_buf_p) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)]; yy_current_state = yy_get_previous_state( ); yy_cp = (yy_c_buf_p); yy_bp = (yytext_ptr) + YY_MORE_ADJ; goto yy_find_action; } break; } default: YY_FATAL_ERROR( "fatal flex scanner internal error--no action found" ); } /* end of action switch */ } /* end of scanning one token */ } /* end of coll_ml_config_yylex */ /* yy_get_next_buffer - try to read in a new buffer * * Returns a code representing an action: * EOB_ACT_LAST_MATCH - * EOB_ACT_CONTINUE_SCAN - continue scanning from current position * EOB_ACT_END_OF_FILE - end of file */ static int yy_get_next_buffer (void) { register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; register char *source = (yytext_ptr); register int number_to_move, i; int ret_val; if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] ) YY_FATAL_ERROR( "fatal flex scanner internal error--end of buffer missed" ); if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 ) { /* Don't try to fill the buffer, so this is an EOF. */ if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 ) { /* We matched a single character, the EOB, so * treat this as a final EOF. */ return EOB_ACT_END_OF_FILE; } else { /* We matched some text prior to the EOB, first * process it. */ return EOB_ACT_LAST_MATCH; } } /* Try to read more data. */ /* First move last chars to start of buffer. */ number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr)) - 1; for ( i = 0; i < number_to_move; ++i ) *(dest++) = *(source++); if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING ) /* don't do the read, it's not guaranteed to return an EOF, * just force an EOF */ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0; else { int num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; while ( num_to_read <= 0 ) { /* Not enough room in the buffer - grow it. */ YY_FATAL_ERROR( "input buffer overflow, can't enlarge buffer because scanner uses REJECT" ); } if ( num_to_read > YY_READ_BUF_SIZE ) num_to_read = YY_READ_BUF_SIZE; /* Read in more data. 
*/ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), (yy_n_chars), (size_t) num_to_read ); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } if ( (yy_n_chars) == 0 ) { if ( number_to_move == YY_MORE_ADJ ) { ret_val = EOB_ACT_END_OF_FILE; coll_ml_config_yyrestart(coll_ml_config_yyin ); } else { ret_val = EOB_ACT_LAST_MATCH; YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_EOF_PENDING; } } else ret_val = EOB_ACT_CONTINUE_SCAN; if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { /* Extend the array by 50%, plus the number we really need. */ yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1); YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) coll_ml_config_yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ); if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" ); } (yy_n_chars) += number_to_move; YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR; YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR; (yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; return ret_val; } /* yy_get_previous_state - get the state just before the EOB char was reached */ static yy_state_type yy_get_previous_state (void) { register yy_state_type yy_current_state; register char *yy_cp; yy_current_state = (yy_start); (yy_state_ptr) = (yy_state_buf); *(yy_state_ptr)++ = yy_current_state; for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp ) { register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1); while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; if ( yy_current_state >= 87 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; *(yy_state_ptr)++ = yy_current_state; } return yy_current_state; } /* yy_try_NUL_trans - try to make a transition on the NUL character * * synopsis * next_state = yy_try_NUL_trans( current_state ); */ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state ) { register int yy_is_jam; register YY_CHAR yy_c = 1; while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; if ( yy_current_state >= 87 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; yy_is_jam = (yy_current_state == 86); if ( ! yy_is_jam ) *(yy_state_ptr)++ = yy_current_state; return yy_is_jam ? 0 : yy_current_state; } #ifndef YY_NO_INPUT #ifdef __cplusplus static int yyinput (void) #else static int input (void) #endif { int c; *(yy_c_buf_p) = (yy_hold_char); if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR ) { /* yy_c_buf_p now points to the character we want to return. * If this occurs *before* the EOB characters, then it's a * valid NUL; if not, then we've hit the end of the buffer. */ if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] ) /* This was really a NUL. */ *(yy_c_buf_p) = '\0'; else { /* need more input */ int offset = (yy_c_buf_p) - (yytext_ptr); ++(yy_c_buf_p); switch ( yy_get_next_buffer( ) ) { case EOB_ACT_LAST_MATCH: /* This happens because yy_g_n_b() * sees that we've accumulated a * token and flags that we need to * try matching the token before * proceeding. But for input(), * there's no matching to consider. * So convert the EOB_ACT_LAST_MATCH * to EOB_ACT_END_OF_FILE. 
*/ /* Reset buffer status. */ coll_ml_config_yyrestart(coll_ml_config_yyin ); /*FALLTHROUGH*/ case EOB_ACT_END_OF_FILE: { if ( coll_ml_config_yywrap( ) ) return EOF; if ( ! (yy_did_buffer_switch_on_eof) ) YY_NEW_FILE; #ifdef __cplusplus return yyinput(); #else return input(); #endif } case EOB_ACT_CONTINUE_SCAN: (yy_c_buf_p) = (yytext_ptr) + offset; break; } } } c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */ *(yy_c_buf_p) = '\0'; /* preserve coll_ml_config_yytext */ (yy_hold_char) = *++(yy_c_buf_p); return c; } #endif /* ifndef YY_NO_INPUT */ /** Immediately switch to a different input stream. * @param input_file A readable stream. * * @note This function does not reset the start condition to @c INITIAL . */ void coll_ml_config_yyrestart (FILE * input_file ) { if ( ! YY_CURRENT_BUFFER ){ coll_ml_config_yyensure_buffer_stack (); YY_CURRENT_BUFFER_LVALUE = coll_ml_config_yy_create_buffer(coll_ml_config_yyin,YY_BUF_SIZE ); } coll_ml_config_yy_init_buffer(YY_CURRENT_BUFFER,input_file ); coll_ml_config_yy_load_buffer_state( ); } /** Switch to a different input buffer. * @param new_buffer The new input buffer. * */ void coll_ml_config_yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ) { /* TODO. We should be able to replace this entire function body * with * coll_ml_config_yypop_buffer_state(); * coll_ml_config_yypush_buffer_state(new_buffer); */ coll_ml_config_yyensure_buffer_stack (); if ( YY_CURRENT_BUFFER == new_buffer ) return; if ( YY_CURRENT_BUFFER ) { /* Flush out information for old buffer. */ *(yy_c_buf_p) = (yy_hold_char); YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } YY_CURRENT_BUFFER_LVALUE = new_buffer; coll_ml_config_yy_load_buffer_state( ); /* We don't actually know whether we did this switch during * EOF (coll_ml_config_yywrap()) processing, but the only time this flag * is looked at is after coll_ml_config_yywrap() is called, so it's safe * to go ahead and always set it. */ (yy_did_buffer_switch_on_eof) = 1; } static void coll_ml_config_yy_load_buffer_state (void) { (yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; (yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; coll_ml_config_yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; (yy_hold_char) = *(yy_c_buf_p); } /** Allocate and initialize an input buffer state. * @param file A readable stream. * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE. * * @return the allocated buffer state. */ YY_BUFFER_STATE coll_ml_config_yy_create_buffer (FILE * file, int size ) { YY_BUFFER_STATE b; b = (YY_BUFFER_STATE) coll_ml_config_yyalloc(sizeof( struct yy_buffer_state ) ); if ( ! b ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yy_create_buffer()" ); b->yy_buf_size = size; /* yy_ch_buf has to be 2 characters longer than the size given because * we need to put in 2 end-of-buffer characters. */ b->yy_ch_buf = (char *) coll_ml_config_yyalloc(b->yy_buf_size + 2 ); if ( ! b->yy_ch_buf ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yy_create_buffer()" ); b->yy_is_our_buffer = 1; coll_ml_config_yy_init_buffer(b,file ); return b; } /** Destroy the buffer. * @param b a buffer created with coll_ml_config_yy_create_buffer() * */ void coll_ml_config_yy_delete_buffer (YY_BUFFER_STATE b ) { if ( ! b ) return; if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. 
*/ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0; if ( b->yy_is_our_buffer ) coll_ml_config_yyfree((void *) b->yy_ch_buf ); coll_ml_config_yyfree((void *) b ); } #ifndef __cplusplus extern int isatty (int ); #endif /* __cplusplus */ /* Initializes or reinitializes a buffer. * This function is sometimes called more than once on the same buffer, * such as during a coll_ml_config_yyrestart() or at EOF. */ static void coll_ml_config_yy_init_buffer (YY_BUFFER_STATE b, FILE * file ) { int oerrno = errno; coll_ml_config_yy_flush_buffer(b ); b->yy_input_file = file; b->yy_fill_buffer = 1; /* If b is the current buffer, then coll_ml_config_yy_init_buffer was _probably_ * called from coll_ml_config_yyrestart() or through yy_get_next_buffer. * In that case, we don't want to reset the lineno or column. */ if (b != YY_CURRENT_BUFFER){ b->yy_bs_lineno = 1; b->yy_bs_column = 0; } b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0; errno = oerrno; } /** Discard all buffered characters. On the next scan, YY_INPUT will be called. * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. * */ void coll_ml_config_yy_flush_buffer (YY_BUFFER_STATE b ) { if ( ! b ) return; b->yy_n_chars = 0; /* We always need two end-of-buffer characters. The first causes * a transition to the end-of-buffer state. The second causes * a jam in that state. */ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; b->yy_buf_pos = &b->yy_ch_buf[0]; b->yy_at_bol = 1; b->yy_buffer_status = YY_BUFFER_NEW; if ( b == YY_CURRENT_BUFFER ) coll_ml_config_yy_load_buffer_state( ); } /** Pushes the new state onto the stack. The new state becomes * the current state. This function will allocate the stack * if necessary. * @param new_buffer The new state. * */ void coll_ml_config_yypush_buffer_state (YY_BUFFER_STATE new_buffer ) { if (new_buffer == NULL) return; coll_ml_config_yyensure_buffer_stack(); /* This block is copied from coll_ml_config_yy_switch_to_buffer. */ if ( YY_CURRENT_BUFFER ) { /* Flush out information for old buffer. */ *(yy_c_buf_p) = (yy_hold_char); YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } /* Only push if top exists. Otherwise, replace top. */ if (YY_CURRENT_BUFFER) (yy_buffer_stack_top)++; YY_CURRENT_BUFFER_LVALUE = new_buffer; /* copied from coll_ml_config_yy_switch_to_buffer. */ coll_ml_config_yy_load_buffer_state( ); (yy_did_buffer_switch_on_eof) = 1; } /** Removes and deletes the top of the stack, if present. * The next element becomes the new top. * */ void coll_ml_config_yypop_buffer_state (void) { if (!YY_CURRENT_BUFFER) return; coll_ml_config_yy_delete_buffer(YY_CURRENT_BUFFER ); YY_CURRENT_BUFFER_LVALUE = NULL; if ((yy_buffer_stack_top) > 0) --(yy_buffer_stack_top); if (YY_CURRENT_BUFFER) { coll_ml_config_yy_load_buffer_state( ); (yy_did_buffer_switch_on_eof) = 1; } } /* Allocates the stack if it does not exist. * Guarantees space for at least one push. */ static void coll_ml_config_yyensure_buffer_stack (void) { int num_to_alloc; if (!(yy_buffer_stack)) { /* First allocation is just for 2 elements, since we don't know if this * scanner will even need a stack. We use 2 instead of 1 to avoid an * immediate realloc on the next call. */ num_to_alloc = 1; (yy_buffer_stack) = (struct yy_buffer_state**)coll_ml_config_yyalloc (num_to_alloc * sizeof(struct yy_buffer_state*) ); if ( ! 
(yy_buffer_stack) ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yyensure_buffer_stack()" ); memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*)); (yy_buffer_stack_max) = num_to_alloc; (yy_buffer_stack_top) = 0; return; } if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){ /* Increase the buffer to prepare for a possible push. */ int grow_size = 8 /* arbitrary grow size */; num_to_alloc = (yy_buffer_stack_max) + grow_size; (yy_buffer_stack) = (struct yy_buffer_state**)coll_ml_config_yyrealloc ((yy_buffer_stack), num_to_alloc * sizeof(struct yy_buffer_state*) ); if ( ! (yy_buffer_stack) ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yyensure_buffer_stack()" ); /* zero only the new slots.*/ memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*)); (yy_buffer_stack_max) = num_to_alloc; } } /** Setup the input buffer state to scan directly from a user-specified character buffer. * @param base the character buffer * @param size the size in bytes of the character buffer * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE coll_ml_config_yy_scan_buffer (char * base, yy_size_t size ) { YY_BUFFER_STATE b; if ( size < 2 || base[size-2] != YY_END_OF_BUFFER_CHAR || base[size-1] != YY_END_OF_BUFFER_CHAR ) /* They forgot to leave room for the EOB's. */ return 0; b = (YY_BUFFER_STATE) coll_ml_config_yyalloc(sizeof( struct yy_buffer_state ) ); if ( ! b ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yy_scan_buffer()" ); b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ b->yy_buf_pos = b->yy_ch_buf = base; b->yy_is_our_buffer = 0; b->yy_input_file = 0; b->yy_n_chars = b->yy_buf_size; b->yy_is_interactive = 0; b->yy_at_bol = 1; b->yy_fill_buffer = 0; b->yy_buffer_status = YY_BUFFER_NEW; coll_ml_config_yy_switch_to_buffer(b ); return b; } /** Setup the input buffer state to scan a string. The next call to coll_ml_config_yylex() will * scan from a @e copy of @a str. * @param yystr a NUL-terminated string to scan * * @return the newly allocated buffer state object. * @note If you want to scan bytes that may contain NUL values, then use * coll_ml_config_yy_scan_bytes() instead. */ YY_BUFFER_STATE coll_ml_config_yy_scan_string (yyconst char * yystr ) { return coll_ml_config_yy_scan_bytes(yystr,strlen(yystr) ); } /** Setup the input buffer state to scan the given bytes. The next call to coll_ml_config_yylex() will * scan from a @e copy of @a bytes. * @param bytes the byte buffer to scan * @param len the number of bytes in the buffer pointed to by @a bytes. * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE coll_ml_config_yy_scan_bytes (yyconst char * yybytes, int _yybytes_len ) { YY_BUFFER_STATE b; char *buf; yy_size_t n; int i; /* Get memory for full buffer, including space for trailing EOB's. */ n = _yybytes_len + 2; buf = (char *) coll_ml_config_yyalloc(n ); if ( ! buf ) YY_FATAL_ERROR( "out of dynamic memory in coll_ml_config_yy_scan_bytes()" ); for ( i = 0; i < _yybytes_len; ++i ) buf[i] = yybytes[i]; buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; b = coll_ml_config_yy_scan_buffer(buf,n ); if ( ! b ) YY_FATAL_ERROR( "bad buffer in coll_ml_config_yy_scan_bytes()" ); /* It's okay to grow etc. this buffer, and we should throw it * away when we're done. 
*/ b->yy_is_our_buffer = 1; return b; } #ifndef YY_EXIT_FAILURE #define YY_EXIT_FAILURE 2 #endif static void yy_fatal_error (yyconst char* msg ) { (void) fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); } /* Redefine yyless() so it works in section 3 code. */ #undef yyless #define yyless(n) \ do \ { \ /* Undo effects of setting up coll_ml_config_yytext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ coll_ml_config_yytext[coll_ml_config_yyleng] = (yy_hold_char); \ (yy_c_buf_p) = coll_ml_config_yytext + yyless_macro_arg; \ (yy_hold_char) = *(yy_c_buf_p); \ *(yy_c_buf_p) = '\0'; \ coll_ml_config_yyleng = yyless_macro_arg; \ } \ while ( 0 ) /* Accessor methods (get/set functions) to struct members. */ /** Get the current line number. * */ int coll_ml_config_yyget_lineno (void) { return coll_ml_config_yylineno; } /** Get the input stream. * */ FILE *coll_ml_config_yyget_in (void) { return coll_ml_config_yyin; } /** Get the output stream. * */ FILE *coll_ml_config_yyget_out (void) { return coll_ml_config_yyout; } /** Get the length of the current token. * */ int coll_ml_config_yyget_leng (void) { return coll_ml_config_yyleng; } /** Get the current token. * */ char *coll_ml_config_yyget_text (void) { return coll_ml_config_yytext; } /** Set the current line number. * @param line_number * */ void coll_ml_config_yyset_lineno (int line_number ) { coll_ml_config_yylineno = line_number; } /** Set the input stream. This does not discard the current * input buffer. * @param in_str A readable stream. * * @see coll_ml_config_yy_switch_to_buffer */ void coll_ml_config_yyset_in (FILE * in_str ) { coll_ml_config_yyin = in_str ; } void coll_ml_config_yyset_out (FILE * out_str ) { coll_ml_config_yyout = out_str ; } int coll_ml_config_yyget_debug (void) { return coll_ml_config_yy_flex_debug; } void coll_ml_config_yyset_debug (int bdebug ) { coll_ml_config_yy_flex_debug = bdebug ; } static int yy_init_globals (void) { /* Initialization is the same as for the non-reentrant scanner. * This function is called from coll_ml_config_yylex_destroy(), so don't allocate here. */ (yy_buffer_stack) = 0; (yy_buffer_stack_top) = 0; (yy_buffer_stack_max) = 0; (yy_c_buf_p) = (char *) 0; (yy_init) = 0; (yy_start) = 0; (yy_state_buf) = 0; (yy_state_ptr) = 0; (yy_full_match) = 0; (yy_lp) = 0; /* Defined in main.c */ #ifdef YY_STDINIT coll_ml_config_yyin = stdin; coll_ml_config_yyout = stdout; #else coll_ml_config_yyin = (FILE *) 0; coll_ml_config_yyout = (FILE *) 0; #endif /* For future reference: Set errno on error, since we are called by * coll_ml_config_yylex_init() */ return 0; } /* coll_ml_config_yylex_destroy is for both reentrant and non-reentrant scanners. */ int coll_ml_config_yylex_destroy (void) { /* Pop the buffer stack, destroying each element. */ while(YY_CURRENT_BUFFER){ coll_ml_config_yy_delete_buffer(YY_CURRENT_BUFFER ); YY_CURRENT_BUFFER_LVALUE = NULL; coll_ml_config_yypop_buffer_state(); } /* Destroy the stack itself. */ coll_ml_config_yyfree((yy_buffer_stack) ); (yy_buffer_stack) = NULL; coll_ml_config_yyfree ( (yy_state_buf) ); (yy_state_buf) = NULL; /* Reset the globals. This is important in a non-reentrant scanner so the next time * coll_ml_config_yylex() is called, initialization will occur. */ yy_init_globals( ); return 0; } /* * Internal utility routines. 
*/ #ifndef yytext_ptr static void yy_flex_strncpy (char* s1, yyconst char * s2, int n ) { register int i; for ( i = 0; i < n; ++i ) s1[i] = s2[i]; } #endif #ifdef YY_NEED_STRLEN static int yy_flex_strlen (yyconst char * s ) { register int n; for ( n = 0; s[n]; ++n ) ; return n; } #endif void *coll_ml_config_yyalloc (yy_size_t size ) { return (void *) malloc( size ); } void *coll_ml_config_yyrealloc (void * ptr, yy_size_t size ) { /* The cast to (char *) in the following accommodates both * implementations that use char* generic pointers, and those * that use void* generic pointers. It works with the latter * because both ANSI C and C++ allow castless assignment from * any pointer type to void*, and deal with argument conversions * as though doing an assignment. */ return (void *) realloc( (char *) ptr, size ); } void coll_ml_config_yyfree (void * ptr ) { free( (char *) ptr ); /* see coll_ml_config_yyrealloc() for (char *) cast */ } #define YYTABLES_NAME "yytables" #line 100 "coll_ml_lex.l" /* * This cleans up at the end of the parse (since, in this case, we * always parse the entire file) and prevents a memory leak. */ static int finish_parsing(void) { if (NULL != YY_CURRENT_BUFFER) { coll_ml_config_yy_delete_buffer(YY_CURRENT_BUFFER); #if defined(YY_CURRENT_BUFFER_LVALUE) YY_CURRENT_BUFFER_LVALUE = NULL; #else YY_CURRENT_BUFFER = NULL; #endif /* YY_CURRENT_BUFFER_LVALUE */ } return YY_NULL; } static int coll_ml_config_yywrap(void) { coll_ml_config_parse_done = true; return 1; } /* * Ensure that we have a valid yybuffer to use. Specifically, if this * scanner is invoked a second time, finish_parsing() (above) will * have been executed, and the current buffer will have been freed. * Flex doesn't recognize this fact because as far as it's concerned, * its internal state was already initialized, so it thinks it should * have a valid buffer. Hence, here we ensure to give it a valid * buffer. */ int coll_ml_config_init_buffer(FILE *file) { YY_BUFFER_STATE buf = coll_ml_config_yy_create_buffer(file,YY_BUF_SIZE); coll_ml_config_yy_switch_to_buffer(buf); return 0; }
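/*
 * Editor's note -- a minimal usage sketch, not part of the generated scanner.
 * It relies only on what is defined above: the caller points
 * coll_ml_config_yyin at the config file, primes a fresh buffer with
 * coll_ml_config_init_buffer() (required on re-entry, since finish_parsing()
 * frees the buffer at EOF), and then pulls tokens from coll_ml_config_yylex()
 * until coll_ml_config_yywrap() sets coll_ml_config_parse_done. The
 * COLL_ML_CONFIG_PARSE_* token values come from coll_ml_lex.h.
 */
static inline int example_scan_config_file(FILE *fp)
{
    int token;

    coll_ml_config_yyin = fp;
    coll_ml_config_parse_done = false;
    coll_ml_config_init_buffer(fp);

    while (!coll_ml_config_parse_done &&
           0 != (token = coll_ml_config_yylex())) {
        if (COLL_ML_CONFIG_PARSE_ERROR == token) {
            /* report the line where the error was seen */
            return coll_ml_config_yynewlines;
        }
        /* a real parser would switch on the remaining token values here */
    }
    return 0;
}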
365730.c
/* $Id: tif_zip.c,v 1.1 2005/06/17 13:54:52 vp153 Exp $ */

/*
 * Copyright (c) 1995-1997 Sam Leffler
 * Copyright (c) 1995-1997 Silicon Graphics, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software and
 * its documentation for any purpose is hereby granted without fee, provided
 * that (i) the above copyright notices and this permission notice appear in
 * all copies of the software and related documentation, and (ii) the names of
 * Sam Leffler and Silicon Graphics may not be used in any advertising or
 * publicity relating to the software without the specific, prior written
 * permission of Sam Leffler and Silicon Graphics.
 *
 * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL SAM LEFFLER OR SILICON GRAPHICS BE LIABLE FOR
 * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
 * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
 * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
 * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include "tiffiop.h"
#ifdef ZIP_SUPPORT
/*
 * TIFF Library.
 *
 * ZIP (aka Deflate) Compression Support
 *
 * This file is simply an interface to the zlib library written by
 * Jean-loup Gailly and Mark Adler.  You must use version 1.0 or later
 * of the library: this code assumes the 1.0 API and also depends on
 * the ability to write the zlib header multiple times (one per strip),
 * which was not possible with versions prior to 0.95.  Note also that
 * older versions of this codec avoided this bug by suppressing the header
 * entirely.  This means that files written with the old library cannot
 * be read; they should be converted to a different compression scheme
 * and then reconverted.
 *
 * The data format used by the zlib library is described in the files
 * zlib-3.1.doc, deflate-1.1.doc and gzip-4.1.doc, available in the
 * directory ftp://ftp.uu.net/pub/archiving/zip/doc.  The library was
 * last found at ftp://ftp.uu.net/pub/archiving/zip/zlib/zlib-0.99.tar.gz.
 */
#include "tif_predict.h"
#include "zlib.h"

#include <stdio.h>

/*
 * Sigh, ZLIB_VERSION is defined as a string so there's no
 * way to do a proper check here.  Instead we guess based
 * on the presence of #defines that were added between the
 * 0.95 and 1.0 distributions.
 */
#if !defined(Z_NO_COMPRESSION) || !defined(Z_DEFLATED)
#error "Antiquated ZLIB software; you must use version 1.0 or later"
#endif

/*
 * State block for each open TIFF
 * file using ZIP compression/decompression.
 */
typedef struct {
    TIFFPredictorState predict;
    z_stream        stream;
    int             zipquality;    /* compression level */
    int             state;         /* state flags */
#define ZSTATE_INIT 0x1            /* zlib setup successfully */

    TIFFVGetMethod  vgetparent;    /* super-class method */
    TIFFVSetMethod  vsetparent;    /* super-class method */
} ZIPState;

#define ZState(tif)         ((ZIPState*) (tif)->tif_data)
#define DecoderState(tif)   ZState(tif)
#define EncoderState(tif)   ZState(tif)

static int ZIPEncode(TIFF*, tidata_t, tsize_t, tsample_t);
static int ZIPDecode(TIFF*, tidata_t, tsize_t, tsample_t);

static int
ZIPSetupDecode(TIFF* tif)
{
    ZIPState* sp = DecoderState(tif);
    static const char module[] = "ZIPSetupDecode";

    assert(sp != NULL);
    if (inflateInit(&sp->stream) != Z_OK) {
        TIFFError(module, "%s: %s", tif->tif_name, sp->stream.msg);
        return (0);
    } else {
        sp->state |= ZSTATE_INIT;
        return (1);
    }
}

/*
 * Setup state for decoding a strip.
 */
static int
ZIPPreDecode(TIFF* tif, tsample_t s)
{
    ZIPState* sp = DecoderState(tif);

    (void) s;
    assert(sp != NULL);
    sp->stream.next_in = tif->tif_rawdata;
    sp->stream.avail_in = tif->tif_rawcc;
    return (inflateReset(&sp->stream) == Z_OK);
}

static int
ZIPDecode(TIFF* tif, tidata_t op, tsize_t occ, tsample_t s)
{
    ZIPState* sp = DecoderState(tif);
    static const char module[] = "ZIPDecode";

    (void) s;
    assert(sp != NULL);
    sp->stream.next_out = op;
    sp->stream.avail_out = occ;
    do {
        int state = inflate(&sp->stream, Z_PARTIAL_FLUSH);
        if (state == Z_STREAM_END)
            break;
        if (state == Z_DATA_ERROR) {
            TIFFError(module, "%s: Decoding error at scanline %d, %s",
                tif->tif_name, tif->tif_row, sp->stream.msg);
            if (inflateSync(&sp->stream) != Z_OK)
                return (0);
            continue;
        }
        if (state != Z_OK) {
            TIFFError(module, "%s: zlib error: %s",
                tif->tif_name, sp->stream.msg);
            return (0);
        }
    } while (sp->stream.avail_out > 0);
    if (sp->stream.avail_out != 0) {
        TIFFError(module, "%s: Not enough data at scanline %d (short %d bytes)",
            tif->tif_name, tif->tif_row, sp->stream.avail_out);
        return (0);
    }
    return (1);
}

static int
ZIPSetupEncode(TIFF* tif)
{
    ZIPState* sp = EncoderState(tif);
    static const char module[] = "ZIPSetupEncode";

    assert(sp != NULL);
    if (deflateInit(&sp->stream, sp->zipquality) != Z_OK) {
        TIFFError(module, "%s: %s", tif->tif_name, sp->stream.msg);
        return (0);
    } else {
        sp->state |= ZSTATE_INIT;
        return (1);
    }
}

/*
 * Reset encoding state at the start of a strip.
 */
static int
ZIPPreEncode(TIFF* tif, tsample_t s)
{
    ZIPState *sp = EncoderState(tif);

    (void) s;
    assert(sp != NULL);
    sp->stream.next_out = tif->tif_rawdata;
    sp->stream.avail_out = tif->tif_rawdatasize;
    return (deflateReset(&sp->stream) == Z_OK);
}

/*
 * Encode a chunk of pixels.
 */
static int
ZIPEncode(TIFF* tif, tidata_t bp, tsize_t cc, tsample_t s)
{
    ZIPState *sp = EncoderState(tif);
    static const char module[] = "ZIPEncode";

    (void) s;
    sp->stream.next_in = bp;
    sp->stream.avail_in = cc;
    do {
        if (deflate(&sp->stream, Z_NO_FLUSH) != Z_OK) {
            TIFFError(module, "%s: Encoder error: %s",
                tif->tif_name, sp->stream.msg);
            return (0);
        }
        if (sp->stream.avail_out == 0) {
            tif->tif_rawcc = tif->tif_rawdatasize;
            TIFFFlushData1(tif);
            sp->stream.next_out = tif->tif_rawdata;
            sp->stream.avail_out = tif->tif_rawdatasize;
        }
    } while (sp->stream.avail_in > 0);
    return (1);
}

/*
 * Finish off an encoded strip by flushing the last
 * string and tacking on an End Of Information code.
 */
static int
ZIPPostEncode(TIFF* tif)
{
    ZIPState *sp = EncoderState(tif);
    static const char module[] = "ZIPPostEncode";
    int state;

    sp->stream.avail_in = 0;
    do {
        state = deflate(&sp->stream, Z_FINISH);
        switch (state) {
        case Z_STREAM_END:
        case Z_OK:
            if ((int)sp->stream.avail_out != (int)tif->tif_rawdatasize) {
                tif->tif_rawcc =
                    tif->tif_rawdatasize - sp->stream.avail_out;
                TIFFFlushData1(tif);
                sp->stream.next_out = tif->tif_rawdata;
                sp->stream.avail_out = tif->tif_rawdatasize;
            }
            break;
        default:
            TIFFError(module, "%s: zlib error: %s",
                tif->tif_name, sp->stream.msg);
            return (0);
        }
    } while (state != Z_STREAM_END);
    return (1);
}

static void
ZIPCleanup(TIFF* tif)
{
    ZIPState* sp = ZState(tif);

    if (sp) {
        if (sp->state & ZSTATE_INIT) {
            /* NB: avoid problems in the library */
            if (tif->tif_mode == O_RDONLY)
                inflateEnd(&sp->stream);
            else
                deflateEnd(&sp->stream);
        }
        _TIFFfree(sp);
        tif->tif_data = NULL;
    }
}

static int
ZIPVSetField(TIFF* tif, ttag_t tag, va_list ap)
{
    ZIPState* sp = ZState(tif);
    static const char module[] = "ZIPVSetField";

    switch (tag) {
    case TIFFTAG_ZIPQUALITY:
        sp->zipquality = va_arg(ap, int);
        if (tif->tif_mode != O_RDONLY && (sp->state & ZSTATE_INIT)) {
            if (deflateParams(&sp->stream,
                sp->zipquality, Z_DEFAULT_STRATEGY) != Z_OK) {
                TIFFError(module, "%s: zlib error: %s",
                    tif->tif_name, sp->stream.msg);
                return (0);
            }
        }
        return (1);
    default:
        return (*sp->vsetparent)(tif, tag, ap);
    }
    /*NOTREACHED*/
}

static int
ZIPVGetField(TIFF* tif, ttag_t tag, va_list ap)
{
    ZIPState* sp = ZState(tif);

    switch (tag) {
    case TIFFTAG_ZIPQUALITY:
        *va_arg(ap, int*) = sp->zipquality;
        break;
    default:
        return (*sp->vgetparent)(tif, tag, ap);
    }
    return (1);
}

static const TIFFFieldInfo zipFieldInfo[] = {
    { TIFFTAG_ZIPQUALITY, 0, 0, TIFF_ANY, FIELD_PSEUDO,
      TRUE, FALSE, "" },
};
#define N(a) (sizeof (a) / sizeof (a[0]))

int
TIFFInitZIP(TIFF* tif, int scheme)
{
    ZIPState* sp;

    assert( (scheme == COMPRESSION_DEFLATE)
        || (scheme == COMPRESSION_ADOBE_DEFLATE));

    /*
     * Allocate state block so tag methods have storage to record values.
     */
    tif->tif_data = (tidata_t) _TIFFmalloc(sizeof (ZIPState));
    if (tif->tif_data == NULL)
        goto bad;
    sp = ZState(tif);
    sp->stream.zalloc = NULL;
    sp->stream.zfree = NULL;
    sp->stream.opaque = NULL;
    sp->stream.data_type = Z_BINARY;

    /*
     * Merge codec-specific tag information and
     * override parent get/set field methods.
     */
    _TIFFMergeFieldInfo(tif, zipFieldInfo, N(zipFieldInfo));
    sp->vgetparent = tif->tif_tagmethods.vgetfield;
    tif->tif_tagmethods.vgetfield = ZIPVGetField;  /* hook for codec tags */
    sp->vsetparent = tif->tif_tagmethods.vsetfield;
    tif->tif_tagmethods.vsetfield = ZIPVSetField;  /* hook for codec tags */

    /* Default values for codec-specific fields */
    sp->zipquality = Z_DEFAULT_COMPRESSION;  /* default comp. level */
    sp->state = 0;

    /*
     * Install codec methods.
     */
    tif->tif_setupdecode = ZIPSetupDecode;
    tif->tif_predecode = ZIPPreDecode;
    tif->tif_decoderow = ZIPDecode;
    tif->tif_decodestrip = ZIPDecode;
    tif->tif_decodetile = ZIPDecode;
    tif->tif_setupencode = ZIPSetupEncode;
    tif->tif_preencode = ZIPPreEncode;
    tif->tif_postencode = ZIPPostEncode;
    tif->tif_encoderow = ZIPEncode;
    tif->tif_encodestrip = ZIPEncode;
    tif->tif_encodetile = ZIPEncode;
    tif->tif_cleanup = ZIPCleanup;
    /*
     * Set up the predictor.
     */
    (void) TIFFPredictorInit(tif);
    return (1);
bad:
    TIFFError("TIFFInitZIP", "No space for ZIP state block");
    return (0);
}
#endif /* ZIP_SUPPORT */

/* vim: set ts=8 sts=8 sw=8 noet: */
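/*
 * Usage sketch (not part of tif_zip.c, not built here): how an application
 * would typically reach this codec through the public libtiff API.  The file
 * name, helper name and image layout below are invented for illustration;
 * the TIFFOpen / TIFFSetField / TIFFWriteScanline calls and the
 * COMPRESSION_ADOBE_DEFLATE and TIFFTAG_ZIPQUALITY tags are the standard
 * interface that ends up in TIFFInitZIP and ZIPVSetField above.
 */
#include <tiffio.h>

static int
write_deflate_tiff_sketch(const char* path, const unsigned char* gray,
    unsigned int width, unsigned int height)
{
    unsigned int row;
    TIFF* tif = TIFFOpen(path, "w");

    if (tif == NULL)
        return (0);
    TIFFSetField(tif, TIFFTAG_IMAGEWIDTH, width);
    TIFFSetField(tif, TIFFTAG_IMAGELENGTH, height);
    TIFFSetField(tif, TIFFTAG_BITSPERSAMPLE, 8);
    TIFFSetField(tif, TIFFTAG_SAMPLESPERPIXEL, 1);
    TIFFSetField(tif, TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK);
    TIFFSetField(tif, TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG);
    /* Select the Deflate codec implemented in this file... */
    TIFFSetField(tif, TIFFTAG_COMPRESSION, COMPRESSION_ADOBE_DEFLATE);
    /* ...and pass a compression level through to ZIPVSetField/deflateParams. */
    TIFFSetField(tif, TIFFTAG_ZIPQUALITY, 9);
    for (row = 0; row < height; row++) {
        if (TIFFWriteScanline(tif, (void*) (gray + (size_t) row * width), row, 0) < 0) {
            TIFFClose(tif);
            return (0);
        }
    }
    TIFFClose(tif);
    return (1);
}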
342076.c
#include "trace_points.h"

/*
 * X-macro expansion: trace_points.h defines TRACE_POINTS as a list of C(name)
 * entries.  Redefining C(x) as the stringizing operator #x turns that list
 * into a table of trace-point name strings, in declaration order.
 */
#undef C
#define C(x) #x,

const char* trace_point_names[] = { TRACE_POINTS };
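/*
 * Illustrative sketch (an assumption, not the project's actual header): one
 * way trace_points.h could be laid out so that the expansion above works.
 * The trace-point names are invented; only the pattern -- a TRACE_POINTS list
 * of C(...) entries whose meaning is chosen by whoever defines C before
 * expanding it -- is what the source file above relies on.
 */
#ifndef TRACE_POINTS_H
#define TRACE_POINTS_H

/* Every trace point appears exactly once, wrapped in C(). */
#define TRACE_POINTS \
    C(sched_switch)  \
    C(irq_enter)     \
    C(irq_exit)

/* Default expansion: an enum of trace-point identifiers. */
#undef C
#define C(x) TRACE_POINT_##x,
enum trace_point_id { TRACE_POINTS TRACE_POINT_COUNT };

/* String table, defined elsewhere by re-expanding TRACE_POINTS with C(x) = #x. */
extern const char* trace_point_names[];

#endif /* TRACE_POINTS_H */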
754912.c
#include <acutest.h>
#include <gkyl_util.h>

#include <slu_ddefs.h>

void test_slu_example()
{
  /*
   * This is the small 5x5 example used in the Sections 2 and 3 of the
   * Users' Guide to illustrate how to call a SuperLU routine, and the
   * matrix data structures used by SuperLU.
   *
   */
  SuperMatrix A, L, U, B;
  double *a, *rhs;
  double s, u, p, e, r, l;
  int *asub, *xa;
  int *perm_r; /* row permutations from partial pivoting */
  int *perm_c; /* column permutation vector */
  int nrhs, info, i, m, n, nnz, permc_spec;
  superlu_options_t options;
  SuperLUStat_t stat;

  /* Initialize matrix A. */
  /* A : matrix([s,0,u,u,0],[l,u,0,0,0],[0,l,p,0,0],[0,0,0,e,u],[l,l,0,0,r]); */
  m = n = 5;
  nnz = 12;
  if ( !(a = doubleMalloc(nnz)) ) ABORT("Malloc fails for a[].");
  if ( !(asub = intMalloc(nnz)) ) ABORT("Malloc fails for asub[].");
  if ( !(xa = intMalloc(n+1)) ) ABORT("Malloc fails for xa[].");

  s = 19.0; u = 21.0; p = 16.0; e = 5.0; r = 18.0; l = 12.0;

  /* A : matrix([s,0,u,u,0],[l,u,0,0,0],[0,l,p,0,0],[0,0,0,e,u],[l,l,0,0,r]); */
  a[0] = s; a[1] = l; a[2] = l; a[3] = u; a[4] = l; a[5] = l;
  a[6] = u; a[7] = p; a[8] = u; a[9] = e; a[10]= u; a[11]= r;
  asub[0] = 0; asub[1] = 1; asub[2] = 4; asub[3] = 1;
  asub[4] = 2; asub[5] = 4; asub[6] = 0; asub[7] = 2;
  asub[8] = 0; asub[9] = 3; asub[10]= 3; asub[11]= 4;
  xa[0] = 0; xa[1] = 3; xa[2] = 6; xa[3] = 8; xa[4] = 10; xa[5] = 12;

  /* Create matrix A in the format expected by SuperLU. */
  dCreate_CompCol_Matrix(&A, m, n, nnz, a, asub, xa, SLU_NC, SLU_D, SLU_GE);

  /* Create right-hand side matrix B. */
  nrhs = 1;
  if ( !(rhs = doubleMalloc(m * nrhs)) ) ABORT("Malloc fails for rhs[].");
  /* B : transpose([1,1,1,1,1]); */
  for (i = 0; i < m; ++i) rhs[i] = 1.0;
  dCreate_Dense_Matrix(&B, m, nrhs, rhs, m, SLU_DN, SLU_D, SLU_GE);

  if ( !(perm_r = intMalloc(m)) ) ABORT("Malloc fails for perm_r[].");
  if ( !(perm_c = intMalloc(n)) ) ABORT("Malloc fails for perm_c[].");

  /* Set the default input options. */
  set_default_options(&options);
  options.ColPerm = NATURAL;

  /* Initialize the statistics variables. */
  StatInit(&stat);

  /* Solve linear system */
  dgssv(&options, &A, perm_c, perm_r, &L, &U, &B, &stat, &info);

  /* Solution is: [-1/32, 11/168, 3/224, 1/16, 11/336] */
  TEST_CHECK( gkyl_compare(-1.0/32.0, rhs[0], 1e-14) );
  TEST_CHECK( gkyl_compare( 11.0/168.0, rhs[1], 1e-14) );
  TEST_CHECK( gkyl_compare( 3.0/224.0, rhs[2], 1e-14) );
  TEST_CHECK( gkyl_compare( 1.0/16.0, rhs[3], 1e-14) );
  TEST_CHECK( gkyl_compare( 11.0/336.0, rhs[4], 1e-14) );

  /* De-allocate storage */
  SUPERLU_FREE (rhs);
  SUPERLU_FREE (perm_r);
  SUPERLU_FREE (perm_c);
  Destroy_CompCol_Matrix(&A);
  Destroy_SuperMatrix_Store(&B);
  Destroy_SuperNode_Matrix(&L);
  Destroy_CompCol_Matrix(&U);
  StatFree(&stat);
}

TEST_LIST = {
  { "slu_example", test_slu_example },
  { NULL, NULL }
};
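/*
 * Optional extension (a sketch only, not part of the original test): verify
 * the solve by forming the residual r = A*x - b directly from the compressed
 * sparse column arrays a/asub/xa built above, where x is the solution that
 * dgssv leaves in rhs and b is the all-ones right-hand side.  The helper name
 * is invented, the residual buffer is sized for this 5x5 case, and the call
 * would have to happen before Destroy_CompCol_Matrix(&A) releases a, asub
 * and xa.
 */
static void check_residual_sketch(int n, const double *a, const int *asub,
  const int *xa, const double *x)
{
  double r[5] = { -1.0, -1.0, -1.0, -1.0, -1.0 }; /* r = -b, with b = ones */

  /* Accumulate A*x column by column: entry k of column col sits in row asub[k]. */
  for (int col = 0; col < n; ++col)
    for (int k = xa[col]; k < xa[col+1]; ++k)
      r[asub[k]] += a[k] * x[col];

  for (int i = 0; i < n; ++i)
    TEST_CHECK( -1e-12 < r[i] && r[i] < 1e-12 );
}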